id
int64 0
458k
| file_name
stringlengths 4
119
| file_path
stringlengths 14
227
| content
stringlengths 24
9.96M
| size
int64 24
9.96M
| language
stringclasses 1
value | extension
stringclasses 14
values | total_lines
int64 1
219k
| avg_line_length
float64 2.52
4.63M
| max_line_length
int64 5
9.91M
| alphanum_fraction
float64 0
1
| repo_name
stringlengths 7
101
| repo_stars
int64 100
139k
| repo_forks
int64 0
26.4k
| repo_open_issues
int64 0
2.27k
| repo_license
stringclasses 12
values | repo_extraction_date
stringclasses 433
values |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
14,500
|
ansible_pytest_collections.py
|
ansible_ansible/test/lib/ansible_test/_util/target/pytest/plugins/ansible_pytest_collections.py
|
"""Enable unit testing of Ansible collections. PYTEST_DONT_REWRITE"""
from __future__ import annotations
import os
# set by ansible-test to a single directory, rather than a list of directories as supported by Ansible itself
ANSIBLE_COLLECTIONS_PATH = os.path.join(os.environ['ANSIBLE_COLLECTIONS_PATH'], 'ansible_collections')

# set by ansible-test to the minimum python version supported on the controller
# (parsed into an int tuple, e.g. "3.10" -> (3, 10), for easy comparison with sys.version_info)
ANSIBLE_CONTROLLER_MIN_PYTHON_VERSION = tuple(int(x) for x in os.environ['ANSIBLE_CONTROLLER_MIN_PYTHON_VERSION'].split('.'))

# this monkeypatch to _pytest.pathlib.resolve_package_path fixes PEP420 resolution for collections in pytest >= 6.0.0
# NB: this code should never run under py2
def collection_resolve_package_path(path):
    """Configure the Python package path so that pytest can find our collections."""
    # Walk up the ancestor chain until the collections root itself is reached.
    collections_root = next(
        (ancestor for ancestor in path.parents if str(ancestor) == ANSIBLE_COLLECTIONS_PATH),
        None,
    )

    if collections_root is not None:
        return collections_root

    raise Exception('File "%s" not found in collection path "%s".' % (path, ANSIBLE_COLLECTIONS_PATH))
# this monkeypatch to py.path.local.LocalPath.pypkgpath fixes PEP420 resolution for collections in pytest < 6.0.0
def collection_pypkgpath(self):
    """Configure the Python package path so that pytest can find our collections."""
    # Scan ancestors from the deepest upwards for the collections root.
    collections_root = next(
        (ancestor for ancestor in self.parts(reverse=True) if str(ancestor) == ANSIBLE_COLLECTIONS_PATH),
        None,
    )

    if collections_root is not None:
        return collections_root

    raise Exception('File "%s" not found in collection path "%s".' % (self.strpath, ANSIBLE_COLLECTIONS_PATH))
def enable_assertion_rewriting_hook():  # type: () -> None
    """
    Enable pytest's AssertionRewritingHook on Python 3.x.
    This is necessary because the Ansible collection loader intercepts imports before the pytest provided loader ever sees them.
    """
    import sys

    # locate the single AssertionRewritingHook instance pytest registered on sys.meta_path
    hook_name = '_pytest.assertion.rewrite.AssertionRewritingHook'
    hooks = [hook for hook in sys.meta_path if hook.__class__.__module__ + '.' + hook.__class__.__qualname__ == hook_name]

    if len(hooks) != 1:
        raise Exception('Found {} instance(s) of "{}" in sys.meta_path.'.format(len(hooks), hook_name))

    assertion_rewriting_hook = hooks[0]

    # This is based on `_AnsibleCollectionPkgLoaderBase.exec_module` from `ansible/utils/collection_loader/_collection_finder.py`.
    def exec_module(self, module):
        # short-circuit redirect; avoid reinitializing existing modules
        if self._redirect_module:  # pylint: disable=protected-access
            return

        # execute the module's code in its namespace
        code_obj = self.get_code(self._fullname)  # pylint: disable=protected-access

        if code_obj is not None:  # things like NS packages that can't have code on disk will return None
            # This logic is loosely based on `AssertionRewritingHook._should_rewrite` from pytest.
            # See: https://github.com/pytest-dev/pytest/blob/779a87aada33af444f14841a04344016a087669e/src/_pytest/assertion/rewrite.py#L209
            should_rewrite = self._package_to_load == 'conftest' or self._package_to_load.startswith('test_')  # pylint: disable=protected-access

            if should_rewrite:
                # delegate to pytest's hook so asserts in test modules get rewritten
                # noinspection PyUnresolvedReferences
                assertion_rewriting_hook.exec_module(module)
            else:
                exec(code_obj, module.__dict__)  # pylint: disable=exec-used

    # replace the collection loader's exec_module with the rewrite-aware version above
    # noinspection PyProtectedMember
    from ansible.utils.collection_loader._collection_finder import _AnsibleCollectionPkgLoaderBase

    _AnsibleCollectionPkgLoaderBase.exec_module = exec_module
def pytest_configure():
    """Configure this pytest plugin."""
    try:
        # the function attribute acts as a run-once guard; set on the first call below
        if pytest_configure.executed:
            return
    except AttributeError:
        pytest_configure.executed = True

    enable_assertion_rewriting_hook()

    # noinspection PyProtectedMember
    from ansible.utils.collection_loader._collection_finder import _AnsibleCollectionFinder

    # allow unit tests to import code from collections
    # noinspection PyProtectedMember
    _AnsibleCollectionFinder(paths=[os.path.dirname(ANSIBLE_COLLECTIONS_PATH)])._install()  # pylint: disable=protected-access

    try:
        # noinspection PyProtectedMember
        from _pytest import pathlib as _pytest_pathlib
    except ImportError:
        _pytest_pathlib = None

    if hasattr(_pytest_pathlib, 'resolve_package_path'):
        # pytest >= 6.0.0: patch the modern pathlib-based resolver
        _pytest_pathlib.resolve_package_path = collection_resolve_package_path
    else:
        # looks like pytest <= 6.0.0, use the old hack against py.path
        # noinspection PyProtectedMember
        import py._path.local

        # force collections unit tests to be loaded with the ansible_collections namespace
        # original idea from https://stackoverflow.com/questions/50174130/how-do-i-pytest-a-project-using-pep-420-namespace-packages/50175552#50175552
        # noinspection PyProtectedMember
        py._path.local.LocalPath.pypkgpath = collection_pypkgpath  # pylint: disable=protected-access


# install the plugin behavior immediately at import time
pytest_configure()
| 4,997
|
Python
|
.pyt
| 81
| 54.679012
| 150
| 0.724667
|
ansible/ansible
| 62,258
| 23,791
| 861
|
GPL-3.0
|
9/5/2024, 5:11:58 PM (Europe/Amsterdam)
|
14,501
|
uninstall-Alpine-3-python-3.yml
|
ansible_ansible/test/integration/targets/setup_paramiko/uninstall-Alpine-3-python-3.yml
|
# Remove the distribution-packaged Paramiko (py3-paramiko) via apk.
- name: Uninstall Paramiko for Python 3 on Alpine
  command: apk del py3-paramiko
| 82
|
Python
|
.pyt
| 2
| 39
| 49
| 0.7875
|
ansible/ansible
| 62,258
| 23,791
| 861
|
GPL-3.0
|
9/5/2024, 5:11:58 PM (Europe/Amsterdam)
|
14,502
|
uninstall-RedHat-8-python-3.yml
|
ansible_ansible/test/integration/targets/setup_paramiko/uninstall-RedHat-8-python-3.yml
|
# RHEL 8 ships no python3-paramiko RPM, so remove the pip-installed copy instead.
- name: Uninstall Paramiko for Python 3 on RHEL 8
  pip:  # no python3-paramiko package exists for RHEL 8
    name: paramiko
    state: absent
| 142
|
Python
|
.pyt
| 4
| 32
| 54
| 0.73913
|
ansible/ansible
| 62,258
| 23,791
| 861
|
GPL-3.0
|
9/5/2024, 5:11:58 PM (Europe/Amsterdam)
|
14,503
|
uninstall-zypper-python-3.yml
|
ansible_ansible/test/integration/targets/setup_paramiko/uninstall-zypper-python-3.yml
|
# Remove python3-paramiko and its now-unused dependencies via zypper (SUSE family).
- name: Uninstall Paramiko for Python 3 using zypper
  command: zypper --quiet --non-interactive remove --clean-deps python3-paramiko
| 134
|
Python
|
.pyt
| 2
| 65
| 80
| 0.787879
|
ansible/ansible
| 62,258
| 23,791
| 861
|
GPL-3.0
|
9/5/2024, 5:11:58 PM (Europe/Amsterdam)
|
14,504
|
uninstall-Darwin-python-3.yml
|
ansible_ansible/test/integration/targets/setup_paramiko/uninstall-Darwin-python-3.yml
|
# macOS has no system package for Paramiko; remove the pip-installed copy.
- name: Uninstall Paramiko for Python 3 on MacOS
  pip:
    name: paramiko
    state: absent
| 93
|
Python
|
.pyt
| 4
| 19.75
| 48
| 0.719101
|
ansible/ansible
| 62,258
| 23,791
| 861
|
GPL-3.0
|
9/5/2024, 5:11:58 PM (Europe/Amsterdam)
|
14,505
|
install-Alpine-3-python-3.yml
|
ansible_ansible/test/integration/targets/setup_paramiko/install-Alpine-3-python-3.yml
|
# Install the distribution-packaged Paramiko (py3-paramiko) via apk.
- name: Install Paramiko for Python 3 on Alpine
  command: apk add py3-paramiko
| 80
|
Python
|
.pyt
| 2
| 38
| 47
| 0.782051
|
ansible/ansible
| 62,258
| 23,791
| 861
|
GPL-3.0
|
9/5/2024, 5:11:58 PM (Europe/Amsterdam)
|
14,506
|
uninstall-RedHat-9-python-3.yml
|
ansible_ansible/test/integration/targets/setup_paramiko/uninstall-RedHat-9-python-3.yml
|
# RHEL 9 ships no python3-paramiko RPM; remove the pip-installed copy and
# undo the crypto-policy change made by the matching install tasks.
- name: Uninstall Paramiko for Python 3 on RHEL 9
  pip:  # no python3-paramiko package exists for RHEL 9
    name: paramiko
    state: absent
- name: Revert the crypto-policy back to DEFAULT
  command: update-crypto-policies --set DEFAULT
| 240
|
Python
|
.pyt
| 6
| 36.833333
| 54
| 0.759657
|
ansible/ansible
| 62,258
| 23,791
| 861
|
GPL-3.0
|
9/5/2024, 5:11:58 PM (Europe/Amsterdam)
|
14,507
|
install-python-3.yml
|
ansible_ansible/test/integration/targets/setup_paramiko/install-python-3.yml
|
# Generic fallback: install the OS package through the platform's package manager.
- name: Install Paramiko for Python 3
  package:
    name: python3-paramiko
| 76
|
Python
|
.pyt
| 3
| 22.333333
| 37
| 0.753425
|
ansible/ansible
| 62,258
| 23,791
| 861
|
GPL-3.0
|
9/5/2024, 5:11:58 PM (Europe/Amsterdam)
|
14,508
|
uninstall-FreeBSD-python-3.yml
|
ansible_ansible/test/integration/targets/setup_paramiko/uninstall-FreeBSD-python-3.yml
|
# FreeBSD installs Paramiko via pip (see the matching install tasks); remove it the same way.
- name: Uninstall Paramiko for Python 3 on FreeBSD
  pip:
    name: paramiko
    state: absent
| 95
|
Python
|
.pyt
| 4
| 20.25
| 50
| 0.725275
|
ansible/ansible
| 62,258
| 23,791
| 861
|
GPL-3.0
|
9/5/2024, 5:11:58 PM (Europe/Amsterdam)
|
14,509
|
install-FreeBSD-python-3.yml
|
ansible_ansible/test/integration/targets/setup_paramiko/install-FreeBSD-python-3.yml
|
# Install Paramiko from PyPI under the test constraints file.
- name: Setup remote constraints
  include_tasks: setup-remote-constraints.yml
- name: Install Paramiko for Python 3 on FreeBSD
  pip:  # no package in pkg, just use pip
    name: paramiko
    extra_args: "-c {{ remote_constraints }}"
  environment:
    SETUPTOOLS_USE_DISTUTILS: stdlib
| 286
|
Python
|
.pyt
| 8
| 32.5
| 48
| 0.741007
|
ansible/ansible
| 62,258
| 23,791
| 861
|
GPL-3.0
|
9/5/2024, 5:11:58 PM (Europe/Amsterdam)
|
14,510
|
install-RedHat-9-python-3.yml
|
ansible_ansible/test/integration/targets/setup_paramiko/install-RedHat-9-python-3.yml
|
# Install Paramiko from PyPI and relax the system crypto policy for the tests
# (reverted by the matching uninstall tasks).
- name: Setup remote constraints
  include_tasks: setup-remote-constraints.yml
- name: Install Paramiko for Python 3 on RHEL 9
  pip:  # no python3-paramiko package exists for RHEL 9
    name: paramiko
    extra_args: "-c {{ remote_constraints }}"
- name: Drop the crypto-policy to LEGACY for these tests
  command: update-crypto-policies --set LEGACY
| 352
|
Python
|
.pyt
| 8
| 41.125
| 56
| 0.755102
|
ansible/ansible
| 62,258
| 23,791
| 861
|
GPL-3.0
|
9/5/2024, 5:11:58 PM (Europe/Amsterdam)
|
14,511
|
uninstall-apt-python-3.yml
|
ansible_ansible/test/integration/targets/setup_paramiko/uninstall-apt-python-3.yml
|
# Remove the Debian/Ubuntu python3-paramiko package and autoremove its dependencies.
- name: Uninstall Paramiko for Python 3 using apt
  apt:
    name: python3-paramiko
    state: absent
    autoremove: yes
| 122
|
Python
|
.pyt
| 5
| 20.6
| 49
| 0.726496
|
ansible/ansible
| 62,258
| 23,791
| 861
|
GPL-3.0
|
9/5/2024, 5:11:58 PM (Europe/Amsterdam)
|
14,512
|
install-Darwin-python-3.yml
|
ansible_ansible/test/integration/targets/setup_paramiko/install-Darwin-python-3.yml
|
# Install Paramiko from PyPI under the test constraints file.
- name: Setup remote constraints
  include_tasks: setup-remote-constraints.yml
- name: Install Paramiko for Python 3 on MacOS
  pip:  # no homebrew package manager in core, just use pip
    name: paramiko
    extra_args: "-c {{ remote_constraints }}"
  environment:
    # Not sure why this fixes the test, but it does.
    SETUPTOOLS_USE_DISTUTILS: stdlib
| 355
|
Python
|
.pyt
| 9
| 36
| 58
| 0.736994
|
ansible/ansible
| 62,258
| 23,791
| 861
|
GPL-3.0
|
9/5/2024, 5:11:58 PM (Europe/Amsterdam)
|
14,513
|
install-RedHat-8-python-3.yml
|
ansible_ansible/test/integration/targets/setup_paramiko/install-RedHat-8-python-3.yml
|
# Install Paramiko from PyPI under the test constraints file.
- name: Setup remote constraints
  include_tasks: setup-remote-constraints.yml
- name: Install Paramiko for Python 3 on RHEL 8
  pip:  # no python3-paramiko package exists for RHEL 8
    name: paramiko
    extra_args: "-c {{ remote_constraints }}"
  environment:
    SETUPTOOLS_USE_DISTUTILS: stdlib
| 299
|
Python
|
.pyt
| 8
| 34.125
| 54
| 0.749141
|
ansible/ansible
| 62,258
| 23,791
| 861
|
GPL-3.0
|
9/5/2024, 5:11:58 PM (Europe/Amsterdam)
|
14,514
|
venv-pythons.py
|
ansible_ansible/test/integration/targets/ansible-test/venv-pythons.py
|
#!/usr/bin/env python
"""Return target Python options for use with ansible-test."""
from __future__ import annotations
import argparse
import os
import shutil
import subprocess
import sys
from ansible import release
def main():
    """Probe each supported Python for venv support and print ansible-test options.

    For every version in SUPPORTED_PYTHON_VERSIONS that is on PATH, report
    pass/fail on stderr, then print either the bare versions (--only-versions)
    or `--target-python venv/{version}` pairs on stdout.
    """
    arg_parser = argparse.ArgumentParser()
    arg_parser.add_argument('--only-versions', action='store_true')
    opts = arg_parser.parse_args()

    # locate ansible-test's internal constants relative to the installed ansible package
    root = os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(release.__file__))))
    sys.path.insert(0, os.path.join(root, 'test', 'lib'))

    from ansible_test._internal import constants

    output = []

    for version in constants.SUPPORTED_PYTHON_VERSIONS:
        interpreter = shutil.which(f'python{version}')

        if not interpreter:
            continue

        # Python 2 needs virtualenv; Python 3 ships venv in the stdlib.
        if version.startswith('2.'):
            probe = [interpreter, '-m', 'virtualenv', '--version']
        else:
            probe = [interpreter, '-m', 'venv', '--help']

        result = subprocess.run(probe, capture_output=True, check=False)

        print(f'{interpreter} - {"fail" if result.returncode else "pass"}', file=sys.stderr)

        if result.returncode:
            continue

        if opts.only_versions:
            output.append(version)
        else:
            output.extend(['--target-python', f'venv/{version}'])

    print(' '.join(output))


if __name__ == '__main__':
    main()
| 1,456
|
Python
|
.pyt
| 35
| 33.685714
| 103
| 0.632217
|
ansible/ansible
| 62,258
| 23,791
| 861
|
GPL-3.0
|
9/5/2024, 5:11:58 PM (Europe/Amsterdam)
|
14,515
|
dataclasses.py
|
ansible_ansible/lib/ansible/galaxy/dependency_resolution/dataclasses.py
|
# -*- coding: utf-8 -*-
# Copyright: (c) 2020-2021, Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
"""Dependency structs."""
# FIXME: add caching all over the place
from __future__ import annotations
import os
import typing as t
from collections import namedtuple
from collections.abc import MutableSequence, MutableMapping
from glob import iglob
from urllib.parse import urlparse
from yaml import safe_load
if t.TYPE_CHECKING:
from ansible.galaxy.collection.concrete_artifact_manager import (
ConcreteArtifactsManager,
)
Collection = t.TypeVar(
'Collection',
'Candidate', 'Requirement',
'_ComputedReqKindsMixin',
)
from ansible.errors import AnsibleError, AnsibleAssertionError
from ansible.galaxy.api import GalaxyAPI
from ansible.galaxy.collection import HAS_PACKAGING, PkgReq
from ansible.module_utils.common.text.converters import to_bytes, to_native, to_text
from ansible.module_utils.common.arg_spec import ArgumentSpecValidator
from ansible.utils.collection_loader import AnsibleCollectionRef
from ansible.utils.display import Display
_ALLOW_CONCRETE_POINTER_IN_SOURCE = False  # NOTE: This is a feature flag
_GALAXY_YAML = b'galaxy.yml'  # marker file of a source collection checkout
_MANIFEST_JSON = b'MANIFEST.json'  # marker file of an installed collection
_SOURCE_METADATA_FILE = b'GALAXY.yml'  # offline Galaxy source metadata file name

# module-wide helper used for emitting warnings to the user
display = Display()
def get_validated_source_info(b_source_info_path, namespace, name, version):
    """Load and schema-validate an offline Galaxy source metadata file.

    Returns the parsed metadata mapping, or None when the file is absent,
    unreadable, not a mapping, or fails schema validation (warnings are
    emitted for each failure mode).
    """
    source_info_path = to_text(b_source_info_path, errors='surrogate_or_strict')

    if not os.path.isfile(b_source_info_path):
        return None

    try:
        with open(b_source_info_path, mode='rb') as fd:
            metadata = safe_load(fd)
    except OSError as e:
        display.warning(
            f"Error getting collection source information at '{source_info_path}': {to_text(e, errors='surrogate_or_strict')}"
        )
        return None

    if not isinstance(metadata, MutableMapping):
        display.warning(f"Error getting collection source information at '{source_info_path}': expected a YAML dictionary")
        return None

    schema_errors = _validate_v1_source_info_schema(namespace, name, version, metadata)

    if not schema_errors:
        return metadata

    display.warning(f"Ignoring source metadata file at {source_info_path} due to the following errors:")
    display.warning("\n".join(schema_errors))
    display.warning("Correct the source metadata file by reinstalling the collection.")
    return None
def _validate_v1_source_info_schema(namespace, name, version, provided_arguments):
    """Validate a v1 offline source-info mapping; return the list of error messages."""
    # Schema for the GALAXY.yml sidecar file; name/namespace/version must match
    # the collection this metadata claims to describe.
    argument_spec_data = {
        'format_version': {'choices': ["1.0.0"]},
        'download_url': {},
        'version_url': {},
        'server': {},
        'signatures': {
            'type': list,
            'suboptions': {
                'signature': {},
                'pubkey_fingerprint': {},
                'signing_service': {},
                'pulp_created': {},
            },
        },
        'name': {'choices': [name]},
        'namespace': {'choices': [namespace]},
        'version': {'choices': [version]},
    }

    if not isinstance(provided_arguments, dict):
        raise AnsibleError(
            f'Invalid offline source info for {namespace}.{name}:{version}, expected a dict and got {type(provided_arguments)}'
        )

    return ArgumentSpecValidator(argument_spec_data).validate(provided_arguments).error_messages
def _is_collection_src_dir(dir_path):
    """Return True when the directory contains a galaxy.yml (source checkout)."""
    b_dir_path = to_bytes(dir_path, errors='surrogate_or_strict')
    b_marker = os.path.join(b_dir_path, _GALAXY_YAML)
    return os.path.isfile(b_marker)
def _is_installed_collection_dir(dir_path):
    """Return True when the directory contains a MANIFEST.json (installed collection)."""
    b_dir_path = to_bytes(dir_path, errors='surrogate_or_strict')
    b_marker = os.path.join(b_dir_path, _MANIFEST_JSON)
    return os.path.isfile(b_marker)
def _is_collection_dir(dir_path):
    """Return True when the directory is an installed or a source collection."""
    if _is_installed_collection_dir(dir_path):
        return True

    return _is_collection_src_dir(dir_path)
def _find_collections_in_subdirs(dir_path):
    """Yield child directories of *dir_path* that look like collections.

    A child counts as a collection when it holds either an installed-collection
    MANIFEST.json or a source-checkout galaxy.yml.
    """
    b_dir_path = to_bytes(dir_path, errors='surrogate_or_strict')

    # namespace is supposed to be top-level per spec, so only one level of
    # children (the collection name) is globbed here
    b_pattern = os.path.join(b_dir_path, b'*')

    for b_subdir in iglob(b_pattern):
        has_marker = any(
            os.path.isfile(os.path.join(b_subdir, b_marker))
            for b_marker in (_MANIFEST_JSON, _GALAXY_YAML)
        )

        if has_marker:
            yield b_subdir
def _is_collection_namespace_dir(tested_str):
    """Return True when at least one collection lives directly under the path."""
    for _unused in _find_collections_in_subdirs(tested_str):
        return True

    return False
def _is_file_path(tested_str):
    """Return True when the string points at an existing regular file."""
    b_path = to_bytes(tested_str, errors='surrogate_or_strict')
    return os.path.isfile(b_path)
def _is_http_url(tested_str):
return urlparse(tested_str).scheme.lower() in {'http', 'https'}
def _is_git_url(tested_str):
return tested_str.startswith(('git+', 'git@'))
def _is_concrete_artifact_pointer(tested_str):
    """Return True when the string resolves to a concrete artifact (URL, file or dir)."""
    # NOTE: Maintain the checks to be sorted from light to heavy:
    checks = (
        _is_git_url,
        _is_http_url,
        _is_file_path,
        _is_collection_dir,
        _is_collection_namespace_dir,
    )

    for predicate in checks:
        if predicate(tested_str):
            return True

    return False
class _ComputedReqKindsMixin:
    """Shared behavior for the Requirement and Candidate named tuples.

    Provides identity (hash/equality over UNIQUE_ATTRS), classification
    properties (is_dir, is_scm, ...), alternate constructors from paths and
    strings, and lazy access to offline Galaxy source metadata.
    """

    # Tuple fields that define identity for __hash__/__eq__.
    UNIQUE_ATTRS = ('fqcn', 'ver', 'src', 'type')

    def __init__(self, *args, **kwargs):
        # Eagerly load offline Galaxy source info for installed dir collections;
        # other kinds have no on-disk metadata sidecar.
        if not self.may_have_offline_galaxy_info:
            self._source_info = None
        else:
            info_path = self.construct_galaxy_info_path(to_bytes(self.src, errors='surrogate_or_strict'))

            self._source_info = get_validated_source_info(
                info_path,
                self.namespace,
                self.name,
                self.ver
            )

    def __hash__(self):
        return hash(tuple(getattr(self, attr) for attr in _ComputedReqKindsMixin.UNIQUE_ATTRS))

    def __eq__(self, candidate):
        # NOTE: equality is defined via hash equality over UNIQUE_ATTRS only.
        return hash(self) == hash(candidate)

    @classmethod
    def from_dir_path_as_unknown(  # type: ignore[misc]
            cls,  # type: t.Type[Collection]
            dir_path,  # type: bytes
            art_mgr,  # type: ConcreteArtifactsManager
    ):  # type: (...) -> Collection
        """Make collection from an unspecified dir type.

        This alternative constructor attempts to grab metadata from the
        given path if it's a directory. If there's no metadata, it
        falls back to guessing the FQCN based on the directory path and
        sets the version to "*".

        It raises a ValueError immediately if the input is not an
        existing directory path.
        """
        if not os.path.isdir(dir_path):
            raise ValueError(
                "The collection directory '{path!s}' doesn't exist".
                format(path=to_native(dir_path)),
            )

        try:
            return cls.from_dir_path(dir_path, art_mgr)
        except ValueError:
            return cls.from_dir_path_implicit(dir_path)

    @classmethod
    def from_dir_path(  # type: ignore[misc]
            cls,  # type: t.Type[Collection]
            dir_path,  # type: bytes
            art_mgr,  # type: ConcreteArtifactsManager
    ):  # type: (...) -> Collection
        """Make collection from an directory with metadata."""
        if dir_path.endswith(to_bytes(os.path.sep)):
            dir_path = dir_path.rstrip(to_bytes(os.path.sep))
        if not _is_collection_dir(dir_path):
            display.warning(
                u"Collection at '{path!s}' does not have a {manifest_json!s} "
                u'file, nor has it {galaxy_yml!s}: cannot detect version.'.
                format(
                    galaxy_yml=to_text(_GALAXY_YAML),
                    manifest_json=to_text(_MANIFEST_JSON),
                    path=to_text(dir_path, errors='surrogate_or_strict'),
                ),
            )
            raise ValueError(
                '`dir_path` argument must be an installed or a source'
                ' collection directory.',
            )

        # Build a throwaway 'dir' requirement so the artifacts manager can
        # extract the version and FQCN from the on-disk metadata.
        tmp_inst_req = cls(None, None, dir_path, 'dir', None)
        req_version = art_mgr.get_direct_collection_version(tmp_inst_req)
        try:
            req_name = art_mgr.get_direct_collection_fqcn(tmp_inst_req)
        except TypeError as err:
            # Looks like installed/source dir but isn't: doesn't have valid metadata.
            display.warning(
                u"Collection at '{path!s}' has a {manifest_json!s} "
                u"or {galaxy_yml!s} file but it contains invalid metadata.".
                format(
                    galaxy_yml=to_text(_GALAXY_YAML),
                    manifest_json=to_text(_MANIFEST_JSON),
                    path=to_text(dir_path, errors='surrogate_or_strict'),
                ),
            )
            raise ValueError(
                "Collection at '{path!s}' has invalid metadata".
                format(path=to_text(dir_path, errors='surrogate_or_strict'))
            ) from err

        return cls(req_name, req_version, dir_path, 'dir', None)

    @classmethod
    def from_dir_path_implicit(  # type: ignore[misc]
            cls,  # type: t.Type[Collection]
            dir_path,  # type: bytes
    ):  # type: (...) -> Collection
        """Construct a collection instance based on an arbitrary dir.

        This alternative constructor infers the FQCN based on the parent
        and current directory names. It also sets the version to "*"
        regardless of whether any of known metadata files are present.
        """
        # There is no metadata, but it isn't required for a functional collection. Determine the namespace.name from the path.
        if dir_path.endswith(to_bytes(os.path.sep)):
            dir_path = dir_path.rstrip(to_bytes(os.path.sep))
        u_dir_path = to_text(dir_path, errors='surrogate_or_strict')
        path_list = u_dir_path.split(os.path.sep)
        req_name = '.'.join(path_list[-2:])
        return cls(req_name, '*', dir_path, 'dir', None)  # type: ignore[call-arg]

    @classmethod
    def from_string(cls, collection_input, artifacts_manager, supplemental_signatures):
        """Parse a CLI/requirements string into a requirement dict and construct from it."""
        req = {}
        if _is_concrete_artifact_pointer(collection_input) or AnsibleCollectionRef.is_valid_collection_name(collection_input):
            # Arg is a file path or URL to a collection, or just a collection
            req['name'] = collection_input
        elif ':' in collection_input:
            # 'name:version' form; an empty version (trailing colon) is dropped
            req['name'], _sep, req['version'] = collection_input.partition(':')
            if not req['version']:
                del req['version']
        else:
            # Fall back to PEP 508-style parsing via the packaging library.
            if not HAS_PACKAGING:
                raise AnsibleError("Failed to import packaging, check that a supported version is installed")
            try:
                pkg_req = PkgReq(collection_input)
            except Exception as e:
                # packaging doesn't know what this is, let it fly, better errors happen in from_requirement_dict
                req['name'] = collection_input
            else:
                req['name'] = pkg_req.name
                if pkg_req.specifier:
                    req['version'] = to_text(pkg_req.specifier)
        req['signatures'] = supplemental_signatures

        return cls.from_requirement_dict(req, artifacts_manager)

    @classmethod
    def from_requirement_dict(cls, collection_req, art_mgr, validate_signature_options=True):
        """Construct an instance from a requirement dict (name/version/type/source/signatures).

        Auto-detects the requirement type when not given, validates the
        combination of keys, and fills in the FQCN/version from on-disk
        metadata for concrete artifacts.
        """
        req_name = collection_req.get('name', None)
        req_version = collection_req.get('version', '*')
        req_type = collection_req.get('type')
        # TODO: decide how to deprecate the old src API behavior
        req_source = collection_req.get('source', None)
        req_signature_sources = collection_req.get('signatures', None)
        if req_signature_sources is not None:
            if validate_signature_options and art_mgr.keyring is None:
                raise AnsibleError(
                    f"Signatures were provided to verify {req_name} but no keyring was configured."
                )

            if not isinstance(req_signature_sources, MutableSequence):
                req_signature_sources = [req_signature_sources]
            req_signature_sources = frozenset(req_signature_sources)

        # First pass: decide between a Galaxy name and a concrete artifact pointer.
        if req_type is None:
            if (  # FIXME: decide on the future behavior:
                    _ALLOW_CONCRETE_POINTER_IN_SOURCE
                    and req_source is not None
                    and _is_concrete_artifact_pointer(req_source)
            ):
                src_path = req_source
            elif (
                    req_name is not None
                    and AnsibleCollectionRef.is_valid_collection_name(req_name)
            ):
                req_type = 'galaxy'
            elif (
                    req_name is not None
                    and _is_concrete_artifact_pointer(req_name)
            ):
                src_path, req_name = req_name, None
            else:
                dir_tip_tmpl = (  # NOTE: leading LFs are for concat
                    '\n\nTip: Make sure you are pointing to the right '
                    'subdirectory — `{src!s}` looks like a directory '
                    'but it is neither a collection, nor a namespace '
                    'dir.'
                )

                if req_source is not None and os.path.isdir(req_source):
                    tip = dir_tip_tmpl.format(src=req_source)
                elif req_name is not None and os.path.isdir(req_name):
                    tip = dir_tip_tmpl.format(src=req_name)
                elif req_name:
                    tip = '\n\nCould not find {0}.'.format(req_name)
                else:
                    tip = ''

                raise AnsibleError(  # NOTE: I'd prefer a ValueError instead
                    'Neither the collection requirement entry key '
                    "'name', nor 'source' point to a concrete "
                    "resolvable collection artifact. Also 'name' is "
                    'not an FQCN. A valid collection name must be in '
                    'the format <namespace>.<collection>. Please make '
                    'sure that the namespace and the collection name '
                    'contain characters from [a-zA-Z0-9_] only.'
                    '{extra_tip!s}'.format(extra_tip=tip),
                )

        # Second pass: classify the concrete artifact pointer found above.
        if req_type is None:
            if _is_git_url(src_path):
                req_type = 'git'
                req_source = src_path
            elif _is_http_url(src_path):
                req_type = 'url'
                req_source = src_path
            elif _is_file_path(src_path):
                req_type = 'file'
                req_source = src_path
            elif _is_collection_dir(src_path):
                if _is_installed_collection_dir(src_path) and _is_collection_src_dir(src_path):
                    # Note that ``download`` requires a dir with a ``galaxy.yml`` and fails if it
                    # doesn't exist, but if a ``MANIFEST.json`` also exists, it would be used
                    # instead of the ``galaxy.yml``.
                    raise AnsibleError(
                        u"Collection requirement at '{path!s}' has both a {manifest_json!s} "
                        u"file and a {galaxy_yml!s}.\nThe requirement must either be an installed "
                        u"collection directory or a source collection directory, not both.".
                        format(
                            path=to_text(src_path, errors='surrogate_or_strict'),
                            manifest_json=to_text(_MANIFEST_JSON),
                            galaxy_yml=to_text(_GALAXY_YAML),
                        )
                    )
                req_type = 'dir'
                req_source = src_path
            elif _is_collection_namespace_dir(src_path):
                req_name = None  # No name for a virtual req or "namespace."?
                req_type = 'subdirs'
                req_source = src_path
            else:
                raise AnsibleError(  # NOTE: this is never supposed to be hit
                    'Failed to automatically detect the collection '
                    'requirement type.',
                )

        if req_type not in {'file', 'galaxy', 'git', 'url', 'dir', 'subdirs'}:
            raise AnsibleError(
                "The collection requirement entry key 'type' must be "
                'one of file, galaxy, git, dir, subdirs, or url.'
            )

        if req_name is None and req_type == 'galaxy':
            raise AnsibleError(
                'Collections requirement entry should contain '
                "the key 'name' if it's requested from a Galaxy-like "
                'index server.',
            )

        if req_type != 'galaxy' and req_source is None:
            # For non-Galaxy types the 'name' actually carried the source pointer.
            req_source, req_name = req_name, None

        if (
                req_type == 'galaxy' and
                isinstance(req_source, GalaxyAPI) and
                not _is_http_url(req_source.api_server)
        ):
            raise AnsibleError(
                "Collections requirement 'source' entry should contain "
                'a valid Galaxy API URL but it does not: {not_url!s} '
                'is not an HTTP URL.'.
                format(not_url=req_source.api_server),
            )

        if req_type == 'dir' and req_source.endswith(os.path.sep):
            req_source = req_source.rstrip(os.path.sep)

        tmp_inst_req = cls(req_name, req_version, req_source, req_type, req_signature_sources)

        # Fill in the FQCN/version from on-disk/remote metadata for concrete artifacts.
        if req_type not in {'galaxy', 'subdirs'} and req_name is None:
            req_name = art_mgr.get_direct_collection_fqcn(tmp_inst_req)  # TODO: fix the cache key in artifacts manager?

        if req_type not in {'galaxy', 'subdirs'} and req_version == '*':
            req_version = art_mgr.get_direct_collection_version(tmp_inst_req)

        return cls(
            req_name, req_version,
            req_source, req_type,
            req_signature_sources,
        )

    def __repr__(self):
        return (
            '<{self!s} of type {coll_type!r} from {src!s}>'.
            format(self=self, coll_type=self.type, src=self.src or 'Galaxy')
        )

    def __str__(self):
        return to_native(self.__unicode__())

    def __unicode__(self):
        if self.fqcn is None:
            # Virtual (nameless) requirements: describe by type instead of FQCN.
            return (
                f'{self.type} collection from a Git repo' if self.is_scm
                else f'{self.type} collection from a namespace'
            )

        return (
            u'{fqcn!s}:{ver!s}'.
            format(fqcn=to_text(self.fqcn), ver=to_text(self.ver))
        )

    @property
    def may_have_offline_galaxy_info(self):
        # True only for named, on-disk 'dir' collections.
        if self.fqcn is None:
            # Virtual collection
            return False
        elif not self.is_dir or self.src is None or not _is_collection_dir(self.src):
            # Not a dir or isn't on-disk
            return False
        return True

    def construct_galaxy_info_path(self, b_collection_path):
        """Return the expected path of the GALAXY.yml sidecar for this collection."""
        if not self.may_have_offline_galaxy_info and not self.type == 'galaxy':
            raise TypeError('Only installed collections from a Galaxy server have offline Galaxy info')

        # Store Galaxy metadata adjacent to the namespace of the collection
        # Chop off the last two parts of the path (/ns/coll) to get the dir containing the ns
        b_src = to_bytes(b_collection_path, errors='surrogate_or_strict')
        b_path_parts = b_src.split(to_bytes(os.path.sep))[0:-2]
        b_metadata_dir = to_bytes(os.path.sep).join(b_path_parts)

        # ns.coll-1.0.0.info
        b_dir_name = to_bytes(f"{self.namespace}.{self.name}-{self.ver}.info", errors="surrogate_or_strict")

        # collections/ansible_collections/ns.coll-1.0.0.info/GALAXY.yml
        return os.path.join(b_metadata_dir, b_dir_name, _SOURCE_METADATA_FILE)

    def _get_separate_ns_n_name(self):  # FIXME: use LRU cache
        return self.fqcn.split('.')

    @property
    def namespace(self):
        if self.is_virtual:
            raise TypeError(f'{self.type} collections do not have a namespace')

        return self._get_separate_ns_n_name()[0]

    @property
    def name(self):
        if self.is_virtual:
            raise TypeError(f'{self.type} collections do not have a name')

        return self._get_separate_ns_n_name()[-1]

    @property
    def canonical_package_id(self):
        if not self.is_virtual:
            return to_native(self.fqcn)

        return (
            '<virtual namespace from {src!s} of type {src_type!s}>'.
            format(src=to_native(self.src), src_type=to_native(self.type))
        )

    @property
    def is_virtual(self):
        # 'git' and 'subdirs' requirements have no single FQCN of their own.
        return self.is_scm or self.is_subdirs

    @property
    def is_file(self):
        return self.type == 'file'

    @property
    def is_dir(self):
        return self.type == 'dir'

    @property
    def namespace_collection_paths(self):
        return [
            to_native(path)
            for path in _find_collections_in_subdirs(self.src)
        ]

    @property
    def is_subdirs(self):
        return self.type == 'subdirs'

    @property
    def is_url(self):
        return self.type == 'url'

    @property
    def is_scm(self):
        return self.type == 'git'

    @property
    def is_concrete_artifact(self):
        return self.type in {'git', 'url', 'file', 'dir', 'subdirs'}

    @property
    def is_online_index_pointer(self):
        return not self.is_concrete_artifact

    @property
    def is_pinned(self):
        """Indicate if the version set is considered pinned.

        This essentially computes whether the version field of the current
        requirement explicitly requests a specific version and not an allowed
        version range.

        It is then used to help the resolvelib-based dependency resolver judge
        whether it's acceptable to consider a pre-release candidate version
        despite pre-release installs not being requested by the end-user
        explicitly.

        See https://github.com/ansible/ansible/pull/81606 for extra context.
        """
        version_string = self.ver[0]
        return version_string.isdigit() or not (
            version_string == '*' or
            version_string.startswith(('<', '>', '!='))
        )

    @property
    def source_info(self):
        # Parsed offline Galaxy metadata loaded in __init__, or None.
        return self._source_info
# Underlying tuple layouts for Requirement/Candidate; only the last field
# differs (requested signature sources vs. resolved signatures).
RequirementNamedTuple = namedtuple('Requirement', ('fqcn', 'ver', 'src', 'type', 'signature_sources'))  # type: ignore[name-match]
CandidateNamedTuple = namedtuple('Candidate', ('fqcn', 'ver', 'src', 'type', 'signatures'))  # type: ignore[name-match]
class Requirement(
        _ComputedReqKindsMixin,
        RequirementNamedTuple,
):
    """An abstract requirement request."""

    def __new__(cls, *args, **kwargs):
        # Build the immutable tuple first; the mixin __init__ then derives
        # cached source info from the stored fields.
        self = RequirementNamedTuple.__new__(cls, *args, **kwargs)
        return self

    def __init__(self, *args, **kwargs):
        super(Requirement, self).__init__()
class Candidate(
        _ComputedReqKindsMixin,
        CandidateNamedTuple,
):
    """A concrete collection candidate with its version resolved."""

    def __new__(cls, *args, **kwargs):
        # Build the immutable tuple first; the mixin __init__ then derives
        # cached source info from the stored fields.
        self = CandidateNamedTuple.__new__(cls, *args, **kwargs)
        return self

    def __init__(self, *args, **kwargs):
        super(Candidate, self).__init__()

    def with_signatures_repopulated(self):  # type: (Candidate) -> Candidate
        """Populate a new Candidate instance with Galaxy signatures.

        :raises AnsibleAssertionError: If the supplied candidate is not sourced from a Galaxy-like index.
        """
        if self.type != 'galaxy':
            raise AnsibleAssertionError(f"Invalid collection type for {self!r}: unable to get signatures from a galaxy server.")

        # self.src is the Galaxy API object for 'galaxy'-type candidates.
        signatures = self.src.get_collection_signatures(self.namespace, self.name, self.ver)

        return self.__class__(self.fqcn, self.ver, self.src, self.type, frozenset([*self.signatures, *signatures]))
| 23,958
|
Python
|
.tac
| 517
| 35.491296
| 130
| 0.599571
|
ansible/ansible
| 62,258
| 23,791
| 861
|
GPL-3.0
|
9/5/2024, 5:11:58 PM (Europe/Amsterdam)
|
14,516
|
setup.py
|
coursera-dl_coursera-dl/setup.py
|
# -*- coding: utf-8 -*-
#
# you can install this to a local test virtualenv like so:
# virtualenv venv
# ./venv/bin/pip install --editable .
# ./venv/bin/pip install --editable .[dev] # with dev requirements, too
from __future__ import print_function
import os.path
import subprocess
import sys
# For compatibility with Python2.7
from io import open
from setuptools import setup
from coursera import __version__
def generate_readme_rst():
    """
    Generate README.rst from README.md via pandoc.

    If README.rst already exists, nothing is done.  If "pandoc" cannot be
    executed, a message describing the error is printed to stderr and only
    the Markdown source is left in place (the original docstring claimed
    the program exits — it never did).
    """
    # Nothing to do when the reST version is already present; check this
    # before building the command.
    if os.path.exists('README.rst'):
        return

    pandoc_cmd = [
        'pandoc',
        '--from=markdown',
        '--to=rst',
        '--output=README.rst',
        'README.md'
    ]

    try:
        subprocess.call(pandoc_cmd)
    except (IOError, OSError) as e:
        print('Could not run "pandoc". Error: %s' % e, file=sys.stderr)
        print('Generating only a stub instead of the real documentation.')
def read_file(filename, alt=None):
    """
    Return the text contents of *filename*.

    When the file cannot be opened, return *alt* instead — or an empty
    list when no alternative was supplied (mirrors historical behaviour).
    """
    try:
        with open(filename, encoding='utf-8') as handle:
            return handle.read()
    except IOError:
        return [] if alt is None else alt
# Create README.rst at import time so source distributions ship reST docs
# alongside the Markdown original.
generate_readme_rst()

# The long description itself uses the Markdown source (PyPI renders it via
# long_description_content_type below).
long_description = read_file(
    'README.md',
    'Cannot read README.md'
)

# Requirements come straight from the pip requirements files; a missing
# file yields an empty list, which setuptools accepts.
requirements = read_file('requirements.txt')
dev_requirements = read_file('requirements-dev.txt')

# PyPI classifiers describing supported interpreters and audience.
trove_classifiers = [
    'Development Status :: 4 - Beta',
    'Environment :: Console',
    'Intended Audience :: End Users/Desktop',
    'License :: OSI Approved :: GNU Lesser General Public License v3 or later (LGPLv3+)',
    'Operating System :: OS Independent',
    'Programming Language :: Python :: 2',
    'Programming Language :: Python :: 2.7',
    'Programming Language :: Python :: 3',
    'Programming Language :: Python :: 3.4',
    'Programming Language :: Python :: 3.5',
    'Programming Language :: Python :: 3.6',
    'Programming Language :: Python :: Implementation :: CPython',
    'Programming Language :: Python :: Implementation :: PyPy',
    'Programming Language :: Python',
    'Topic :: Education',
]
# Standard setuptools metadata; install/extras requirements are taken from
# the requirements*.txt files read above.
setup(
    name='coursera-dl',
    version=__version__,
    maintainer='Rogério Theodoro de Brito',
    maintainer_email='rbrito@gmail.com',
    license='LGPL',
    url='https://github.com/coursera-dl/coursera-dl',
    install_requires=requirements,
    extras_require=dict(
        dev=dev_requirements
    ),
    description='Script for downloading Coursera.org videos and naming them.',
    long_description=long_description,
    long_description_content_type='text/markdown',
    keywords=['coursera-dl', 'coursera',
              'download', 'education', 'MOOCs', 'video'],
    classifiers=trove_classifiers,
    packages=["coursera"],
    entry_points=dict(
        console_scripts=[
            'coursera-dl=coursera.coursera_dl:main'
        ]
    ),
    platforms=['any'],
)
| 3,076
|
Python
|
.py
| 94
| 27.680851
| 89
| 0.658446
|
coursera-dl/coursera-dl
| 9,343
| 2,207
| 211
|
LGPL-3.0
|
9/5/2024, 5:11:58 PM (Europe/Amsterdam)
|
14,517
|
fabfile.py
|
coursera-dl_coursera-dl/fabfile.py
|
#
# Fabric configuration - http://www.fabfile.org/
#
from __future__ import print_function
import errno
import os
from fabric.api import (env, local, task)
# Shell command that converts the Markdown README into reST via pandoc.
MD2RST='pandoc --from=markdown --to=rst --output=README.rst README.md'

# Ensure README.rst exists before setup.py metadata queries below run.
if not os.path.exists('README.rst'):
    local(MD2RST)

# Project name/version are taken straight from setup.py metadata.
env.projname = local("python setup.py --name", capture=True)
env.version = local("python setup.py --version", capture=True)
def mkdirs(path):
    """Create *path* (and parents), tolerating an already-existing directory."""
    try:
        os.makedirs(path)
    except OSError as exc:
        # Only swallow "already exists" when the path really is a directory;
        # re-raise every other failure.
        if exc.errno != errno.EEXIST or not os.path.isdir(path):
            raise
@task
def create_rst_doc():
    """Regenerate README.rst from README.md with pandoc."""
    local(MD2RST)
@task
def clean():
    """Remove build artefacts, caches, tox state and the generated README.rst."""
    create_rst_doc()
    local("python setup.py clean")
    local("rm -rf .tox coursera.egg-info htmlcov build dist README.rst")
    local("rm -rf coursera/__pycache__/ coursera/test/__pycache__/")
    local("find . -name '*.pyc' -delete")
@task
def build():
    """Build a source distribution and GPG-sign the tarball."""
    create_rst_doc()
    local("python setup.py sdist")
    local("gpg --detach-sign -a dist/coursera-%s.tar.gz" % env.version)
@task
def rebuild():
    """Clean everything, then build from scratch."""
    clean()
    build()
@task
def coverage():
    """Run the test suite with coverage (HTML + terminal reports)."""
    local("py.test coursera/test -v --cov coursera --cov-report html \
            --cov-report term-missing")
@task
def pylint():
    """Lint the package and the tests with pylint."""
    local("pylint %s tests" % env.projname)
@task
def tox():
    """Run the full tox matrix."""
    local('tox')
@task
def release_check():
    """Check if there is a Git tag already in place"""
    tags = local("git tag", capture=True)
    tags = set(tags.splitlines())
    # Refuse to re-release a version whose tag already exists.
    if env.version in tags:
        raise Exception("Already released v. %r" % env.version)
@task
def release():
    """Release a new version"""
    # Abort early if this version was already tagged.
    release_check()
    build()
    print("Releasing %s version %s." % (env.projname, env.version))
    # Tag locally, sign the artefacts, upload to PyPI, then push code+tags.
    local("git tag %s" % env.version)
    local('gpg --detach-sign --armor dist/coursera-*.tar.gz*')
    local('twine upload dist/coursera-*.tar.gz*')
    local("git push")
    local("git push --tags")
| 1,954
|
Python
|
.py
| 67
| 25
| 72
| 0.657711
|
coursera-dl/coursera-dl
| 9,343
| 2,207
| 211
|
LGPL-3.0
|
9/5/2024, 5:11:58 PM (Europe/Amsterdam)
|
14,518
|
cookies.py
|
coursera-dl_coursera-dl/coursera/cookies.py
|
# -*- coding: utf-8 -*-
"""
Cookie handling module.
"""
import logging
import os
import ssl
import requests
from requests.adapters import HTTPAdapter
try: # Workaround for broken Debian/Ubuntu packages? (See issue #331)
from requests.packages.urllib3.poolmanager import PoolManager
except ImportError:
from urllib3.poolmanager import PoolManager
from six.moves import StringIO
from six.moves import http_cookiejar as cookielib
from .define import CLASS_URL, AUTH_REDIRECT_URL, PATH_COOKIES, AUTH_URL_V3
from .utils import mkdir_p, random_string
# Monkey patch cookielib.Cookie.__init__.
# Reason: The expires value may be a decimal string,
# but the Cookie class uses int() ...
__original_init__ = cookielib.Cookie.__init__
def __fixed_init__(self, version, name, value,
port, port_specified,
domain, domain_specified, domain_initial_dot,
path, path_specified,
secure,
expires,
discard,
comment,
comment_url,
rest,
rfc2109=False):
if expires is not None:
expires = float(expires)
__original_init__(self, version, name, value,
port, port_specified,
domain, domain_specified, domain_initial_dot,
path, path_specified,
secure,
expires,
discard,
comment,
comment_url,
rest,
rfc2109=False)
cookielib.Cookie.__init__ = __fixed_init__
class ClassNotFound(Exception):
    """
    Raised if a course is not found in Coursera's site.

    Derives from Exception rather than BaseException so that generic
    ``except Exception`` handlers and tooling treat it as an ordinary
    application error (PEP 8: user exceptions should subclass Exception).
    """
class AuthenticationFailed(Exception):
    """
    Raised if we cannot authenticate on Coursera's site.

    Derives from Exception rather than BaseException so that generic
    ``except Exception`` handlers and tooling treat it as an ordinary
    application error (PEP 8: user exceptions should subclass Exception).
    """
def prepare_auth_headers(session, include_cauth=False):
    """
    Build the CSRF/CAUTH headers used by authenticated POST requests
    (login, get_quiz, ...).

    @param session: Requests session.
    @type session: requests.Session
    @param include_cauth: Whether to forward the session's CAUTH cookie too.
    @type include_cauth: bool
    @return: Dictionary of headers.
    @rtype: dict
    """
    # The tokens are plain random strings; the server only verifies that
    # the header values agree with the forged cookies below.
    csrftoken = random_string(20)
    csrf2cookie = 'csrf2_token_%s' % random_string(8)
    csrf2token = random_string(24)

    cookie_parts = ['csrftoken=%s' % csrftoken,
                    '%s=%s' % (csrf2cookie, csrf2token)]
    if include_cauth:
        cookie_parts.insert(0, 'CAUTH=%s' % session.cookies.get('CAUTH'))
    cookie = '; '.join(cookie_parts)

    logging.debug('Forging cookie header: %s.', cookie)
    return {
        'Cookie': cookie,
        'X-CSRFToken': csrftoken,
        'X-CSRF2-Cookie': csrf2cookie,
        'X-CSRF2-Token': csrf2token
    }
def login(session, username, password, class_name=None):
    """
    Login on coursera.org with the given credentials.

    This adds the following cookies to the session:
    sessionid, maestro_login, maestro_login_flag

    @param session: Requests session to authenticate.
    @param username: Coursera account e-mail.
    @param password: Coursera account password.
    @param class_name: Optional class slug; when given, its existence is
        verified first and ClassNotFound is raised if it is missing.
    @raise AuthenticationFailed: When the auth endpoint rejects the login.
    """
    logging.debug('Initiating login.')
    try:
        # Start from a clean slate so stale cookies cannot interfere.
        session.cookies.clear('.coursera.org')
        logging.debug('Cleared .coursera.org cookies.')
    except KeyError:
        logging.debug('There were no .coursera.org cookies to be cleared.')
    # Hit class url
    if class_name is not None:
        class_url = CLASS_URL.format(class_name=class_name)
        r = requests.get(class_url, allow_redirects=False)
        try:
            r.raise_for_status()
        except requests.exceptions.HTTPError as e:
            logging.error(e)
            raise ClassNotFound(class_name)
    headers = prepare_auth_headers(session, include_cauth=False)
    data = {
        'email': username,
        'password': password,
        'webrequest': 'true'
    }
    # Auth API V3
    r = session.post(AUTH_URL_V3, data=data,
                     headers=headers, allow_redirects=False)
    try:
        r.raise_for_status()
        # Some how the order of cookies parameters are important
        # for coursera!!!
        v = session.cookies.pop('CAUTH')
        session.cookies.set('CAUTH', v)
    except requests.exceptions.HTTPError as e:
        raise AuthenticationFailed('Cannot login on coursera.org: %s' % e)
    logging.info('Logged in on coursera.org.')
def down_the_wabbit_hole(session, class_name):
    """
    Authenticate on class.coursera.org.

    Follows the auth redirector URL for *class_name* so the per-class
    cookies get set on the session.

    @raise AuthenticationFailed: When the redirector responds with an error.
    """
    auth_redirector_url = AUTH_REDIRECT_URL.format(class_name=class_name)
    r = session.get(auth_redirector_url)
    logging.debug('Following %s to authenticate on class.coursera.org.',
                  auth_redirector_url)
    try:
        r.raise_for_status()
    except requests.exceptions.HTTPError as e:
        raise AuthenticationFailed(
            'Cannot login on class.coursera.org: %s' % e)
    logging.debug('Exiting "deep" authentication.')
def get_authentication_cookies(session, class_name, username, password):
    """
    Get the necessary cookies to authenticate on class.coursera.org.

    To access the class pages we need two cookies on class.coursera.org:
    csrf_token, session

    @raise AuthenticationFailed: When login succeeds but the expected
        cookies still do not show up.
    """
    # First, check if we already have the .coursera.org cookies.
    if session.cookies.get('CAUTH', domain=".coursera.org"):
        logging.debug('Already logged in on accounts.coursera.org.')
    else:
        login(session, username, password, class_name=class_name)
    try:
        # Drop any stale per-class cookies before re-authenticating.
        session.cookies.clear('class.coursera.org', '/' + class_name)
    except KeyError:
        pass
    down_the_wabbit_hole(session, class_name)
    enough = do_we_have_enough_cookies(session.cookies, class_name)
    if not enough:
        raise AuthenticationFailed('Did not find necessary cookies.')
    logging.info('Found authentication cookies.')
def do_we_have_enough_cookies(cj, class_name):
    """
    Tell whether *cj* already holds the csrf_token cookie scoped to
    class.coursera.org/<class_name>, i.e. whether authentication can be
    skipped.
    """
    token = cj.get('csrf_token',
                   domain='class.coursera.org',
                   path='/' + class_name)
    return token is not None
def validate_cookies(session, class_name):
    """
    Checks whether we have all the required cookies
    to authenticate on class.coursera.org. Also check for and remove
    stale session.

    @return: True when the cached cookies are accepted by the server,
        False otherwise (stale cookies are cleared as a side effect).
    @rtype: bool
    """
    if not do_we_have_enough_cookies(session.cookies, class_name):
        return False
    # Probe a class page without following redirects: a 200 means the
    # session is still valid; anything else means it went stale.
    url = CLASS_URL.format(class_name=class_name) + '/class'
    r = session.head(url, allow_redirects=False)
    if r.status_code == 200:
        return True
    else:
        logging.debug('Stale session.')
        try:
            session.cookies.clear('.coursera.org')
        except KeyError:
            pass
        return False
def make_cookie_values(cj, class_name):
    """
    Serialise the class.coursera.org cookies of *cj* into a single
    "name=value; name=value" string suitable for a Cookie header.
    """
    wanted_path = '/' + class_name
    pairs = []
    for cookie in cj:
        if cookie.domain == "class.coursera.org" and cookie.path == wanted_path:
            pairs.append(cookie.name + '=' + cookie.value)
    return '; '.join(pairs)
def find_cookies_for_class(cookies_file, class_name):
    """
    Return a RequestsCookieJar containing the cookies for
    .coursera.org and class.coursera.org found in the given cookies_file.
    """
    path = "/" + class_name

    # Keep site-wide cookies plus the ones scoped to this class only.
    def cookies_filter(c):
        return c.domain == ".coursera.org" \
            or (c.domain == "class.coursera.org" and c.path == path)

    cj = get_cookie_jar(cookies_file)

    new_cj = requests.cookies.RequestsCookieJar()
    for c in filter(cookies_filter, cj):
        new_cj.set_cookie(c)

    return new_cj
def load_cookies_file(cookies_file):
    """
    Load a Netscape-format cookies file into an in-memory text buffer.

    The buffer is prefixed with the special Netscape header line because
    the cookie loader is very particular about that string.

    @param cookies_file: Path of the cookies file on disk.
    @type cookies_file: str
    @return: Buffer positioned at the start of the data.
    @rtype: StringIO
    """
    logging.debug('Loading cookie file %s into memory.', cookies_file)
    cookies = StringIO()
    # Bug fixes relative to the old implementation: terminate the header
    # with a newline so the first line of the user's file is not glued onto
    # it (and silently swallowed by the parser), open with mode 'r' (the
    # 'rU' mode was removed in Python 3.11), and close the file handle
    # deterministically via a context manager.
    cookies.write('# Netscape HTTP Cookie File\n')
    with open(cookies_file) as handle:
        cookies.write(handle.read())
    cookies.flush()
    cookies.seek(0)
    return cookies
def get_cookie_jar(cookies_file):
    """
    Parse *cookies_file* (Netscape format) into a MozillaCookieJar.
    """
    cj = cookielib.MozillaCookieJar()
    cookies = load_cookies_file(cookies_file)

    # nasty hack: cj.load() requires a filename not a file, but if I use
    # stringio, that file doesn't exist. I used NamedTemporaryFile before,
    # but encountered problems on Windows.
    # NOTE(review): _really_load is a private cookiejar API — its signature
    # may change between Python versions.
    cj._really_load(cookies, 'StringIO.cookies', False, False)

    return cj
def get_cookies_cache_path(username):
    """Return the path of the cached cookies file for *username*."""
    return os.path.join(PATH_COOKIES, '%s.txt' % username)
def get_cookies_from_cache(username):
    """
    Returns a RequestsCookieJar containing the cached cookies for the given
    user.

    A missing or unreadable cache simply yields an empty jar.
    """
    logging.debug('Trying to get cookies from the cache.')
    path = get_cookies_cache_path(username)
    cj = requests.cookies.RequestsCookieJar()
    try:
        cached_cj = get_cookie_jar(path)
        for cookie in cached_cj:
            cj.set_cookie(cookie)
        logging.debug(
            'Loaded cookies from %s', get_cookies_cache_path(username))
    except IOError:
        logging.debug('Could not load cookies from the cache.')
    return cj
def write_cookies_to_cache(cj, username):
    """
    Save RequestsCookieJar to disk in Mozilla's cookies.txt file format.

    This prevents us from repeated authentications on the
    accounts.coursera.org and class.coursera.org/class_name sites.
    """
    # 0o700: cookies grant account access, keep them private to the user.
    mkdir_p(PATH_COOKIES, 0o700)
    path = get_cookies_cache_path(username)
    cached_cj = cookielib.MozillaCookieJar()
    for cookie in cj:
        cached_cj.set_cookie(cookie)
    cached_cj.save(path)
def get_cookies_for_class(session, class_name,
                          cookies_file=None,
                          username=None,
                          password=None):
    """
    Get the cookies for the given class and install them on *session*.

    We do not validate the cookies if they are loaded from a cookies file
    because this is intended for debugging purposes or if the coursera
    authentication process has changed.
    """
    if cookies_file:
        cookies = find_cookies_for_class(cookies_file, class_name)
        session.cookies.update(cookies)
        logging.info('Loaded cookies from %s', cookies_file)
    else:
        # Try the on-disk cache first; fall back to a fresh login and
        # refresh the cache afterwards.
        cookies = get_cookies_from_cache(username)
        session.cookies.update(cookies)
        if validate_cookies(session, class_name):
            logging.info('Already authenticated.')
        else:
            get_authentication_cookies(session, class_name, username, password)
            write_cookies_to_cache(session.cookies, username)
class TLSAdapter(HTTPAdapter):
    """
    A customized HTTP Adapter which uses TLS v1.2 for encrypted
    connections.
    """

    def init_poolmanager(self, connections, maxsize, block=False):
        # Pin the pool to TLS 1.2.
        # NOTE(review): this override drops any extra kwargs newer requests
        # versions pass (e.g. ssl_context/pool_kwargs) — confirm against
        # the installed requests version.
        self.poolmanager = PoolManager(num_pools=connections,
                                       maxsize=maxsize,
                                       block=block,
                                       ssl_version=ssl.PROTOCOL_TLSv1_2)
| 11,228
|
Python
|
.py
| 292
| 30.684932
| 79
| 0.64899
|
coursera-dl/coursera-dl
| 9,343
| 2,207
| 211
|
LGPL-3.0
|
9/5/2024, 5:11:58 PM (Europe/Amsterdam)
|
14,519
|
workflow.py
|
coursera-dl_coursera-dl/coursera/workflow.py
|
import os
import re
import abc
import time
import codecs
import logging
import subprocess
import requests
from .formatting import format_section, get_lecture_filename
from .playlist import create_m3u_playlist
from .utils import is_course_complete, mkdir_p, normalize_path
from .filtering import find_resources_to_get, skip_format_url
from .define import IN_MEMORY_MARKER
def _iter_modules(modules, class_name, path, ignored_formats, args):
    """
    This huge function generates a hierarchy with hopefully more
    clear structure of modules/sections/lectures.

    @param modules: Parsed syllabus: iterable of (module_name, sections)
        pairs, where sections are (section_name, lectures) pairs and
        lectures are (lecture_name, lecture) pairs.
    @param class_name: Class slug used when building directory names.
    @param path: Destination directory prefix.
    @param ignored_formats: Resource formats excluded from download.
    @param args: Parsed command-line namespace (filters, naming options).
    @return: Iterator of IterModule wrappers.
    """
    # Capture the filter/naming options once; the nested classes below
    # close over these locals.
    file_formats = args.file_formats
    lecture_filter = args.lecture_filter
    resource_filter = args.resource_filter
    section_filter = args.section_filter
    verbose_dirs = args.verbose_dirs
    combined_section_lectures_nums = args.combined_section_lectures_nums

    class IterModule(object):
        # Wraps one (name, sections) module tuple; display index is 1-based.
        def __init__(self, index, module):
            self.index = index
            self.name = '%02d_%s' % (index + 1, module[0])
            self._module = module

        @property
        def sections(self):
            sections = self._module[1]
            for (secnum, (section, lectures)) in enumerate(sections):
                # Honour --section-filter (regex on the section name).
                if section_filter and not re.search(section_filter, section):
                    logging.debug('Skipping b/c of sf: %s %s',
                                  section_filter, section)
                    continue
                yield IterSection(self, secnum, section, lectures)

    class IterSection(object):
        # Wraps one (section, lectures) pair and computes its target dir.
        def __init__(self, module_iter, secnum, section, lectures):
            self.index = secnum
            self.name = '%02d_%s' % (secnum, section)
            self.dir = os.path.join(
                path, class_name, module_iter.name,
                format_section(secnum + 1, section,
                               class_name, verbose_dirs))
            self._lectures = lectures

        @property
        def lectures(self):
            for (lecnum, (lecname, lecture)) in enumerate(self._lectures):
                # Honour --lecture-filter (regex on the lecture name).
                if lecture_filter and not re.search(lecture_filter, lecname):
                    logging.debug('Skipping b/c of lf: %s %s',
                                  lecture_filter, lecname)
                    continue
                yield IterLecture(self, lecnum, lecname, lecture)

    class IterLecture(object):
        # Wraps a single lecture; resolves filenames and its resources.
        def __init__(self, section_iter, lecnum, lecname, lecture):
            self.index = lecnum
            self.name = lecname
            self._lecture = lecture
            self._section_iter = section_iter

        def filename(self, fmt, title):
            lecture_filename = get_lecture_filename(
                combined_section_lectures_nums,
                self._section_iter.dir, self._section_iter.index,
                self.index, self.name, title, fmt)
            return lecture_filename

        @property
        def resources(self):
            resources_to_get = find_resources_to_get(
                self._lecture, file_formats, resource_filter,
                ignored_formats)
            for fmt, url, title in resources_to_get:
                yield IterResource(fmt, url, title)

    class IterResource(object):
        # Plain record: (format, url, title) of one downloadable resource.
        def __init__(self, fmt, url, title):
            self.fmt = fmt
            self.url = url
            self.title = title

    for index, module in enumerate(modules):
        yield IterModule(index, module)
def _walk_modules(modules, class_name, path, ignored_formats, args):
    """
    Helper generator that traverses modules and returns a flattened
    iterator of (module, section, lecture, resource) tuples.
    """
    for module in _iter_modules(modules=modules,
                                class_name=class_name,
                                path=path,
                                ignored_formats=ignored_formats,
                                args=args):
        for section in module.sections:
            for lecture in section.lectures:
                for resource in lecture.resources:
                    yield module, section, lecture, resource
class CourseDownloader(object):
    """Abstract interface for course downloaders."""
    # NOTE(review): __metaclass__ is Python 2 syntax; on Python 3 this
    # attribute is ignored, so the abstractmethod below is not enforced —
    # confirm whether py2 support is still required.
    __metaclass__ = abc.ABCMeta

    def __init__(self):
        pass

    @abc.abstractmethod
    def download_modules(self, modules):
        """Download every resource of the given modules."""
        pass
class CourseraDownloader(CourseDownloader):
    """
    Walks the parsed syllabus and downloads every selected resource,
    delegating the actual transfers to the injected downloader backend.
    """

    def __init__(self,
                 downloader,
                 commandline_args,
                 class_name,
                 path='',
                 ignored_formats=None,
                 disable_url_skipping=False):
        """
        @param downloader: Backend with download(callback, url, file, resume=)
            and join() methods.
        @param commandline_args: Parsed command-line namespace.
        @param class_name: Class slug; used for directory layout.
        @param path: Destination directory prefix.
        @param ignored_formats: Resource formats to skip, or None.
        @param disable_url_skipping: When True, no URLs are skipped and
            ``skipped_urls`` stays None.
        """
        super(CourseraDownloader, self).__init__()

        self._downloader = downloader
        self._args = commandline_args
        self._class_name = class_name
        self._path = path
        self._ignored_formats = ignored_formats
        self._disable_url_skipping = disable_url_skipping

        # skipped_urls is None when skipping is disabled, a list otherwise.
        self.skipped_urls = None if disable_url_skipping else []
        self.failed_urls = []

    def download_modules(self, modules):
        """
        Download all resources of *modules*.

        @return: True when the course looks complete (no file updated
            recently), False otherwise.
        @rtype: bool
        """
        completed = True
        modules = _iter_modules(
            modules, self._class_name, self._path,
            self._ignored_formats, self._args)

        for module in modules:
            last_update = -1
            for section in module.sections:
                if not os.path.exists(section.dir):
                    mkdir_p(normalize_path(section.dir))

                for lecture in section.lectures:
                    for resource in lecture.resources:
                        lecture_filename = normalize_path(
                            lecture.filename(resource.fmt, resource.title))
                        last_update = self._handle_resource(
                            resource.url, resource.fmt, lecture_filename,
                            self._download_completion_handler, last_update)

                # After fetching resources, create a playlist in M3U format with the
                # videos downloaded.
                if self._args.playlist:
                    create_m3u_playlist(section.dir)

                if self._args.hooks:
                    self._run_hooks(section, self._args.hooks)

            # if we haven't updated any files in 1 month, we're probably
            # done with this course
            completed = completed and is_course_complete(last_update)

        if completed:
            logging.info('COURSE PROBABLY COMPLETE: ' + self._class_name)

        # Wait for all downloads to complete
        self._downloader.join()
        return completed

    def _download_completion_handler(self, url, result):
        """Record *url* as failed when the backend reports an exception."""
        if isinstance(result, requests.exceptions.RequestException):
            logging.error('The following error has occurred while '
                          'downloading URL %s: %s', url, str(result))
            self.failed_urls.append(url)
        elif isinstance(result, Exception):
            logging.error('Unknown exception occurred: %s', result)
            self.failed_urls.append(url)

    def _handle_resource(self, url, fmt, lecture_filename, callback, last_update):
        """
        Handle resource. This function builds up resource file name and
        downloads it if necessary.

        @param url: URL of the resource.
        @type url: str
        @param fmt: Format of the resource (pdf, csv, etc)
        @type fmt: str
        @param lecture_filename: File name of the lecture.
        @type lecture_filename: str
        @param callback: Callback that will be called when file has been
            downloaded. It will be called even if exception occurred.
        @type callback: callable(url, result) where result may be Exception
        @param last_update: Timestamp of the newest file so far.
        @type last_update: int
        @return: Updated latest mtime.
        @rtype: int
        """
        overwrite = self._args.overwrite
        resume = self._args.resume
        skip_download = self._args.skip_download

        # Decide whether we need to download it
        if overwrite or not os.path.exists(lecture_filename) or resume:
            if not skip_download:
                if url.startswith(IN_MEMORY_MARKER):
                    # The "URL" actually carries the page contents inline.
                    page_content = url[len(IN_MEMORY_MARKER):]
                    logging.info('Saving page contents to: %s', lecture_filename)
                    with codecs.open(lecture_filename, 'w', 'utf-8') as file_object:
                        file_object.write(page_content)
                else:
                    if self.skipped_urls is not None and skip_format_url(fmt, url):
                        self.skipped_urls.append(url)
                    else:
                        logging.info('Downloading: %s', lecture_filename)
                        self._downloader.download(callback, url, lecture_filename, resume=resume)
            else:
                open(lecture_filename, 'w').close()  # touch
            last_update = time.time()
        else:
            logging.info('%s already downloaded', lecture_filename)
            # if this file hasn't been modified in a long time,
            # record that time
            last_update = max(last_update,
                              os.path.getmtime(lecture_filename))
        return last_update

    def _run_hooks(self, section, hooks):
        """Run each hook command inside the section's directory."""
        original_dir = os.getcwd()
        for hook in hooks:
            logging.info('Running hook %s for section %s.',
                         hook, section.dir)
            os.chdir(section.dir)
            subprocess.call(hook)
        os.chdir(original_dir)
| 9,473
|
Python
|
.py
| 211
| 32.322275
| 97
| 0.584617
|
coursera-dl/coursera-dl
| 9,343
| 2,207
| 211
|
LGPL-3.0
|
9/5/2024, 5:11:58 PM (Europe/Amsterdam)
|
14,520
|
credentials.py
|
coursera-dl_coursera-dl/coursera/credentials.py
|
# -*- coding: utf-8 -*-
"""
Manages the credential information (netrc, passwords, etc).
"""
import getpass
import logging
import netrc
import os
import platform
try:
import keyring
except ImportError:
keyring = None
KEYRING_SERVICE_NAME = 'coursera-dl'
class CredentialsError(Exception):
    """
    Raised when valid credentials cannot be found.

    Derives from Exception rather than BaseException so that generic
    ``except Exception`` handlers and tooling treat it as an ordinary
    application error (PEP 8: user exceptions should subclass Exception).
    """
def _getenv_or_empty(s):
"""
Helper function that converts None gotten from the environment to the
empty string.
"""
return os.getenv(s) or ""
def get_config_paths(config_name):  # pragma: no test
    """
    Return a list of config file paths to try in order, given the config
    file name.

    On non-Windows platforms there is one standard location, so a
    one-element list ``[None]`` is returned (callers then rely on the
    consumer's own default lookup).

    On Windows there is no standard location for a netrc-style file, so we
    build a list of plausible directories from several environment
    variables ($HOME; $HOMEDRIVE+$HOMEPATH; $USERPROFILE; $SYSTEMDRIVE;
    plus "C:" and the current drive), each combined with both the
    Unix-style ".file" and the Windows-style "_file" naming schemes, and
    try them until one works or all are depleted.

    Relevant background:
    http://markmail.org/message/i33ldu4xl5aterrr
    http://markmail.org/message/wbzs4gmtvkbewgxi
    http://stackoverflow.com/questions/6031214/
    """
    if platform.system() != 'Windows':
        return [None]

    # Now, we only treat the case of Windows.  Each inner list is joined:
    # HOMEDRIVE and HOMEPATH only make sense concatenated together.
    candidate_vars = [["HOME"],
                      ["HOMEDRIVE", "HOMEPATH"],
                      ["USERPROFILE"],
                      ["SYSTEMDRIVE"]]

    env_dirs = []
    for var_list in candidate_vars:
        directory = ''.join(_getenv_or_empty(var) for var in var_list)
        if directory:
            env_dirs.append(directory)
        else:
            logging.debug('Environment var(s) %s not defined, skipping',
                          var_list)

    all_dirs = env_dirs + ["C:", ""]

    # Try the Unix-ish dotfile before the Windows-ish underscore variant.
    return [directory + os.sep + prefix + config_name
            for directory in all_dirs
            for prefix in (".", "_")]
def authenticate_through_netrc(path=None):
    """
    Return a (user, password) tuple read from a .netrc file.

    @param path: Optional explicit path of the netrc file; when omitted,
        a list of platform-dependent default locations is tried in order.
    @type path: str or NoneType
    @return: (username, password) for the 'coursera-dl' machine entry.
    @rtype: (str, str)
    @raise CredentialsError: When no netrc file yields valid credentials.
    """
    machine = 'coursera-dl'
    candidate_paths = [path] if path else get_config_paths("netrc")
    problems = []

    for candidate in candidate_paths:
        logging.debug('Trying netrc file %s', candidate)
        try:
            auths = netrc.netrc(candidate).authenticators(machine)
        except (IOError, netrc.NetrcParseError) as e:
            problems.append(e)
            continue
        if auths is None:
            problems.append("Didn't find any credentials for " + machine)
        else:
            return auths[0], auths[2]

    raise CredentialsError(
        'Did not find valid netrc file:\n' +
        '\n'.join(str(p) for p in problems) +
        '\nPlease run this command: chmod og-rw ~/.netrc')
def get_credentials(username=None, password=None, netrc=None, use_keyring=False):
    """
    Return valid username, password tuple.

    Resolution order: .netrc file (when *netrc* is given), then the
    explicit *password*, then the OS keyring (when *use_keyring*), and
    finally an interactive getpass prompt.

    Raises CredentialsError if username or password is missing.
    """
    if netrc:
        # netrc may be True (use default locations) or an explicit path.
        path = None if netrc is True else netrc
        return authenticate_through_netrc(path)

    if not username:
        raise CredentialsError(
            'Please provide a username with the -u option, '
            'or a .netrc file with the -n option.')

    if not password and use_keyring:
        # NOTE(review): assumes the optional keyring package imported
        # successfully; when it is missing, `keyring` is None here and this
        # would crash — presumably callers only set use_keyring when the
        # package is installed. Confirm at the call sites.
        password = keyring.get_password(KEYRING_SERVICE_NAME, username)

    if not password:
        password = getpass.getpass('Coursera password for {0}: '.format(username))
        if use_keyring:
            keyring.set_password(KEYRING_SERVICE_NAME, username, password)

    return username, password
| 5,022
|
Python
|
.py
| 124
| 33.580645
| 82
| 0.663442
|
coursera-dl/coursera-dl
| 9,343
| 2,207
| 211
|
LGPL-3.0
|
9/5/2024, 5:11:58 PM (Europe/Amsterdam)
|
14,521
|
downloaders.py
|
coursera-dl_coursera-dl/coursera/downloaders.py
|
# -*- coding: utf-8 -*-
"""
Module for download-related classes and functions.
We currently support an internal downloader written in Python with just the
essential functionality and four "industrial-strength" external downloaders,
namely, aria2c, axel, curl, and wget.
"""
from __future__ import print_function
import logging
import math
import os
import subprocess
import sys
import time
import requests
from six import iteritems
#
# Below are file downloaders, they are wrappers for external downloaders.
#
class Downloader(object):
    """
    Base downloader class.

    Every subclass should implement the _start_download method.

    Usage::

      >>> import downloaders
      >>> d = downloaders.SubclassFromDownloader()
      >>> d.download('http://example.com', 'save/to/this/file')
    """

    def _start_download(self, url, filename, resume):
        """
        Actually transfer *url* into *filename*; subclasses must provide
        the implementation.
        """
        raise NotImplementedError("Subclasses should implement this")

    def download(self, url, filename, resume=False):
        """
        Download *url* into *filename*.

        When the user aborts with Ctrl-C and the transfer was not started
        in resume mode, the partially downloaded file is removed before the
        interrupt is re-raised.
        """
        try:
            self._start_download(url, filename, resume)
        except KeyboardInterrupt as e:
            # Keep the partial file only when resuming is possible.
            if not resume:
                logging.info('Keyboard Interrupt -- Removing partial file: %s',
                             filename)
                try:
                    os.remove(filename)
                except OSError:
                    pass
            raise e
class ExternalDownloader(Downloader):
    """
    Downloads files with an external downloader.

    We could possibly use python to stream files to disk,
    but this is slow compared to these external downloaders.

    :param session: Requests session.
    :param bin: External downloader binary.
    """

    # External downloader binary
    bin = None

    def __init__(self, session, bin=None, downloader_arguments=None):
        self.session = session
        # Explicit bin argument wins over the class-level default.
        self.bin = bin or self.__class__.bin
        self.downloader_arguments = downloader_arguments or []

        if not self.bin:
            raise RuntimeError("No bin specified")

    def _prepare_cookies(self, command, url):
        """
        Extract cookies from the requests session and add them to the command
        """
        # Build a throwaway request so requests computes which cookies
        # apply to this URL (domain/path matching).
        req = requests.models.Request()
        req.method = 'GET'
        req.url = url

        cookie_values = requests.cookies.get_cookie_header(
            self.session.cookies, req)

        if cookie_values:
            self._add_cookies(command, cookie_values)

    def _enable_resume(self, command):
        """
        Enable resume feature
        """
        raise RuntimeError("Subclass should implement this")

    def _add_cookies(self, command, cookie_values):
        """
        Add the given cookie values to the command
        """
        raise RuntimeError("Subclasses should implement this")

    def _create_command(self, url, filename):
        """
        Create command to execute in a subprocess.
        """
        raise NotImplementedError("Subclasses should implement this")

    def _start_download(self, url, filename, resume):
        command = self._create_command(url, filename)
        command.extend(self.downloader_arguments)
        self._prepare_cookies(command, url)
        if resume:
            self._enable_resume(command)

        logging.debug('Executing %s: %s', self.bin, command)
        try:
            subprocess.call(command)
        except OSError as e:
            msg = "{0}. Are you sure that '{1}' is the right bin?".format(
                e, self.bin)
            raise OSError(msg)
class WgetDownloader(ExternalDownloader):
    """
    wget backend: robust and with nice visual feedback.
    """

    bin = 'wget'

    def _enable_resume(self, command):
        # wget continues partial downloads with -c.
        command.append('-c')

    def _add_cookies(self, command, cookie_values):
        command.extend(['--header', "Cookie: " + cookie_values])

    def _create_command(self, url, filename):
        command = [self.bin, url, '-O', filename]
        command += ['--no-cookies', '--no-check-certificate']
        return command
class CurlDownloader(ExternalDownloader):
    """
    Uses curl, which is robust and gives nice visual feedback.
    """

    bin = 'curl'

    def _enable_resume(self, command):
        # '-C -' lets curl work out the resume offset by itself.
        command.extend(['-C', '-'])

    def _add_cookies(self, command, cookie_values):
        command.extend(['--cookie', cookie_values])

    def _create_command(self, url, filename):
        # -k: skip cert checks, -#: progress bar, -L: follow redirects.
        return [self.bin, url, '-k', '-#', '-L', '-o', filename]
class Aria2Downloader(ExternalDownloader):
    """
    Uses aria2. Unfortunately, it does not give a nice visual feedback, but
    gets the job done much faster than the alternatives.
    """

    bin = 'aria2c'

    def _enable_resume(self, command):
        command.append('-c')

    def _add_cookies(self, command, cookie_values):
        command.extend(['--header', "Cookie: " + cookie_values])

    def _create_command(self, url, filename):
        # Up to 4 connections per server with 1M split size for speed.
        return [self.bin, url, '-o', filename,
                '--check-certificate=false', '--log-level=notice',
                '--max-connection-per-server=4', '--min-split-size=1M']
class AxelDownloader(ExternalDownloader):
    """
    Uses axel, which is robust and it both gives nice
    visual feedback and get the job done fast.
    """

    bin = 'axel'

    def _enable_resume(self, command):
        # logging.warn is a deprecated alias; use logging.warning instead.
        logging.warning('Resume download not implemented for this '
                        'downloader!')

    def _add_cookies(self, command, cookie_values):
        command.extend(['-H', "Cookie: " + cookie_values])

    def _create_command(self, url, filename):
        # -n 4: four parallel connections, -a: alternate progress bar.
        return [self.bin, '-o', filename, '-n', '4', '-a', url]
def format_bytes(bytes):
    """
    Get human readable version of given bytes.

    Ripped from https://github.com/rg3/youtube-dl

    @param bytes: Byte count (int, float, numeric str, or None).
    @return: Formatted size such as '1.00KB', or 'N/A' for None.
    @rtype: str
    """
    if bytes is None:
        return 'N/A'
    if isinstance(bytes, str):
        bytes = float(bytes)

    if bytes == 0.0:
        exponent = 0
    else:
        exponent = int(math.log(bytes, 1024.0))
    suffixes = ['B', 'KB', 'MB', 'GB', 'TB', 'PB', 'EB', 'ZB', 'YB']
    # Bug fix: clamp the exponent so fractional byte counts (negative log)
    # or absurdly large ones can no longer index outside the suffix table
    # (the old code picked 'YB' for 0 < bytes < 1 and raised IndexError
    # beyond yottabytes).
    exponent = max(0, min(exponent, len(suffixes) - 1))
    converted = float(bytes) / float(1024 ** exponent)
    return '{0:.2f}{1}'.format(converted, suffixes[exponent])
class DownloadProgress(object):
    """
    Report download progress to stdout.

    Inspired by https://github.com/rg3/youtube-dl
    """

    def __init__(self, total):
        # A total of 0, '0' or None means "size unknown".
        self._total = None if total in [0, '0', None] else int(total)
        self._current = 0
        self._start = 0
        self._now = 0
        self._finished = False

    def start(self):
        """Record the start time of the transfer."""
        self._now = time.time()
        self._start = self._now

    def stop(self):
        """Mark the transfer finished and print the final progress line."""
        self._now = time.time()
        self._finished = True
        self._total = self._current
        self.report_progress()

    def read(self, bytes):
        """Account for *bytes* newly received and refresh the display."""
        self._now = time.time()
        self._current += bytes
        self.report_progress()

    def report(self, bytes):
        """Set the absolute received byte count and refresh the display."""
        self._now = time.time()
        self._current = bytes
        self.report_progress()

    def calc_percent(self):
        """Return a progress-bar string, or '--%' when the size is unknown."""
        if self._total is None:
            return '--%'
        if self._total == 0:
            return '100% done'
        percentage = int(float(self._current) / float(self._total) * 100.0)
        bar = int(percentage / 2) * '#'
        return '[{0: <50}] {1}%'.format(bar, percentage)

    def calc_speed(self):
        """Return the mean transfer rate as a human readable string."""
        elapsed = self._now - self._start
        if self._current == 0 or elapsed < 0.001:  # One millisecond
            return '---b/s'
        return '{0}/s'.format(format_bytes(float(self._current) / elapsed))

    def report_progress(self):
        """Print the current progress line, overwriting via carriage return."""
        total_speed_report = '{0} at {1}'.format(
            format_bytes(self._total), self.calc_speed())
        line = '\r{0: <56} {1: >30}'.format(
            self.calc_percent(), total_speed_report)
        if self._finished:
            print(line)
        else:
            print(line, end="")
        sys.stdout.flush()
class NativeDownloader(Downloader):
    """
    'Native' python downloader -- slower than the external downloaders.

    :param session: Requests session.
    """

    def __init__(self, session):
        self.session = session

    def _start_download(self, url, filename, resume=False):
        """
        Download url into filename, optionally resuming a partial file.

        @return: True on success (or already-complete file), False after
            exhausting all retry attempts.
        @rtype: bool
        """
        # resume has no meaning if the file doesn't exist!
        resume = resume and os.path.exists(filename)

        headers = {}
        filesize = None
        if resume:
            # Ask the server for the remainder of the file only.
            filesize = os.path.getsize(filename)
            headers['Range'] = 'bytes={}-'.format(filesize)
            logging.info('Resume downloading %s -> %s', url, filename)
        else:
            logging.info('Downloading %s -> %s', url, filename)

        max_attempts = 3
        attempts_count = 0
        error_msg = ''
        while attempts_count < max_attempts:
            r = self.session.get(url, stream=True, headers=headers)

            if r.status_code != 200:
                # because in resume state we are downloading only a
                # portion of requested file, server may return
                # following HTTP codes:
                #   206: Partial Content
                #   416: Requested Range Not Satisfiable
                # which are OK for us.
                if resume and r.status_code == 206:
                    pass
                elif resume and r.status_code == 416:
                    logging.info('%s already downloaded', filename)
                    r.close()
                    return True
                else:
                    print('%s %s %s' % (r.status_code, url, filesize))
                    # logging.warn() was removed in Python 3.13; use
                    # logging.warning() instead.
                    logging.warning('Probably the file is missing from the AWS '
                                    'repository... waiting.')

                    if r.reason:
                        error_msg = r.reason + ' ' + str(r.status_code)
                    else:
                        error_msg = 'HTTP Error ' + str(r.status_code)

                    # Exponential back-off between attempts.
                    wait_interval = 2 ** (attempts_count + 1)
                    msg = 'Error downloading, will retry in {0} seconds ...'
                    print(msg.format(wait_interval))
                    time.sleep(wait_interval)
                    attempts_count += 1
                    continue

            if resume and r.status_code == 200:
                # if the server returns HTTP code 200 while we are in
                # resume mode, it means that the server does not support
                # partial downloads.
                resume = False

            content_length = r.headers.get('content-length')
            chunk_sz = 1048576
            progress = DownloadProgress(content_length)
            progress.start()
            # Context manager guarantees the file handle is released even
            # if reading from the network raises mid-transfer.
            with open(filename, 'ab' if resume else 'wb') as f:
                while True:
                    data = r.raw.read(chunk_sz, decode_content=True)
                    if not data:
                        progress.stop()
                        break
                    progress.report(r.raw.tell())
                    f.write(data)
            r.close()
            return True

        if attempts_count == max_attempts:
            logging.warning('Skipping, can\'t download file ...')
            logging.error(error_msg)
            return False
def get_downloader(session, class_name, args):
    """
    Decides which downloader to use.

    Returns the first external downloader selected on the command line,
    falling back to the built-in (native) one.
    """
    external = {
        'wget': WgetDownloader,
        'curl': CurlDownloader,
        'aria2': Aria2Downloader,
        'axel': AxelDownloader,
    }

    for binary, downloader_class in iteritems(external):
        if getattr(args, binary):
            return downloader_class(
                session, bin=getattr(args, binary),
                downloader_arguments=args.downloader_arguments)

    return NativeDownloader(session)
| 12,143
|
Python
|
.py
| 316
| 28.892405
| 79
| 0.583369
|
coursera-dl/coursera-dl
| 9,343
| 2,207
| 211
|
LGPL-3.0
|
9/5/2024, 5:11:58 PM (Europe/Amsterdam)
|
14,522
|
network.py
|
coursera-dl_coursera-dl/coursera/network.py
|
"""
This module contains utility functions that operate on the network, download
some data and so on.
"""
import json
import logging
import requests
def get_reply(session, url, post=False, data=None, headers=None, quiet=False):
    """
    Download an HTML page using the requests session. Low-level function
    that allows for flexible request configuration.

    @param session: Requests session.
    @type session: requests.Session

    @param url: URL pattern with optional keywords to format.
    @type url: str

    @param post: Flag that indicates whether POST request should be sent.
    @type post: bool

    @param data: Payload data that is sent with request (in request body).
    @type data: object

    @param headers: Additional headers to send with request.
    @type headers: dict

    @param quiet: Flag that tells whether to print error message when status
        code != 200.
    @type quiet: bool

    @return: Requests response.
    @rtype: requests.Response
    """
    method = 'POST' if post else 'GET'
    prepared = session.prepare_request(
        requests.Request(method,
                         url,
                         data=data,
                         headers=headers if headers is not None else {}))
    reply = session.send(prepared)

    try:
        reply.raise_for_status()
    except requests.exceptions.HTTPError as e:
        if not quiet:
            logging.error("Error %s getting page %s", e, url)
            logging.error("The server replied: %s", reply.text)
        raise

    return reply
def get_page(session,
             url,
             json=False,
             post=False,
             data=None,
             headers=None,
             quiet=False,
             **kwargs):
    """
    Download an HTML page using the requests session.

    @param session: Requests session.
    @type session: requests.Session

    @param url: URL pattern with optional keywords to format (via kwargs).
    @type url: str

    @param json: Parse the reply as JSON instead of returning raw text.
    @type json: bool

    @param post: Flag that indicates whether POST request should be sent.
    @type post: bool

    @param data: Payload data that is sent with request (in request body).
    @type data: object

    @param headers: Additional headers to send with request.
    @type headers: dict

    @return: Response body (parsed JSON object when json=True).
    @rtype: str
    """
    formatted_url = url.format(**kwargs)
    reply = get_reply(session, formatted_url, post=post, data=data,
                      headers=headers, quiet=quiet)
    if json:
        return reply.json()
    return reply.text
def get_page_and_url(session, url):
    """
    Download an HTML page using the requests session and return
    the final URL after following redirects.

    @return: Tuple (page text, final URL).
    @rtype: (str, str)
    """
    response = get_reply(session, url)
    return response.text, response.url
def post_page_and_reply(session, url, data=None, headers=None, **kwargs):
    """
    POST to a (keyword-formatted) URL and return both body and reply.

    @return: Tuple (response text, requests.Response).
    @rtype: (str, requests.Response)
    """
    formatted_url = url.format(**kwargs)
    response = get_reply(session, formatted_url, post=True,
                         data=data, headers=headers)
    return response.text, response
| 2,994
|
Python
|
.py
| 80
| 30.2
| 78
| 0.660083
|
coursera-dl/coursera-dl
| 9,343
| 2,207
| 211
|
LGPL-3.0
|
9/5/2024, 5:11:58 PM (Europe/Amsterdam)
|
14,523
|
extractors.py
|
coursera-dl_coursera-dl/coursera/extractors.py
|
"""
This module contains implementation for extractors. Extractors know how
to parse site of MOOC platform and return a list of modules to download.
Usually they do not download heavy content, except when necessary
to parse course syllabus.
"""
import abc
import json
import logging
from .api import (CourseraOnDemand, OnDemandCourseMaterialItemsV1,
ModulesV1, LessonsV1, ItemsV2)
from .define import OPENCOURSE_ONDEMAND_COURSE_MATERIALS_V2
from .network import get_page
from .utils import is_debug_run, spit_json
class PlatformExtractor(object):
    """Base interface: extractors parse a MOOC platform's site into modules."""
    # NOTE(review): Python 2 style metaclass declaration; under Python 3 this
    # is an ordinary class attribute and does NOT make the class abstract —
    # presumably kept for six-based py2/py3 compatibility. Confirm before
    # modernizing.
    __metaclass__ = abc.ABCMeta
    def get_modules(self):
        """
        Get course modules.
        """
        pass
class CourseraExtractor(PlatformExtractor):
    """Extractor for Coursera on-demand courses: downloads the course
    syllabus and turns it into a nested (module, section, lecture)
    structure of downloadable links."""
    def __init__(self, session):
        # Guard so the notebook content is only downloaded once per course.
        self._notebook_downloaded = False
        self._session = session
    def list_courses(self):
        """
        List enrolled courses.
        @return: List of enrolled courses.
        @rtype: [str]
        """
        course = CourseraOnDemand(session=self._session,
                                  course_id=None,
                                  course_name=None)
        return course.list_courses()
    def get_modules(self, class_name,
                    reverse=False, unrestricted_filenames=False,
                    subtitle_language='en', video_resolution=None,
                    download_quizzes=False, mathjax_cdn_url=None,
                    download_notebooks=False):
        """
        Fetch and parse the syllabus for `class_name`.
        @return: Tuple (error_occurred, modules); see
            _parse_on_demand_syllabus for the structure of `modules`.
        @rtype: (bool, list)
        """
        page = self._get_on_demand_syllabus(class_name)
        error_occurred, modules = self._parse_on_demand_syllabus(
            class_name,
            page, reverse, unrestricted_filenames,
            subtitle_language, video_resolution,
            download_quizzes, mathjax_cdn_url, download_notebooks)
        return error_occurred, modules
    def _get_on_demand_syllabus(self, class_name):
        """
        Get the on-demand course listing webpage.
        """
        url = OPENCOURSE_ONDEMAND_COURSE_MATERIALS_V2.format(
            class_name=class_name)
        page = get_page(self._session, url)
        logging.debug('Downloaded %s (%d bytes)', url, len(page))
        return page
    def _parse_on_demand_syllabus(self, course_name, page, reverse=False,
                                  unrestricted_filenames=False,
                                  subtitle_language='en',
                                  video_resolution=None,
                                  download_quizzes=False,
                                  mathjax_cdn_url=None,
                                  download_notebooks=False
                                  ):
        """
        Parse a Coursera on-demand course listing/syllabus page.
        @return: Tuple of (bool, list), where bool indicates whether
        there was at least on error while parsing syllabus, the list
        is a list of parsed modules.
        @rtype: (bool, list)
        """
        dom = json.loads(page)
        class_id = dom['elements'][0]['id']
        logging.info('Parsing syllabus of on-demand course (id=%s). '
                     'This may take some time, please be patient ...',
                     class_id)
        modules = []
        json_modules = dom['linked']['onDemandCourseMaterialItems.v2']
        course = CourseraOnDemand(
            session=self._session, course_id=class_id,
            course_name=course_name,
            unrestricted_filenames=unrestricted_filenames,
            mathjax_cdn_url=mathjax_cdn_url)
        course.obtain_user_id()
        ondemand_material_items = OnDemandCourseMaterialItemsV1.create(
            session=self._session, course_name=course_name)
        # In debug runs, dump the raw API replies so parsing problems can
        # be inspected offline.
        if is_debug_run():
            spit_json(dom, '%s-syllabus-raw.json' % course_name)
            spit_json(json_modules, '%s-material-items-v2.json' % course_name)
            spit_json(ondemand_material_items._items,
                      '%s-course-material-items.json' % course_name)
        error_occurred = False
        all_modules = ModulesV1.from_json(
            dom['linked']['onDemandCourseMaterialModules.v1'])
        all_lessons = LessonsV1.from_json(
            dom['linked']['onDemandCourseMaterialLessons.v1'])
        all_items = ItemsV2.from_json(
            dom['linked']['onDemandCourseMaterialItems.v2'])
        # Walk module -> section (lesson) -> lecture (item), collecting
        # download links per lecture; empty containers are dropped.
        for module in all_modules:
            logging.info('Processing module %s', module.slug)
            lessons = []
            for section in module.children(all_lessons):
                logging.info('Processing section %s', section.slug)
                lectures = []
                available_lectures = section.children(all_items)
                # Certain modules may be empty-looking programming assignments
                # e.g. in data-structures, algorithms-on-graphs ondemand
                # courses
                if not available_lectures:
                    lecture = ondemand_material_items.get(section.id)
                    if lecture is not None:
                        available_lectures = [lecture]
                for lecture in available_lectures:
                    typename = lecture.type_name
                    logging.info('Processing lecture %s (%s)',
                                 lecture.slug, typename)
                    # Empty dictionary means there were no data
                    # None means an error occurred
                    links = {}
                    if typename == 'lecture':
                        # lecture_video_id = lecture['content']['definition']['videoId']
                        # assets = lecture['content']['definition'].get(
                        #     'assets', [])
                        lecture_video_id = lecture.id
                        # assets = []
                        links = course.extract_links_from_lecture(
                            class_id,
                            lecture_video_id, subtitle_language,
                            video_resolution)
                    elif typename == 'supplement':
                        links = course.extract_links_from_supplement(
                            lecture.id)
                    elif typename == 'phasedPeer':
                        links = course.extract_links_from_peer_assignment(
                            lecture.id)
                    elif typename in ('gradedProgramming', 'ungradedProgramming'):
                        links = course.extract_links_from_programming(
                            lecture.id)
                    elif typename == 'quiz':
                        if download_quizzes:
                            links = course.extract_links_from_quiz(
                                lecture.id)
                    elif typename == 'exam':
                        if download_quizzes:
                            links = course.extract_links_from_exam(
                                lecture.id)
                    elif typename == 'programming':
                        if download_quizzes:
                            links = course.extract_links_from_programming_immediate_instructions(
                                lecture.id)
                    elif typename == 'notebook':
                        if download_notebooks and not self._notebook_downloaded:
                            logging.warning(
                                'According to notebooks platform, content will be downloaded first')
                            links = course.extract_links_from_notebook(
                                lecture.id)
                            self._notebook_downloaded = True
                    else:
                        logging.info(
                            'Unsupported typename "%s" in lecture "%s" (lecture id "%s")',
                            typename, lecture.slug, lecture.id)
                        continue
                    if links is None:
                        error_occurred = True
                    elif links:
                        lectures.append((lecture.slug, links))
                if lectures:
                    lessons.append((section.slug, lectures))
            if lessons:
                modules.append((module.slug, lessons))
        if modules and reverse:
            modules.reverse()
        # Processing resources section
        json_references = course.extract_references_poll()
        references = []
        if json_references:
            logging.info('Processing resources')
            for json_reference in json_references:
                reference = []
                reference_slug = json_reference['slug']
                logging.info('Processing resource %s',
                             reference_slug)
                links = course.extract_links_from_reference(
                    json_reference['shortId'])
                if links is None:
                    error_occurred = True
                elif links:
                    reference.append(('', links))
                if reference:
                    references.append((reference_slug, reference))
        if references:
            modules.append(("Resources", references))
        return error_occurred, modules
| 9,215
|
Python
|
.py
| 193
| 31.07772
| 100
| 0.528409
|
coursera-dl/coursera-dl
| 9,343
| 2,207
| 211
|
LGPL-3.0
|
9/5/2024, 5:11:58 PM (Europe/Amsterdam)
|
14,524
|
formatting.py
|
coursera-dl_coursera-dl/coursera/formatting.py
|
import os
from .define import FORMAT_MAX_LENGTH, TITLE_MAX_LENGTH
def format_section(num, section, class_name, verbose_dirs):
    """Return the directory name for section `num`, optionally prefixed
    with the upper-cased class name (when verbose_dirs is set)."""
    name = '%02d_%s' % (num, section)
    if verbose_dirs:
        name = '{0}_{1}'.format(class_name.upper(), name)
    return name
def format_resource(num, name, title, fmt):
    """Build '<num>_<name>[_<title>].<fmt>' with a zero-padded number."""
    decorated_title = '_' + title if title else title
    return '%02d_%s%s.%s' % (num, name, decorated_title, fmt)
def format_combine_number_resource(secnum, lecnum, lecname, title, fmt):
    """Build '<sec>_<lec>_<name>[_<title>].<fmt>' with zero-padded numbers."""
    decorated_title = '_' + title if title else title
    return '%02d_%02d_%s%s.%s' % (secnum, lecnum, lecname, decorated_title, fmt)
def get_lecture_filename(combined_section_lectures_nums,
                         section_dir,
                         secnum,
                         lecnum,
                         lecname,
                         title,
                         fmt):
    """
    Prepare a destination lecture filename.

    @param combined_section_lectures_nums: Flag that indicates whether
        section lectures should have combined numbering.
    @type combined_section_lectures_nums: bool

    @param section_dir: Path to current section directory.
    @type section_dir: str

    @param secnum: Section number.
    @type secnum: int

    @param lecnum: Lecture number.
    @type lecnum: int

    @param lecname: Lecture name.
    @type lecname: str

    @param title: Resource title.
    @type title: str

    @param fmt: Format of the resource (pdf, csv, etc)
    @type fmt: str

    @return: Lecture file name.
    @rtype: str
    """
    # FIXME: quick and dirty workaround for "Filename too long" errors;
    # both fields are truncated to project-defined maximum lengths. A more
    # general solution is still needed.
    fmt = fmt[:FORMAT_MAX_LENGTH]
    title = title[:TITLE_MAX_LENGTH]

    # Choose between combined ("<sec>_<lec>_...") and per-section numbering.
    if combined_section_lectures_nums:
        basename = format_combine_number_resource(
            secnum + 1, lecnum + 1, lecname, title, fmt)
    else:
        basename = format_resource(lecnum + 1, lecname, title, fmt)

    return os.path.join(section_dir, basename)
| 2,131
|
Python
|
.py
| 57
| 29.210526
| 74
| 0.617518
|
coursera-dl/coursera-dl
| 9,343
| 2,207
| 211
|
LGPL-3.0
|
9/5/2024, 5:11:58 PM (Europe/Amsterdam)
|
14,525
|
parallel.py
|
coursera-dl_coursera-dl/coursera/parallel.py
|
import abc
import logging
import traceback
from multiprocessing.dummy import Pool
class AbstractDownloader(object):
    """
    Base class for download wrappers. Two methods should be implemented:
    `download` and `join`.
    """
    __metaclass__ = abc.ABCMeta

    def __init__(self, file_downloader):
        super(AbstractDownloader, self).__init__()
        self._file_downloader = file_downloader

    @abc.abstractmethod
    def download(self, *args, **kwargs):
        raise NotImplementedError()

    @abc.abstractmethod
    def join(self):
        raise NotImplementedError()

    def _download_wrapper(self, url, *args, **kwargs):
        """
        Invoke the wrapped file downloader, trapping any exception so that
        a failed URL reports its error object instead of aborting the run.
        Returns (url, result-or-exception).
        """
        try:
            outcome = self._file_downloader.download(url, *args, **kwargs)
        except Exception as e:
            logging.error("AbstractDownloader: %s", traceback.format_exc())
            return url, e
        return url, outcome
class ConsecutiveDownloader(AbstractDownloader):
    """
    Sequential strategy: each download runs immediately in the calling
    thread, and the callback fires before `download` returns.
    """

    def download(self, callback, url, *args, **kwargs):
        _, outcome = self._download_wrapper(url, *args, **kwargs)
        callback(url, outcome)
        return outcome

    def join(self):
        # Nothing to wait for: all work happened synchronously.
        pass
class ParallelDownloader(AbstractDownloader):
    """
    Thread-pool strategy: downloads are dispatched to a
    multiprocessing.dummy Pool (threads) and run concurrently.
    """

    def __init__(self, file_downloader, processes=1):
        super(ParallelDownloader, self).__init__(file_downloader)
        self._pool = Pool(processes=processes)

    def download(self, callback, url, *args, **kwargs):
        def unpack_and_callback(payload):
            # The wrapper returns (url, result); spread it into the callback.
            callback(*payload)

        return self._pool.apply_async(
            self._download_wrapper, (url,) + args, kwargs,
            callback=unpack_and_callback)

    def join(self):
        # Stop accepting new work, then block until queued downloads finish.
        self._pool.close()
        self._pool.join()
| 2,039
|
Python
|
.py
| 55
| 30.163636
| 76
| 0.660071
|
coursera-dl/coursera-dl
| 9,343
| 2,207
| 211
|
LGPL-3.0
|
9/5/2024, 5:11:58 PM (Europe/Amsterdam)
|
14,526
|
api.py
|
coursera-dl_coursera-dl/coursera/api.py
|
# vim: set fileencoding=utf8 :
"""
This module contains implementations of different APIs that are used by the
downloader.
"""
import os
import re
import json
import base64
import logging
import time
import requests
import urllib
from collections import namedtuple, OrderedDict
from six import iterkeys, iteritems
from six.moves.urllib_parse import quote_plus
import attr
from .utils import (BeautifulSoup, make_coursera_absolute_url,
extend_supplement_links, clean_url, clean_filename,
is_debug_run, unescape_html)
from .network import get_reply, get_page, post_page_and_reply
from .define import (OPENCOURSE_SUPPLEMENT_URL,
OPENCOURSE_PROGRAMMING_ASSIGNMENTS_URL,
OPENCOURSE_ASSET_URL,
OPENCOURSE_ASSETS_URL,
OPENCOURSE_API_ASSETS_V1_URL,
OPENCOURSE_ONDEMAND_COURSE_MATERIALS,
OPENCOURSE_ONDEMAND_COURSE_MATERIALS_V2,
OPENCOURSE_ONDEMAND_COURSES_V1,
OPENCOURSE_ONDEMAND_LECTURE_VIDEOS_URL,
OPENCOURSE_ONDEMAND_LECTURE_ASSETS_URL,
OPENCOURSE_ONDEMAND_SPECIALIZATIONS_V1,
OPENCOURSE_MEMBERSHIPS,
OPENCOURSE_REFERENCES_POLL_URL,
OPENCOURSE_REFERENCE_ITEM_URL,
OPENCOURSE_PROGRAMMING_IMMEDIATE_INSTRUCTIOINS_URL,
OPENCOURSE_PEER_ASSIGNMENT_INSTRUCTIONS,
# New feature, Notebook (Python Jupyter)
OPENCOURSE_NOTEBOOK_DESCRIPTIONS,
OPENCOURSE_NOTEBOOK_LAUNCHES,
OPENCOURSE_NOTEBOOK_TREE,
OPENCOURSE_NOTEBOOK_DOWNLOAD,
POST_OPENCOURSE_API_QUIZ_SESSION,
POST_OPENCOURSE_API_QUIZ_SESSION_GET_STATE,
POST_OPENCOURSE_ONDEMAND_EXAM_SESSIONS,
POST_OPENCOURSE_ONDEMAND_EXAM_SESSIONS_GET_STATE,
INSTRUCTIONS_HTML_INJECTION_PRE,
INSTRUCTIONS_HTML_MATHJAX_URL,
INSTRUCTIONS_HTML_INJECTION_AFTER,
IN_MEMORY_EXTENSION,
IN_MEMORY_MARKER)
from .cookies import prepare_auth_headers
class QuizExamToMarkupConverter(object):
    """
    Converts quiz/exam JSON into semi HTML (Coursera Markup) for local viewing.
    The output needs to be further processed by MarkupToHTMLConverter.
    """
    KNOWN_QUESTION_TYPES = ('mcq',
                            'mcqReflect',
                            'checkbox',
                            'singleNumeric',
                            'textExactMatch',
                            'mathExpression',
                            'regex',
                            'reflect')
    # TODO: support live MathJAX preview rendering for mathExpression
    # and regex question types
    KNOWN_INPUT_TYPES = ('textExactMatch',
                         'singleNumeric',
                         'mathExpression',
                         'regex',
                         'reflect')
    def __init__(self, session):
        self._session = session
    def __call__(self, quiz_or_exam_json):
        """Render the whole quiz/exam JSON into a single markup string,
        one <h3>-numbered question per block, separated by <hr>."""
        result = []
        for question_index, question_json in enumerate(quiz_or_exam_json['questions']):
            question_type = question_json['question']['type']
            # Unknown types are logged but still rendered below on a
            # best-effort basis (no `continue` here).
            if question_type not in self.KNOWN_QUESTION_TYPES:
                logging.info('Unknown question type: %s', question_type)
                logging.info('Question json: %s', question_json)
                logging.info('Please report class name, quiz name and the data'
                             ' above to coursera-dl authors')
            prompt = question_json['variant']['definition']['prompt']
            options = question_json['variant']['definition'].get('options', [])
            # Question number
            result.append('<h3>Question %d</h3>' % (question_index + 1))
            # Question text
            question_text = unescape_html(prompt['definition']['value'])
            result.append(question_text)
            # Input for answer
            if question_type in self.KNOWN_INPUT_TYPES:
                result.extend(self._generate_input_field())
            # Convert input_type from JSON reply to HTML input type
            input_type = {
                'mcq': 'radio',
                'mcqReflect': 'radio',
                'checkbox': 'checkbox'
            }.get(question_type, '')
            # Convert options, they are either checkboxes or radio buttons
            result.extend(self._convert_options(
                question_index, options, input_type))
            result.append('<hr>')
        return '\n'.join(result)
    def _convert_options(self, question_index, options, input_type):
        """Render answer options as one <form> of radio/checkbox inputs;
        returns [] when the question has no options."""
        if not options:
            return []
        result = ['<form>']
        for option in options:
            option_text = unescape_html(
                option['display']['definition']['value'])
            # We need to replace <text> with <span> so that answer text
            # stays on the same line with checkbox/radio button
            option_text = self._replace_tag(option_text, 'text', 'span')
            result.append('<label><input type="%s" name="%s">'
                          '%s<br></label>' % (
                              input_type, question_index, option_text))
        result.append('</form>')
        return result
    def _replace_tag(self, text, initial_tag, target_tag):
        """Rename every `initial_tag` element in `text` to `target_tag`
        and return the prettified markup."""
        soup = BeautifulSoup(text)
        while soup.find(initial_tag):
            soup.find(initial_tag).name = target_tag
        return soup.prettify()
    def _generate_input_field(self):
        """Return markup for a free-text answer input box."""
        return ['<form><label>Enter answer here:<input type="text" '
                'name=""><br></label></form>']
class MarkupToHTMLConverter(object):
    """Converts Coursera instructions markup into standalone HTML: textual
    markup tags become HTML equivalents, and image/audio assets are
    downloaded and inlined as base64 data URIs."""
    def __init__(self, session, mathjax_cdn_url=None):
        self._session = session
        self._asset_retriever = AssetRetriever(session)
        if not mathjax_cdn_url:
            # Fall back to the default MathJax CDN when no override given.
            mathjax_cdn_url = INSTRUCTIONS_HTML_MATHJAX_URL
        self._mathjax_cdn_url = mathjax_cdn_url
    def __call__(self, markup):
        """
        Convert instructions markup to make it more suitable for
        offline reading.
        @param markup: HTML (kinda) markup to prettify.
        @type markup: str
        @return: Prettified HTML with several markup tags replaced with HTML
        equivalents.
        @rtype: str
        """
        soup = BeautifulSoup(markup)
        self._convert_markup_basic(soup)
        self._convert_markup_images(soup)
        self._convert_markup_audios(soup)
        return soup.prettify()
    def _convert_markup_basic(self, soup):
        """
        Perform basic conversion of instructions markup. This includes
        replacement of several textual markup tags with their HTML equivalents.
        @param soup: BeautifulSoup instance.
        @type soup: BeautifulSoup
        """
        # Inject meta charset tag
        meta = soup.new_tag('meta', charset='UTF-8')
        soup.insert(0, meta)
        # 1. Inject basic CSS style
        css = "".join([
            INSTRUCTIONS_HTML_INJECTION_PRE,
            self._mathjax_cdn_url,
            INSTRUCTIONS_HTML_INJECTION_AFTER])
        css_soup = BeautifulSoup(css)
        soup.append(css_soup)
        # 2. Replace <text> with <p>
        while soup.find('text'):
            soup.find('text').name = 'p'
        # 3. Replace <heading level="1"> with <h1>
        while soup.find('heading'):
            heading = soup.find('heading')
            heading.name = 'h%s' % heading.attrs.get('level', '1')
        # 4. Replace <code> with <pre>
        while soup.find('code'):
            soup.find('code').name = 'pre'
        # 5. Replace <list> with <ol> or <ul>
        while soup.find('list'):
            list_ = soup.find('list')
            type_ = list_.attrs.get('bullettype', 'numbers')
            list_.name = 'ol' if type_ == 'numbers' else 'ul'
    def _convert_markup_images(self, soup):
        """
        Convert images of instructions markup. Images are downloaded,
        base64-encoded and inserted into <img> tags.
        @param soup: BeautifulSoup instance.
        @type soup: BeautifulSoup
        """
        # 6. Replace <img> assets with actual image contents
        images = [image for image in soup.find_all('img')
                  if image.attrs.get('assetid') is not None]
        if not images:
            return
        # Get assetid attribute from all images
        asset_ids = [image.attrs.get('assetid') for image in images]
        # Batch-fetch all assets in one retriever call, then inline each.
        self._asset_retriever(asset_ids)
        for image in images:
            # Encode each image using base64
            asset = self._asset_retriever[image['assetid']]
            if asset.data is not None:
                encoded64 = base64.b64encode(asset.data).decode()
                image['src'] = 'data:%s;base64,%s' % (
                    asset.content_type, encoded64)
    def _convert_markup_audios(self, soup):
        """
        Convert audios of instructions markup. Audios are downloaded,
        base64-encoded and inserted as <audio controls> <source> tag.
        @param soup: BeautifulSoup instance.
        @type soup: BeautifulSoup
        """
        # 7. Replace <asset> audio assets with actual audio contents
        audios = [audio for audio in soup.find_all('asset')
                  if audio.attrs.get('id') is not None
                  and audio.attrs.get('assettype') == 'audio']
        if not audios:
            return
        # Get assetid attribute from all audios
        asset_ids = [audio.attrs.get('id') for audio in audios]
        self._asset_retriever(asset_ids)
        for audio in audios:
            # Encode each audio using base64
            asset = self._asset_retriever[audio['id']]
            if asset.data is not None:
                encoded64 = base64.b64encode(asset.data).decode()
                data_string = 'data:%s;base64,%s' % (
                    asset.content_type, encoded64)
                source_tag = soup.new_tag(
                    'source', src=data_string, type=asset.content_type)
                controls_tag = soup.new_tag('audio', controls="")
                controls_tag.string = 'Your browser does not support the audio element.'
                controls_tag.append(source_tag)
                audio.insert_after(controls_tag)
class OnDemandCourseMaterialItemsV1(object):
    """
    Helper class that allows accessing lecture JSONs by lesson IDs.
    """

    def __init__(self, items):
        """
        Build the lessonId -> item lookup table.

        @param items: linked.OnDemandCourseMaterialItems key of
            OPENCOURSE_ONDEMAND_COURSE_MATERIALS response.
        @type items: dict
        """
        self._items = {item['lessonId']: item for item in items}

    @staticmethod
    def create(session, course_name):
        """
        Create an instance using a session and a course_name.

        @param session: Requests session.
        @type session: requests.Session

        @param course_name: Course name (slug) from course json.
        @type course_name: str

        @return: Instance of OnDemandCourseMaterialItems
        @rtype: OnDemandCourseMaterialItems
        """
        dom = get_page(session, OPENCOURSE_ONDEMAND_COURSE_MATERIALS,
                       json=True,
                       class_name=course_name)
        return OnDemandCourseMaterialItemsV1(
            dom['linked']['onDemandCourseMaterialItems.v1'])

    def get(self, lesson_id):
        """
        Return the lecture item JSON for `lesson_id`, or None when absent.

        @param lesson_id: Lesson ID.
        @type lesson_id: str

        @return: Lesson JSON (dict with keys such as "id", "moduleId",
            "lessonId", "name", "slug", "content", ...).
        @rtype: dict
        """
        return self._items.get(lesson_id)
class Asset(namedtuple('Asset', 'id name type_name url content_type data')):
    """
    This class contains information about an asset.
    """
    __slots__ = ()

    def __repr__(self):
        # Elide the (potentially huge) binary payload from the repr.
        template = ('Asset(id="%s", name="%s", type_name="%s", '
                    'url="%s", content_type="%s", data="<...>")')
        return template % (self.id, self.name, self.type_name,
                           self.url, self.content_type)
class AssetRetriever(object):
    """
    This class helps download assets by their ID.
    """

    def __init__(self, session):
        self._session = session
        self._asset_mapping = {}

    def __getitem__(self, asset_id):
        """Return a previously retrieved Asset by its id."""
        return self._asset_mapping[asset_id]

    def __call__(self, asset_ids, download=True):
        """Fetch asset metadata (and optionally contents) for `asset_ids`,
        caching each resulting Asset by id; returns the list of Assets."""
        result = []

        # Fetch metadata for all requested assets in one API call.
        asset_list = get_page(self._session, OPENCOURSE_API_ASSETS_V1_URL,
                              json=True,
                              id=','.join(asset_ids))

        # Index the reply by asset id for easier access.
        asset_map = {entry['id']: entry for entry in asset_list['elements']}

        for asset_id in asset_ids:
            asset_dict = asset_map[asset_id]

            url = asset_dict['url']['url'].strip()
            data, content_type = None, None

            if download:
                reply = get_reply(self._session, url)
                if reply.status_code == 200:
                    data = reply.content
                    content_type = reply.headers.get('Content-Type')

            asset = Asset(id=asset_dict['id'].strip(),
                          name=asset_dict['name'].strip(),
                          type_name=asset_dict['typeName'].strip(),
                          url=url,
                          content_type=content_type,
                          data=data)
            self._asset_mapping[asset.id] = asset
            result.append(asset)

        return result
@attr.s
class ModuleV1(object):
    # A course module: display name, id, URL slug and ids of child lessons.
    name = attr.ib()
    id = attr.ib()
    slug = attr.ib()
    child_ids = attr.ib()
    def children(self, all_children):
        """Resolve child lesson ids against an indexable lessons container."""
        return [all_children[child] for child in self.child_ids]
@attr.s
class ModulesV1(object):
    # Ordered mapping of module id -> ModuleV1 (preserves course order).
    children = attr.ib()
    @staticmethod
    def from_json(data):
        """Build an ordered id->ModuleV1 container from the API reply."""
        return ModulesV1(OrderedDict(
            (item['id'],
             ModuleV1(item['name'],
                      item['id'],
                      item['slug'],
                      item['lessonIds']))
            for item in data
        ))
    def __getitem__(self, key):
        return self.children[key]
    def __iter__(self):
        # Iterate modules in their original (course) order.
        return iter(self.children.values())
@attr.s
class LessonV1(object):
    # A lesson (section) within a module; child_ids are item ids.
    name = attr.ib()
    id = attr.ib()
    slug = attr.ib()
    child_ids = attr.ib()
    def children(self, all_children):
        """Resolve child item ids against an indexable items container."""
        return [all_children[child] for child in self.child_ids]
@attr.s
class LessonsV1(object):
    # Ordered mapping of lesson id -> LessonV1.
    children = attr.ib()
    @staticmethod
    def from_json(data):
        """Build an ordered id->LessonV1 container from the API reply."""
        return LessonsV1(OrderedDict(
            (item['id'],
             LessonV1(item['name'],
                      item['id'],
                      item['slug'],
                      item['itemIds']))
            for item in data
        ))
    def __getitem__(self, key):
        return self.children[key]
@attr.s
class ItemV2(object):
    # A single course item (lecture/quiz/etc.) with its parent ids;
    # type_name comes from contentSummary.typeName in the API reply.
    name = attr.ib()
    id = attr.ib()
    slug = attr.ib()
    type_name = attr.ib()
    lesson_id = attr.ib()
    module_id = attr.ib()
@attr.s
class ItemsV2(object):
    # Ordered mapping of item id -> ItemV2.
    children = attr.ib()
    @staticmethod
    def from_json(data):
        """Build an ordered id->ItemV2 container from the API reply."""
        return ItemsV2(OrderedDict(
            (item['id'],
             ItemV2(item['name'],
                    item['id'],
                    item['slug'],
                    item['contentSummary']['typeName'],
                    item['lessonId'],
                    item['moduleId']))
            for item in data
        ))
    def __getitem__(self, key):
        return self.children[key]
@attr.s
class VideoV1(object):
    # One downloadable rendition: resolution label and its mp4 URL.
    resolution = attr.ib()
    mp4_video_url = attr.ib()
@attr.s
class VideosV1(object):
    # Mapping of resolution -> VideoV1, ordered "best" first per the sort
    # in from_json.
    children = attr.ib()
    @staticmethod
    def from_json(data):
        """Build a resolution-keyed container from the lecture videos reply."""
        videos = [VideoV1(resolution, links['mp4VideoUrl'])
                  for resolution, links
                  in data['sources']['byResolution'].items()]
        # NOTE(review): this sorts resolution keys as-is; if they are
        # strings like '720p'/'1080p', lexicographic order ranks '720p'
        # above '1080p' — confirm against the API's actual key format.
        videos.sort(key=lambda video: video.resolution, reverse=True)
        videos = OrderedDict(
            (video.resolution, video)
            for video in videos
        )
        return VideosV1(videos)
    def __contains__(self, key):
        return key in self.children
    def __getitem__(self, key):
        return self.children[key]
    def get_best(self):
        """Return the first (highest-sorted) rendition."""
        return next(iter(self.children.values()))
def expand_specializations(session, class_names):
    """
    Checks whether any given name is not a class but a specialization.

    If it's a specialization, expand the list of class names with the child
    class names.
    """
    expanded = []
    for name in class_names:
        specialization = SpecializationV1.create(session, name)
        if specialization is None:
            # A plain course: keep the name as-is.
            expanded.append(name)
        else:
            expanded.extend(specialization.children)
            logging.info('Expanded specialization "%s" into the following'
                         ' classes: %s',
                         name, ' '.join(specialization.children))
    return expanded
@attr.s
class SpecializationV1(object):
    # Slugs of the courses that make up the specialization.
    children = attr.ib()
    @staticmethod
    def create(session, class_name):
        """
        Return a SpecializationV1 for `class_name`, or None when the name
        is not a specialization (the lookup endpoint replies with an
        HTTP error).
        """
        try:
            dom = get_page(session, OPENCOURSE_ONDEMAND_SPECIALIZATIONS_V1,
                           json=True, quiet=True,
                           class_name=class_name)
        except requests.exceptions.HTTPError as e:
            logging.debug('Could not expand %s: %s', class_name, e)
            return None
        return SpecializationV1(
            [course['slug'] for course in dom['linked']['courses.v1']])
class CourseraOnDemand(object):
"""
This is a class that provides a friendly interface to extract certain
parts of on-demand courses. On-demand class is a new format that Coursera
is using, they contain `/learn/' in their URLs. This class does not support
old-style Coursera classes. This API is by no means complete.
"""
def __init__(self, session, course_id, course_name,
unrestricted_filenames=False,
mathjax_cdn_url=None):
"""
Initialize Coursera OnDemand API.
@param session: Current session that holds cookies and so on.
@type session: requests.Session
@param course_id: Course ID from course json.
@type course_id: str
@param unrestricted_filenames: Flag that indicates whether grabbed
file names should endure stricter character filtering. @see
`clean_filename` for the details.
@type unrestricted_filenames: bool
"""
self._session = session
self._notebook_cookies = None
self._course_id = course_id
self._course_name = course_name
self._unrestricted_filenames = unrestricted_filenames
self._user_id = None
self._quiz_to_markup = QuizExamToMarkupConverter(session)
self._markup_to_html = MarkupToHTMLConverter(
session, mathjax_cdn_url=mathjax_cdn_url)
self._asset_retriever = AssetRetriever(session)
def obtain_user_id(self):
reply = get_page(self._session, OPENCOURSE_MEMBERSHIPS, json=True)
elements = reply['elements']
user_id = elements[0]['userId'] if elements else None
self._user_id = user_id
def list_courses(self):
"""
List enrolled courses.
@return: List of enrolled courses.
@rtype: [str]
"""
reply = get_page(self._session, OPENCOURSE_MEMBERSHIPS, json=True)
course_list = reply['linked']['courses.v1']
slugs = [element['slug'] for element in course_list]
return slugs
    def extract_links_from_exam(self, exam_id):
        """
        Download an exam and convert it to links to supplement files.
        @param exam_id: Exam ID.
        @type exam_id: str
        @return: @see CourseraOnDemand._extract_links_from_text, or None
            when the exam could not be downloaded.
        """
        try:
            session_id = self._get_exam_session_id(exam_id)
            exam_json = self._get_exam_json(exam_id, session_id)
            return self._convert_quiz_json_to_links(exam_json, 'exam')
        except requests.exceptions.HTTPError as exception:
            logging.error('Could not download exam %s: %s', exam_id, exception)
            if is_debug_run():
                # In debug runs additionally log the full traceback.
                logging.exception(
                    'Could not download exam %s: %s', exam_id, exception)
            return None
def _get_notebook_folder(self, url, jupyterId, **kwargs):
supplement_links = {}
url = url.format(**kwargs)
reply = get_page(self._session, url, json=True)
for content in reply['content']:
if content['type'] == 'directory':
a = self._get_notebook_folder(
OPENCOURSE_NOTEBOOK_TREE, jupyterId, jupId=jupyterId,
path=content['path'], timestamp=int(time.time()))
supplement_links.update(a)
elif content['type'] == 'file':
tmp_url = OPENCOURSE_NOTEBOOK_DOWNLOAD.format(
path=content['path'], jupId=jupyterId,
timestamp=int(time.time()))
filename, extension = os.path.splitext(clean_url(tmp_url))
head, tail = os.path.split(content['path'])
# '/' in the following line is for a reason:
# @noureddin says: "I split head using split('/') not
# os.path.split() because it's seems to me that it comes from a
# web page, so the separator will always be /, so using the
# native path splitting function is not the most portable
# way to do it."
# Original pull request:
# https://github.com/coursera-dl/coursera-dl/pull/654
head = '/'.join([clean_filename(dir, minimal_change=True)
for dir in head.split('/')])
tail = clean_filename(tail, minimal_change=True)
if not os.path.isdir(self._course_name + "/notebook/" + head + "/"):
logging.info('Creating [%s] directories...', head)
os.makedirs(self._course_name + "/notebook/" + head + "/")
r = requests.get(tmp_url.replace(" ", "%20"),
cookies=self._session.cookies)
if not os.path.exists(self._course_name + "/notebook/" + head + "/" + tail):
logging.info('Downloading %s into %s', tail, head)
with open(self._course_name + "/notebook/" + head + "/" + tail, 'wb+') as f:
f.write(r.content)
else:
logging.info('Skipping %s... (file exists)', tail)
if str(extension[1:]) not in supplement_links:
supplement_links[str(extension[1:])] = []
supplement_links[str(extension[1:])].append(
(tmp_url.replace(" ", "%20"), filename))
elif content['type'] == 'notebook':
tmp_url = OPENCOURSE_NOTEBOOK_DOWNLOAD.format(
path=content['path'], jupId=jupyterId, timestamp=int(time.time()))
filename, extension = os.path.splitext(clean_url(tmp_url))
head, tail = os.path.split(content['path'])
if not os.path.isdir(self._course_name + "/notebook/" + head + "/"):
logging.info('Creating [%s] directories...', head)
os.makedirs(self._course_name + "/notebook/" + head + "/")
r = requests.get(tmp_url.replace(" ", "%20"),
cookies=self._session.cookies)
if not os.path.exists(self._course_name + "/notebook/" + head + "/" + tail):
logging.info(
'Downloading Jupyter %s into %s', tail, head)
with open(self._course_name + "/notebook/" + head + "/" + tail, 'wb+') as f:
f.write(r.content)
else:
logging.info('Skipping %s... (file exists)', tail)
if "ipynb" not in supplement_links:
supplement_links["ipynb"] = []
supplement_links["ipynb"].append(
(tmp_url.replace(" ", "%20"), filename))
else:
logging.info(
'Unsupported typename %s in notebook', content['type'])
return supplement_links
def _get_notebook_json(self, notebook_id, authorizationId):
headers = self._auth_headers_with_json()
reply = get_page(
self._session,
OPENCOURSE_NOTEBOOK_DESCRIPTIONS,
json=False,
authId=authorizationId,
headers=headers
)
jupyted_id = re.findall(r"\"\/user\/(.*)\/tree\"", reply)
if len(jupyted_id) == 0:
logging.error('Could not download notebook %s', notebook_id)
return None
jupyted_id = jupyted_id[0]
newReq = requests.Session()
req = newReq.get(OPENCOURSE_NOTEBOOK_TREE.format(
jupId=jupyted_id, path="/", timestamp=int(time.time())),
headers=headers)
return self._get_notebook_folder(
OPENCOURSE_NOTEBOOK_TREE, jupyted_id, jupId=jupyted_id,
path="/", timestamp=int(time.time()))
def extract_links_from_notebook(self, notebook_id):
try:
authorizationId = self._extract_notebook_text(notebook_id)
ret = self._get_notebook_json(notebook_id, authorizationId)
return ret
except requests.exceptions.HTTPError as exception:
logging.error('Could not download notebook %s: %s',
notebook_id, exception)
if is_debug_run():
logging.exception(
'Could not download notebook %s: %s', notebook_id, exception)
return None
    def extract_links_from_quiz(self, quiz_id):
        """
        Download a quiz and convert it to links to supplement files.
        @param quiz_id: Quiz ID.
        @type quiz_id: str
        @return: @see CourseraOnDemand._extract_links_from_text, or None
            when the quiz could not be downloaded.
        """
        try:
            session_id = self._get_quiz_session_id(quiz_id)
            quiz_json = self._get_quiz_json(quiz_id, session_id)
            return self._convert_quiz_json_to_links(quiz_json, 'quiz')
        except requests.exceptions.HTTPError as exception:
            logging.error('Could not download quiz %s: %s', quiz_id, exception)
            if is_debug_run():
                # In debug runs additionally log the full traceback.
                logging.exception(
                    'Could not download quiz %s: %s', quiz_id, exception)
            return None
def _convert_quiz_json_to_links(self, quiz_json, filename_suffix):
markup = self._quiz_to_markup(quiz_json)
html = self._markup_to_html(markup)
supplement_links = {}
instructions = (IN_MEMORY_MARKER + html, filename_suffix)
extend_supplement_links(
supplement_links, {IN_MEMORY_EXTENSION: [instructions]})
return supplement_links
def _get_exam_json(self, exam_id, session_id):
headers = self._auth_headers_with_json()
data = {"name": "getState", "argument": []}
reply = get_page(self._session,
POST_OPENCOURSE_ONDEMAND_EXAM_SESSIONS_GET_STATE,
json=True,
post=True,
data=json.dumps(data),
headers=headers,
session_id=session_id)
return reply['elements'][0]['result']
def _get_exam_session_id(self, exam_id):
headers = self._auth_headers_with_json()
data = {'courseId': self._course_id, 'itemId': exam_id}
_body, reply = post_page_and_reply(self._session,
POST_OPENCOURSE_ONDEMAND_EXAM_SESSIONS,
data=json.dumps(data),
headers=headers)
return reply.headers.get('X-Coursera-Id')
def _get_quiz_json(self, quiz_id, session_id):
headers = self._auth_headers_with_json()
data = {"contentRequestBody": {"argument": []}}
reply = get_page(self._session,
POST_OPENCOURSE_API_QUIZ_SESSION_GET_STATE,
json=True,
post=True,
data=json.dumps(data),
headers=headers,
user_id=self._user_id,
class_name=self._course_name,
quiz_id=quiz_id,
session_id=session_id)
return reply['contentResponseBody']['return']
def _get_quiz_session_id(self, quiz_id):
headers = self._auth_headers_with_json()
data = {"contentRequestBody": []}
reply = get_page(self._session,
POST_OPENCOURSE_API_QUIZ_SESSION,
json=True,
post=True,
data=json.dumps(data),
headers=headers,
user_id=self._user_id,
class_name=self._course_name,
quiz_id=quiz_id)
return reply['contentResponseBody']['session']['id']
def _auth_headers_with_json(self):
headers = prepare_auth_headers(self._session, include_cauth=True)
headers.update({
'Content-Type': 'application/json; charset=UTF-8'
})
return headers
def extract_links_from_lecture(self, course_id,
video_id, subtitle_language='en',
resolution='540p'):
"""
Return the download URLs of on-demand course video.
@param video_id: Video ID.
@type video_id: str
@param subtitle_language: Subtitle language.
@type subtitle_language: str
@param resolution: Preferred video resolution.
@type resolution: str
@return: @see CourseraOnDemand._extract_links_from_text
"""
try:
links = self._extract_videos_and_subtitles_from_lecture(
course_id, video_id, subtitle_language, resolution)
assets = self._get_lecture_asset_ids(course_id, video_id)
assets = self._normalize_assets(assets)
extend_supplement_links(
links, self._extract_links_from_lecture_assets(assets))
return links
except requests.exceptions.HTTPError as exception:
logging.error('Could not download lecture %s: %s',
video_id, exception)
if is_debug_run():
logging.exception(
'Could not download lecture %s: %s', video_id, exception)
return None
def _get_lecture_asset_ids(self, course_id, video_id):
"""
Obtain a list of asset ids from a lecture.
"""
dom = get_page(self._session, OPENCOURSE_ONDEMAND_LECTURE_ASSETS_URL,
json=True, course_id=course_id, video_id=video_id)
# Note that we extract here "id", not definition -> assetId, as it
# be extracted later.
return [asset['id']
for asset in dom['linked']['openCourseAssets.v1']]
def _normalize_assets(self, assets):
"""
Perform asset normalization. For some reason, assets that are sometimes
present in lectures, have "@1" at the end of their id. Such "uncut"
asset id when fed to OPENCOURSE_ASSETS_URL results in error that says:
"Routing error: 'get-all' not implemented". To avoid that, the last
two characters from asset id are cut off and after that that method
works fine. It looks like, Web UI is doing the same.
@param assets: List of asset ids.
@type assets: [str]
@return: Normalized list of asset ids (without trailing "@1")
@rtype: [str]
"""
new_assets = []
for asset in assets:
# For example: giAxucdaEeWJTQ5WTi8YJQ@1
if len(asset) == 24:
# Turn it into: giAxucdaEeWJTQ5WTi8YJQ
asset = asset[:-2]
new_assets.append(asset)
return new_assets
def _extract_links_from_lecture_assets(self, asset_ids):
"""
Extract links to files of the asset ids.
@param asset_ids: List of asset ids.
@type asset_ids: [str]
@return: @see CourseraOnDemand._extract_links_from_text
"""
links = {}
def _add_asset(name, url, destination):
filename, extension = os.path.splitext(clean_url(name))
if extension is '':
return
extension = clean_filename(
extension.lower().strip('.').strip(),
self._unrestricted_filenames)
basename = clean_filename(
os.path.basename(filename),
self._unrestricted_filenames)
url = url.strip()
if extension not in destination:
destination[extension] = []
destination[extension].append((url, basename))
for asset_id in asset_ids:
for asset in self._get_asset_urls(asset_id):
_add_asset(asset['name'], asset['url'], links)
return links
def _get_asset_urls(self, asset_id):
"""
Get list of asset urls and file names. This method may internally
use AssetRetriever to extract `asset` element types.
@param asset_id: Asset ID.
@type asset_id: str
@return List of dictionaries with asset file names and urls.
@rtype [{
'name': '<filename.ext>'
'url': '<url>'
}]
"""
dom = get_page(self._session, OPENCOURSE_ASSETS_URL,
json=True, id=asset_id)
logging.debug('Parsing JSON for asset_id <%s>.', asset_id)
urls = []
for element in dom['elements']:
typeName = element['typeName']
definition = element['definition']
# Elements of `asset` types look as follows:
#
# {'elements': [{'definition': {'assetId': 'gtSfvscoEeW7RxKvROGwrw',
# 'name': 'Презентация к лекции'},
# 'id': 'phxNlMcoEeWXCQ4nGuQJXw',
# 'typeName': 'asset'}],
# 'linked': None,
# 'paging': None}
#
if typeName == 'asset':
open_course_asset_id = definition['assetId']
for asset in self._asset_retriever([open_course_asset_id],
download=False):
urls.append({'name': asset.name, 'url': asset.url})
# Elements of `url` types look as follows:
#
# {'elements': [{'definition': {'name': 'What motivates you.pptx',
# 'url': 'https://d396qusza40orc.cloudfront.net/learning/Powerpoints/2-4A_What_motivates_you.pptx'},
# 'id': '0hixqpWJEeWQkg5xdHApow',
# 'typeName': 'url'}],
# 'linked': None,
# 'paging': None}
#
elif typeName == 'url':
urls.append({'name': definition['name'].strip(),
'url': definition['url'].strip()})
else:
logging.warning(
'Unknown asset typeName: %s\ndom: %s\n'
'If you think the downloader missed some '
'files, please report the issue here:\n'
'https://github.com/coursera-dl/coursera-dl/issues/new',
typeName, json.dumps(dom, indent=4))
return urls
def _extract_videos_and_subtitles_from_lecture(self,
course_id,
video_id,
subtitle_language='en',
resolution='540p'):
logging.debug('Parsing JSON for video_id <%s>.', video_id)
dom = get_page(self._session, OPENCOURSE_ONDEMAND_LECTURE_VIDEOS_URL,
json=True,
course_id=course_id,
video_id=video_id)
dom = dom['linked']['onDemandVideos.v1'][0]
videos = VideosV1.from_json(dom)
video_content = {}
if resolution in videos:
source = videos[resolution]
logging.debug('Proceeding with download of resolution %s of <%s>.',
resolution, video_id)
else:
source = videos.get_best()
logging.warning(
'Requested resolution %s not available for <%s>. '
'Downloading highest resolution (%s) available instead.',
resolution, video_id, source.resolution)
video_content['mp4'] = source.mp4_video_url
subtitle_link = self._extract_subtitles_from_video_dom(
dom, subtitle_language, video_id)
for key, value in iteritems(subtitle_link):
video_content[key] = value
lecture_video_content = {}
for key, value in iteritems(video_content):
lecture_video_content[key] = [(value, '')]
return lecture_video_content
def _extract_subtitles_from_video_dom(self, video_dom,
subtitle_language, video_id):
# subtitles and transcripts
subtitle_nodes = [
('subtitles', 'srt', 'subtitle'),
('subtitlesTxt', 'txt', 'transcript'),
]
subtitle_set_download = set()
subtitle_set_nonexist = set()
subtitle_links = {}
for (subtitle_node, subtitle_extension, subtitle_description) \
in subtitle_nodes:
logging.debug('Gathering %s URLs for video_id <%s>.',
subtitle_description, video_id)
subtitles = video_dom.get(subtitle_node)
download_all_subtitle = False
if subtitles is not None:
subtitles_set = set(subtitles)
requested_subtitle_list = [s.strip() for s in
subtitle_language.split(",")]
for language_with_alts in requested_subtitle_list:
if download_all_subtitle:
break
grouped_language_list = [l.strip() for l in
language_with_alts.split("|")]
for language in grouped_language_list:
if language == "all":
download_all_subtitle = True
break
elif language in subtitles_set:
subtitle_set_download.update([language])
break
else:
subtitle_set_nonexist.update([language])
if download_all_subtitle and subtitles is not None:
subtitle_set_download = set(subtitles)
if not download_all_subtitle and subtitle_set_nonexist:
logging.warning("%s unavailable in '%s' language for video "
"with video id: [%s],"
"%s", subtitle_description.capitalize(),
", ".join(subtitle_set_nonexist), video_id,
subtitle_description)
if not subtitle_set_download:
logging.warning("%s all requested subtitles are unavailable,"
"with video id: [%s], falling back to 'en' "
"%s", subtitle_description.capitalize(),
video_id,
subtitle_description)
subtitle_set_download = set(['en'])
for current_subtitle_language in subtitle_set_download:
subtitle_url = subtitles.get(current_subtitle_language)
if subtitle_url is not None:
# some subtitle urls are relative!
subtitle_links[
"%s.%s" % (current_subtitle_language,
subtitle_extension)
] = make_coursera_absolute_url(subtitle_url)
return subtitle_links
def extract_links_from_programming_immediate_instructions(self, element_id):
"""
Return a dictionary with links to supplement files (pdf, csv, zip,
ipynb, html and so on) extracted from graded programming assignment.
@param element_id: Element ID to extract files from.
@type element_id: str
@return: @see CourseraOnDemand._extract_links_from_text
"""
logging.debug('Extracting links from programming immediate '
'instructions for element_id <%s>.', element_id)
try:
# Assignment text (instructions) contains asset tags which describe
# supplementary files.
text = ''.join(
self._extract_programming_immediate_instructions_text(element_id))
if not text:
return {}
supplement_links = self._extract_links_from_text(text)
instructions = (IN_MEMORY_MARKER + self._markup_to_html(text),
'instructions')
extend_supplement_links(
supplement_links, {IN_MEMORY_EXTENSION: [instructions]})
return supplement_links
except requests.exceptions.HTTPError as exception:
logging.error('Could not download programming assignment %s: %s',
element_id, exception)
if is_debug_run():
logging.exception('Could not download programming assignment %s: %s',
element_id, exception)
return None
def extract_links_from_programming(self, element_id):
"""
Return a dictionary with links to supplement files (pdf, csv, zip,
ipynb, html and so on) extracted from graded programming assignment.
@param element_id: Element ID to extract files from.
@type element_id: str
@return: @see CourseraOnDemand._extract_links_from_text
"""
logging.debug(
'Gathering supplement URLs for element_id <%s>.', element_id)
try:
# Assignment text (instructions) contains asset tags which describe
# supplementary files.
text = ''.join(self._extract_assignment_text(element_id))
if not text:
return {}
supplement_links = self._extract_links_from_text(text)
instructions = (IN_MEMORY_MARKER + self._markup_to_html(text),
'instructions')
extend_supplement_links(
supplement_links, {IN_MEMORY_EXTENSION: [instructions]})
return supplement_links
except requests.exceptions.HTTPError as exception:
logging.error('Could not download programming assignment %s: %s',
element_id, exception)
if is_debug_run():
logging.exception('Could not download programming assignment %s: %s',
element_id, exception)
return None
def extract_links_from_peer_assignment(self, element_id):
"""
Return a dictionary with links to supplement files (pdf, csv, zip,
ipynb, html and so on) extracted from peer assignment.
@param element_id: Element ID to extract files from.
@type element_id: str
@return: @see CourseraOnDemand._extract_links_from_text
"""
logging.debug(
'Gathering supplement URLs for element_id <%s>.', element_id)
try:
# Assignment text (instructions) contains asset tags which describe
# supplementary files.
text = ''.join(self._extract_peer_assignment_text(element_id))
if not text:
return {}
supplement_links = self._extract_links_from_text(text)
instructions = (IN_MEMORY_MARKER + self._markup_to_html(text),
'peer_assignment_instructions')
extend_supplement_links(
supplement_links, {IN_MEMORY_EXTENSION: [instructions]})
return supplement_links
except requests.exceptions.HTTPError as exception:
logging.error('Could not download peer assignment %s: %s',
element_id, exception)
if is_debug_run():
logging.exception('Could not download peer assignment %s: %s',
element_id, exception)
return None
def extract_links_from_supplement(self, element_id):
"""
Return a dictionary with supplement files (pdf, csv, zip, ipynb, html
and so on) extracted from supplement page.
@return: @see CourseraOnDemand._extract_links_from_text
"""
logging.debug(
'Gathering supplement URLs for element_id <%s>.', element_id)
try:
dom = get_page(self._session, OPENCOURSE_SUPPLEMENT_URL,
json=True,
course_id=self._course_id,
element_id=element_id)
supplement_content = {}
# Supplement content has structure as follows:
# 'linked' {
# 'openCourseAssets.v1' [ {
# 'definition' {
# 'value'
for asset in dom['linked']['openCourseAssets.v1']:
value = asset['definition']['value']
# Supplement lecture types are known to contain both <asset> tags
# and <a href> tags (depending on the course), so we extract
# both of them.
extend_supplement_links(
supplement_content, self._extract_links_from_text(value))
instructions = (IN_MEMORY_MARKER + self._markup_to_html(value),
'instructions')
extend_supplement_links(
supplement_content, {IN_MEMORY_EXTENSION: [instructions]})
return supplement_content
except requests.exceptions.HTTPError as exception:
logging.error('Could not download supplement %s: %s',
element_id, exception)
if is_debug_run():
logging.exception('Could not download supplement %s: %s',
element_id, exception)
return None
def _extract_asset_tags(self, text):
"""
Extract asset tags from text into a convenient form.
@param text: Text to extract asset tags from. This text contains HTML
code that is parsed by BeautifulSoup.
@type text: str
@return: Asset map.
@rtype: {
'<id>': {
'name': '<name>',
'extension': '<extension>'
},
...
}
"""
soup = BeautifulSoup(text)
asset_tags_map = {}
for asset in soup.find_all('asset'):
asset_tags_map[asset['id']] = {'name': asset['name'],
'extension': asset['extension']}
return asset_tags_map
def _extract_asset_urls(self, asset_ids):
"""
Extract asset URLs along with asset ids.
@param asset_ids: List of ids to get URLs for.
@type assertn: [str]
@return: List of dictionaries with asset URLs and ids.
@rtype: [{
'id': '<id>',
'url': '<url>'
}]
"""
dom = get_page(self._session, OPENCOURSE_ASSET_URL,
json=True,
ids=quote_plus(','.join(asset_ids)))
return [{'id': element['id'],
'url': element['url'].strip()}
for element in dom['elements']]
def extract_references_poll(self):
try:
dom = get_page(self._session,
OPENCOURSE_REFERENCES_POLL_URL.format(
course_id=self._course_id),
json=True
)
logging.info('Downloaded resource poll (%d bytes)', len(dom))
return dom['elements']
except requests.exceptions.HTTPError as exception:
logging.error('Could not download resource section: %s',
exception)
if is_debug_run():
logging.exception('Could not download resource section: %s',
exception)
return None
def extract_links_from_reference(self, short_id):
"""
Return a dictionary with supplement files (pdf, csv, zip, ipynb, html
and so on) extracted from supplement page.
@return: @see CourseraOnDemand._extract_links_from_text
"""
logging.debug('Gathering resource URLs for short_id <%s>.', short_id)
try:
dom = get_page(self._session, OPENCOURSE_REFERENCE_ITEM_URL,
json=True,
course_id=self._course_id,
short_id=short_id)
resource_content = {}
# Supplement content has structure as follows:
# 'linked' {
# 'openCourseAssets.v1' [ {
# 'definition' {
# 'value'
for asset in dom['linked']['openCourseAssets.v1']:
value = asset['definition']['value']
# Supplement lecture types are known to contain both <asset> tags
# and <a href> tags (depending on the course), so we extract
# both of them.
extend_supplement_links(
resource_content, self._extract_links_from_text(value))
instructions = (IN_MEMORY_MARKER + self._markup_to_html(value),
'resources')
extend_supplement_links(
resource_content, {IN_MEMORY_EXTENSION: [instructions]})
return resource_content
except requests.exceptions.HTTPError as exception:
logging.error('Could not download supplement %s: %s',
short_id, exception)
if is_debug_run():
logging.exception('Could not download supplement %s: %s',
short_id, exception)
return None
def _extract_programming_immediate_instructions_text(self, element_id):
"""
Extract assignment text (instructions).
@param element_id: Element id to extract assignment instructions from.
@type element_id: str
@return: List of assignment text (instructions).
@rtype: [str]
"""
dom = get_page(self._session, OPENCOURSE_PROGRAMMING_IMMEDIATE_INSTRUCTIOINS_URL,
json=True,
course_id=self._course_id,
element_id=element_id)
return [element['assignmentInstructions']['definition']['value']
for element in dom['elements']]
def _extract_notebook_text(self, element_id):
"""
Extract notebook text (instructions).
@param element_id: Element id to extract notebook links.
@type element_id: str
@return: Notebook URL.
@rtype: [str]
"""
headers = self._auth_headers_with_json()
data = {'courseId': self._course_id,
'learnerId': self._user_id, 'itemId': element_id}
dom = get_page(self._session, OPENCOURSE_NOTEBOOK_LAUNCHES,
post=True,
json=True,
user_id=self._user_id,
course_id=self._course_id,
headers=headers,
element_id=element_id,
data=json.dumps(data)
)
# Return authorization id. This id changes on each request
return dom['elements'][0]['authorizationId']
def _extract_assignment_text(self, element_id):
"""
Extract assignment text (instructions).
@param element_id: Element id to extract assignment instructions from.
@type element_id: str
@return: List of assignment text (instructions).
@rtype: [str]
"""
dom = get_page(self._session, OPENCOURSE_PROGRAMMING_ASSIGNMENTS_URL,
json=True,
course_id=self._course_id,
element_id=element_id)
return [element['submissionLearnerSchema']['definition']
['assignmentInstructions']['definition']['value']
for element in dom['elements']]
def _extract_peer_assignment_text(self, element_id):
"""
Extract peer assignment text (instructions).
@param element_id: Element id to extract peer assignment instructions from.
@type element_id: str
@return: List of peer assignment text (instructions).
@rtype: [str]
"""
dom = get_page(self._session, OPENCOURSE_PEER_ASSIGNMENT_INSTRUCTIONS,
json=True,
user_id=self._user_id,
course_id=self._course_id,
element_id=element_id)
result = []
for element in dom['elements']:
# There is only one section with Instructions
if 'introduction' in element['instructions']:
result.append(element['instructions']
['introduction']['definition']['value'])
# But there may be multiple sections in Sections
for section in element['instructions'].get('sections', []):
section_value = section['content']['definition']['value']
section_title = section.get('title')
if section_title is not None:
# If section title is present, put it in the beginning of
# section value as if it was there.
section_value = ('<heading level="3">%s</heading>' %
section_title) + section_value
result.append(section_value)
return result
def _extract_links_from_text(self, text):
"""
Extract supplement links from the html text. Links may be provided
in two ways:
1. <a> tags with href attribute
2. <asset> tags with id attribute (requires additional request
to get the direct URL to the asset file)
@param text: HTML text.
@type text: str
@return: Dictionary with supplement links grouped by extension.
@rtype: {
'<extension1>': [
('<link1>', '<title1>'),
('<link2>', '<title2')
],
'extension2': [
('<link3>', '<title3>'),
('<link4>', '<title4>')
],
...
}
"""
supplement_links = self._extract_links_from_a_tags_in_text(text)
extend_supplement_links(
supplement_links,
self._extract_links_from_asset_tags_in_text(text))
return supplement_links
def _extract_links_from_asset_tags_in_text(self, text):
"""
Scan the text and extract asset tags and links to corresponding
files.
@param text: Page text.
@type text: str
@return: @see CourseraOnDemand._extract_links_from_text
"""
# Extract asset tags from instructions text
asset_tags_map = self._extract_asset_tags(text)
ids = list(iterkeys(asset_tags_map))
if not ids:
return {}
# asset tags contain asset names and ids. We need to make another
# HTTP request to get asset URL.
asset_urls = self._extract_asset_urls(ids)
supplement_links = {}
# Build supplement links, providing nice titles along the way
for asset in asset_urls:
title = clean_filename(
asset_tags_map[asset['id']]['name'],
self._unrestricted_filenames)
extension = clean_filename(
asset_tags_map[asset['id']]['extension'].strip(),
self._unrestricted_filenames)
url = asset['url'].strip()
if extension not in supplement_links:
supplement_links[extension] = []
supplement_links[extension].append((url, title))
return supplement_links
def _extract_links_from_a_tags_in_text(self, text):
"""
Extract supplement links from the html text that contains <a> tags
with href attribute.
@param text: HTML text.
@type text: str
@return: Dictionary with supplement links grouped by extension.
@rtype: {
'<extension1>': [
('<link1>', '<title1>'),
('<link2>', '<title2')
],
'extension2': [
('<link3>', '<title3>'),
('<link4>', '<title4>')
]
}
"""
soup = BeautifulSoup(text)
links = [item['href'].strip()
for item in soup.find_all('a') if 'href' in item.attrs]
links = sorted(list(set(links)))
supplement_links = {}
for link in links:
filename, extension = os.path.splitext(clean_url(link))
# Some courses put links to sites in supplement section, e.g.:
# http://pandas.pydata.org/
if extension is '':
continue
# Make lowercase and cut the leading/trailing dot
extension = clean_filename(
extension.lower().strip('.').strip(),
self._unrestricted_filenames)
basename = clean_filename(
os.path.basename(filename),
self._unrestricted_filenames)
if extension not in supplement_links:
supplement_links[extension] = []
# Putting basename into the second slot of the tuple is important
# because that will allow to download many supplements within a
# single lecture, e.g.:
# 01_slides-presented-in-this-module.pdf
# 01_slides-presented-in-this-module_Dalal-cvpr05.pdf
# 01_slides-presented-in-this-module_LM-3dtexton.pdf
supplement_links[extension].append((link, basename))
return supplement_links
| 60,036
|
Python
|
.py
| 1,319
| 31.946171
| 142
| 0.548754
|
coursera-dl/coursera-dl
| 9,343
| 2,207
| 211
|
LGPL-3.0
|
9/5/2024, 5:11:58 PM (Europe/Amsterdam)
|
14,527
|
playlist.py
|
coursera-dl_coursera-dl/coursera/playlist.py
|
import os
import glob
def create_m3u_playlist(section_dir):
    """
    Create an M3U playlist with the contents of `section_dir`/*.mp4 for
    every directory under `section_dir` (including itself). Each playlist
    is written into the directory whose videos it lists and is named after
    that directory.
    @param section_dir: Path where to scan for *.mp4 files.
    @type section_dir: str
    """
    for dir_path, _subdirs, _files in os.walk(section_dir):
        # Build paths explicitly instead of the previous os.chdir() dance:
        # chdir mutates process-wide state, is not exception-safe, and
        # breaks os.walk when `section_dir` is a relative path.
        videos = sorted(os.path.basename(entry) for entry in
                        glob.glob(os.path.join(dir_path, "*.mp4")))
        if videos:
            playlist_name = os.path.split(dir_path)[1] + ".m3u"
            with open(os.path.join(dir_path, playlist_name), "w") as m3u:
                for video in videos:
                    m3u.write(video + "\n")
| 733
|
Python
|
.py
| 20
| 28.95
| 74
| 0.60396
|
coursera-dl/coursera-dl
| 9,343
| 2,207
| 211
|
LGPL-3.0
|
9/5/2024, 5:11:58 PM (Europe/Amsterdam)
|
14,528
|
utils.py
|
coursera-dl_coursera-dl/coursera/utils.py
|
# -*- coding: utf-8 -*-
"""
This module provides utility functions that are used within the script.
"""
import os
import re
import sys
import time
import json
import errno
import random
import string
import logging
import datetime
from bs4 import BeautifulSoup as BeautifulSoup_
from xml.sax.saxutils import escape, unescape
import six
from six import iteritems
from six.moves import html_parser
from six.moves.urllib.parse import ParseResult
from six.moves.urllib_parse import unquote_plus
# six.moves doesn’t support urlparse
if six.PY3: # pragma: no cover
from urllib.parse import urlparse, urljoin
else:
from urlparse import urlparse, urljoin
# Python3 (and six) don't provide string
if six.PY3:
from string import ascii_letters as string_ascii_letters
from string import digits as string_digits
else:
from string import letters as string_ascii_letters
from string import digits as string_digits
from .define import COURSERA_URL, WINDOWS_UNC_PREFIX
# Force us of bs4 with html.parser
def BeautifulSoup(page): return BeautifulSoup_(page, 'html.parser')
if six.PY2:
def decode_input(x):
stdin_encoding = sys.stdin.encoding
if stdin_encoding is None:
stdin_encoding = "UTF-8"
return x.decode(stdin_encoding)
else:
def decode_input(x):
return x
def spit_json(obj, filename):
    """Serialize `obj` as pretty-printed (indent=4) JSON into `filename`."""
    with open(filename, 'w') as destination:
        destination.write(json.dumps(obj, indent=4))
def slurp_json(filename):
    """Read `filename` and return its deserialized JSON contents."""
    with open(filename) as source:
        return json.loads(source.read())
def is_debug_run():
    """
    Check whether we're running with DEBUG loglevel.
    @return: True if running with DEBUG loglevel.
    @rtype: bool
    """
    root_logger = logging.getLogger()
    return root_logger.isEnabledFor(logging.DEBUG)
def random_string(length):
    """
    Return a pseudo-random alphanumeric string of specified length.
    @param length: Desired length.
    @type length: int
    @return: String of ASCII letters and digits (not crypto-safe: uses
        `random`, fine for filenames but not for secrets).
    @rtype: str
    """
    # `string.ascii_letters` and `string.digits` exist on both Python 2
    # and 3, so the module-level py2 compatibility aliases
    # (`string_ascii_letters`/`string_digits`) are unnecessary here.
    valid_chars = string.ascii_letters + string.digits
    return ''.join(random.choice(valid_chars) for _ in range(length))
# Taken from: https://wiki.python.org/moin/EscapingHtml
# escape() and unescape() takes care of &, < and >.
# Extra entities handled on top of xml.sax.saxutils defaults.
HTML_ESCAPE_TABLE = {
    '"': """,
    "'": "'"
}
# Reverse mapping (entity -> character) used when unescaping.
HTML_UNESCAPE_TABLE = dict((v, k) for k, v in HTML_ESCAPE_TABLE.items())
def unescape_html(s):
    """Decode HTML entities and URL-encoded ('+'/%XX) sequences in `s`."""
    h = html_parser.HTMLParser()
    s = h.unescape(s)
    s = unquote_plus(s)
    return unescape(s, HTML_UNESCAPE_TABLE)
def clean_filename(s, minimal_change=False):
    """
    Sanitize a string to be used as a filename.
    If minimal_change is set to true, then we only strip the bare minimum of
    characters that are problematic for filesystems (namely, ':', '/' and
    '\x00', '\n').
    @param s: Candidate file name.
    @type s: str
    @param minimal_change: Keep as much of the original name as possible.
    @type minimal_change: bool
    @return: Sanitized file name.
    @rtype: str
    """
    # First, deal with URL encoded strings
    h = html_parser.HTMLParser()
    s = h.unescape(s)
    s = unquote_plus(s)
    # Strip forbidden characters
    # https://msdn.microsoft.com/en-us/library/windows/desktop/aa365247(v=vs.85).aspx
    s = (
        s.replace(':', '-')
        .replace('/', '-')
        .replace('<', '-')
        .replace('>', '-')
        .replace('"', '-')
        .replace('\\', '-')
        .replace('|', '-')
        .replace('?', '-')
        .replace('*', '-')
        .replace('\x00', '-')
        .replace('\n', ' ')
    )
    # Remove trailing dots and spaces; forbidden on Windows
    s = s.rstrip(' .')
    if minimal_change:
        return s
    # Aggressive mode: drop parentheses, turn spaces into underscores and
    # keep only a conservative ASCII whitelist.
    s = s.replace('(', '').replace(')', '')
    s = s.rstrip('.') # Remove excess of trailing dots
    s = s.strip().replace(' ', '_')
    valid_chars = '-_.()%s%s' % (string.ascii_letters, string.digits)
    return ''.join(c for c in s if c in valid_chars)
def normalize_path(path):
    """
    Normalizes path on Windows OS. This means prepending the Windows UNC
    prefix to the path to get access to the Win32 device namespace instead
    of the Win32 file namespace.
    See https://msdn.microsoft.com/en-us/library/aa365247%28v=vs.85%29.aspx#maxpath

    @param path: Path to normalize.
    @type path: str
    @return: Normalized path.
    @rtype str
    """
    needs_prefix = (sys.platform == 'win32'
                    and not path.startswith(WINDOWS_UNC_PREFIX))
    if needs_prefix:
        return WINDOWS_UNC_PREFIX + os.path.abspath(path)
    # Non-Windows platforms, or already-prefixed paths, pass through.
    return path
def get_anchor_format(a):
    """
    Extract the resource file-type format from the anchor.

    Matches either a file extension ("...download.mp4?...") or an explicit
    format parameter ("...format=txt") at the end of the anchor.

    @return: The format string, or None when no format can be found.
    """
    # (. or format=) then (file_extension) then (? or $)
    match = re.search(r"(?:\.|format=)(\w+)(?:\?.*)?$", a)
    if match is None:
        return None
    return match.group(1)
def mkdir_p(path, mode=0o777):
    """
    Create subdirectory hierarchy given in the paths argument.

    Behaves like `mkdir -p`: an already-existing directory is accepted
    silently; any other OS-level failure propagates to the caller.
    """
    try:
        os.makedirs(path, mode)
    except OSError as exc:
        # Re-raise everything except "directory already exists".
        if exc.errno != errno.EEXIST or not os.path.isdir(path):
            raise
def clean_url(url):
    """
    Remove params, query and fragment parts from URL so that `os.path.basename`
    and `os.path.splitext` can work correctly.

    @param url: URL to clean.
    @type url: str
    @return: Cleaned URL.
    @rtype: str
    """
    pieces = urlparse(url.strip())
    stripped = ParseResult(pieces.scheme, pieces.netloc, pieces.path,
                           params='', query='', fragment='')
    return stripped.geturl()
def fix_url(url):
    """
    Strip whitespace characters from the beginning and the end of the url
    and add a default scheme.

    @return: None for None input; otherwise the trimmed URL with an
        "http://" scheme prepended when no scheme is present.
    """
    if url is None:
        return None
    trimmed = url.strip()
    if trimmed and not urlparse(trimmed).scheme:
        return "http://" + trimmed
    return trimmed
def is_course_complete(last_update):
    """
    Determine whether the course is likely to have been terminated or not.

    We return True if the timestamp given by last_update is 30 days or older
    than today's date. Otherwise, we return False, since it is probably too
    soon to declare the course complete.

    The intended use case for this is to detect if a given course has not
    seen any update in the last 30 days or more.

    @param last_update: Unix timestamp of the last course update; negative
        values mean "unknown" and yield False.
    @type last_update: int or float
    @return: True when the course looks complete (no updates in 30+ days).
    @rtype: bool
    """
    rv = False
    if last_update >= 0:
        delta = time.time() - last_update
        max_delta = total_seconds(datetime.timedelta(days=30))
        # Older than the 30-day window: consider the course finished.
        if delta > max_delta:
            rv = True
    return rv
def total_seconds(td):
    """
    Compute total seconds for a timedelta.

    Added for backward compatibility, pre 2.7 (timedelta only grew a
    total_seconds() method in Python 2.7). Truncates to whole seconds.
    """
    microseconds = td.microseconds + (td.seconds + td.days * 24 * 3600) * 10 ** 6
    return microseconds // 10 ** 6
def make_coursera_absolute_url(url):
    """
    If given url is relative adds coursera netloc,
    otherwise returns it without any changes.
    """
    is_absolute = bool(urlparse(url).netloc)
    return url if is_absolute else urljoin(COURSERA_URL, url)
def extend_supplement_links(destination, source):
    """
    Extends (merges) destination dictionary with supplement_links
    from source dictionary. Values are expected to be lists, or any
    data structure that has `extend` method.

    @param destination: Destination dictionary that will be extended.
    @type destination: @see CourseraOnDemand._extract_links_from_text
    @param source: Source dictionary that will be used to extend
        destination dictionary.
    @type source: @see CourseraOnDemand._extract_links_from_text
    """
    for key, value in source.items():
        if key in destination:
            # Key already present: append the new entries in place.
            destination[key].extend(value)
        else:
            destination[key] = value
def print_ssl_error_message(exception):
    """
    Print SSLError message with URL to instructions on how to fix it.

    @param exception: Exception that triggered the banner; its type name
        and text are interpolated into the message below.
    """
    # Deliberately loud banner: SSL handshake failures are a common,
    # user-fixable setup problem (see the linked README section).
    message = """
#####################################################################
# ATTENTION! PLEASE READ THIS!
#
# The following error has just occurred:
# %s %s
#
# Please read instructions on how to fix this error here:
# https://github.com/coursera-dl/coursera-dl#sslerror-errno-1-_sslc504-error14094410ssl-routinesssl3_read_bytessslv3-alert-handshake-failure
#####################################################################
""" % (type(exception).__name__, str(exception))
    logging.error(message)
| 8,099
|
Python
|
.py
| 234
| 29.547009
| 140
| 0.654557
|
coursera-dl/coursera-dl
| 9,343
| 2,207
| 211
|
LGPL-3.0
|
9/5/2024, 5:11:58 PM (Europe/Amsterdam)
|
14,529
|
coursera_dl.py
|
coursera-dl_coursera-dl/coursera/coursera_dl.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Authors and copyright:
# © 2012-2013, John Lehmann (first last at geemail dotcom or @jplehmann)
# © 2012-2020, Rogério Theodoro de Brito
# © 2013, Jonas De Taeye (first dt at fastmail fm)
#
# Contributions are welcome, but please add new unit tests to test your changes
# and/or features. Also, please try to make changes platform independent and
# backward compatible.
#
# Legalese:
#
# This program is free software: you can redistribute it and/or modify it
# under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or (at your
# option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
Module for downloading lecture resources such as videos for Coursera classes.
Given a class name, username and password, it scrapes the course listing
page to get the section (week) and lecture names, and then downloads the
related materials into appropriately named files and directories.
Examples:
coursera-dl -u <user> -p <passwd> saas
coursera-dl -u <user> -p <passwd> -l listing.html -o saas --skip-download
For further documentation and examples, visit the project's home at:
https://github.com/coursera-dl/coursera
"""
import json
import logging
import os
import re
import time
import shutil
from distutils.version import LooseVersion as V
# Test versions of some critical modules.
# We may, perhaps, want to move these elsewhere.
import bs4
import six
import requests
from .cookies import (
AuthenticationFailed, ClassNotFound,
get_cookies_for_class, make_cookie_values, TLSAdapter, login)
from .define import (CLASS_URL, ABOUT_URL, PATH_CACHE)
from .downloaders import get_downloader
from .workflow import CourseraDownloader
from .parallel import ConsecutiveDownloader, ParallelDownloader
from .utils import (clean_filename, get_anchor_format, mkdir_p, fix_url,
print_ssl_error_message,
decode_input, BeautifulSoup, is_debug_run,
spit_json, slurp_json)
from .api import expand_specializations
from .network import get_page, get_page_and_url
from .commandline import parse_args
from .extractors import CourseraExtractor
from coursera import __version__
# URL containing information about outdated modules
_SEE_URL = " See https://github.com/coursera-dl/coursera/issues/139"
# Fail fast at import time when critical third-party modules are too old.
# NOTE(review): `assert` is stripped under `python -O`, so these checks
# silently disappear in optimized runs.
assert V(requests.__version__) >= V('2.4'), "Upgrade requests!" + _SEE_URL
assert V(six.__version__) >= V('1.5'), "Upgrade six!" + _SEE_URL
assert V(bs4.__version__) >= V('4.1'), "Upgrade bs4!" + _SEE_URL
def get_session():
    """
    Create a requests session that uses the TLS v1.2 adapter for all
    HTTPS traffic.
    """
    sess = requests.Session()
    sess.mount('https://', TLSAdapter())
    return sess
def list_courses(args):
    """
    List enrolled courses.

    @param args: Command-line arguments.
    @type args: namedtuple
    """
    session = get_session()
    login(session, args.username, args.password)
    enrolled = CourseraExtractor(session).list_courses()
    logging.info('Found %d courses', len(enrolled))
    for course in enrolled:
        logging.info(course)
def download_on_demand_class(session, args, class_name):
    """
    Download all requested resources from the on-demand class given
    in class_name.

    @param session: Authenticated requests session.
    @param args: Parsed command-line arguments.
    @param class_name: Course slug to download.
    @type class_name: str
    @return: Tuple of (bool, bool), where the first bool indicates whether
        errors occurred while parsing syllabus, the second bool indicates
        whether the course appears to be completed.
    @rtype: (bool, bool)
    """
    error_occurred = False
    extractor = CourseraExtractor(session)

    cached_syllabus_filename = '%s-syllabus-parsed.json' % class_name
    if args.cache_syllabus and os.path.isfile(cached_syllabus_filename):
        modules = slurp_json(cached_syllabus_filename)
    else:
        error_occurred, modules = extractor.get_modules(
            class_name,
            args.reverse,
            args.unrestricted_filenames,
            args.subtitle_language,
            args.video_resolution,
            args.download_quizzes,
            args.mathjax_cdn_url,
            args.download_notebooks
        )

    # BUG FIX: the original tested `is_debug_run or args.cache_syllabus()`,
    # i.e. it used the function object (always truthy, so the cache file was
    # written unconditionally) and *called* the boolean flag. The flag is
    # the bool; is_debug_run is the callable.
    if is_debug_run() or args.cache_syllabus:
        spit_json(modules, cached_syllabus_filename)

    if args.only_syllabus:
        return error_occurred, False

    downloader = get_downloader(session, class_name, args)
    downloader_wrapper = ParallelDownloader(downloader, args.jobs) \
        if args.jobs > 1 else ConsecutiveDownloader(downloader)

    # obtain the resources
    ignored_formats = []
    if args.ignore_formats:
        ignored_formats = args.ignore_formats.split(",")

    course_downloader = CourseraDownloader(
        downloader_wrapper,
        commandline_args=args,
        class_name=class_name,
        path=args.path,
        ignored_formats=ignored_formats,
        disable_url_skipping=args.disable_url_skipping
    )

    completed = course_downloader.download_modules(modules)

    # Print skipped URLs if any
    if course_downloader.skipped_urls:
        print_skipped_urls(course_downloader.skipped_urls)

    # Print failed URLs if any
    # FIXME: should we set non-zero exit code if we have failed URLs?
    if course_downloader.failed_urls:
        print_failed_urls(course_downloader.failed_urls)

    return error_occurred, completed
def print_skipped_urls(skipped_urls):
    """Log the URLs that were skipped and not downloaded."""
    logging.info('The following URLs (%d) have been skipped and not '
                 'downloaded:', len(skipped_urls))
    logging.info('(if you want to download these URLs anyway, please '
                 'add "--disable-url-skipping" option)')
    separator = '-' * 80
    logging.info(separator)
    for skipped in skipped_urls:
        logging.info(skipped)
    logging.info(separator)
def print_failed_urls(failed_urls):
    """Log the URLs whose download failed."""
    logging.info('The following URLs (%d) could not be downloaded:',
                 len(failed_urls))
    separator = '-' * 80
    logging.info(separator)
    for failed in failed_urls:
        logging.info(failed)
    logging.info(separator)
def download_class(session, args, class_name):
    """
    Try to download on-demand class.

    Thin wrapper around download_on_demand_class(); presumably kept as a
    separate entry point from the era when multiple class styles
    (session-based vs on-demand) existed — verify against project history.

    @return: Tuple of (bool, bool), where the first bool indicates whether
        errors occurred while parsing syllabus, the second bool indicates
        whether the course appears to be completed.
    @rtype: (bool, bool)
    """
    logging.debug('Downloading new style (on demand) class %s', class_name)
    return download_on_demand_class(session, args, class_name)
def main():
    """
    Main entry point for execution as a program (instead of as a module).

    Parses arguments, authenticates, downloads every requested class in
    sequence (sleeping between courses), then reports which classes look
    completed and which had syllabus-parsing errors.
    """
    args = parse_args()
    logging.info('coursera_dl version %s', __version__)
    completed_classes = []
    classes_with_errors = []
    # The cache directory must exist before login; 0o700 keeps it private.
    mkdir_p(PATH_CACHE, 0o700)
    if args.clear_cache:
        shutil.rmtree(PATH_CACHE)
    if args.list_courses:
        logging.info('Listing enrolled courses')
        list_courses(args)
        return
    session = get_session()
    # A CAUTH cookie supplied on the command line bypasses the login flow.
    if args.cookies_cauth:
        session.cookies.set('CAUTH', args.cookies_cauth)
    else:
        login(session, args.username, args.password)
    if args.specialization:
        args.class_names = expand_specializations(session, args.class_names)
    for class_index, class_name in enumerate(args.class_names):
        try:
            logging.info('Downloading class: %s (%d / %d)',
                         class_name, class_index + 1, len(args.class_names))
            error_occurred, completed = download_class(
                session, args, class_name)
            if completed:
                completed_classes.append(class_name)
            if error_occurred:
                classes_with_errors.append(class_name)
        # Per-course failures are logged and the loop moves on to the
        # next course rather than aborting the whole run.
        except requests.exceptions.HTTPError as e:
            logging.error('HTTPError %s', e)
            if is_debug_run():
                logging.exception('HTTPError %s', e)
        except requests.exceptions.SSLError as e:
            logging.error('SSLError %s', e)
            print_ssl_error_message(e)
            if is_debug_run():
                raise
        except ClassNotFound as e:
            logging.error('Could not find class: %s', e)
        except AuthenticationFailed as e:
            logging.error('Could not authenticate: %s', e)
        # Throttle between courses (but not after the last one).
        if class_index + 1 != len(args.class_names):
            logging.info('Sleeping for %d seconds before downloading next course. '
                         'You can change this with --download-delay option.',
                         args.download_delay)
            time.sleep(args.download_delay)
    if completed_classes:
        logging.info('-' * 80)
        logging.info(
            "Classes which appear completed: " + " ".join(completed_classes))
    if classes_with_errors:
        logging.info('-' * 80)
        logging.info('The following classes had errors during the syllabus'
                     ' parsing stage. You may want to review error messages and'
                     ' courses (sometimes enrolling to the course or switching'
                     ' session helps):')
        for class_name in classes_with_errors:
            logging.info('%s (https://www.coursera.org/learn/%s)',
                         class_name, class_name)
if __name__ == '__main__':
    main()
| 9,648
|
Python
|
.py
| 233
| 34.669528
| 83
| 0.675112
|
coursera-dl/coursera-dl
| 9,343
| 2,207
| 211
|
LGPL-3.0
|
9/5/2024, 5:11:58 PM (Europe/Amsterdam)
|
14,530
|
commandline.py
|
coursera-dl_coursera-dl/coursera/commandline.py
|
"""
This module contains code that is related to command-line argument
handling. The primary candidate is argument parser.
"""
import os
import sys
import logging
import configargparse as argparse
from coursera import __version__
from .credentials import get_credentials, CredentialsError, keyring
from .utils import decode_input
LOCAL_CONF_FILE_NAME = 'coursera-dl.conf'
def class_name_arg_required(args):
    """
    Evaluates whether class_name arg is required.

    @param args: Command-line arguments.
    @type args: namedtuple
    @return: True unless a flag that needs no class name
        (--list-courses or --version) was given.
    @rtype: bool
    """
    standalone_flags = ('list_courses', 'version')
    for flag in standalone_flags:
        if getattr(args, flag):
            return False
    return True
def parse_args(args=None):
    """
    Parse the arguments/options passed to the program on the command line.

    Also initializes logging, validates option combinations and resolves
    credentials (netrc / keyring / prompt) as side effects.

    FIXES: user-facing help-text typos ("seperate" -> "separate",
    "Jupyther" -> "Jupyter") and missing spaces at the junctions of the
    adjacent string literals in the --subtitle-language help.

    @param args: Argument list to parse; None means use sys.argv.
    @type args: list of str or None
    @return: Parsed and post-processed arguments namespace.
    """
    parse_kwargs = {
        "description": 'Download Coursera.org lecture material and resources.'
    }

    # A local config file, when present, supplies per-directory defaults.
    conf_file_path = os.path.join(os.getcwd(), LOCAL_CONF_FILE_NAME)
    if os.path.isfile(conf_file_path):
        parse_kwargs["default_config_files"] = [conf_file_path]
    parser = argparse.ArgParser(**parse_kwargs)

    # Basic options
    group_basic = parser.add_argument_group('Basic options')

    group_basic.add_argument(
        'class_names',
        action='store',
        nargs='*',
        help='name(s) of the class(es) (e.g. "ml-005")')

    group_basic.add_argument(
        '-u',
        '--username',
        dest='username',
        action='store',
        default=None,
        help='username (email) that you use to login to Coursera')

    group_basic.add_argument(
        '-p',
        '--password',
        dest='password',
        action='store',
        default=None,
        help='coursera password')

    group_basic.add_argument(
        '--jobs',
        dest='jobs',
        action='store',
        default=1,
        type=int,
        help='number of parallel jobs to use for '
             'downloading resources. (Default: 1)')

    group_basic.add_argument(
        '--download-delay',
        dest='download_delay',
        action='store',
        default=60,
        type=int,
        help='number of seconds to wait before downloading '
             'next course. (Default: 60)')

    group_basic.add_argument(
        '-b',  # FIXME: kill this one-letter option
        '--preview',
        dest='preview',
        action='store_true',
        default=False,
        help='get videos from preview pages. (Default: False)')

    group_basic.add_argument(
        '--path',
        dest='path',
        action='store',
        default='',
        help='path to where to save the file. (Default: current directory)')

    group_basic.add_argument(
        '-sl',  # FIXME: deprecate this option
        '--subtitle-language',
        dest='subtitle_language',
        action='store',
        default='all',
        help='Choose language to download subtitles and transcripts. '
             '(Default: all) Use special value "all" to download all available. '
             'To download subtitles and transcripts of multiple languages, '
             'use comma(s) (without spaces) to separate the names of the languages, '
             'i.e., "en,zh-CN". '
             'To download subtitles and transcripts of alternative language(s) '
             'if only the current language is not available, '
             'put an "|<lang>" for each of the alternative languages after '
             'the current language, i.e., "en|fr,zh-CN|zh-TW|de", and make sure '
             'the parameter is wrapped with quotes when "|" presents.'
    )

    # Selection of material to download
    group_material = parser.add_argument_group(
        'Selection of material to download')

    group_material.add_argument(
        '--specialization',
        dest='specialization',
        action='store_true',
        default=False,
        help='treat given class names as specialization names and try to '
             'download its courses, if available. Note that there are name '
             'clashes, e.g. "machine-learning" is both a course and a '
             'specialization (Default: False)')

    group_material.add_argument(
        '--only-syllabus',
        dest='only_syllabus',
        action='store_true',
        default=False,
        help='download only syllabus, skip course content. '
             '(Default: False)')

    group_material.add_argument(
        '--download-quizzes',
        dest='download_quizzes',
        action='store_true',
        default=False,
        help='download quiz and exam questions. (Default: False)')

    group_material.add_argument(
        '--download-notebooks',
        dest='download_notebooks',
        action='store_true',
        default=False,
        help='download Python Jupyter Notebooks. (Default: False)')

    group_material.add_argument(
        '--about',  # FIXME: should be --about-course
        dest='about',
        action='store_true',
        default=False,
        help='download "about" metadata. (Default: False)')

    group_material.add_argument(
        '-f',
        '--formats',
        dest='file_formats',
        action='store',
        default='all',
        help='file format extensions to be downloaded in'
             ' quotes space separated, e.g. "mp4 pdf" '
             '(default: special value "all")')

    group_material.add_argument(
        '--ignore-formats',
        dest='ignore_formats',
        action='store',
        default=None,
        help='file format extensions of resources to ignore'
             ' (default: None)')

    group_material.add_argument(
        '-sf',  # FIXME: deprecate this option
        '--section_filter',
        dest='section_filter',
        action='store',
        default=None,
        help='only download sections which contain this'
             ' regex (default: disabled)')

    group_material.add_argument(
        '-lf',  # FIXME: deprecate this option
        '--lecture_filter',
        dest='lecture_filter',
        action='store',
        default=None,
        help='only download lectures which contain this regex'
             ' (default: disabled)')

    group_material.add_argument(
        '-rf',  # FIXME: deprecate this option
        '--resource_filter',
        dest='resource_filter',
        action='store',
        default=None,
        help='only download resources which match this regex'
             ' (default: disabled)')

    group_material.add_argument(
        '--video-resolution',
        dest='video_resolution',
        action='store',
        default='540p',
        help='video resolution to download (default: 540p); '
             'only valid for on-demand courses; '
             'only values allowed: 360p, 540p, 720p')

    group_material.add_argument(
        '--disable-url-skipping',
        dest='disable_url_skipping',
        action='store_true',
        default=False,
        help='disable URL skipping, all URLs will be '
             'downloaded (default: False)')

    # Parameters related to external downloaders
    group_external_dl = parser.add_argument_group('External downloaders')

    group_external_dl.add_argument(
        '--wget',
        dest='wget',
        action='store',
        nargs='?',
        const='wget',
        default=None,
        help='use wget for downloading,'
             'optionally specify wget bin')
    group_external_dl.add_argument(
        '--curl',
        dest='curl',
        action='store',
        nargs='?',
        const='curl',
        default=None,
        help='use curl for downloading,'
             ' optionally specify curl bin')
    group_external_dl.add_argument(
        '--aria2',
        dest='aria2',
        action='store',
        nargs='?',
        const='aria2c',
        default=None,
        help='use aria2 for downloading,'
             ' optionally specify aria2 bin')
    group_external_dl.add_argument(
        '--axel',
        dest='axel',
        action='store',
        nargs='?',
        const='axel',
        default=None,
        help='use axel for downloading,'
             ' optionally specify axel bin')
    group_external_dl.add_argument(
        '--downloader-arguments',
        dest='downloader_arguments',
        default='',
        help='additional arguments passed to the'
             ' downloader')

    parser.add_argument(
        '--list-courses',
        dest='list_courses',
        action='store_true',
        default=False,
        help='list course names (slugs) and quit. Listed '
             'course names can be put into program arguments')

    parser.add_argument(
        '--resume',
        dest='resume',
        action='store_true',
        default=False,
        help='resume incomplete downloads (default: False)')

    parser.add_argument(
        '-o',
        '--overwrite',
        dest='overwrite',
        action='store_true',
        default=False,
        help='whether existing files should be overwritten'
             ' (default: False)')

    parser.add_argument(
        '--verbose-dirs',
        dest='verbose_dirs',
        action='store_true',
        default=False,
        help='include class name in section directory name')

    parser.add_argument(
        '--quiet',
        dest='quiet',
        action='store_true',
        default=False,
        help='omit as many messages as possible'
             ' (only printing errors)')

    parser.add_argument(
        '-r',
        '--reverse',
        dest='reverse',
        action='store_true',
        default=False,
        help='download sections in reverse order')

    parser.add_argument(
        '--combined-section-lectures-nums',
        dest='combined_section_lectures_nums',
        action='store_true',
        default=False,
        help='include lecture and section name in final files')

    parser.add_argument(
        '--unrestricted-filenames',
        dest='unrestricted_filenames',
        action='store_true',
        default=False,
        help='Do not limit filenames to be ASCII-only')

    # Advanced authentication
    group_adv_auth = parser.add_argument_group(
        'Advanced authentication options')

    group_adv_auth.add_argument(
        '-ca',
        '--cauth',
        dest='cookies_cauth',
        action='store',
        default=None,
        help='cauth cookie value from browser')

    group_adv_auth.add_argument(
        '-c',
        '--cookies_file',
        dest='cookies_file',
        action='store',
        default=None,
        help='full path to the cookies.txt file')

    group_adv_auth.add_argument(
        '-n',
        '--netrc',
        dest='netrc',
        nargs='?',
        action='store',
        const=True,
        default=False,
        help='use netrc for reading passwords, uses default'
             ' location if no path specified')

    group_adv_auth.add_argument(
        '-k',
        '--keyring',
        dest='use_keyring',
        action='store_true',
        default=False,
        help='use keyring provided by operating system to '
             'save and load credentials')

    group_adv_auth.add_argument(
        '--clear-cache',
        dest='clear_cache',
        action='store_true',
        default=False,
        help='clear cached cookies')

    # Advanced miscellaneous options
    group_adv_misc = parser.add_argument_group(
        'Advanced miscellaneous options')

    group_adv_misc.add_argument(
        '--hook',
        dest='hooks',
        action='append',
        default=[],
        help='hooks to run when finished')

    group_adv_misc.add_argument(
        '-pl',
        '--playlist',
        dest='playlist',
        action='store_true',
        default=False,
        help='generate M3U playlists for course weeks')

    group_adv_misc.add_argument(
        '--mathjax-cdn',
        dest='mathjax_cdn_url',
        default='https://cdn.mathjax.org/mathjax/latest/MathJax.js',
        help='the cdn address of MathJax.js'
    )

    # Debug options
    group_debug = parser.add_argument_group('Debugging options')

    group_debug.add_argument(
        '--skip-download',
        dest='skip_download',
        action='store_true',
        default=False,
        help='for debugging: skip actual downloading of files')

    group_debug.add_argument(
        '--debug',
        dest='debug',
        action='store_true',
        default=False,
        help='print lots of debug information')

    group_debug.add_argument(
        '--cache-syllabus',
        dest='cache_syllabus',
        action='store_true',
        default=False,
        help='cache course syllabus into a file')

    group_debug.add_argument(
        '--version',
        dest='version',
        action='store_true',
        default=False,
        help='display version and exit')

    group_debug.add_argument(
        '-l',  # FIXME: remove short option from rarely used ones
        '--process_local_page',
        dest='local_page',
        help='uses or creates local cached version of syllabus'
             ' page')

    # Final parsing of the options
    args = parser.parse_args(args)

    # Initialize the logging system first so that other functions
    # can use it right away
    if args.debug:
        logging.basicConfig(level=logging.DEBUG,
                            format='%(name)s[%(funcName)s] %(message)s')
    elif args.quiet:
        logging.basicConfig(level=logging.ERROR,
                            format='%(name)s: %(message)s')
    else:
        logging.basicConfig(level=logging.INFO,
                            format='%(message)s')

    if class_name_arg_required(args) and not args.class_names:
        parser.print_usage()
        logging.error('You must supply at least one class name')
        sys.exit(1)

    # show version?
    if args.version:
        # we use print (not logging) function because version may be used
        # by some external script while logging may output excessive
        # information
        print(__version__)
        sys.exit(0)

    # turn list of strings into list
    args.downloader_arguments = args.downloader_arguments.split()

    # turn list of strings into list
    args.file_formats = args.file_formats.split()

    # decode path so we can work properly with cyrillic symbols on different
    # versions on Python
    args.path = decode_input(args.path)

    # check arguments
    if args.use_keyring and args.password:
        logging.warning(
            '--keyring and --password cannot be specified together')
        args.use_keyring = False

    if args.use_keyring and not keyring:
        logging.warning('The python module `keyring` not found.')
        args.use_keyring = False

    if args.cookies_file and not os.path.exists(args.cookies_file):
        logging.error('Cookies file not found: %s', args.cookies_file)
        sys.exit(1)

    if not args.cookies_file and not args.cookies_cauth:
        try:
            args.username, args.password = get_credentials(
                username=args.username, password=args.password,
                netrc=args.netrc, use_keyring=args.use_keyring)
        except CredentialsError as e:
            logging.error(e)
            sys.exit(1)

    return args
| 15,039
|
Python
|
.py
| 436
| 26.272936
| 79
| 0.607421
|
coursera-dl/coursera-dl
| 9,343
| 2,207
| 211
|
LGPL-3.0
|
9/5/2024, 5:11:58 PM (Europe/Amsterdam)
|
14,531
|
filtering.py
|
coursera-dl_coursera-dl/coursera/filtering.py
|
"""
This module contains filtering functions.
"""
import re
import logging
from six import iteritems
from six.moves.urllib_parse import urlparse
# These formats are trusted and are not skipped (compiled with re.VERBOSE,
# so embedded whitespace/newlines are ignored by the regex engine).
VALID_FORMATS = r"""^mp4$|
^pdf$|
^.?.?\.?txt$|
^.?.?\.?srt$|
.*txt$|
.*srt$|
^html?$|
^zip$|
^rar$|
^[ct]sv$|
^xlsx$|
^ipynb$|
^json$|
^pptx?$|
^docx?$|
^xls$|
^py$|
^Rmd$|
^Rdata$|
^wf1$"""
# Non simple format contains characters besides letters, numbers, "_" and "-"
NON_SIMPLE_FORMAT = r".*[^a-zA-Z0-9_-]"
# Compiled once at import time; used by skip_format_url() below.
RE_VALID_FORMATS = re.compile(VALID_FORMATS, re.VERBOSE)
RE_NON_SIMPLE_FORMAT = re.compile(NON_SIMPLE_FORMAT)
def skip_format_url(format_, url):
    """
    Checks whether a given format/url should be skipped and not downloaded.

    @param format_: Filename format (extension).
    @type format_: str (e.g. html, txt, zip, pdf)
    @param url: URL.
    @type url: str
    @return: True if format/url should be skipped, False otherwise.
    @rtype bool
    """
    # Do not download empty formats
    if format_ == '':
        return True

    # Do not download email addresses
    if 'mailto:' in url and '@' in url:
        return True

    parsed = urlparse(url)

    # Is this localhost?
    if parsed.hostname == 'localhost':
        return True

    # Trusted, manually curated formats are always kept.
    if RE_VALID_FORMATS.match(format_):
        return False

    # Formats with characters beyond letters, digits, "_" and "-" look
    # suspicious — skip them.
    if RE_NON_SIMPLE_FORMAT.match(format_):
        return True

    # Links to a site root carry no downloadable resource.
    return parsed.path in ('', '/')
def find_resources_to_get(lecture, file_formats, resource_filter, ignored_formats=None):
    """
    Select formats to download.

    @param lecture: Mapping of format name -> list of resources, where each
        resource is a sequence whose first two items are (url, title).
    @param file_formats: Formats requested by the user; the special value
        'all' selects everything.
    @type file_formats: list of str
    @param resource_filter: Optional regex; when given, resources whose
        title does not match are dropped.
    @param ignored_formats: Formats to exclude entirely.
    @type ignored_formats: list of str or None
    @return: List of (format, url, title) tuples to download.
    @rtype: list of tuple
    """
    resources_to_get = []

    if ignored_formats is None:
        ignored_formats = []
    if ignored_formats:
        logging.info("The following file formats will be ignored: " +
                     ",".join(ignored_formats))

    for fmt, resources in lecture.items():
        fmt0 = fmt
        # Compound formats like "a.b" are also matched by the component
        # after the first dot.
        short_fmt = fmt.split('.')[1] if '.' in fmt else None

        if fmt in ignored_formats or (short_fmt is not None
                                      and short_fmt in ignored_formats):
            continue

        if (fmt in file_formats
                or (short_fmt is not None and short_fmt in file_formats)
                or 'all' in file_formats):
            for r in resources:
                if resource_filter and r[1] and not re.search(resource_filter, r[1]):
                    logging.debug('Skipping b/c of rf: %s %s',
                                  resource_filter, r[1])
                    continue
                resources_to_get.append((fmt0, r[0], r[1]))
        else:
            logging.debug(
                'Skipping b/c format %s not in %s', fmt, file_formats)

    return resources_to_get
| 3,270
|
Python
|
.py
| 91
| 25.923077
| 109
| 0.544561
|
coursera-dl/coursera-dl
| 9,343
| 2,207
| 211
|
LGPL-3.0
|
9/5/2024, 5:11:58 PM (Europe/Amsterdam)
|
14,532
|
define.py
|
coursera-dl_coursera-dl/coursera/define.py
|
# -*- coding: utf-8 -*-
"""
This module defines the global constants.
"""
import os
import getpass
import tempfile
# HTTP status code Coursera returns for locked/forbidden content.
HTTP_FORBIDDEN = 403
# Root of Coursera's REST API.
COURSERA_URL = 'https://api.coursera.org'
# Login endpoints (v1 and v3 of the authentication API).
AUTH_URL = 'https://accounts.coursera.org/api/v1/login'
AUTH_URL_V3 = 'https://api.coursera.org/api/login/v3'
# Base URL of a (legacy) class site; {class_name} is filled in by callers.
CLASS_URL = 'https://class.coursera.org/{class_name}'
# The following link is left just for illustrative purposes:
# https://api.coursera.org/api/courses.v1?fields=display%2CpartnerIds%2CphotoUrl%2CstartDate%2Cpartners.v1(homeLink%2Cname)&includes=partnerIds&q=watchlist&start=0
# Reply is as follows:
# {
# "elements": [
# {
# "courseType": "v1.session",
# "name": "Computational Photography",
# "id": "v1-87",
# "slug": "compphoto"
# }
# ],
# "paging": {
# "next": "100",
# "total": 154
# },
# "linked": {}
# }
OPENCOURSE_LIST_COURSES = 'https://api.coursera.org/api/courses.v1?q=watchlist&start={start}'
# The following link is left just for illustrative purposes:
# https://api.coursera.org/api/memberships.v1?fields=courseId,enrolledTimestamp,grade,id,lastAccessedTimestamp,onDemandSessionMembershipIds,onDemandSessionMemberships,role,v1SessionId,vc,vcMembershipId,courses.v1(courseStatus,display,partnerIds,photoUrl,specializations,startDate,v1Details,v2Details),partners.v1(homeLink,name),v1Details.v1(sessionIds),v1Sessions.v1(active,certificatesReleased,dbEndDate,durationString,hasSigTrack,startDay,startMonth,startYear),v2Details.v1(onDemandSessions,plannedLaunchDate,sessionsEnabledAt),specializations.v1(logo,name,partnerIds,shortName)&includes=courseId,onDemandSessionMemberships,vcMembershipId,courses.v1(partnerIds,specializations,v1Details,v2Details),v1Details.v1(sessionIds),v2Details.v1(onDemandSessions),specializations.v1(partnerIds)&q=me&showHidden=true&filter=current,preEnrolled
# Sample reply:
# {
# "elements": [
# {
# id: "4958~bVgqTevEEeWvGQrWsIkLlw",
# userId: 4958,
# courseId: "bVgqTevEEeWvGQrWsIkLlw",
# role: "LEARNER"
# },
# ],
# "paging": null,
# "linked": {
# "courses.v1": [
# {
# "id": "0w0JAG9JEeSp0iIAC12Jpw",
# "slug": "computational-neurosciencecompneuro",
# "courseType": "v2.ondemand",
# "name": "Computational Neuroscience"
# }
# ]
# }
# }
# Current course memberships of the authenticated user (see sample
# request/response in the comments above).
OPENCOURSE_MEMBERSHIPS = 'https://api.coursera.org/api/memberships.v1?includes=courseId,courses.v1&q=me&showHidden=true&filter=current,preEnrolled'

# Video sources and subtitle tracks for a single on-demand lecture.
OPENCOURSE_ONDEMAND_LECTURE_VIDEOS_URL = \
    'https://api.coursera.org/api/onDemandLectureVideos.v1/'\
    '{course_id}~{video_id}?includes=video&'\
    'fields=onDemandVideos.v1(sources%2Csubtitles%2CsubtitlesVtt%2CsubtitlesTxt)'

# Supplementary (reading) material attached to a course element.
OPENCOURSE_SUPPLEMENT_URL = 'https://api.coursera.org/api/onDemandSupplements.v1/'\
    '{course_id}~{element_id}?includes=asset&fields=openCourseAssets.v1%28typeName%29,openCourseAssets.v1%28definition%29'

# Programming assignment description (submission schema).
OPENCOURSE_PROGRAMMING_ASSIGNMENTS_URL = \
    'https://api.coursera.org/api/onDemandProgrammingLearnerAssignments.v1/{course_id}~{element_id}?fields=submissionLearnerSchema'

# NOTE(review): "INSTRUCTIOINS" is a typo in this public name; it is kept
# as-is because callers reference it by this spelling.
OPENCOURSE_PROGRAMMING_IMMEDIATE_INSTRUCTIOINS_URL = \
    'https://api.coursera.org/api/onDemandProgrammingImmediateInstructions.v1/{course_id}~{element_id}'

# Course references: the poll URL lists all of them for a course, the
# item URL fetches a single one by its short id.
OPENCOURSE_REFERENCES_POLL_URL = \
    "https://api.coursera.org/api/onDemandReferences.v1/?courseId={course_id}&q=courseListed&fields=name%2CshortId%2Cslug%2Ccontent&includes=assets"
OPENCOURSE_REFERENCE_ITEM_URL = \
    "https://api.coursera.org/api/onDemandReferences.v1/?courseId={course_id}&q=shortId&shortId={short_id}&fields=name%2CshortId%2Cslug%2Ccontent&includes=assets"
# These are ids that are present in <asset> tag in assignment text:
#
# <asset id=\"yeJ7Q8VAEeWPRQ4YsSEORQ\"
# name=\"statement-pca\"
# extension=\"pdf\"
# assetType=\"generic\"/>
#
# Sample response:
#
# {
# "elements": [
# {
# "id": "yeJ7Q8VAEeWPRQ4YsSEORQ",
# "url": "<some url>",
# "expires": 1454371200000
# }
# ],
# "paging": null,
# "linked": null
# }
# Resolve asset ids (taken from <asset> tags in assignment text) to
# downloadable URLs; {ids} is a comma-separated list of asset ids.
OPENCOURSE_ASSET_URL = \
    'https://api.coursera.org/api/assetUrls.v1?ids={ids}'
# Sample response:
# "linked": {
# "openCourseAssets.v1": [
# {
# "typeName": "asset",
# "definition": {
# "assetId": "fytYX5rYEeedWRLokafKRg",
# "name": "Lecture slides"
# },
# "id": "j6g7VZrYEeeUVgpv-dYMig"
# }
# ]
# }
# Assets (e.g. lecture slides) attached to a lecture video; see the
# sample response in the comments above.
OPENCOURSE_ONDEMAND_LECTURE_ASSETS_URL = \
    'https://api.coursera.org/api/onDemandLectureAssets.v1/'\
    '{course_id}~{video_id}/?includes=openCourseAssets'
# These ids are provided in lecture json:
#
# {
# "id": "6ydIh",
# "name": "Введение в теорию игр",
# "elements": [
# {
# "id": "ujNfj",
# "name": "Что изучает теория игр?",
# "content": {
# "typeName": "lecture",
# "definition": {
# "duration": 536000,
# "videoId": "pGNiQYo-EeWNvA632PIn3w",
# "optional": false,
# "assets": [
# "giAxucdaEeWJTQ5WTi8YJQ@1"
# ]
# }
# },
# "slug": "chto-izuchaiet-tieoriia-ighr",
# "timeCommitment": 536000
# }
# ],
# "slug": "vviedieniie-v-tieoriiu-ighr",
# "timeCommitment": 536000,
# "optional": false
# }
#
# Sample response:
#
# {
# "elements": [
# {
# "id": "giAxucdaEeWJTQ5WTi8YJQ",
# "typeName": "asset",
# "definition": {
# "name": "",
# "assetId": "Vq8hwsdaEeWGlA7xclFASw"
# }
# }
# ],
# "paging": null,
# "linked": null
# }
# Resolve an open-course asset id (from lecture JSON) to its asset
# descriptor; see the sample request/response in the comments above.
OPENCOURSE_ASSETS_URL = \
    'https://api.coursera.org/api/openCourseAssets.v1/{id}'
# These asset ids are ids returned from OPENCOURSE_ASSETS_URL request:
# See example above.
#
# Sample response:
#
# {
# "elements": [
# {
# "id": "Vq8hwsdaEeWGlA7xclFASw",
# "name": "1_Strategic_Interactions.pdf",
# "typeName": "generic",
# "url": {
# "url": "<some url>",
# "expires": 1454371200000
# }
# }
# ],
# "paging": null,
# "linked": null
# }
# Resolve one or more asset ids to their names and download URLs (see the
# sample response in the comments above).
OPENCOURSE_API_ASSETS_V1_URL = \
    'https://api.coursera.org/api/assets.v1?ids={id}'

# Full course-material tree (modules, lessons, items) for a class slug;
# v1 and v2 flavours of the same endpoint.
OPENCOURSE_ONDEMAND_COURSE_MATERIALS = \
    'https://api.coursera.org/api/onDemandCourseMaterials.v1/?'\
    'q=slug&slug={class_name}&includes=moduleIds%2ClessonIds%2CpassableItemGroups%2CpassableItemGroupChoices%2CpassableLessonElements%2CitemIds%2Ctracks'\
    '&fields=moduleIds%2ConDemandCourseMaterialModules.v1(name%2Cslug%2Cdescription%2CtimeCommitment%2ClessonIds%2Coptional)%2ConDemandCourseMaterialLessons.v1(name%2Cslug%2CtimeCommitment%2CelementIds%2Coptional%2CtrackId)%2ConDemandCourseMaterialPassableItemGroups.v1(requiredPassedCount%2CpassableItemGroupChoiceIds%2CtrackId)%2ConDemandCourseMaterialPassableItemGroupChoices.v1(name%2Cdescription%2CitemIds)%2ConDemandCourseMaterialPassableLessonElements.v1(gradingWeight)%2ConDemandCourseMaterialItems.v1(name%2Cslug%2CtimeCommitment%2Ccontent%2CisLocked%2ClockableByItem%2CitemLockedReasonCode%2CtrackId)%2ConDemandCourseMaterialTracks.v1(passablesCount)'\
    '&showLockedItems=true'
OPENCOURSE_ONDEMAND_COURSE_MATERIALS_V2 = \
    'https://api.coursera.org/api/onDemandCourseMaterials.v2/?q=slug&slug={class_name}'\
    '&includes=modules%2Clessons%2CpassableItemGroups%2CpassableItemGroupChoices%2CpassableLessonElements%2Citems%2Ctracks%2CgradePolicy&'\
    '&fields=moduleIds%2ConDemandCourseMaterialModules.v1(name%2Cslug%2Cdescription%2CtimeCommitment%2ClessonIds%2Coptional%2ClearningObjectives)%2ConDemandCourseMaterialLessons.v1(name%2Cslug%2CtimeCommitment%2CelementIds%2Coptional%2CtrackId)%2ConDemandCourseMaterialPassableItemGroups.v1(requiredPassedCount%2CpassableItemGroupChoiceIds%2CtrackId)%2ConDemandCourseMaterialPassableItemGroupChoices.v1(name%2Cdescription%2CitemIds)%2ConDemandCourseMaterialPassableLessonElements.v1(gradingWeight%2CisRequiredForPassing)%2ConDemandCourseMaterialItems.v2(name%2Cslug%2CtimeCommitment%2CcontentSummary%2CisLocked%2ClockableByItem%2CitemLockedReasonCode%2CtrackId%2ClockedStatus%2CitemLockSummary)%2ConDemandCourseMaterialTracks.v1(passablesCount)'\
    '&showLockedItems=true'

# Specialization metadata (courses, partners, memberships) by slug.
OPENCOURSE_ONDEMAND_SPECIALIZATIONS_V1 = \
    'https://api.coursera.org/api/onDemandSpecializations.v1?q=slug'\
    '&slug={class_name}&fields=courseIds,interchangeableCourseIds,launchedAt,'\
    'logo,memberships,metadata,partnerIds,premiumExperienceVariant,'\
    'onDemandSpecializationMemberships.v1(suggestedSessionSchedule),'\
    'onDemandSpecializationSuggestedSchedule.v1(suggestedSessions),'\
    'partners.v1(homeLink,name),courses.v1(courseProgress,description,'\
    'membershipIds,startDate,v2Details,vcMembershipIds),v2Details.v1('\
    'onDemandSessions,plannedLaunchDate),memberships.v1(grade,'\
    'vcMembershipId),vcMemberships.v1(certificateCodeWithGrade)'\
    '&includes=courseIds,memberships,partnerIds,'\
    'onDemandSpecializationMemberships.v1(suggestedSessionSchedule),'\
    'courses.v1(courseProgress,membershipIds,v2Details,vcMembershipIds),'\
    'v2Details.v1(onDemandSessions)'

# Course metadata (branding, instructors, partners) by slug.
OPENCOURSE_ONDEMAND_COURSES_V1 = \
    'https://api.coursera.org/api/onDemandCourses.v1?q=slug&slug={class_name}&'\
    'includes=instructorIds%2CpartnerIds%2C_links&'\
    'fields=brandingImage%2CcertificatePurchaseEnabledAt%2Cpartners.v1(squareLogo%2CrectangularLogo)%2Cinstructors.v1(fullName)%2CoverridePartnerLogos%2CsessionsEnabledAt%2CdomainTypes%2CpremiumExperienceVariant%2CisRestrictedMembership'

# "About" page data from the legacy catalog API, searched by class name.
ABOUT_URL = ('https://api.coursera.org/api/catalog.v1/courses?'
             'fields=largeIcon,photo,previewLink,shortDescription,smallIcon,'
             'smallIconHover,universityLogo,universityLogoSt,video,videoId,'
             'aboutTheCourse,targetAudience,faq,courseSyllabus,courseFormat,'
             'suggestedReadings,instructor,estimatedClassWorkload,'
             'aboutTheInstructor,recommendedBackground,subtitleLanguagesCsv&'
             'q=search&query={class_name}')

# Login redirector for legacy class sites.
AUTH_REDIRECT_URL = ('https://class.coursera.org/{class_name}'
                     '/auth/auth_redirector?type=login&subtype=normal')
# Sample URL:
#
# https://api.coursera.org/api/onDemandPeerAssignmentInstructions.v1/?q=latest&userId=4958&courseId=RcnRZHHtEeWxvQr3acyajw&itemId=2yTvX&includes=gradingMetadata%2CreviewSchemas%2CsubmissionSchemas&fields=instructions%2ConDemandPeerAssignmentGradingMetadata.v1(requiredAuthoredReviewCount%2CisMentorGraded%2CassignmentDetails)%2ConDemandPeerReviewSchemas.v1(reviewSchema)%2ConDemandPeerSubmissionSchemas.v1(submissionSchema)
#
# Sample response:
#
# {
# "elements": [
# {
# "instructions": {
# "introduction": {
# "typeName": "cml",
# "definition": {
# "dtdId": "assess/1",
# "value": "<co-content><text>Ваше первое задание заключается в установке Python и библиотек..</text></li></list></co-content>"
# }
# },
# "sections": [
# {
# "typeId": "unknown",
# "title": "Review criteria",
# "content": {
# "typeName": "cml",
# "definition": {
# "dtdId": "assess/1",
# "value": "<co-content><text>В результате работы вы установите на компьютер Python и библиотеки, необходимые для дальнейшего прохождения курса..</text></co-content>"
# }
# }
# }
# ]
# },
# "id": "4958~RcnRZHHtEeWxvQr3acyajw~2yTvX~8x7Qhs66EeW2Tw715xhIPQ@13"
# }
# ],
# "paging": {},
# "linked": {
# "onDemandPeerSubmissionSchemas.v1": [
# {
# "submissionSchema": {
# "parts": [
# {
# "details": {
# "typeName": "fileUpload",
# "definition": {
# "required": false
# }
# },
# "id": "_fcfP3bPT5W4pkfkshmUAQ",
# "prompt": {
# "typeName": "cml",
# "definition": {
# "dtdId": "assess/1",
# "value": "<co-content><text>Загрузите скриншот №1.</text></co-content>"
# }
# }
# },
# {
# "details": {
# "typeName": "fileUpload",
# "definition": {
# "required": false
# }
# },
# "id": "92ea4b4e-3492-41eb-ee32-2624ee807bd3",
# "prompt": {
# "typeName": "cml",
# "definition": {
# "dtdId": "assess/1",
# "value": "<co-content><text>Загрузите скриншот №2.</text></co-content>"
# }
# }
# }
# ]
# },
# "id": "4958~RcnRZHHtEeWxvQr3acyajw~2yTvX~8x7Qhs66EeW2Tw715xhIPQ@13"
# }
# ],
# "onDemandPeerAssignmentGradingMetadata.v1": [
# {
# "assignmentDetails": {
# "typeName": "phased",
# "definition": {
# "receivedReviewCutoffs": {
# "count": 3
# },
# "passingFraction": 0.8
# }
# },
# "requiredAuthoredReviewCount": 3,
# "isMentorGraded": false,
# "id": "4958~RcnRZHHtEeWxvQr3acyajw~2yTvX~8x7Qhs66EeW2Tw715xhIPQ@13"
# }
# ],
# "onDemandPeerReviewSchemas.v1": []
# }
# }
#
# This URL is used to retrieve "phasedPeer" typename instructions' contents
# (peer-graded assignment prompts plus grading/review/submission schemas;
# see the sample request/response in the comments above).
OPENCOURSE_PEER_ASSIGNMENT_INSTRUCTIONS = (
    'https://api.coursera.org/api/onDemandPeerAssignmentInstructions.v1/?'
    'q=latest&userId={user_id}&courseId={course_id}&itemId={element_id}&'
    'includes=gradingMetadata%2CreviewSchemas%2CsubmissionSchemas&'
    'fields=instructions%2ConDemandPeerAssignmentGradingMetadata.v1(requiredAuthoredReviewCount%2CisMentorGraded%2CassignmentDetails)%2ConDemandPeerReviewSchemas.v1(reviewSchema)%2ConDemandPeerSubmissionSchemas.v1(submissionSchema)')
#POST_OPENCOURSE_API_QUIZ_SESSION = 'https://api.coursera.org/api/opencourse.v1/user/4958/course/text-mining/item/7OQHc/quiz/session'
# Sample response:
#
# {
# "contentResponseBody": {
# "session": {
# "id": "opencourse~bVgqTevEEeWvGQrWsIkLlw:4958:BiNDdOvPEeWAkwqbKEEh3w@13:1468773901987@1",
# "open": true
# }
# },
# "itemProgress": {
# "contentVersionedId": "BiNDdOvPEeWAkwqbKEEh3w@13",
# "timestamp": 1468774458435,
# "progressState": "Started"
# }
# }
POST_OPENCOURSE_API_QUIZ_SESSION = 'https://api.coursera.org/api/opencourse.v1/user/{user_id}/course/{class_name}/item/{quiz_id}/quiz/session'
#POST_OPENCOURSE_API_QUIZ_SESSION_GET_STATE = 'https://api.coursera.org/api/opencourse.v1/user/4958/course/text-mining/item/7OQHc/quiz/session/opencourse~bVgqTevEEeWvGQrWsIkLlw:4958:BiNDdOvPEeWAkwqbKEEh3w@13:1468773901987@1/action/getState?autoEnroll=false'
# Sample response:
#
# {
# "contentResponseBody": {
# "return": {
# "questions": [
# {
# "id": "89424f6873744b5c0b92da2936327bb4",
# "question": {
# "type": "mcq"
# },
# "variant": {
# "definition": {
# "prompt": {
# "typeName": "cml",
# "definition": {
# "dtdId": "assess/1",
# "value": "<co-content><text hasMath=\"true\">You are given a unigram language model $$\\theta$$ distributed over a vocabulary set $$V$$ composed of <strong>only</strong> 4 words: “the”, “machine”, “learning”, and “data”. The distribution of $$\\theta$$ is given in the table below:</text><table rows=\"5\" columns=\"2\"><tr><th><text>$$w$$</text></th><th><text>$$P(w|\\theta)$$</text></th></tr><tr><td><text>machine</text></td><td><text>0.1</text></td></tr><tr><td><text>learning</text></td><td><text>0.2</text></td></tr><tr><td><text>data</text></td><td><text>0.3</text></td></tr><tr><td><text>the</text></td><td><text>0.4</text></td></tr></table><text hasMath=\"true\"> $$P(\\text{“machine learning”}|\\theta) = $$</text></co-content>"
# }
# },
# "options": [
# {
# "id": "717bd78dec2b817bed4b2d6096cbc9fc",
# "display": {
# "typeName": "cml",
# "definition": {
# "dtdId": "assess/1",
# "value": "<co-content><text>0.004</text></co-content>"
# }
# }
# },
# {
# "id": "a06c614cbb15b4e54212296b16fc4e62",
# "display": {
# "typeName": "cml",
# "definition": {
# "dtdId": "assess/1",
# "value": "<co-content><text>0.2</text></co-content>"
# }
# }
# },
# {
# "id": "029fe0fee932d6ad260f292dd05dc5c9",
# "display": {
# "typeName": "cml",
# "definition": {
# "dtdId": "assess/1",
# "value": "<co-content><text>0.3</text></co-content>"
# }
# }
# },
# {
# "id": "b6af6403d4ddde3b1e58599c12b6397a",
# "display": {
# "typeName": "cml",
# "definition": {
# "dtdId": "assess/1",
# "value": "<co-content><text>0.02</text></co-content>"
# }
# }
# }
# ]
# },
# "detailLevel": "Full"
# },
# "weightedScoring": {
# "maxScore": 1
# },
# "isSubmitAllowed": true
# }
# ],
# "evaluation": null
# }
# },
# "itemProgress": {
# "contentVersionedId": "BiNDdOvPEeWAkwqbKEEh3w@13",
# "timestamp": 1468774458894,
# "progressState": "Started"
# }
# }
#
POST_OPENCOURSE_API_QUIZ_SESSION_GET_STATE = 'https://api.coursera.org/api/opencourse.v1/user/{user_id}/course/{class_name}/item/{quiz_id}/quiz/session/{session_id}/action/getState?autoEnroll=false'
#POST_OPENCOURSE_ONDEMAND_EXAM_SESSIONS = 'https://api.coursera.org/api/onDemandExamSessions.v1/-N44X0IJEeWpogr5ZO8qxQ~YV0W4~10!~1467462079068/actions?includes=gradingAttempts'
# Sample response:
#
# {
# "elements": [
# {
# "id": 0,
# "result": {
# "questions": [
# {
# "id": "8uUpMzm_EeaetxLgjw7H8Q@0",
# "question": {
# "type": "mcq"
# },
# "variant": {
# "definition": {
# "prompt": {
# "typeName": "cml",
# "definition": {
# "dtdId": "assess/1",
# "value": "<co-content><text>\n\nSuppose you’d like to perform nearest neighbor search from the following set of houses:</text><table rows=\"5\" columns=\"4\"><tr><td><text>\n\n\n\n\n\n</text></td><td><text>\n\n\nPrice (USD)</text></td><td><text>\n\n\nNumber of rooms</text></td><td><text>\n\n\nLot size (sq. ft.)</text></td></tr><tr><td><text>\n\n\nHouse 1</text></td><td><text>\n\n\n500000</text></td><td><text>\n\n\n3</text></td><td><text>\n\n\n1840</text></td></tr><tr><td><text>\n\n\nHouse 2</text></td><td><text>\n\n\n350000</text></td><td><text>\n\n\n2</text></td><td><text>\n\n\n1600</text></td></tr><tr><td><text>House 3</text></td><td><text>\n\n600000</text></td><td><text>\n\n4</text></td><td><text>\n\n2000</text></td></tr><tr><td><text>House 4</text></td><td><text>\n400000</text></td><td><text>\n2</text></td><td><text>\n1900</text></td></tr></table><text>\n\nSince the features come in wildly different scales, you decide to use scaled Euclidean distances. Choose the set of weights a_i (as presented in the video lecture) that properly incorporates the relative amount of variation of the feature.</text><text>Note: </text><code language=\"plain_text\">a_price = weight assigned to price (USD)\na_room = weight assigned to number of rooms\na_lot = weight assigned to lot size (sq.ft.)</code></co-content>"
# }
# },
# "options": [
# {
# "id": "0.9109180361318947",
# "display": {
# "typeName": "cml",
# "definition": {
# "dtdId": "assess/1",
# "value": "<co-content><text>a_price = 1, a_room = 1, a_lot = 1</text></co-content>"
# }
# }
# },
# {
# "id": "0.11974743029080992",
# "display": {
# "typeName": "cml",
# "definition": {
# "dtdId": "assess/1",
# "value": "<co-content><text>a_price = 1, a_room = 1, a_lot = 1e-6</text></co-content>"
# }
# }
# },
# {
# "id": "0.8214165539451299",
# "display": {
# "typeName": "cml",
# "definition": {
# "dtdId": "assess/1",
# "value": "<co-content><text>a_price = 1e-10, a_room = 1, a_lot = 1e-6</text></co-content>"
# }
# }
# },
# {
# "id": "0.6784789645868041",
# "display": {
# "typeName": "cml",
# "definition": {
# "dtdId": "assess/1",
# "value": "<co-content><text>a_price = 1e-5, a_room = 1, a_lot = 1e-3</text></co-content>"
# }
# }
# },
# {
# "id": "0.9664001374497642",
# "display": {
# "typeName": "cml",
# "definition": {
# "dtdId": "assess/1",
# "value": "<co-content><text>a_price = 1e5, a_room = 1, a_lot = 1e3</text></co-content>"
# }
# }
# }
# ]
# },
# "detailLevel": "Full"
# },
# "weightedScoring": {
# "maxScore": 1
# },
# "isSubmitAllowed": true
# },
# {
# "id": "jeVDBjnNEeaetxLgjw7H8Q@0",
# "question": {
# "type": "singleNumeric"
# },
# "variant": {
# "definition": {
# "prompt": {
# "typeName": "cml",
# "definition": {
# "dtdId": "assess/1",
# "value": "<co-content><text>\n\nConsider the following two sentences.\n</text><list bulletType=\"bullets\"><li><text>Sentence 1: The quick brown fox jumps over the lazy dog.\n</text></li><li><text>Sentence 2: A quick brown dog outpaces a quick fox.\n</text></li></list><text>\n\nCompute the Euclidean distance using word counts. Round your answer to 3 decimal places.</text><text>Note. To compute word counts, turn all words into lower case and strip all punctuation, so that \"The\" and \"the\" are counted as the same token.</text></co-content>"
# }
# }
# },
# "detailLevel": "Full"
# },
# "weightedScoring": {
# "maxScore": 1
# },
# "isSubmitAllowed": true
# },
# {
# "id": "-tI-EjnNEeaPCw5NUSdt1w@0",
# "question": {
# "type": "singleNumeric"
# },
# "variant": {
# "definition": {
# "prompt": {
# "typeName": "cml",
# "definition": {
# "dtdId": "assess/1",
# "value": "<co-content><text>Refer back to the two sentences given in Question 2 to answer the following:</text><text>Recall that we can use cosine similarity to define a distance. We call that distance cosine distance. </text><text>Compute the <strong>cosine distance</strong> using word counts. Round your answer to 3 decimal places.\n</text><text>Note: To compute word counts, turn all words into lower case and strip all punctuation, so that \"The\" and \"the\" are counted as the same token.</text><text>Hint. Recall that we can use cosine similarity to define a distance. We call that distance cosine distance.</text></co-content>"
# }
# }
# },
# "detailLevel": "Full"
# },
# "weightedScoring": {
# "maxScore": 1
# },
# "isSubmitAllowed": true
# }
# ],
# "evaluation": null
# }
# }
# ],
# "paging": null,
# "linked": {
# "gradingAttempts.v1": []
# }
# }
#
# Request payload:
# {"courseId":"-N44X0IJEeWpogr5ZO8qxQ","itemId":"YV0W4"}
#
#POST_OPENCOURSE_ONDEMAND_EXAM_SESSIONS = 'https://api.coursera.org/api/onDemandExamSessions.v1/-N44X0IJEeWpogr5ZO8qxQ~YV0W4~10!~1467462079068/actions?includes=gradingAttempts'
# Response for this request is empty. Result (session_id) should be taken
# either from Location header or from X-Coursera-Id header.
#
# Request payload:
# {"courseId":"-N44X0IJEeWpogr5ZO8qxQ","itemId":"YV0W4"}
POST_OPENCOURSE_ONDEMAND_EXAM_SESSIONS = 'https://api.coursera.org/api/onDemandExamSessions.v1'
# Sample response:
# {
# "elements": [
# {
# "id": 0,
# "result": {
# "questions": [
# {
# "id": "8uUpMzm_EeaetxLgjw7H8Q@0",
# "question": {
# "type": "mcq"
# },
# "variant": {
# "definition": {
# "prompt": {
# "typeName": "cml",
# "definition": {
# "dtdId": "assess/1",
# "value": "<co-content><text>\n\nSuppose you’d like to perform nearest neighbor search from the following set of houses:</text><table rows=\"5\" columns=\"4\"><tr><td><text>\n\n\n\n\n\n</text></td><td><text>\n\n\nPrice (USD)</text></td><td><text>\n\n\nNumber of rooms</text></td><td><text>\n\n\nLot size (sq. ft.)</text></td></tr><tr><td><text>\n\n\nHouse 1</text></td><td><text>\n\n\n500000</text></td><td><text>\n\n\n3</text></td><td><text>\n\n\n1840</text></td></tr><tr><td><text>\n\n\nHouse 2</text></td><td><text>\n\n\n350000</text></td><td><text>\n\n\n2</text></td><td><text>\n\n\n1600</text></td></tr><tr><td><text>House 3</text></td><td><text>\n\n600000</text></td><td><text>\n\n4</text></td><td><text>\n\n2000</text></td></tr><tr><td><text>House 4</text></td><td><text>\n400000</text></td><td><text>\n2</text></td><td><text>\n1900</text></td></tr></table><text>\n\nSince the features come in wildly different scales, you decide to use scaled Euclidean distances. Choose the set of weights a_i (as presented in the video lecture) that properly incorporates the relative amount of variation of the feature.</text><text>Note: </text><code language=\"plain_text\">a_price = weight assigned to price (USD)\na_room = weight assigned to number of rooms\na_lot = weight assigned to lot size (sq.ft.)</code></co-content>"
# }
# },
# "options": [
# {
# "id": "0.9109180361318947",
# "display": {
# "typeName": "cml",
# "definition": {
# "dtdId": "assess/1",
# "value": "<co-content><text>a_price = 1, a_room = 1, a_lot = 1</text></co-content>"
# }
# }
# },
# {
# "id": "0.11974743029080992",
# "display": {
# "typeName": "cml",
# "definition": {
# "dtdId": "assess/1",
# "value": "<co-content><text>a_price = 1, a_room = 1, a_lot = 1e-6</text></co-content>"
# }
# }
# },
# {
# "id": "0.8214165539451299",
# "display": {
# "typeName": "cml",
# "definition": {
# "dtdId": "assess/1",
# "value": "<co-content><text>a_price = 1e-10, a_room = 1, a_lot = 1e-6</text></co-content>"
# }
# }
# },
# {
# "id": "0.6784789645868041",
# "display": {
# "typeName": "cml",
# "definition": {
# "dtdId": "assess/1",
# "value": "<co-content><text>a_price = 1e-5, a_room = 1, a_lot = 1e-3</text></co-content>"
# }
# }
# },
# {
# "id": "0.9664001374497642",
# "display": {
# "typeName": "cml",
# "definition": {
# "dtdId": "assess/1",
# "value": "<co-content><text>a_price = 1e5, a_room = 1, a_lot = 1e3</text></co-content>"
# }
# }
# }
# ]
# },
# "detailLevel": "Full"
# },
# "weightedScoring": {
# "maxScore": 1
# },
# "isSubmitAllowed": true
# },
# {
# "id": "jeVDBjnNEeaetxLgjw7H8Q@0",
# "question": {
# "type": "singleNumeric"
# },
# "variant": {
# "definition": {
# "prompt": {
# "typeName": "cml",
# "definition": {
# "dtdId": "assess/1",
# "value": "<co-content><text>\n\nConsider the following two sentences.\n</text><list bulletType=\"bullets\"><li><text>Sentence 1: The quick brown fox jumps over the lazy dog.\n</text></li><li><text>Sentence 2: A quick brown dog outpaces a quick fox.\n</text></li></list><text>\n\nCompute the Euclidean distance using word counts. Round your answer to 3 decimal places.</text><text>Note. To compute word counts, turn all words into lower case and strip all punctuation, so that \"The\" and \"the\" are counted as the same token.</text></co-content>"
# }
# }
# },
# "detailLevel": "Full"
# },
# "weightedScoring": {
# "maxScore": 1
# },
# "isSubmitAllowed": true
# },
# {
# "id": "-tI-EjnNEeaPCw5NUSdt1w@0",
# "question": {
# "type": "singleNumeric"
# },
# "variant": {
# "definition": {
# "prompt": {
# "typeName": "cml",
# "definition": {
# "dtdId": "assess/1",
# "value": "<co-content><text>Refer back to the two sentences given in Question 2 to answer the following:</text><text>Recall that we can use cosine similarity to define a distance. We call that distance cosine distance. </text><text>Compute the <strong>cosine distance</strong> using word counts. Round your answer to 3 decimal places.\n</text><text>Note: To compute word counts, turn all words into lower case and strip all punctuation, so that \"The\" and \"the\" are counted as the same token.</text><text>Hint. Recall that we can use cosine similarity to define a distance. We call that distance cosine distance.</text></co-content>"
# }
# }
# },
# "detailLevel": "Full"
# },
# "weightedScoring": {
# "maxScore": 1
# },
# "isSubmitAllowed": true
# },
# {
# "id": "LGECRDnOEeaetxLgjw7H8Q@0",
# "question": {
# "type": "mcq"
# },
# "variant": {
# "definition": {
# "prompt": {
# "typeName": "cml",
# "definition": {
# "dtdId": "assess/1",
# "value": "<co-content><text>(True/False) For positive features, cosine similarity is always between 0 and 1.</text></co-content>"
# }
# },
# "options": [
# {
# "id": "0.838238929639803",
# "display": {
# "typeName": "cml",
# "definition": {
# "dtdId": "assess/1",
# "value": "<co-content><text>True</text></co-content>"
# }
# }
# },
# {
# "id": "0.9654190569725087",
# "display": {
# "typeName": "cml",
# "definition": {
# "dtdId": "assess/1",
# "value": "<co-content><text>False</text></co-content>"
# }
# }
# }
# ]
# },
# "detailLevel": "Full"
# },
# "weightedScoring": {
# "maxScore": 1
# },
# "isSubmitAllowed": true
# },
# {
# "id": "N62eSDnOEea5PAq35BZMoQ@0",
# "question": {
# "type": "mcq"
# },
# "variant": {
# "definition": {
# "prompt": {
# "typeName": "cml",
# "definition": {
# "dtdId": "assess/1",
# "value": "<co-content><text>\n\nUsing the formula for TF-IDF presented in the lecture, complete the following sentence:</text><text>A word is assigned a zero TF-IDF weight when it appears in ____ documents. (N: number of documents in the corpus)</text></co-content>"
# }
# },
# "options": [
# {
# "id": "0.10877084920366831",
# "display": {
# "typeName": "cml",
# "definition": {
# "dtdId": "assess/1",
# "value": "<co-content><text>N - 1</text></co-content>"
# }
# }
# },
# {
# "id": "0.29922629273211787",
# "display": {
# "typeName": "cml",
# "definition": {
# "dtdId": "assess/1",
# "value": "<co-content><text>N/2</text></co-content>"
# }
# }
# },
# {
# "id": "0.69796593807345",
# "display": {
# "typeName": "cml",
# "definition": {
# "dtdId": "assess/1",
# "value": "<co-content><text>N</text></co-content>"
# }
# }
# },
# {
# "id": "0.6731572688278926",
# "display": {
# "typeName": "cml",
# "definition": {
# "dtdId": "assess/1",
# "value": "<co-content><text>0.1*N</text></co-content>"
# }
# }
# },
# {
# "id": "0.8467992755507772",
# "display": {
# "typeName": "cml",
# "definition": {
# "dtdId": "assess/1",
# "value": "<co-content><text>100</text></co-content>"
# }
# }
# }
# ]
# },
# "detailLevel": "Full"
# },
# "weightedScoring": {
# "maxScore": 1
# },
# "isSubmitAllowed": true
# },
# {
# "id": "TuHdkjnOEeaPCw5NUSdt1w@0",
# "question": {
# "type": "mcq"
# },
# "variant": {
# "definition": {
# "prompt": {
# "typeName": "cml",
# "definition": {
# "dtdId": "assess/1",
# "value": "<co-content><text>\n\nWhich of the following does <strong>not </strong>describe the word count document representation?</text></co-content>"
# }
# },
# "options": [
# {
# "id": "0.3821039264467949",
# "display": {
# "typeName": "cml",
# "definition": {
# "dtdId": "assess/1",
# "value": "<co-content><text>Ignores the order of the words</text></co-content>"
# }
# }
# },
# {
# "id": "0.3470767421220087",
# "display": {
# "typeName": "cml",
# "definition": {
# "dtdId": "assess/1",
# "value": "<co-content><text>Assigns a high score to a frequently occurring word</text></co-content>"
# }
# }
# },
# {
# "id": "0.3341840649172314",
# "display": {
# "typeName": "cml",
# "definition": {
# "dtdId": "assess/1",
# "value": "<co-content><text>Penalizes words that appear in every document</text></co-content>"
# }
# }
# }
# ]
# },
# "detailLevel": "Full"
# },
# "weightedScoring": {
# "maxScore": 1
# },
# "isSubmitAllowed": true
# }
# ],
# "evaluation": null
# }
# }
# ],
# "paging": null,
# "linked": {
# "gradingAttempts.v1": []
# }
# }
#
# Request payload:
# {"name":"getState","argument":[]}
POST_OPENCOURSE_ONDEMAND_EXAM_SESSIONS_GET_STATE = 'https://api.coursera.org/api/onDemandExamSessions.v1/{session_id}/actions?includes=gradingAttempts'
# Repository root (one level above this package's directory).
ROOT = os.path.abspath(os.path.join(os.path.dirname(__file__), '..'))

# define a per-user cache folder
if os.name == "posix":  # pragma: no cover
    # pwd is POSIX-only; use it to get the login name of the current uid.
    import pwd
    _USER = pwd.getpwuid(os.getuid())[0]
else:
    _USER = getpass.getuser()
PATH_CACHE = os.path.join(tempfile.gettempdir(), _USER + "_coursera_dl_cache")
PATH_COOKIES = os.path.join(PATH_CACHE, 'cookies')

# NOTE(review): presumably the Windows extended-length path prefix
# ("\\?\"), prepended to paths elsewhere — confirm against callers.
WINDOWS_UNC_PREFIX = u'\\\\?\\'

#: This extension is used to save contents of supplementary instructions.
IN_MEMORY_EXTENSION = 'html'

#: This marker is added in front of a URL when supplementary instructions
#: are passed from parser to downloader. URL field will contain the data
#: that will be stored to a file. The marker should be removed from URL
#: field first.
IN_MEMORY_MARKER = '#inmemory#'

#: These are hard limits for format (file extension) and
#: title (file name) lengths to avoid too long file names
#: (longer than 255 characters)
FORMAT_MAX_LENGTH = 20
TITLE_MAX_LENGTH = 200
#: CSS that is used to prettify instructions
#: The MathJax <script> tag opened at the end of this snippet is completed
#: by INSTRUCTIONS_HTML_MATHJAX_URL + INSTRUCTIONS_HTML_INJECTION_AFTER.
#: Fix: a stray extra '}' after the `input` rule (invalid CSS that can make
#: parsers drop the following rule) has been removed.
#: NOTE(review): the `pre` rule declares `margin` twice; the later
#: `0 0 10px` wins under CSS cascade rules.
INSTRUCTIONS_HTML_INJECTION_PRE = '''
<style>
body {
padding: 50px 85px 50px 85px;
}
table th, table td {
border: 1px solid #e0e0e0;
padding: 5px 20px;
text-align: left;
}
input {
margin: 10px;
}
th {
font-weight: bold;
}
td, th {
display: table-cell;
vertical-align: inherit;
}
img {
height: auto;
max-width: 100%;
}
pre {
display: block;
margin: 20px;
background: #424242;
color: #fff;
font-size: 13px;
white-space: pre-wrap;
padding: 9.5px;
margin: 0 0 10px;
border: 1px solid #ccc;
}
</style>
<script type="text/javascript" async
src="'''
# Base URL of the MathJax library injected into saved instruction pages.
INSTRUCTIONS_HTML_MATHJAX_URL = 'https://cdn.mathjax.org/mathjax/latest/MathJax.js'

# Closes the <script> tag opened by INSTRUCTIONS_HTML_INJECTION_PRE and
# configures MathJax delimiters for inline/display math.
INSTRUCTIONS_HTML_INJECTION_AFTER = '''?config=TeX-AMS-MML_HTMLorMML">
</script>
<script type="text/x-mathjax-config">
MathJax.Hub.Config({
tex2jax: {
inlineMath: [ ['$$','$$'], ['$','$'] ],
displayMath: [ ["\\\\[","\\\\]"] ],
processEscapes: true
}
});
</script>
'''

# The following url is the root url (tree) for a Coursera Course
# Jupyter-notebook hub endpoints: login, workspace launch, directory tree
# listing, and single-file download.
OPENCOURSE_NOTEBOOK_DESCRIPTIONS = "https://hub.coursera-notebooks.org/hub/coursera_login?token={authId}&next=/"
OPENCOURSE_NOTEBOOK_LAUNCHES = "https://api.coursera.org/api/onDemandNotebookWorkspaceLaunches.v1/?fields=authorizationId%2CcontentPath%2CuseLegacySystem"
OPENCOURSE_NOTEBOOK_TREE = "https://hub.coursera-notebooks.org/user/{jupId}/api/contents/{path}?type=directory&_={timestamp}"
OPENCOURSE_NOTEBOOK_DOWNLOAD = "https://hub.coursera-notebooks.org/user/{jupId}/files/{path}?download=1"
| 42,347
|
Python
|
.py
| 956
| 42.609833
| 1,345
| 0.522034
|
coursera-dl/coursera-dl
| 9,343
| 2,207
| 211
|
LGPL-3.0
|
9/5/2024, 5:11:58 PM (Europe/Amsterdam)
|
14,533
|
test_api.py
|
coursera-dl_coursera-dl/coursera/test/test_api.py
|
"""
Test APIs.
"""
from os.path import expanduser
import json
import pytest
from mock import patch, Mock
from coursera import api
from coursera import define
from coursera.test.utils import slurp_fixture, links_to_plain_text
from coursera.utils import BeautifulSoup
from requests.exceptions import HTTPError
from requests import Response
@pytest.fixture
def course():
    """Provide a CourseraOnDemand instance backed by a mocked session."""
    return api.CourseraOnDemand(
        session=Mock(cookies={}),
        course_id='0',
        course_name='test_course')
def _mock_locked_response(get_page):
    """Configure the mocked ``get_page`` to raise an HTTP 403 ("locked") error."""
    locked_response = Response()
    locked_response.status_code = define.HTTP_FORBIDDEN
    get_page.side_effect = HTTPError('Mocked HTTP error',
                                     response=locked_response)


@patch('coursera.api.get_page')
def test_extract_links_from_programming_http_error(get_page, course):
    """
    This test checks that downloader skips locked programming assignments
    instead of throwing an error. (Locked == returning 403 error code)
    """
    _mock_locked_response(get_page)
    assert course.extract_links_from_programming('0') is None


@patch('coursera.api.get_page')
def test_extract_links_from_exam_http_error(get_page, course):
    """Locked exams (HTTP 403) are skipped instead of raising."""
    _mock_locked_response(get_page)
    assert course.extract_links_from_exam('0') is None


@patch('coursera.api.get_page')
def test_extract_links_from_supplement_http_error(get_page, course):
    """Locked supplements (HTTP 403) are skipped instead of raising."""
    _mock_locked_response(get_page)
    assert course.extract_links_from_supplement('0') is None


@patch('coursera.api.get_page')
def test_extract_links_from_lecture_http_error(get_page, course):
    """Locked lectures (HTTP 403) are skipped instead of raising."""
    _mock_locked_response(get_page)
    assert course.extract_links_from_lecture('fake_course_id', '0') is None


@patch('coursera.api.get_page')
def test_extract_links_from_quiz_http_error(get_page, course):
    """Locked quizzes (HTTP 403) are skipped instead of raising."""
    _mock_locked_response(get_page)
    assert course.extract_links_from_quiz('0') is None


@patch('coursera.api.get_page')
def test_extract_references_poll_http_error(get_page, course):
    """Locked reference polls (HTTP 403) are skipped instead of raising."""
    _mock_locked_response(get_page)
    assert course.extract_references_poll() is None


@patch('coursera.api.get_page')
def test_extract_links_from_reference_http_error(get_page, course):
    """Locked references (HTTP 403) are skipped instead of raising."""
    _mock_locked_response(get_page)
    assert course.extract_links_from_reference('0') is None


@patch('coursera.api.get_page')
def test_extract_links_from_programming_immediate_instructions_http_error(
        get_page, course):
    """Locked programming immediate instructions (HTTP 403) are skipped."""
    _mock_locked_response(get_page)
    assert (
        course.extract_links_from_programming_immediate_instructions('0') is None)
@patch('coursera.api.get_page')
def test_ondemand_programming_supplement_no_instructions(get_page, course):
    """A programming supplement without instructions yields no links."""
    fixture = slurp_fixture(
        'json/supplement-programming-no-instructions.json')
    get_page.return_value = json.loads(fixture)
    assert course.extract_links_from_programming('0') == {}


@patch('coursera.api.get_page')
@pytest.mark.parametrize(
    "input_filename,expected_output", [
        ('peer-assignment-instructions-all.json', 'intro Review criteria section'),
        ('peer-assignment-instructions-no-title.json', 'intro section'),
        ('peer-assignment-instructions-only-introduction.json', 'intro'),
        ('peer-assignment-instructions-only-sections.json', 'Review criteria section'),
        ('peer-assignment-no-instructions.json', ''),
    ]
)
def test_ondemand_from_peer_assignment_instructions(
        get_page, course, input_filename, expected_output):
    """Peer-assignment instructions render to the expected plain text."""
    get_page.return_value = json.loads(slurp_fixture('json/%s' % input_filename))
    extracted = course.extract_links_from_peer_assignment('0')
    assert links_to_plain_text(extracted) == expected_output


@patch('coursera.api.get_page')
def test_ondemand_from_programming_immediate_instructions_no_instructions(
        get_page, course):
    """Immediate instructions without content yield no links."""
    fixture = slurp_fixture(
        'json/supplement-programming-immediate-instructions-no-instructions.json')
    get_page.return_value = json.loads(fixture)
    assert course.extract_links_from_programming_immediate_instructions('0') == {}
def _pop_html(output):
    """Assert that SOME html content has been extracted, then remove it;
    it is a hassle to properly prepare test input for it. FIXME later."""
    assert 'html' in output
    del output['html']


@patch('coursera.api.get_page')
def test_ondemand_programming_supplement_empty_instructions(get_page, course):
    """Empty programming instructions produce only the html section."""
    empty_instructions = slurp_fixture(
        'json/supplement-programming-empty-instructions.json')
    get_page.return_value = json.loads(empty_instructions)
    output = course.extract_links_from_programming('0')
    _pop_html(output)
    assert {} == output


@patch('coursera.api.get_page')
def test_ondemand_programming_immediate_instructions_empty_instructions(
        get_page, course):
    """Empty immediate instructions produce only the html section."""
    empty_instructions = slurp_fixture(
        'json/supplement-programming-immediate-instructions-empty-instructions.json')
    get_page.return_value = json.loads(empty_instructions)
    output = course.extract_links_from_programming_immediate_instructions('0')
    _pop_html(output)
    assert {} == output


@patch('coursera.api.get_page')
def test_ondemand_programming_supplement_one_asset(get_page, course):
    """A single PDF asset is resolved to its download URL."""
    one_asset_tag = slurp_fixture('json/supplement-programming-one-asset.json')
    one_asset_url = slurp_fixture('json/asset-urls-one.json')
    asset_json = json.loads(one_asset_url)
    get_page.side_effect = [json.loads(one_asset_tag),
                            json.loads(one_asset_url)]
    expected_output = {'pdf': [(asset_json['elements'][0]['url'],
                                'statement-pca')]}
    output = course.extract_links_from_programming('0')
    _pop_html(output)
    assert expected_output == output


@patch('coursera.api.get_page')
def test_extract_references_poll(get_page, course):
    """
    Test extracting course references.
    """
    get_page.side_effect = [
        json.loads(slurp_fixture('json/references-poll-reply.json'))
    ]
    expected_output = json.loads(
        slurp_fixture('json/references-poll-output.json'))
    output = course.extract_references_poll()
    assert expected_output == output


@patch('coursera.api.get_page')
def test_ondemand_programming_immediate_instructions_one_asset(get_page, course):
    """A single PDF asset in immediate instructions is resolved to its URL."""
    one_asset_tag = slurp_fixture(
        'json/supplement-programming-immediate-instructions-one-asset.json')
    one_asset_url = slurp_fixture('json/asset-urls-one.json')
    asset_json = json.loads(one_asset_url)
    get_page.side_effect = [json.loads(one_asset_tag),
                            json.loads(one_asset_url)]
    expected_output = {'pdf': [(asset_json['elements'][0]['url'],
                                'statement-pca')]}
    output = course.extract_links_from_programming_immediate_instructions('0')
    _pop_html(output)
    assert expected_output == output


@patch('coursera.api.get_page')
def test_ondemand_programming_supplement_three_assets(get_page, course):
    """Multiple assets are all resolved and grouped by file type."""
    three_assets_tag = slurp_fixture(
        'json/supplement-programming-three-assets.json')
    three_assets_url = slurp_fixture('json/asset-urls-three.json')
    get_page.side_effect = [json.loads(three_assets_tag),
                            json.loads(three_assets_url)]
    expected_output = json.loads(slurp_fixture(
        'json/supplement-three-assets-output.json'))
    output = course.extract_links_from_programming('0')
    # Round-trip through JSON to turn nested tuples into lists for comparison.
    output = json.loads(json.dumps(output))
    _pop_html(output)
    assert expected_output == output
@patch('coursera.api.get_page')
def test_extract_links_from_lecture_assets_typename_asset(get_page, course):
    """Lecture assets with typename == 'asset' are resolved to links."""
    get_page.side_effect = [
        json.loads(slurp_fixture('json/supplement-open-course-assets-reply.json')),
        json.loads(slurp_fixture('json/supplement-api-assets-v1-reply.json')),
    ]
    expected = json.loads(slurp_fixture(
        'json/supplement-extract-links-from-lectures-output.json'))
    links = course._extract_links_from_lecture_assets(['giAxucdaEeWJTQ5WTi8YJQ'])
    # Round-trip through JSON to normalize nested tuples into lists.
    assert json.loads(json.dumps(links)) == expected


@patch('coursera.api.get_page')
def test_extract_links_from_lecture_assets_typname_url_and_asset(get_page, course):
    """
    This test makes sure that _extract_links_from_lecture_assets grabs url
    links both from typename == 'asset' and == 'url'.
    """
    reply_fixtures = [
        'json/supplement-open-course-assets-typename-url-reply-1.json',
        'json/supplement-open-course-assets-typename-url-reply-2.json',
        'json/supplement-open-course-assets-typename-url-reply-3.json',
        'json/supplement-open-course-assets-typename-url-reply-4.json',
        'json/supplement-open-course-assets-typename-url-reply-5.json',
    ]
    get_page.side_effect = [json.loads(slurp_fixture(name))
                            for name in reply_fixtures]
    expected = json.loads(slurp_fixture(
        'json/supplement-extract-links-from-lectures-url-asset-output.json'))
    asset_ids = ['Yry0spSKEeW8oA5fR3afVQ',
                 'kMQyUZSLEeWj-hLVp2Pm8w',
                 'xkAloZmJEeWjYA4jOOgP8Q']
    links = course._extract_links_from_lecture_assets(asset_ids)
    assert json.loads(json.dumps(links)) == expected
@patch('coursera.api.get_page')
def test_list_courses(get_page, course):
    """
    Test course listing method.
    """
    get_page.side_effect = [
        json.loads(slurp_fixture('json/list-courses-input.json'))
    ]
    expected_courses = json.loads(
        slurp_fixture('json/list-courses-output.json'))['courses']
    assert course.list_courses() == expected_courses
@pytest.mark.parametrize(
    "input_filename,output_filename,subtitle_language,video_id", [
        ('video-reply-1.json', 'video-output-1.json',
         'en,zh-CN|zh-TW', "None"),
        ('video-reply-1.json', 'video-output-1-en.json',
         'zh-TW', "None"),
        ('video-reply-1.json', 'video-output-1-en.json',
         'en', "None"),
        ('video-reply-1.json', 'video-output-1-all.json',
         'all', "None"),
        ('video-reply-1.json', 'video-output-1-all.json',
         'zh-TW,all|zh-CN', "None"),
        ('video-reply-2.json', 'video-output-2.json',
         'en,zh-CN|zh-TW', "None"),
    ]
)
def test_extract_subtitles_from_video_dom(input_filename, output_filename, subtitle_language, video_id):
    """Subtitle links extracted from a video DOM honour the language filter."""
    video_dom = json.loads(slurp_fixture('json/%s' % input_filename))
    expected = json.loads(slurp_fixture('json/%s' % output_filename))
    on_demand = api.CourseraOnDemand(
        session=Mock(cookies={}), course_id='0', course_name='test_course')
    subtitles = on_demand._extract_subtitles_from_video_dom(
        video_dom, subtitle_language, video_id)
    # Normalize nested tuples into lists before comparing with the fixture.
    assert json.loads(json.dumps(subtitles)) == expected
@pytest.mark.parametrize(
    "input_filename,output_filename", [
        ('empty-input.json', 'empty-output.txt'),
        ('answer-text-replaced-with-span-input.json',
         'answer-text-replaced-with-span-output.txt'),
        ('question-type-textExactMatch-input.json',
         'question-type-textExactMatch-output.txt'),
        ('question-type-regex-input.json', 'question-type-regex-output.txt'),
        ('question-type-mathExpression-input.json',
         'question-type-mathExpression-output.txt'),
        ('question-type-checkbox-input.json', 'question-type-checkbox-output.txt'),
        ('question-type-mcq-input.json', 'question-type-mcq-output.txt'),
        ('question-type-singleNumeric-input.json',
         'question-type-singleNumeric-output.txt'),
        ('question-type-reflect-input.json', 'question-type-reflect-output.txt'),
        ('question-type-mcqReflect-input.json',
         'question-type-mcqReflect-output.txt'),
        ('question-type-unknown-input.json', 'question-type-unknown-output.txt'),
        ('multiple-questions-input.json', 'multiple-questions-output.txt'),
    ]
)
def test_quiz_exam_to_markup_converter(input_filename, output_filename):
    """QuizExamToMarkupConverter renders quiz JSON to the recorded markup."""
    quiz_json = json.loads(slurp_fixture(
        'json/quiz-to-markup/%s' % input_filename))
    expected = slurp_fixture(
        'json/quiz-to-markup/%s' % output_filename).strip()
    converter = api.QuizExamToMarkupConverter(session=None)
    assert converter(quiz_json).strip() == expected
class TestMarkupToHTMLConverter:
    """Tests for api.MarkupToHTMLConverter.

    Each test renders Coursera <co-content> markup to HTML and compares it
    with a prettified expectation plus the injected style/MathJax block.
    """
    def _p(self, html):
        # Prettify so comparisons are insensitive to incidental whitespace.
        return BeautifulSoup(html).prettify()
    STYLE = None  # prettified style/MathJax injection; set in setup_method
    def setup_method(self, test_method):
        """Build converters using the default and an alternative MathJax CDN."""
        self.STYLE = self._p(
            "".join([define.INSTRUCTIONS_HTML_INJECTION_PRE,
                     define.INSTRUCTIONS_HTML_MATHJAX_URL,
                     define.INSTRUCTIONS_HTML_INJECTION_AFTER])
        )
        self.markup_to_html = api.MarkupToHTMLConverter(session=None)
        ALTERNATIVE_MATHJAX_CDN = "https://alternative/mathjax/cdn.js"
        self.STYLE_WITH_ALTER = self._p(
            "".join([define.INSTRUCTIONS_HTML_INJECTION_PRE,
                     ALTERNATIVE_MATHJAX_CDN,
                     define.INSTRUCTIONS_HTML_INJECTION_AFTER])
        )
        self.markup_to_html_with_alter_mjcdn = api.MarkupToHTMLConverter(
            session=None, mathjax_cdn_url=ALTERNATIVE_MATHJAX_CDN)
    def test_empty(self):
        """Empty markup yields just the meta tag plus the injected style."""
        output = self.markup_to_html("")
        output_with_alter_mjcdn = self.markup_to_html_with_alter_mjcdn("")
        markup = """
        <meta charset="UTF-8"/>
        """
        assert self._p(markup) + self.STYLE == output
        assert self._p(markup) + \
            self.STYLE_WITH_ALTER == output_with_alter_mjcdn
    def test_replace_text_tag(self):
        """<text> elements (including nested ones) become <p> elements."""
        markup = """
        <co-content>
            <text>
                Test<text>Nested</text>
            </text>
            <text>
                Test2
            </text>
        </co-content>
        """
        result = """
        <meta charset="UTF-8"/>
        <co-content>
            <p>
                Test<p>Nested</p>
            </p>
            <p>
                Test2
            </p>
        </co-content>\n
        """
        output = self.markup_to_html(markup)
        output_with_alter_mjcdn = self.markup_to_html_with_alter_mjcdn(markup)
        assert self._p(result) + self.STYLE == output
        assert self._p(result) + \
            self.STYLE_WITH_ALTER == output_with_alter_mjcdn
    def test_replace_heading(self):
        """<heading level="N"> becomes <hN>; missing level defaults to <h1>."""
        output = self.markup_to_html("""
        <co-content>
            <heading level="1">Text</heading>
            <heading level="2">Text</heading>
            <heading level="3">Text</heading>
            <heading level="4">Text</heading>
            <heading level="5">Text</heading>
            <heading >Text</heading>
        </co-content>
        """)
        assert self._p("""
        <meta charset="UTF-8"/>
        <co-content>
            <h1 level="1">Text</h1>
            <h2 level="2">Text</h2>
            <h3 level="3">Text</h3>
            <h4 level="4">Text</h4>
            <h5 level="5">Text</h5>
            <h1>Text</h1>
        </co-content>\n
        """) + self.STYLE == output
    def test_replace_code(self):
        """<code> elements are rendered as <pre> blocks."""
        output = self.markup_to_html("""
        <co-content>
            <code>Text</code>
            <code>Text</code>
        </co-content>
        """)
        assert self._p("""
        <meta charset="UTF-8"/>
        <co-content>
            <pre>Text</pre>
            <pre>Text</pre>
        </co-content>\n
        """) + self.STYLE == output
    def test_replace_list(self):
        """<list> becomes <ol> for numbers and <ul> for bullets."""
        output = self.markup_to_html("""
        <co-content>
            <list bullettype="numbers">Text</list>
            <list bullettype="bullets">Text</list>
        </co-content>
        """)
        assert self._p("""
        <meta charset="UTF-8"/>
        <co-content>
            <ol bullettype="numbers">Text</ol>
            <ul bullettype="bullets">Text</ul>
        </co-content>\n
        """) + self.STYLE == output
    @patch('coursera.api.AssetRetriever')
    def test_replace_images(self, mock_asset_retriever):
        """<img assetId=...> gets an inline base64 data URI as its src."""
        replies = {
            'nVhIAj61EeaGyBLfiQeo_w': Mock(data=b'a', content_type='image/png'),
            'vdqUTz61Eea_CQ5dfWSAjQ': Mock(data=b'b', content_type='image/png'),
            'nodata': Mock(data=None, content_type='image/png')
        }
        mock_asset_retriever.__call__ = Mock(return_value=None)
        mock_asset_retriever.__getitem__ = Mock(
            side_effect=replies.__getitem__)
        self.markup_to_html._asset_retriever = mock_asset_retriever
        output = self.markup_to_html("""
        <co-content>
        <text>\n\n</text>
        <img assetId=\"nVhIAj61EeaGyBLfiQeo_w\" alt=\"\"/>
        <text>\n\n</text>
        <img assetId=\"vdqUTz61Eea_CQ5dfWSAjQ\" alt=\"\"/>
        <text>\n\n</text>
        </co-content>
        """)
        assert self._p("""
        <meta charset="UTF-8"/>
        <co-content>
        <p></p>
        <img alt="" assetid="nVhIAj61EeaGyBLfiQeo_w" src="data:image/png;base64,YQ=="/>
        <p></p>
        <img alt="" assetid="vdqUTz61Eea_CQ5dfWSAjQ" src="data:image/png;base64,Yg=="/>
        <p></p>
        </co-content>\n
        """) + self.STYLE == output
    @patch('coursera.api.AssetRetriever')
    def test_replace_audios(self, mock_asset_retriever):
        """Audio assets get an inline base64 <audio> player; other asset
        types are left without a player."""
        replies = {
            'aWTK9sYwEeW7AxLLCrgDQQ': Mock(data=b'a', content_type='audio/mpeg'),
            'bWTK9sYwEeW7AxLLCrgDQQ': Mock(data=b'b', content_type='unknown')
        }
        mock_asset_retriever.__call__ = Mock(return_value=None)
        mock_asset_retriever.__getitem__ = Mock(
            side_effect=replies.__getitem__)
        self.markup_to_html._asset_retriever = mock_asset_retriever
        output = self.markup_to_html("""
        <co-content>
        <asset id=\"aWTK9sYwEeW7AxLLCrgDQQ\" name=\"M111\" extension=\"mp3\" assetType=\"audio\"/>
        <asset id=\"bWTK9sYwEeW7AxLLCrgDQQ\" name=\"M112\" extension=\"mp3\" assetType=\"unknown\"/>
        </co-content>
        """)
        assert self._p("""
        <meta charset="UTF-8"/>
        <co-content>
        <asset assettype="audio" extension="mp3" id="aWTK9sYwEeW7AxLLCrgDQQ" name="M111">
        </asset>
        <audio controls="">
        Your browser does not support the audio element.
        <source src="data:audio/mpeg;base64,YQ==" type="audio/mpeg">
        </source>
        </audio>
        <asset assettype="unknown" extension="mp3" id="bWTK9sYwEeW7AxLLCrgDQQ" name="M112">
        </asset>
        </co-content>\n
        """) + self.STYLE == output
def test_quiz_converter():
    """Manual debugging helper; always skipped.

    Converts a local ``quiz.json`` into ``quiz.html`` for visual inspection.
    """
    pytest.skip()
    # NOTE: the code below only runs if the skip() above is removed.
    quiz_to_markup = api.QuizExamToMarkupConverter(session=None)
    markup_to_html = api.MarkupToHTMLConverter(session=None)
    quiz_data = json.load(open('quiz.json'))['contentResponseBody']['return']
    result = markup_to_html(quiz_to_markup(quiz_data))
    # from ipdb import set_trace; set_trace(context=20)
    print('RESULT', result)
    with open('quiz.html', 'w') as file:
        file.write(result)
def test_quiz_converter_all():
    """Manual debugging helper; always skipped.

    Renders quiz JSON files from a local ``quiz_json`` directory into HTML
    files under ``quiz_html`` using a real (authenticated) session.
    """
    pytest.skip()
    # NOTE: the code below only runs if the skip() above is removed.
    import os
    from coursera.coursera_dl import get_session
    from coursera.cookies import login
    session = None
    session = get_session()
    quiz_to_markup = api.QuizExamToMarkupConverter(session=session)
    markup_to_html = api.MarkupToHTMLConverter(session=session)
    path = 'quiz_json'
    for filename in ['quiz-audio.json']:  # os.listdir(path):
        # for filename in ['all_question_types.json']:
        # if 'YV0W4' not in filename:
        #     continue
        # if 'QVHj1' not in filename:
        #     continue
        #quiz_data = json.load(open('quiz.json'))['contentResponseBody']['return']
        current = os.path.join(path, filename)
        print(current)
        quiz_data = json.load(open(current))
        result = markup_to_html(quiz_to_markup(quiz_data))
        # from ipdb import set_trace; set_trace(context=20)
        # print('RESULT', result)
        with open('quiz_html/' + filename + '.html', 'w') as f:
            f.write(result)
def create_session():
    """Build an authenticated session from ~/.netrc credentials (manual use)."""
    from coursera.coursera_dl import get_session
    from coursera.credentials import get_credentials
    from coursera.cookies import login
    user, passwd = get_credentials(netrc=expanduser('~/.netrc'))
    session = get_session()
    login(session, user, passwd)
    return session
@patch('coursera.api.get_page')
@patch('coursera.api.get_reply')
def test_asset_retriever(get_reply, get_page):
    """AssetRetriever resolves asset ids into Asset records with data."""
    get_page.side_effect = [
        json.loads(slurp_fixture('json/asset-retriever/assets-reply.json'))
    ]
    mocked_reply = Mock(status_code=200, content='<...>',
                        headers=Mock(get=Mock(return_value='image/png')))
    get_reply.side_effect = [mocked_reply] * 4
    asset_ids = ['bWTK9sYwEeW7AxLLCrgDQQ',
                 'VceKeChKEeaOMw70NkE3iw',
                 'VcmGXShKEea4ehL5RXz3EQ',
                 'vdqUTz61Eea_CQ5dfWSAjQ']
    expected = [
        api.Asset(id="bWTK9sYwEeW7AxLLCrgDQQ", name="M111.mp3", type_name="audio",
                  url="url4", content_type="image/png", data="<...>"),
        api.Asset(id="VceKeChKEeaOMw70NkE3iw", name="09_graph_decomposition_problems_1.pdf",
                  type_name="pdf", url="url7", content_type="image/png", data="<...>"),
        api.Asset(id="VcmGXShKEea4ehL5RXz3EQ", name="09_graph_decomposition_starter_files_1.zip",
                  type_name="generic", url="url2", content_type="image/png", data="<...>"),
        api.Asset(id="vdqUTz61Eea_CQ5dfWSAjQ", name="Capture.PNG",
                  type_name="image", url="url9", content_type="image/png", data="<...>"),
    ]
    retriever = api.AssetRetriever(session=None)
    assert retriever(asset_ids) == expected
def test_debug_asset_retriever():
    """Manual debugging helper for AssetRetriever; always skipped.

    Requires real credentials (via create_session) and network access,
    so it is not part of the automated suite.
    """
    pytest.skip()
    # NOTE: the code below only runs if the skip() above is removed.
    asset_ids = ['bWTK9sYwEeW7AxLLCrgDQQ',
                 'bXCx18YwEeWicwr5JH8fgw',
                 'bX9X18YwEeW7AxLLCrgDQQ',
                 'bYHvf8YwEeWFNA5XwZEiOw',
                 'tZmigMYxEeWFNA5XwZEiOw']
    asset_ids = asset_ids[0:5]
    more = ['VceKeChKEeaOMw70NkE3iw',
            'VcmGXShKEea4ehL5RXz3EQ']
    print('session')
    session = create_session()
    retriever = api.AssetRetriever(session)
    #assets = retriever.get(asset_ids)
    assets = retriever(more)
    print(assets)
| 25,872
|
Python
|
.py
| 583
| 36.391081
| 104
| 0.647353
|
coursera-dl/coursera-dl
| 9,343
| 2,207
| 211
|
LGPL-3.0
|
9/5/2024, 5:11:58 PM (Europe/Amsterdam)
|
14,534
|
test_commandline.py
|
coursera-dl_coursera-dl/coursera/test/test_commandline.py
|
"""
Test command line module.
"""
from coursera import commandline
from coursera.test import test_workflow
def test_class_name_arg_required():
    """With neither --list-courses nor --version, a class name is required."""
    mock_args = test_workflow.MockedCommandLineArgs(
        list_courses=False, version=False)
    assert commandline.class_name_arg_required(mock_args)


def test_class_name_arg_not_required():
    """Either of --list-courses/--version makes the class name optional."""
    for list_courses, version in [(True, False), (False, True), (True, True)]:
        mock_args = test_workflow.MockedCommandLineArgs(
            list_courses=list_courses, version=version)
        assert not commandline.class_name_arg_required(mock_args)
| 707
|
Python
|
.py
| 18
| 34.444444
| 65
| 0.697368
|
coursera-dl/coursera-dl
| 9,343
| 2,207
| 211
|
LGPL-3.0
|
9/5/2024, 5:11:58 PM (Europe/Amsterdam)
|
14,535
|
test_utils.py
|
coursera-dl_coursera-dl/coursera/test/test_utils.py
|
# -*- coding: utf-8 -*-
"""
Test the utility functions.
"""
import datetime
import os
import pytest
import random
import json
from time import time
import requests
import six
from mock import Mock
from coursera import utils
from coursera import coursera_dl
from coursera import api
from coursera.test.utils import slurp_fixture
from coursera.formatting import (format_section, format_resource,
format_combine_number_resource)
from coursera.utils import total_seconds, is_course_complete
@pytest.mark.parametrize(
    "unclean,clean", [
        ('(23:90)', '23-90'),
        ('(:', '-'),
        ('a téest &and a@noòtheèr', 'a_test_and_another'),
        ('Lecture 2.7 - Evaluation and Operators (16:25)',
         'Lecture_2.7_-_Evaluation_and_Operators_16-25'),
        ('Week 3: Data and Abstraction', 'Week_3-_Data_and_Abstraction'),
        (' (Week 1) BRANDING: Marketing Strategy and Brand Positioning',
         'Week_1_BRANDING-__Marketing_Strategy_and_Brand_Positioning'),
        ('test & " adfas', 'test__-_adfas'),  # `"` were changed first to `-`
        (' ', ''),
        ('☂℮﹩т ω☤☂ℌ Ṳᾔ☤ḉ◎ⅾε', '__')
    ]
)
def test_clean_filename(unclean, clean):
    """clean_filename aggressively sanitises names for the filesystem."""
    assert clean == utils.clean_filename(unclean)


@pytest.mark.parametrize(
    "unclean,clean", [
        ('(23:90)', '(23-90)'),
        ('(:', '(-'),
        ('a téest &and a@noòtheèr', 'a téest &and a@noòtheèr'),
        ('Lecture 2.7 - Evaluation and Operators (16:25)',
         'Lecture 2.7 - Evaluation and Operators (16-25)'),
        ('Week 3: Data and Abstraction',
         'Week 3- Data and Abstraction'),
        (' (Week 1) BRANDING: Marketing Strategy and Brand Positioning',
         ' (Week 1) BRANDING- Marketing Strategy and Brand Positioning'),
        ('test & " adfas', 'test & - adfas'),  # `"` are forbidden on Windows
        (' ', u'\xa0'),
        ('☂℮﹩т ω☤☂ℌ Ṳᾔ☤ḉ◎ⅾε', '☂℮﹩т ω☤☂ℌ Ṳᾔ☤ḉ◎ⅾε')
    ]
)
def test_clean_filename_minimal_change(unclean, clean):
    """With minimal_change=True only filesystem-unsafe characters change."""
    assert clean == utils.clean_filename(unclean, minimal_change=True)


@pytest.mark.parametrize(
    "url,format", [
        ('https://class.coursera.org/sub?q=123_en&format=txt', 'txt'),
        ('https://class.coursera.org/sub?q=123_en&format=srt', 'srt'),
        ('https://d396qusza40orc.cloudfront.net/week7-4.pdf', 'pdf'),
        ('https://class.coursera.org/download.mp4?lecture_id=123', 'mp4'),
    ]
)
def test_get_anchor_format(url, format):
    """get_anchor_format derives the file format from a resource URL."""
    assert format == utils.get_anchor_format(url)
def test_random_string():
    """random_string is reproducible under a fixed PRNG seed."""
    random.seed(0)  # set seed for reproducible tests
    res = utils.random_string(8)
    assert len(res) == 8
    # Python 2 and Python 3 use different strategies for generation of
    # PRNG, according to the documentation available at
    # https://docs.python.org/3.4/library/random.html#random.seed
    expected = '0UAqFzWs' if six.PY2 else '2yW4Acq9'
    assert res == expected
def test_fix_url_adds_scheme():
    """fix_url prepends http:// when the scheme is missing."""
    assert utils.fix_url("www.coursera.org") == 'http://www.coursera.org'


def test_fix_url_removes_spaces():
    """fix_url strips surrounding whitespace."""
    assert utils.fix_url(" www.coursera.org ") == 'http://www.coursera.org'


def test_format_combine_resource_works_correctly():
    combined = format_combine_number_resource(
        5, 4, "Moving_the_furniture", 'The_Basics', "mp4")
    assert combined == '05_04_Moving_the_furniture_The_Basics.mp4'


def test_format_combine_resource_works_correctly_without_title():
    combined = format_combine_number_resource(5, 1, "Introduction", '', "mp4")
    assert combined == '05_01_Introduction.mp4'


def test_format_resource_works_correctly():
    assert format_resource(2, "Washing", "Dishes", "mp9") == '02_Washing_Dishes.mp9'


def test_format_resource_works_correctly_without_title():
    assert format_resource(1, "Introduction", '', "mp2") == '01_Introduction.mp2'


def test_format_section_works_correctly():
    assert format_section(9, 'bob', 'WEAVING', False) == '09_bob'


def test_format_section_works_correctly_with_verbose():
    assert format_section(9, 'bill', 'WEAVING', True) == 'WEAVING_09_bill'


def test_fix_url_doesnt_alters_empty_url():
    """fix_url passes None and the empty string through unchanged."""
    assert utils.fix_url(None) is None
    assert utils.fix_url("") == ""
def test_decode_input():
    """decode_input always yields a text (unicode) type."""
    for encoded_input in (str("/home/user/темп"), str("22少女時代22")):
        decoded_input = utils.decode_input(encoded_input)
        assert isinstance(decoded_input, six.text_type), "Decoded input is not a text type."


def test_total_seconds():
    """A 30-day timedelta corresponds to 2592000 seconds."""
    assert total_seconds(datetime.timedelta(days=30)) == 2592000
def test_is_course_complete_should_give_false_if_there_was_recent_update():
    """An update 29 days ago means the course is still active."""
    last_update = time() - total_seconds(datetime.timedelta(days=29))
    assert is_course_complete(last_update) is False


def test_is_course_complete_should_give_true_if_there_was_no_recent_update():
    """An update 31 days ago means the course counts as complete."""
    last_update = time() - total_seconds(datetime.timedelta(days=31))
    assert is_course_complete(last_update) is True
def test_correct_formatting_of_class_URL():
    """Skipped: syllabus URL formatting for the legacy class platform."""
    pytest.skip()
    url = coursera_dl.get_syllabus_url('bob', False)
    assert 'https://class.coursera.org/bob/lecture/index' == url


def test_correct_formatting_of_class_with_preview_URL():
    """Skipped: preview syllabus URL formatting for the legacy class platform."""
    pytest.skip()
    url = coursera_dl.get_syllabus_url('bill', True)
    assert 'https://class.coursera.org/bill/lecture/preview' == url


def test_parse_args():
    """Username/password options and class names are parsed correctly."""
    parsed = coursera_dl.parse_args(['-u', 'bob', '-p', 'bill', 'posa-001'])
    assert parsed.about is False
    assert parsed.class_names == ['posa-001']
    assert parsed.username == 'bob'
    assert parsed.password == 'bill'
def get_mock_session(page_text):
    """Build a requests.Session whose network methods are mocked out.

    Returns a (page, session) pair: ``session.send`` always returns ``page``,
    whose ``text`` attribute is ``page_text``.
    """
    page = Mock()
    page.text = page_text
    page.raise_for_status = Mock()
    session = requests.Session()
    session.prepare_request = Mock(return_value=None)
    session.send = Mock(return_value=page)
    return page, session


def test_get_page():
    """get_page sends the prepared request and returns the page text."""
    page, session = get_mock_session('<page/>')
    result = coursera_dl.get_page(session, 'http://www.not.here')
    session.send.assert_called_once_with(None)
    page.raise_for_status.assert_called_once_with()
    assert result == '<page/>'
def test_grab_hidden_video_url():
    """Skipped: extracts a hidden video URL from a saved HTML fixture."""
    pytest.skip()
    filename = os.path.join(
        os.path.dirname(__file__), "fixtures", "html",
        "hidden-videos_2.html")
    # Use a context manager so the fixture file handle is closed
    # deterministically (the original version leaked it).
    with open(filename) as fixture_file:
        page_text = fixture_file.read()
    page_obj, session = get_mock_session(page_text)
    p = coursera_dl.grab_hidden_video_url(session,
                                          'http://www.hidden.video')
    assert 'video1.mp4' == p
@pytest.mark.parametrize(
    "input,output", [
        ('html/supplement-deduplication.html', 'json/supplement-deduplication.json'),
        ('html/supplement-skip-sites.html', 'json/supplement-skip-sites.json'),
        ('html/supplement-two-zips.html', 'json/supplement-two-zips.json'),
    ]
)
def test_extract_supplement_links(input, output):
    """Links extracted from supplement HTML match the recorded JSON."""
    page_text = slurp_fixture(input)
    expected = json.loads(slurp_fixture(output))
    on_demand = api.CourseraOnDemand(
        session=None, course_id='0', course_name='test_course')
    extracted = on_demand._extract_links_from_text(page_text)
    # Round-tripping through JSON is the easiest way to convert
    # nested tuples to lists before comparing.
    assert json.loads(json.dumps(extracted)) == expected
| 7,584
|
Python
|
.py
| 179
| 36.26257
| 92
| 0.660766
|
coursera-dl/coursera-dl
| 9,343
| 2,207
| 211
|
LGPL-3.0
|
9/5/2024, 5:11:58 PM (Europe/Amsterdam)
|
14,536
|
test_credentials.py
|
coursera-dl_coursera-dl/coursera/test/test_credentials.py
|
# -*- coding: utf-8 -*-
"""
Test retrieving the credentials.
"""
import os.path
import pytest
from coursera import credentials
# Path to a fixture netrc file holding valid-looking credentials.
NETRC = \
    os.path.join(os.path.dirname(__file__),
                 "fixtures", "auth", "netrc")
# Path to a fixture file that is NOT a valid netrc file.
NOT_NETRC = \
    os.path.join(os.path.dirname(__file__),
                 "fixtures", "auth", "not_netrc")
def test_authenticate_through_netrc_with_given_path():
    """Credentials are read from an explicitly given netrc file."""
    username, password = credentials.authenticate_through_netrc(NETRC)
    assert (username, password) == ('user@mail.com', 'secret')


def test_authenticate_through_netrc_raises_exception():
    """An invalid netrc file raises CredentialsError."""
    pytest.raises(
        credentials.CredentialsError,
        credentials.authenticate_through_netrc,
        NOT_NETRC)


def test_get_credentials_with_netrc():
    """get_credentials honours the netrc= keyword."""
    username, password = credentials.get_credentials(netrc=NETRC)
    assert (username, password) == ('user@mail.com', 'secret')


def test_get_credentials_with_invalid_netrc_raises_exception():
    """get_credentials propagates CredentialsError for a bad netrc file."""
    pytest.raises(
        credentials.CredentialsError,
        credentials.get_credentials,
        netrc=NOT_NETRC)


def test_get_credentials_with_username_and_password_given():
    """Explicit username/password are passed straight through."""
    username, password = credentials.get_credentials(username='user',
                                                     password='pass')
    assert (username, password) == ('user', 'pass')
def test_get_credentials_with_username_given(use_keyring=False):
    """Password is read via getpass when only the username is supplied.

    The getpass monkeypatch is restored in a ``finally`` block so a failing
    assertion can no longer leave ``getpass.getpass`` patched for the rest
    of the test session.
    """
    import getpass
    _getpass = getpass.getpass
    getpass.getpass = lambda x: 'pass'
    try:
        username, password = credentials.get_credentials(username='user',
                                                         use_keyring=use_keyring)
        assert username == 'user'
        assert password == 'pass'
    finally:
        getpass.getpass = _getpass
def test_get_credentials_without_username_given_raises_exception():
    """Missing username (and no other source) raises CredentialsError."""
    pytest.raises(
        credentials.CredentialsError,
        credentials.get_credentials)


def test_get_credentials_with_keyring():
    """With use_keyring the password is stored and later retrieved."""
    if not credentials.keyring:
        # keyring is optional; nothing to test when it is unavailable.
        return None
    test_get_credentials_with_username_given(True)
    # Ask again; this time the password comes back without getpass.
    username, password = credentials.get_credentials(username='user',
                                                     use_keyring=True)
    assert (username, password) == ('user', 'pass')
| 2,302
|
Python
|
.py
| 58
| 31.810345
| 77
| 0.664112
|
coursera-dl/coursera-dl
| 9,343
| 2,207
| 211
|
LGPL-3.0
|
9/5/2024, 5:11:58 PM (Europe/Amsterdam)
|
14,537
|
utils.py
|
coursera-dl_coursera-dl/coursera/test/utils.py
|
"""
Helper functions that are only used in tests.
"""
import os
import re
from io import open
from six import iteritems
from coursera.define import IN_MEMORY_MARKER
from coursera.utils import BeautifulSoup
def slurp_fixture(path):
    """Return the text of a fixture file located under ``fixtures/``.

    @param path: Path relative to the test fixtures directory.
    @return: File contents decoded as UTF-8.

    Uses a context manager so the file handle is closed promptly
    (the original version left closing to the garbage collector).
    """
    fixture_path = os.path.join(os.path.dirname(__file__), "fixtures", path)
    with open(fixture_path, encoding='utf8') as fixture_file:
        return fixture_file.read()
def links_to_plain_text(links):
    """
    Converts extracted links into text and cleans up extra whitespace. Only HTML
    sections are converted. This is a helper to be used in tests.

    @param links: Links obtained from such methods as extract_links_from_peer_assignment.
    @type links: @see CourseraOnDemand._extract_links_from_text

    @return: HTML converted to plain text with extra space removed.
    @rtype: str
    """
    result = []
    for filetype, contents in iteritems(links):
        if filetype != 'html':
            continue
        for content, _prefix in contents:
            # Strip the in-memory marker prefix so only the HTML remains.
            if content.startswith(IN_MEMORY_MARKER):
                content = content[len(IN_MEMORY_MARKER):]
            soup = BeautifulSoup(content)
            # Remove non-textual nodes. A plain loop, not a list
            # comprehension used only for its side effects.
            for tag in soup(["script", "style"]):
                tag.extract()
            # Collapse runs of whitespace to single spaces.
            text = re.sub(r'[ \t\r\n]+', ' ', soup.get_text()).strip()
            result.append(text)
    return ''.join(result)
| 1,315
|
Python
|
.py
| 33
| 33.030303
| 89
| 0.66011
|
coursera-dl/coursera-dl
| 9,343
| 2,207
| 211
|
LGPL-3.0
|
9/5/2024, 5:11:58 PM (Europe/Amsterdam)
|
14,538
|
test_cookies.py
|
coursera-dl_coursera-dl/coursera/test/test_cookies.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Test syllabus parsing.
"""
import os.path
import requests
from six.moves import http_cookiejar as cookielib
from coursera import cookies
def _cookies_fixture_path(filename):
    """Absolute path of a cookie fixture file shipped with these tests."""
    return os.path.join(os.path.dirname(__file__),
                        "fixtures", "cookies", filename)


# Cookie-jar fixtures in various states, used throughout this module.
FIREFOX_COOKIES = _cookies_fixture_path("firefox_cookies.txt")
CHROME_COOKIES = _cookies_fixture_path("chrome_cookies.txt")
FIREFOX_COOKIES_WITHOUT_COURSERA = \
    _cookies_fixture_path("firefox_cookies_without_coursera.txt")
FIREFOX_COOKIES_EXPIRED = _cookies_fixture_path("firefox_cookies_expired.txt")
class MockResponse:
    """Minimal stand-in for an HTTP response whose status check is a no-op."""

    def raise_for_status(self):
        """Pretend the request succeeded; never raises."""
        return None
class MockSession:
    """Session double that records whether ``get`` was ever invoked."""

    def __init__(self):
        # Flipped to True by the first call to get().
        self.called = False

    def get(self, url):
        """Mark the session as used and hand back a successful MockResponse."""
        self.called = True
        return MockResponse()
def test_get_cookiejar_from_firefox_cookies():
cj = cookies.get_cookie_jar(FIREFOX_COOKIES)
assert isinstance(cj, cookielib.MozillaCookieJar)
def test_get_cookiejar_from_chrome_cookies():
cj = cookies.get_cookie_jar(CHROME_COOKIES)
assert isinstance(cj, cookielib.MozillaCookieJar)
def test_find_cookies_for_class():
cj = cookies.find_cookies_for_class(FIREFOX_COOKIES, 'class-001')
assert isinstance(cj, requests.cookies.RequestsCookieJar)
assert len(cj) == 6
domains = cj.list_domains()
assert len(domains) == 2
assert '.coursera.org' in domains
assert 'class.coursera.org' in domains
paths = cj.list_paths()
assert len(paths) == 2
assert '/' in paths
assert '/class-001' in paths
def test_did_not_find_cookies_for_class():
cj = cookies.find_cookies_for_class(
FIREFOX_COOKIES_WITHOUT_COURSERA, 'class-001')
assert isinstance(cj, requests.cookies.RequestsCookieJar)
assert len(cj) == 0
def test_did_not_find_expired_cookies_for_class():
cj = cookies.find_cookies_for_class(
FIREFOX_COOKIES_EXPIRED, 'class-001')
assert isinstance(cj, requests.cookies.RequestsCookieJar)
assert len(cj) == 2
def test_we_have_enough_cookies():
cj = cookies.find_cookies_for_class(FIREFOX_COOKIES, 'class-001')
enough = cookies.do_we_have_enough_cookies(cj, 'class-001')
assert enough
def test_we_dont_have_enough_cookies():
cj = cookies.find_cookies_for_class(
FIREFOX_COOKIES_WITHOUT_COURSERA, 'class-001')
enough = cookies.do_we_have_enough_cookies(cj, 'class-001')
assert not enough
def test_make_cookie_values():
cj = cookies.find_cookies_for_class(FIREFOX_COOKIES, 'class-001')
values = 'csrf_token=csrfclass001; session=sessionclass1'
cookie_values = cookies.make_cookie_values(cj, 'class-001')
assert cookie_values == values
| 2,861
|
Python
|
.py
| 72
| 34.375
| 79
| 0.696838
|
coursera-dl/coursera-dl
| 9,343
| 2,207
| 211
|
LGPL-3.0
|
9/5/2024, 5:11:58 PM (Europe/Amsterdam)
|
14,539
|
test_filter.py
|
coursera-dl_coursera-dl/coursera/test/test_filter.py
|
from coursera.filtering import skip_format_url
def test_filter():
test_cases = [
(True, 'ipynb', 'http://localhost:8888/notebooks/machine-learning-specialization-private/course-4/2_kmeans-with-text-data_sklearn.ipynb#Takeaway'),
(True, '', 'http://developer.android.com/reference/android/location/Location.html'),
(True, '', 'http://www.apache.org/licenses/LICENSE-2.0'),
(True, 'com', 'http://swirlstats.com'),
(True, 'com', 'http://swirlstats.com/'),
(True, 'com', 'mailto:user@server.com'),
(True, 'txt*md', 'http://www.apache.org/licenses/LICENSE.txt-md'),
(False, 'zip', 'https://s3-us-west-2.amazonaws.com/coursera-temporary/images.zip'),
(False, 'html', 'http://scikit-learn.org/stable/modules/generated/sklearn.feature_extraction.text.TfidfVectorizer.html'),
(False, 'html', 'http://scikit-learn.org/stable/modules/generated/sklearn.feature_extraction.text.TfidfVectorizer.html#subsection'),
(False, 'asp', 'http://www.investopedia.com/terms/t/transaction.asp'),
(False, 'cfm', 'http://papers.ssrn.com/sol3/papers.cfm?abstract_id=2235922'),
(False, 'cfm', 'http://papers.ssrn.com/sol3/papers.cfm?abstract_id=937020.'),
(False, 'cgi', 'http://chicagounbound.uchicago.edu/cgi/viewcontent.cgi?article=1401&context=law_and_economics'),
(False, 'do', 'http://eur-lex.europa.eu/LexUriServ/LexUriServ.do?uri=CELEX:12008E101:EN:HTML'),
(False, 'edu', 'https://www.facebook.com/illinois.edu'),
(False, 'file', 'http://phx.corporate-ir.net/External.File?item=UGFyZW50SUQ9MjMzODh8Q2hpbGRJRD0tMXxUeXBlPTM=&t=1'),
(False, 'pdf', 'http://www.gpo.gov/fdsys/pkg/FR-2011-04-14/pdf/2011-9020.pdf'),
(False, 'pdf', 'https://d3c33hcgiwev3.cloudfront.net/_fc37f1cdf6ffbc39a2b0114bb281ddbe_IMBA-2015-4_ECON529_Esfahani_M1L3V2.pdf?Expires=1467676800&Signature=OO0ZJwbXdj9phKyVm6FA5ueCzFZzlEd15-10txezfIIui~bu18Omcnzhr0MgjoCi3TY06R0MT0NzKsAAmdJu4cQZzhqShfRUB5VsOl~xQbXzIRgqMHR15M7ro4eTX6DvTK3-kmTST6sEAnxUcdKCyQrliSoXVOkE13e5dwWlHAA_&Key-Pair-Id=APKAJLTNE6QMUY6HBC5A'),
]
for expected_result, fmt, url in test_cases:
assert expected_result == skip_format_url(fmt, url)
| 2,229
|
Python
|
.py
| 25
| 81.12
| 372
| 0.707727
|
coursera-dl/coursera-dl
| 9,343
| 2,207
| 211
|
LGPL-3.0
|
9/5/2024, 5:11:58 PM (Europe/Amsterdam)
|
14,540
|
test_downloaders.py
|
coursera-dl_coursera-dl/coursera/test/test_downloaders.py
|
# -*- coding: utf-8 -*-
"""
Test the downloaders.
"""
from coursera import downloaders
from coursera import coursera_dl
from coursera.filtering import find_resources_to_get
import pytest
@pytest.fixture
def sample_bag():
sample_bag = {
'mp4': [['h://url1/lc1.mp4', 'video']],
'pdf': [['h://url2/lc2.pdf', 'slides']],
'txt': [['h://url3/lc3.txt', 'subtitle']]
}
return sample_bag
def test_collect_all_resources(sample_bag):
res = find_resources_to_get(sample_bag, 'all', None)
assert [('mp4', 'h://url1/lc1.mp4', 'video'),
('pdf', 'h://url2/lc2.pdf', 'slides'),
('txt', 'h://url3/lc3.txt', 'subtitle')] == sorted(res)
def test_collect_only_pdfs(sample_bag):
res = find_resources_to_get(sample_bag, 'pdf', None)
assert [('pdf', 'h://url2/lc2.pdf', 'slides')] == sorted(res)
def test_collect_with_filtering(sample_bag):
res = find_resources_to_get(sample_bag, 'all', 'de')
res = sorted(res)
assert [('mp4', 'h://url1/lc1.mp4', 'video'),
('pdf', 'h://url2/lc2.pdf', 'slides')] == res
# External Downloader
def _ext_get_session():
import time
import requests
expires = int(time.time() + 60 * 60 * 24 * 365 * 50)
s = requests.Session()
s.cookies.set('csrf_token', 'csrfclass001',
domain="www.coursera.org", expires=expires)
s.cookies.set('session', 'sessionclass1',
domain="www.coursera.org", expires=expires)
s.cookies.set('k', 'v',
domain="www.example.org", expires=expires)
return s
def test_bin_not_specified():
pytest.raises(RuntimeError, downloaders.ExternalDownloader, None)
def test_bin_not_found_raises_exception():
d = downloaders.ExternalDownloader(None, bin='no_way_this_exists')
d._prepare_cookies = lambda cmd, cv: None
d._create_command = lambda x, y: ['no_way_this_exists']
pytest.raises(OSError, d._start_download, 'url', 'filename', False)
def test_bin_is_set():
d = downloaders.ExternalDownloader(None, bin='test')
assert d.bin == 'test'
def test_prepare_cookies():
s = _ext_get_session()
d = downloaders.ExternalDownloader(s, bin="test")
def mock_add_cookies(cmd, cv):
cmd.append(cv)
d._add_cookies = mock_add_cookies
command = []
d._prepare_cookies(command, 'http://www.coursera.org')
assert 'csrf_token=csrfclass001' in command[0]
assert 'session=sessionclass1' in command[0]
def test_prepare_cookies_does_nothing():
s = _ext_get_session()
s.cookies.clear(domain="www.coursera.org")
d = downloaders.ExternalDownloader(s, bin="test")
command = []
def mock_add_cookies(cmd, cookie_values):
pass
d._add_cookies = mock_add_cookies
d._prepare_cookies(command, 'http://www.coursera.org')
assert command == []
def test_start_command_raises_exception():
d = downloaders.ExternalDownloader(None, bin='test')
d._add_cookies = lambda cmd, cookie_values: None
pytest.raises(
NotImplementedError,
d._create_command, 'url', 'filename')
def test_wget():
s = _ext_get_session()
d = downloaders.WgetDownloader(s)
command = d._create_command('download_url', 'save_to')
assert command[0] == 'wget'
assert 'download_url' in command
assert 'save_to' in command
d._prepare_cookies(command, 'http://www.coursera.org')
assert any("Cookie: " in e for e in command)
assert any("csrf_token=csrfclass001" in e for e in command)
assert any("session=sessionclass1" in e for e in command)
def test_curl():
s = _ext_get_session()
d = downloaders.CurlDownloader(s)
command = d._create_command('download_url', 'save_to')
assert command[0] == 'curl'
assert 'download_url' in command
assert 'save_to' in command
d._prepare_cookies(command, 'http://www.coursera.org')
assert any("csrf_token=csrfclass001" in e for e in command)
assert any("session=sessionclass1" in e for e in command)
def test_aria2():
s = _ext_get_session()
d = downloaders.Aria2Downloader(s)
command = d._create_command('download_url', 'save_to')
assert command[0] == 'aria2c'
assert 'download_url' in command
assert 'save_to' in command
d._prepare_cookies(command, 'http://www.coursera.org')
assert any("Cookie: " in e for e in command)
assert any("csrf_token=csrfclass001" in e for e in command)
assert any("session=sessionclass1" in e for e in command)
def test_axel():
s = _ext_get_session()
d = downloaders.AxelDownloader(s)
command = d._create_command('download_url', 'save_to')
assert command[0] == 'axel'
assert 'download_url' in command
assert 'save_to' in command
d._prepare_cookies(command, 'http://www.coursera.org')
assert any("Cookie: " in e for e in command)
assert any("csrf_token=csrfclass001" in e for e in command)
assert any("session=sessionclass1" in e for e in command)
# Native Downloader
def test_all_attempts_have_failed():
    """NativeDownloader returns False once every retry attempt has failed.

    ``time.sleep`` is patched out so the retry back-off does not slow the
    suite down; it is restored in a ``finally`` block so that a failing
    assertion cannot leave the patch in place for other tests (the
    original restored it only on the success path).
    """
    import time

    class IObject(object):
        pass

    class MockSession(object):
        def get(self, url, stream=True, headers={}):
            # Always report HTTP 400 so every attempt fails.
            object_ = IObject()
            object_.status_code = 400
            object_.reason = None
            return object_

    _sleep = time.sleep
    time.sleep = lambda interval: 0
    try:
        session = MockSession()
        d = downloaders.NativeDownloader(session)
        assert d._start_download('download_url', 'save_to', False) is False
    finally:
        # Always restore the real sleep, even if the assert above fails.
        time.sleep = _sleep
# Download Progress
def _get_progress(total):
p = downloaders.DownloadProgress(total)
p.report_progress = lambda: None
return p
def test_calc_percent_if_total_is_zero():
p = _get_progress(0)
assert p.calc_percent() == '--%'
p.read(10)
assert p.calc_percent() == '--%'
def test_calc_percent_if_not_yet_read():
p = _get_progress(100)
assert (p.calc_percent() ==
'[ ] 0%')
def test_calc_percent_if_read():
p = _get_progress(100)
p.read(2)
assert (p.calc_percent() ==
'[# ] 2%')
p.read(18)
assert (p.calc_percent() ==
'[########## ] 20%')
p = _get_progress(2300)
p.read(177)
assert (p.calc_percent() ==
'[### ] 7%')
def test_calc_speed_if_total_is_zero():
p = _get_progress(0)
assert p.calc_speed() == '---b/s'
def test_calc_speed_if_not_yet_read():
p = _get_progress(100)
assert p.calc_speed() == '---b/s'
def test_calc_speed_ifread():
p = _get_progress(10000)
p.read(2000)
p._now = p._start + 1000
assert p.calc_speed() == '2.00B/s'
| 6,824
|
Python
|
.py
| 175
| 33.354286
| 71
| 0.624562
|
coursera-dl/coursera-dl
| 9,343
| 2,207
| 211
|
LGPL-3.0
|
9/5/2024, 5:11:58 PM (Europe/Amsterdam)
|
14,541
|
test_parsing.py
|
coursera-dl_coursera-dl/coursera/test/test_parsing.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Test functionality of coursera module.
"""
import json
import os.path
import pytest
from six import iteritems
from mock import patch, Mock, mock_open
from coursera import coursera_dl
from coursera import api
from coursera.define import IN_MEMORY_EXTENSION, IN_MEMORY_MARKER
# JSon Handling
@pytest.fixture
def get_page(monkeypatch):
monkeypatch.setattr(coursera_dl, 'get_page', Mock())
@pytest.fixture
def json_path():
return os.path.join(os.path.dirname(__file__), "fixtures", "json")
def test_that_should_not_dl_if_file_exist(get_page, json_path):
pytest.skip()
coursera_dl.get_page = Mock()
coursera_dl.download_about(object(), "matrix-002", json_path)
assert coursera_dl.get_page.called is False
def test_that_we_parse_and_write_json_correctly(get_page, json_path):
pytest.skip()
unprocessed_json = os.path.join(os.path.dirname(__file__),
"fixtures", "json", "unprocessed.json")
raw_data = open(unprocessed_json).read()
coursera_dl.get_page = lambda x, y: raw_data
open_mock = mock_open()
with patch('coursera.coursera_dl.open', open_mock, create=True):
coursera_dl.download_about(object(), "networksonline-002", json_path)
about_json = os.path.join(json_path, 'networksonline-002-about.json')
open_mock.assert_called_once_with(about_json, 'w')
data = json.loads(open_mock().write.call_args[0][0])
assert data['id'] == 394
assert data['shortName'] == 'networksonline'
# Test Syllabus Parsing
@pytest.fixture
def get_old_style_video(monkeypatch):
pytest.skip()
"""
Mock some methods that would, otherwise, create repeatedly many web
requests.
More specifically, we mock:
* the search for hidden videos
* the actual download of videos
"""
# Mock coursera_dl.grab_hidden_video_url
monkeypatch.setattr(coursera_dl, 'grab_hidden_video_url',
lambda session, href: None)
# Mock coursera_dl.get_old_style_video
monkeypatch.setattr(coursera_dl, 'get_old_style_video',
lambda session, href: None)
# @pytest.mark.parametrize(
# "filename,num_sections,num_lectures,num_resources,num_videos", [
# ("regular-syllabus.html", 23, 102, 502, 102),
# ("links-to-wikipedia.html", 5, 37, 158, 36),
# ("preview.html", 20, 106, 106, 106),
# ("sections-not-to-be-missed.html", 9, 61, 224, 61),
# ("sections-not-to-be-missed-2.html", 20, 121, 397, 121),
# ("parsing-datasci-001-with-bs4.html", 10, 97, 358, 97), # issue 134
# ("parsing-startup-001-with-bs4.html", 4, 44, 136, 44), # issue 137
# ("parsing-wealthofnations-001-with-bs4.html", 8, 74, 296, 74), # issue 131
# ("parsing-malsoftware-001-with-bs4.html", 3, 18, 56, 16), # issue 148
# ("multiple-resources-with-the-same-format.html", 18, 97, 478, 97),
# ]
# )
def test_parse(get_old_style_video, filename, num_sections, num_lectures,
num_resources, num_videos):
pytest.skip()
filename = os.path.join(os.path.dirname(__file__), "fixtures", "html",
filename)
with open(filename) as syllabus:
syllabus_page = syllabus.read()
sections = coursera_dl.parse_old_style_syllabus(None, syllabus_page, None)
# section count
assert len(sections) == num_sections
# lecture count
lectures = [lec for sec in sections for lec in sec[1]]
assert len(lectures) == num_lectures
# resource count
resources = [(res[0], len(res[1]))
for lec in lectures for res in iteritems(lec[1])]
assert sum(r for f, r in resources) == num_resources
# mp4 count
assert sum(r for f, r in resources if f == "mp4") == num_videos
@patch('coursera.api.get_page')
def test_get_on_demand_supplement_url_accumulates_assets(mocked):
input = open(
os.path.join(os.path.dirname(__file__),
"fixtures", "json", "supplement-multiple-assets-input.json")).read()
expected_output = json.load(open(
os.path.join(os.path.dirname(__file__),
"fixtures", "json", "supplement-multiple-assets-output.json")))
mocked.return_value = json.loads(input)
course = api.CourseraOnDemand(
session=None, course_id='0', course_name='test_course')
output = course.extract_links_from_supplement('element_id')
# Make sure that SOME html content has been extracted, but remove
# it immediately because it's a hassle to properly prepare test input
# for it. FIXME later.
assert 'html' in output
del output['html']
# This is the easiest way to convert nested tuples to lists
output = json.loads(json.dumps(output))
assert expected_output == output
| 4,888
|
Python
|
.py
| 109
| 38.908257
| 89
| 0.654357
|
coursera-dl/coursera-dl
| 9,343
| 2,207
| 211
|
LGPL-3.0
|
9/5/2024, 5:11:58 PM (Europe/Amsterdam)
|
14,542
|
test_workflow.py
|
coursera-dl_coursera-dl/coursera/test/test_workflow.py
|
from os.path import normpath
import pytest
import requests
from requests.exceptions import RequestException
from coursera.workflow import CourseraDownloader, _iter_modules, _walk_modules
from coursera.commandline import parse_args
from coursera.parallel import ConsecutiveDownloader, ParallelDownloader
from coursera.downloaders import Downloader
class MockedCommandLineArgs(object):
"""
This mock uses default arguments from parse_args and allows to overwrite
them in constructor.
"""
def __init__(self, **kwargs):
args = parse_args('-u username -p password test_class'.split())
self.__dict__.update(args.__dict__)
self.__dict__.update(kwargs)
def __repr__(self):
return self.__dict__.__repr__()
class MockedFailingDownloader(Downloader):
"""
This mock will raise whatever exception you pass to it in constructor
in _start_download method. Pass None to prevent any exception.
"""
def __init__(self, exception_to_throw):
self._exception_to_throw = exception_to_throw
def _start_download(self, *args, **kwargs):
if self._exception_to_throw is None:
return
raise self._exception_to_throw
TEST_URL = "https://api.coursera.org/api/test-url"


def make_test_modules():
    """Build the smallest syllabus structure the workflow tests need.

    One section containing one module with one lecture, whose single
    'en.txt' resource points at TEST_URL. Built from named intermediates
    rather than one deeply nested literal for readability.
    """
    resource = [TEST_URL, "title"]
    lecture = ["lecture1", {"en.txt": [resource]}]
    module = ["module1", [lecture]]
    section = ["section1", [module]]
    return [section]
@pytest.mark.parametrize(
'expected_failed_urls,exception_to_throw,downloader_wrapper_class', [
([], None, ConsecutiveDownloader),
([], None, ParallelDownloader),
([TEST_URL], RequestException('Test exception'), ConsecutiveDownloader),
([TEST_URL], RequestException('Test exception'), ParallelDownloader),
([TEST_URL], Exception('Test exception'), ConsecutiveDownloader),
([TEST_URL], Exception('Test exception'), ParallelDownloader),
([TEST_URL], ValueError('Test exception'), ConsecutiveDownloader),
([TEST_URL], ValueError('Test exception'), ParallelDownloader),
([TEST_URL], AttributeError('Test exception'), ConsecutiveDownloader),
([TEST_URL], AttributeError('Test exception'), ParallelDownloader),
]
)
def test_failed_urls_are_collected(expected_failed_urls,
exception_to_throw,
downloader_wrapper_class):
"""
This test makes sure that if there was an exception in the file downloader,
downloader wrapper will intercept it and course downloader will record
the problematic URL.
"""
file_downloader = MockedFailingDownloader(exception_to_throw)
course_downloader = CourseraDownloader(
downloader=downloader_wrapper_class(file_downloader),
commandline_args=MockedCommandLineArgs(overwrite=True),
class_name='test_class',
path='',
ignored_formats=None,
disable_url_skipping=False)
modules = make_test_modules()
course_downloader.download_modules(modules)
assert expected_failed_urls == course_downloader.failed_urls
def test_iter_modules():
"""
Test that all modules are iterated and intermediate values are formatted
correctly. Filtering is not tested at the moment.
"""
modules = make_test_modules()
args = MockedCommandLineArgs()
expected_output = [
(0, '01_section1'),
(0, normpath('test_class/01_section1/01_module1')),
(0, 'lecture1', 'en.txt', 'title'),
('en.txt', 'https://api.coursera.org/api/test-url', 'title')
]
collected_output = []
for module in _iter_modules(modules=modules, class_name='test_class',
path='', ignored_formats=None, args=args):
collected_output.append((module.index, module.name))
for section in module.sections:
collected_output.append((section.index, section.dir))
for lecture in section.lectures:
for resource in lecture.resources:
collected_output.append((lecture.index, lecture.name,
resource.fmt, resource.title))
collected_output.append((resource.fmt, resource.url, resource.title))
assert expected_output == collected_output
def test_walk_modules():
"""
Test _walk_modules, a flattened version of _iter_modules.
"""
modules = make_test_modules()
args = MockedCommandLineArgs()
expected_output = [
(0, '01_section1',
0, normpath('test_class/01_section1/01_module1'),
0, 'lecture1', normpath('test_class/01_section1/01_module1/01_lecture1_title.en.txt'),
'https://api.coursera.org/api/test-url')]
collected_output = []
for module, section, lecture, resource in _walk_modules(
modules=modules, class_name='test_class',
path='', ignored_formats=None, args=args):
collected_output.append(
(module.index, module.name,
section.index, section.dir,
lecture.index, lecture.name, lecture.filename(resource.fmt, resource.title),
resource.url)
)
assert expected_output == collected_output
| 5,460
|
Python
|
.py
| 131
| 32.587786
| 95
| 0.630537
|
coursera-dl/coursera-dl
| 9,343
| 2,207
| 211
|
LGPL-3.0
|
9/5/2024, 5:11:58 PM (Europe/Amsterdam)
|
14,543
|
tasks.py
|
jrnl-org_jrnl/tasks.py
|
# Copyright © 2012-2023 jrnl contributors
# License: https://www.gnu.org/licenses/gpl-3.0.html
import json
import os
import pathlib
import subprocess
import requests
import xmltodict
DOCS_URL = "http://127.0.0.1:8000"
SITEMAP_FILENAME = "sitemap.xml"
CONFIG_FILENAME = "config.json"
def delete_files(files):
    """Remove every path in *files*; paths that do not exist are ignored."""
    for path in map(pathlib.Path, files):
        path.unlink(missing_ok=True)
def run_shell(command):
# Required to run NPM commands in Windows and *nix
subprocess.run(command, check=True, shell=True)
def generate_sitemap():
sitemap = requests.get(f"{DOCS_URL}/{SITEMAP_FILENAME}")
with open(SITEMAP_FILENAME, "wb") as f:
f.write(sitemap.content)
def generate_pa11y_config_from_sitemap():
with open(SITEMAP_FILENAME) as f:
xml_sitemap = xmltodict.parse(f.read())
urls = [
f"{DOCS_URL}/",
f"{DOCS_URL}/search.html?q=jrnl",
]
urls += [url["loc"] for url in xml_sitemap["urlset"]["url"]]
with open(CONFIG_FILENAME, "w") as f:
f.write(
json.dumps(
{
"defaults": {"chromeLaunchConfig": {"args": ["--no-sandbox"]}},
"urls": urls,
}
)
)
def output_file(file):
    """Echo *file*'s contents inside a GitHub Actions log group.

    Does nothing at all unless the CI environment variable is set, so
    local runs stay quiet.
    """
    running_in_ci = os.getenv("CI", False)
    if not running_in_ci:
        return
    print(f"::group::{file}")
    with open(file) as handle:
        contents = handle.read()
    print(contents)
    print("::endgroup::")
| 1,412
|
Python
|
.py
| 45
| 25
| 83
| 0.616012
|
jrnl-org/jrnl
| 6,434
| 519
| 140
|
GPL-3.0
|
9/5/2024, 5:11:58 PM (Europe/Amsterdam)
|
14,544
|
conftest.py
|
jrnl-org_jrnl/tests/conftest.py
|
# Copyright © 2012-2023 jrnl contributors
# License: https://www.gnu.org/licenses/gpl-3.0.html
from pytest import mark
from pytest import skip
from jrnl.os_compat import on_posix
from jrnl.os_compat import on_windows
pytest_plugins = [
"tests.lib.fixtures",
"tests.lib.given_steps",
"tests.lib.when_steps",
"tests.lib.then_steps",
]
def pytest_bdd_apply_tag(tag, function):
# skip markers
if tag == "skip_win":
marker = mark.skipif(on_windows(), reason="Skip test on Windows")
elif tag == "skip_posix":
marker = mark.skipif(on_posix(), reason="Skip test on Mac/Linux")
# only on OS markers
elif tag == "on_win":
marker = mark.skipif(not on_windows(), reason="Skip test not on Windows")
elif tag == "on_posix":
marker = mark.skipif(not on_posix(), reason="Skip test not on Mac/Linux")
else:
# Fall back to pytest-bdd's default behavior
return None
marker(function)
return True
def pytest_runtest_setup(item):
markers = [mark.name for mark in item.iter_markers()]
on_win = on_windows()
on_nix = on_posix()
if "skip_win" in markers and on_win:
skip("Skip test on Windows")
if "skip_posix" in markers and on_nix:
skip("Skip test on Mac/Linux")
if "on_win" in markers and not on_win:
skip("Skip test not on Windows")
if "on_posix" in markers and not on_nix:
skip("Skip test not on Mac/Linux")
| 1,463
|
Python
|
.py
| 40
| 31.225
| 81
| 0.660752
|
jrnl-org/jrnl
| 6,434
| 519
| 140
|
GPL-3.0
|
9/5/2024, 5:11:58 PM (Europe/Amsterdam)
|
14,545
|
test_os_compat.py
|
jrnl-org_jrnl/tests/unit/test_os_compat.py
|
# Copyright © 2012-2023 jrnl contributors
# License: https://www.gnu.org/licenses/gpl-3.0.html
from unittest import mock
import pytest
from jrnl.os_compat import on_posix
from jrnl.os_compat import on_windows
from jrnl.os_compat import split_args
@pytest.mark.parametrize(
"systems",
[
["linux", False],
["win32", True],
["cygwin", False],
["msys", False],
["darwin", False],
["os2", False],
["os2emx", False],
["riscos", False],
["atheos", False],
["freebsd7", False],
["freebsd8", False],
["freebsdN", False],
["openbsd6", False],
],
)
def test_on_windows(systems):
osname, expected_on_windows = systems[0], systems[1]
with mock.patch("jrnl.os_compat.platform", osname):
assert on_windows() == expected_on_windows
@pytest.mark.parametrize(
"systems",
[
["linux", True],
["win32", False],
["cygwin", True],
["msys", True],
["darwin", True],
["os2", True],
["os2emx", True],
["riscos", True],
["atheos", True],
["freebsd7", True],
["freebsd8", True],
["freebsdN", True],
["openbsd6", True],
],
)
def test_on_posix(systems):
osname, expected_on_posix = systems[0], systems[1]
with mock.patch("jrnl.os_compat.platform", osname):
assert on_posix() == expected_on_posix
@pytest.mark.parametrize(
"args",
[
["notepad", ["notepad"]],
["subl -w", ["subl", "-w"]],
[
'"C:\\Program Files\\Sublime Text 3\\subl.exe" -w',
['"C:\\Program Files\\Sublime Text 3\\subl.exe"', "-w"],
],
],
)
def test_split_args_on_windows(args):
input_arguments, expected_split_args = args[0], args[1]
with mock.patch("jrnl.os_compat.on_windows", lambda: True):
assert split_args(input_arguments) == expected_split_args
@pytest.mark.parametrize(
    "args",
    [
        ["vim", ["vim"]],
        [
            'vim -f +Goyo +Limelight "+set spell linebreak"',
            ["vim", "-f", "+Goyo", "+Limelight", '"+set spell linebreak"'],
        ],
    ],
)
def test_split_args_on_not_windows(args):
    """split_args must take the non-Windows code path when not on Windows.

    Bug fix: the original patched ``on_windows`` to return True, so this
    test duplicated the Windows test above and never exercised the POSIX
    branch it is named for.
    """
    input_arguments, expected_split_args = args[0], args[1]
    # NOTE(review): if the POSIX branch uses shlex quote-stripping, the
    # quoted expected value above may need its quotes removed — verify
    # against jrnl.os_compat.split_args.
    with mock.patch("jrnl.os_compat.on_windows", lambda: False):
        assert split_args(input_arguments) == expected_split_args
| 2,418
|
Python
|
.py
| 80
| 23.7875
| 75
| 0.568543
|
jrnl-org/jrnl
| 6,434
| 519
| 140
|
GPL-3.0
|
9/5/2024, 5:11:58 PM (Europe/Amsterdam)
|
14,546
|
test_override.py
|
jrnl-org_jrnl/tests/unit/test_override.py
|
# Copyright © 2012-2023 jrnl contributors
# License: https://www.gnu.org/licenses/gpl-3.0.html
from argparse import Namespace
import pytest
from jrnl.override import _convert_dots_to_list
from jrnl.override import _get_config_node
from jrnl.override import _get_key_and_value_from_pair
from jrnl.override import _recursively_apply
from jrnl.override import apply_overrides
@pytest.fixture()
def minimal_config():
cfg = {
"colors": {"body": "red", "date": "green"},
"default": "/tmp/journal.jrnl",
"editor": "vim",
"journals": {"default": "/tmp/journals/journal.jrnl"},
}
return cfg
def expected_args(overrides):
    """Build the argparse Namespace jrnl's CLI would produce, with overrides.

    Starts from the complete set of default command-line values and merges
    the given *overrides* mapping on top of them.
    """
    defaults = dict(
        contains=None,
        debug=False,
        delete=False,
        edit=False,
        end_date=None,
        today_in_history=False,
        month=None,
        day=None,
        year=None,
        excluded=[],
        export=False,
        filename=None,
        limit=None,
        on_date=None,
        preconfig_cmd=None,
        postconfig_cmd=None,
        short=False,
        starred=False,
        start_date=None,
        strict=False,
        tags=False,
        text=[],
        config_override=[],
    )
    defaults.update(overrides)
    return Namespace(**defaults)
def test_apply_override(minimal_config):
overrides = {"config_override": [["editor", "nano"]]}
apply_overrides(expected_args(overrides), minimal_config)
assert minimal_config["editor"] == "nano"
def test_override_dot_notation(minimal_config):
overrides = {"config_override": [["colors.body", "blue"]]}
apply_overrides(expected_args(overrides), minimal_config)
assert minimal_config["colors"] == {"body": "blue", "date": "green"}
def test_multiple_overrides(minimal_config):
overrides = {
"config_override": [
["colors.title", "magenta"],
["editor", "nano"],
["journals.burner", "/tmp/journals/burner.jrnl"],
]
}
actual = apply_overrides(expected_args(overrides), minimal_config)
assert actual["editor"] == "nano"
assert actual["colors"]["title"] == "magenta"
assert "burner" in actual["journals"]
assert actual["journals"]["burner"] == "/tmp/journals/burner.jrnl"
def test_recursively_apply():
cfg = {"colors": {"body": "red", "title": "green"}}
cfg = _recursively_apply(cfg, ["colors", "body"], "blue")
assert cfg["colors"]["body"] == "blue"
def test_get_config_node(minimal_config):
assert len(minimal_config.keys()) == 4
assert _get_config_node(minimal_config, "editor") == "vim"
assert _get_config_node(minimal_config, "display_format") is None
def test_get_kv_from_pair():
pair = {"ab.cde": "fgh"}
k, v = _get_key_and_value_from_pair(pair)
assert k == "ab.cde"
assert v == "fgh"
class TestDotNotationToList:
def test_unpack_dots_to_list(self):
keys = "a.b.c.d.e.f"
keys_list = _convert_dots_to_list(keys)
assert len(keys_list) == 6
def test_sequential_delimiters(self):
k = "g.r..h.v"
k_l = _convert_dots_to_list(k)
assert len(k_l) == 4
| 3,191
|
Python
|
.py
| 88
| 30
| 72
| 0.612987
|
jrnl-org/jrnl
| 6,434
| 519
| 140
|
GPL-3.0
|
9/5/2024, 5:11:58 PM (Europe/Amsterdam)
|
14,547
|
test_controller.py
|
jrnl-org_jrnl/tests/unit/test_controller.py
|
# Copyright © 2012-2023 jrnl contributors
# License: https://www.gnu.org/licenses/gpl-3.0.html
import random
import string
from unittest import mock
import pytest
import jrnl
from jrnl.args import parse_args
from jrnl.controller import _display_search_results
@pytest.fixture
def random_string():
return "".join(random.choices(string.ascii_uppercase + string.digits, k=25))
@pytest.mark.parametrize("export_format", ["pretty", "short"])
def test_display_search_results_pretty_short(export_format):
mock_args = parse_args(["--format", export_format])
test_journal = jrnl.journals.Journal()
test_journal.new_entry("asdf")
test_journal.pprint = mock.Mock()
_display_search_results(mock_args, test_journal)
test_journal.pprint.assert_called_once()
@pytest.mark.parametrize(
"export_format", ["markdown", "json", "xml", "yaml", "fancy", "dates"]
)
@mock.patch("jrnl.plugins.get_exporter")
@mock.patch("builtins.print")
def test_display_search_results_builtin_plugins(
mock_print, mock_exporter, export_format, random_string
):
test_filename = random_string
mock_args = parse_args(["--format", export_format, "--file", test_filename])
test_journal = jrnl.journals.Journal()
test_journal.new_entry("asdf")
mock_export = mock.Mock()
mock_exporter.return_value.export = mock_export
_display_search_results(mock_args, test_journal)
mock_exporter.assert_called_once_with(export_format)
mock_export.assert_called_once_with(test_journal, test_filename)
mock_print.assert_called_once_with(mock_export.return_value)
| 1,595
|
Python
|
.py
| 38
| 38.526316
| 80
| 0.745455
|
jrnl-org/jrnl
| 6,434
| 519
| 140
|
GPL-3.0
|
9/5/2024, 5:11:58 PM (Europe/Amsterdam)
|
14,548
|
test_install.py
|
jrnl-org_jrnl/tests/unit/test_install.py
|
# Copyright © 2012-2023 jrnl contributors
# License: https://www.gnu.org/licenses/gpl-3.0.html
import sys
from unittest import mock
import pytest
@pytest.mark.filterwarnings(
"ignore:.*imp module is deprecated.*"
) # ansiwrap spits out an unrelated warning
def test_initialize_autocomplete_runs_without_readline():
from jrnl import install
with mock.patch.dict(sys.modules, {"readline": None}):
install._initialize_autocomplete() # should not throw exception
| 487
|
Python
|
.py
| 12
| 37.5
| 72
| 0.765957
|
jrnl-org/jrnl
| 6,434
| 519
| 140
|
GPL-3.0
|
9/5/2024, 5:11:58 PM (Europe/Amsterdam)
|
14,549
|
test_color.py
|
jrnl-org_jrnl/tests/unit/test_color.py
|
# Copyright © 2012-2023 jrnl contributors
# License: https://www.gnu.org/licenses/gpl-3.0.html
import pytest
from colorama import Fore
from colorama import Style
from jrnl.color import colorize
@pytest.fixture()
def data_fixture():
string = "Zwei peanuts walked into a bar"
yield string
def test_colorize(data_fixture):
    """colorize wraps the text in bright blue ANSI codes and resets afterwards."""
    expected = Style.BRIGHT + Fore.BLUE + data_fixture + Style.RESET_ALL
    assert colorize(data_fixture, "BLUE", True) == expected
| 499
|
Python
|
.py
| 14
| 32.714286
| 82
| 0.761506
|
jrnl-org/jrnl
| 6,434
| 519
| 140
|
GPL-3.0
|
9/5/2024, 5:11:58 PM (Europe/Amsterdam)
|
14,550
|
test_output.py
|
jrnl-org_jrnl/tests/unit/test_output.py
|
# Copyright © 2012-2023 jrnl contributors
# License: https://www.gnu.org/licenses/gpl-3.0.html
from unittest.mock import Mock
from unittest.mock import patch
from jrnl.messages import Message
from jrnl.output import print_msg
@patch("jrnl.output.print_msgs")
def test_print_msg_calls_print_msgs_as_list_with_style(print_msgs):
test_msg = Mock(Message)
print_msg(test_msg)
print_msgs.assert_called_once_with([test_msg], style=test_msg.style)
@patch("jrnl.output.print_msgs")
def test_print_msg_calls_print_msgs_with_kwargs(print_msgs):
test_msg = Mock(Message)
kwargs = {
"delimter": "test delimiter 🤡",
"get_input": True,
"hide_input": True,
"some_rando_arg": "💩",
}
print_msg(test_msg, **kwargs)
print_msgs.assert_called_once_with([test_msg], style=test_msg.style, **kwargs)
| 853
|
Python
|
.py
| 22
| 34.590909
| 82
| 0.706667
|
jrnl-org/jrnl
| 6,434
| 519
| 140
|
GPL-3.0
|
9/5/2024, 5:11:58 PM (Europe/Amsterdam)
|
14,551
|
test_time.py
|
jrnl-org_jrnl/tests/unit/test_time.py
|
# Copyright © 2012-2023 jrnl contributors
# License: https://www.gnu.org/licenses/gpl-3.0.html
import datetime
import pytest
from jrnl import time
def test_default_hour_is_added():
    """Parsing a bare date applies default_hour when no time is supplied."""
    assert time.parse(
        "2020-06-20", inclusive=False, default_hour=9, default_minute=0, bracketed=False
    ) == datetime.datetime(2020, 6, 20, 9)
def test_default_minute_is_added():
    """Parsing a bare date applies default_minute when no time is supplied."""
    expected = datetime.datetime(2020, 6, 20, 0, 30)
    parsed = time.parse(
        "2020-06-20",
        inclusive=False,
        default_hour=0,
        default_minute=30,
        bracketed=False,
    )
    assert parsed == expected
@pytest.mark.parametrize(
    "inputs",
    [
        # each row: [year, month, day, expected_result]
        [2000, 2, 29, True],  # divisible by 400 -> leap year
        [2023, 1, 0, False],  # day zero never valid
        [2023, 1, 1, True],
        [2023, 4, 31, False],  # April has 30 days
        [2023, 12, 31, True],
        [2023, 12, 32, False],
        [2023, 13, 1, False],  # month out of range
        [2100, 2, 27, True],
        [2100, 2, 28, True],
        [2100, 2, 29, False],  # century year, not divisible by 400 -> no leap day
    ],
)
def test_is_valid_date(inputs):
    """is_valid_date must respect month lengths and Gregorian leap-year rules."""
    year, month, day, expected_result = inputs
    assert time.is_valid_date(year, month, day) == expected_result
| 1,071
|
Python
|
.py
| 35
| 24.657143
| 88
| 0.601753
|
jrnl-org/jrnl
| 6,434
| 519
| 140
|
GPL-3.0
|
9/5/2024, 5:11:58 PM (Europe/Amsterdam)
|
14,552
|
test_config_file.py
|
jrnl-org_jrnl/tests/unit/test_config_file.py
|
# Copyright © 2012-2023 jrnl contributors
# License: https://www.gnu.org/licenses/gpl-3.0.html
import os
import pytest
from jrnl.exception import JrnlException
from jrnl.install import find_alt_config
def test_find_alt_config(request):
    """find_alt_config returns the given path unchanged when the file exists."""
    # Build the path relative to this test file's directory
    work_config_path = os.path.join(
        request.fspath.dirname, "..", "data", "configs", "basic_onefile.yaml"
    )
    found_alt_config = find_alt_config(work_config_path)
    assert found_alt_config == work_config_path
def test_find_alt_config_not_exist(request):
    """find_alt_config raises JrnlException for a nonexistent config path.

    Fix: the original assigned the call's result and asserted on it *inside*
    the ``pytest.raises`` block — those lines were unreachable because the
    call raises first. Only the raising call belongs in the ``with`` block.
    """
    bad_config_path = os.path.join(
        request.fspath.dirname, "..", "data", "configs", "does-not-exist.yaml"
    )
    with pytest.raises(JrnlException) as ex:
        find_alt_config(bad_config_path)
    assert isinstance(ex.value, JrnlException)
| 832
|
Python
|
.py
| 20
| 37.05
| 78
| 0.718012
|
jrnl-org/jrnl
| 6,434
| 519
| 140
|
GPL-3.0
|
9/5/2024, 5:11:58 PM (Europe/Amsterdam)
|
14,553
|
test_journals_folder_journal.py
|
jrnl-org_jrnl/tests/unit/test_journals_folder_journal.py
|
# Copyright © 2012-2023 jrnl contributors
# License: https://www.gnu.org/licenses/gpl-3.0.html
import pathlib
from unittest import mock
import pytest
from jrnl.journals.FolderJournal import Folder
@pytest.mark.parametrize(
    "inputs_and_outputs",
    [
        # each case: [year/month folder, filenames glob() yields, expected survivors]
        [
            "/2020/01",
            ["02.txt", "03.txt", "31.txt"],
            ["/2020/01/02.txt", "/2020/01/03.txt", "/2020/01/31.txt"],
        ],
        [
            "/2020/02", # leap year
            ["02.txt", "03.txt", "28.txt", "29.txt", "31.txt", "39.txt"],
            [
                "/2020/02/02.txt",
                "/2020/02/03.txt",
                "/2020/02/28.txt",
                "/2020/02/29.txt",
            ],
        ],
        [
            "/2100/02", # not a leap year
            ["01.txt", "28.txt", "29.txt", "39.txt"],
            ["/2100/02/01.txt", "/2100/02/28.txt"],
        ],
        [
            "/2023/04",
            ["29.txt", "30.txt", "31.txt", "39.txt"],
            ["/2023/04/29.txt", "/2023/04/30.txt"],
        ],
    ],
)
def test_get_day_files_expected_filtering(inputs_and_outputs):
    """Folder._get_day_files keeps only filenames that are valid days of the month.

    E.g. 29.txt survives in 2020-02 (leap year) but not in 2100-02.
    """
    year_month_path, glob_filenames, expected_output = inputs_and_outputs
    year_month_path = pathlib.Path(year_month_path)
    glob_files = map(lambda x: year_month_path / x, glob_filenames)
    # Normalize expected paths to platform-native separators for comparison
    expected_output = list(map(lambda x: str(pathlib.PurePath(x)), expected_output))
    # Patch glob() and is_file() so no real filesystem access happens
    with (
        mock.patch("pathlib.Path.glob", return_value=glob_files),
        mock.patch.object(pathlib.Path, "is_file", return_value=True),
    ):
        actual_output = list(Folder._get_day_files(year_month_path))
    # Order of globbing is not significant; compare sorted lists
    actual_output.sort()
    expected_output.sort()
    assert actual_output == expected_output
| 1,731
|
Python
|
.py
| 49
| 26.612245
| 84
| 0.541268
|
jrnl-org/jrnl
| 6,434
| 519
| 140
|
GPL-3.0
|
9/5/2024, 5:11:58 PM (Europe/Amsterdam)
|
14,554
|
test_parse_args.py
|
jrnl-org_jrnl/tests/unit/test_parse_args.py
|
# Copyright © 2012-2023 jrnl contributors
# License: https://www.gnu.org/licenses/gpl-3.0.html
import shlex
import pytest
from jrnl.args import parse_args
from jrnl.config import make_yaml_valid_dict
def cli_as_dict(args_str):
    """Split a jrnl CLI string and return the parsed arguments as a plain dict.

    Fix: the parameter was named ``str``, shadowing the builtin; renamed to
    ``args_str`` (all call sites in this file pass it positionally).
    """
    cli = shlex.split(args_str)
    args = parse_args(cli)
    return vars(args)
def expected_args(**kwargs):
    """Return the complete default parse_args result dict, updated with **kwargs.

    Lets tests assert on the full parser output while only spelling out the
    fields a given flag is expected to change.
    """
    defaults = dict(
        contains=None,
        debug=False,
        delete=False,
        change_time=None,
        edit=False,
        end_date=None,
        exclude_starred=False,
        exclude_tagged=False,
        today_in_history=False,
        month=None,
        day=None,
        year=None,
        excluded=[],
        export=False,
        filename=None,
        limit=None,
        on_date=None,
        preconfig_cmd=None,
        postconfig_cmd=None,
        short=False,
        starred=False,
        start_date=None,
        strict=False,
        tagged=False,
        tags=False,
        template=None,
        text=[],
        config_override=[],
        config_file_path="",
    )
    defaults.update(kwargs)
    return defaults
def test_empty():
assert cli_as_dict("") == expected_args()
def test_contains_alone():
assert cli_as_dict("-contains whatever") == expected_args(contains=["whatever"])
def test_debug_alone():
assert cli_as_dict("--debug") == expected_args(debug=True)
def test_delete_alone():
assert cli_as_dict("--delete") == expected_args(delete=True)
def test_change_time_alone():
assert cli_as_dict("--change-time") == expected_args(change_time="now")
assert cli_as_dict("--change-time yesterday") == expected_args(
change_time="yesterday"
)
def test_diagnostic_alone():
from jrnl.commands import preconfig_diagnostic
assert cli_as_dict("--diagnostic") == expected_args(
preconfig_cmd=preconfig_diagnostic
)
def test_edit_alone():
assert cli_as_dict("--edit") == expected_args(edit=True)
def test_encrypt_alone():
from jrnl.commands import postconfig_encrypt
assert cli_as_dict("--encrypt") == expected_args(postconfig_cmd=postconfig_encrypt)
def test_decrypt_alone():
from jrnl.commands import postconfig_decrypt
assert cli_as_dict("--decrypt") == expected_args(postconfig_cmd=postconfig_decrypt)
def test_end_date_alone():
expected = expected_args(end_date="2020-01-01")
assert expected == cli_as_dict("-until 2020-01-01")
assert expected == cli_as_dict("-to 2020-01-01")
def test_not_empty():
with pytest.raises(SystemExit) as wrapped_e:
cli_as_dict("-not")
assert wrapped_e.value.code == 2
def test_not_alone():
assert cli_as_dict("-not test") == expected_args(excluded=["test"])
def test_not_multiple_alone():
assert cli_as_dict("-not one -not two") == expected_args(excluded=["one", "two"])
assert cli_as_dict("-not one -not two -not three") == expected_args(
excluded=["one", "two", "three"]
)
@pytest.mark.parametrize(
"cli",
[
"two -not one -not three",
"-not one two -not three",
"-not one -not three two",
],
)
def test_not_mixed(cli):
result = expected_args(excluded=["one", "three"], text=["two"])
assert cli_as_dict(cli) == result
def test_not_interspersed():
result = expected_args(excluded=["one", "three"], text=["two", "two", "two"])
assert cli_as_dict("two -not one two -not three two") == result
def test_export_alone():
assert cli_as_dict("--export json") == expected_args(export="json")
def test_import_alone():
from jrnl.commands import postconfig_import
assert cli_as_dict("--import") == expected_args(postconfig_cmd=postconfig_import)
def test_file_flag_alone():
assert cli_as_dict("--file test.txt") == expected_args(filename="test.txt")
assert cli_as_dict("--file 'lorem ipsum.txt'") == expected_args(
filename="lorem ipsum.txt"
)
def test_limit_alone():
assert cli_as_dict("-n 5") == expected_args(limit=5)
assert cli_as_dict("-n 999") == expected_args(limit=999)
def test_limit_shorthand_alone():
assert cli_as_dict("-5") == expected_args(limit=5)
assert cli_as_dict("-999") == expected_args(limit=999)
def test_list_alone():
from jrnl.commands import postconfig_list
assert cli_as_dict("--ls") == expected_args(postconfig_cmd=postconfig_list)
def test_on_date_alone():
assert cli_as_dict("-on 'saturday'") == expected_args(on_date="saturday")
def test_month_alone():
assert cli_as_dict("-month 1") == expected_args(month="1")
assert cli_as_dict("-month 01") == expected_args(month="01")
assert cli_as_dict("-month January") == expected_args(month="January")
assert cli_as_dict("-month Jan") == expected_args(month="Jan")
def test_day_alone():
assert cli_as_dict("-day 1") == expected_args(day="1")
assert cli_as_dict("-day 01") == expected_args(day="01")
def test_year_alone():
assert cli_as_dict("-year 2021") == expected_args(year="2021")
assert cli_as_dict("-year 21") == expected_args(year="21")
def test_today_in_history_alone():
assert cli_as_dict("-today-in-history") == expected_args(today_in_history=True)
def test_short_alone():
assert cli_as_dict("--short") == expected_args(short=True)
def test_starred_alone():
assert cli_as_dict("-starred") == expected_args(starred=True)
def test_start_date_alone():
assert cli_as_dict("-from 2020-01-01") == expected_args(start_date="2020-01-01")
assert cli_as_dict("-from 'January 1st'") == expected_args(start_date="January 1st")
def test_and_alone():
assert cli_as_dict("-and") == expected_args(strict=True)
def test_tags_alone():
assert cli_as_dict("--tags") == expected_args(tags=True)
def test_text_alone():
assert cli_as_dict("lorem ipsum dolor sit amet") == expected_args(
text=["lorem", "ipsum", "dolor", "sit", "amet"]
)
def test_version_alone():
from jrnl.commands import preconfig_version
assert cli_as_dict("--version") == expected_args(preconfig_cmd=preconfig_version)
def test_editor_override():
parsed_args = cli_as_dict('--config-override editor "nano"')
assert parsed_args == expected_args(config_override=[["editor", "nano"]])
def test_color_override():
assert cli_as_dict("--config-override colors.body blue") == expected_args(
config_override=[["colors.body", "blue"]]
)
def test_multiple_overrides():
parsed_args = cli_as_dict(
"--config-override colors.title green "
'--config-override editor "nano" '
'--config-override journal.scratchpad "/tmp/scratchpad"'
)
assert parsed_args == expected_args(
config_override=[
["colors.title", "green"],
["editor", "nano"],
["journal.scratchpad", "/tmp/scratchpad"],
]
)
# @see https://github.com/jrnl-org/jrnl/issues/520
@pytest.mark.parametrize(
"cli",
[
"-and second @oldtag @newtag",
"second @oldtag @newtag -and",
"second -and @oldtag @newtag",
"second @oldtag -and @newtag",
],
)
def test_and_ordering(cli):
result = expected_args(strict=True, text=["second", "@oldtag", "@newtag"])
assert cli_as_dict(cli) == result
# @see https://github.com/jrnl-org/jrnl/issues/520
@pytest.mark.parametrize(
"cli",
[
"--edit second @oldtag @newtag",
"second @oldtag @newtag --edit",
"second --edit @oldtag @newtag",
"second @oldtag --edit @newtag",
],
)
def test_edit_ordering(cli):
result = expected_args(edit=True, text=["second", "@oldtag", "@newtag"])
assert cli_as_dict(cli) == result
class TestDeserialization:
    """Tests for make_yaml_valid_dict, which turns a CLI [key, value] override
    pair into a one-entry config dict."""
    @pytest.mark.parametrize(
        "input_str",
        [
            ["editor", "nano"],
            ["colors.title", "blue"],
            ["default", "/tmp/egg.txt"],
        ],
    )
    def test_deserialize_multiword_strings(self, input_str):
        """Each [key, value] pair becomes a plain dict keyed by the first item."""
        runtime_config = make_yaml_valid_dict(input_str)
        assert runtime_config.__class__ is dict
        assert input_str[0] in runtime_config
        assert runtime_config[input_str[0]] == input_str[1]
    def test_deserialize_multiple_datatypes(self):
        """String values come back typed: ints and booleans are converted,
        multi-word strings survive intact."""
        cfg = make_yaml_valid_dict(["linewrap", "23"])
        assert cfg["linewrap"] == 23
        cfg = make_yaml_valid_dict(["encrypt", "false"])
        assert cfg["encrypt"] is False
        cfg = make_yaml_valid_dict(["editor", "vi -c startinsert"])
        assert cfg["editor"] == "vi -c startinsert"
        cfg = make_yaml_valid_dict(["highlight", "true"])
        assert cfg["highlight"] is True
| 8,654
|
Python
|
.py
| 219
| 33.643836
| 88
| 0.63729
|
jrnl-org/jrnl
| 6,434
| 519
| 140
|
GPL-3.0
|
9/5/2024, 5:11:58 PM (Europe/Amsterdam)
|
14,555
|
test_editor.py
|
jrnl-org_jrnl/tests/unit/test_editor.py
|
# Copyright © 2012-2023 jrnl contributors
# License: https://www.gnu.org/licenses/gpl-3.0.html
import os
from unittest.mock import mock_open
from unittest.mock import patch
import pytest
from jrnl.editor import get_template_path
from jrnl.editor import read_template_file
from jrnl.exception import JrnlException
@patch(
    "os.getcwd", side_effect="/"
)  # prevent failures in CI if current directory has been deleted
@patch("builtins.open", side_effect=FileNotFoundError())
def test_read_template_file_with_no_file_raises_exception(mock_open, mock_getcwd):
    """A missing template file must surface as JrnlException, not FileNotFoundError."""
    # NOTE(review): side_effect="/" makes the mocked getcwd yield "/" once and
    # raise StopIteration on later calls; return_value="/" may be the actual
    # intent — confirm.
    with pytest.raises(JrnlException) as ex:
        read_template_file("invalid_file.txt")
    assert isinstance(ex.value, JrnlException)
@patch(
"os.getcwd", side_effect="/"
) # prevent failures in CI if current directory has been deleted
@patch("builtins.open", new_callable=mock_open, read_data="template text")
def test_read_template_file_with_valid_file_returns_text(mock_file, mock_getcwd):
assert read_template_file("valid_file.txt") == "template text"
def test_get_template_path_when_exists_returns_correct_path():
with patch("os.path.exists", return_value=True):
output = get_template_path("template", "templatepath")
assert output == os.path.join("templatepath", "template")
@patch("jrnl.editor.absolute_path")
def test_get_template_path_when_doesnt_exist_returns_correct_path(mock_absolute_paths):
with patch("os.path.exists", return_value=False):
output = get_template_path("template", "templatepath")
assert output == mock_absolute_paths.return_value
| 1,629
|
Python
|
.py
| 32
| 46.09375
| 88
| 0.742202
|
jrnl-org/jrnl
| 6,434
| 519
| 140
|
GPL-3.0
|
9/5/2024, 5:11:58 PM (Europe/Amsterdam)
|
14,556
|
test_export.py
|
jrnl-org_jrnl/tests/unit/test_export.py
|
# Copyright © 2012-2023 jrnl contributors
# License: https://www.gnu.org/licenses/gpl-3.0.html
from unittest import mock
import pytest
from jrnl.exception import JrnlException
from jrnl.plugins.fancy_exporter import check_provided_linewrap_viability
from jrnl.plugins.yaml_exporter import YAMLExporter
@pytest.fixture()
def datestr():
    """Yield a fixed timestamp string for the fancy-exporter tests."""
    yield "2020-10-20 16:59"
def build_card_header(datestr):
    """Return *datestr* prefixed with the fancy card's top-left corner glyphs."""
    return f"┎─╮{datestr}"
class TestFancy:
    """Tests for the fancy exporter's linewrap sanity check."""
    def test_too_small_linewrap(self, datestr):
        """A linewrap narrower than the card header must raise JrnlException."""
        journal = "test_journal"
        content = build_card_header(datestr)
        total_linewrap = 12  # shorter than the header content built above
        with pytest.raises(JrnlException):
            check_provided_linewrap_viability(total_linewrap, [content], journal)
class TestYaml:
    """Tests for the YAML exporter's filesystem error handling."""
    @mock.patch("builtins.open")
    def test_export_to_nonexisting_folder(self, mock_open):
        """Writing to a nonexistent path raises before any file is ever opened."""
        with pytest.raises(JrnlException):
            YAMLExporter.write_file("journal", "non-existing-path")
        mock_open.assert_not_called()
| 1,057
|
Python
|
.py
| 27
| 33.703704
| 81
| 0.729703
|
jrnl-org/jrnl
| 6,434
| 519
| 140
|
GPL-3.0
|
9/5/2024, 5:11:58 PM (Europe/Amsterdam)
|
14,557
|
test_path.py
|
jrnl-org_jrnl/tests/unit/test_path.py
|
# Copyright © 2012-2023 jrnl contributors
# License: https://www.gnu.org/licenses/gpl-3.0.html
import random
import string
from os import getenv
from unittest.mock import patch
import pytest
from jrnl.path import absolute_path
from jrnl.path import expand_path
from jrnl.path import home_dir
@pytest.fixture
def home_dir_str(monkeypatch):
username = "username"
monkeypatch.setenv("USERPROFILE", username) # for windows
monkeypatch.setenv("HOME", username) # for *nix
return username
@pytest.fixture
def random_test_var(monkeypatch):
name = f"JRNL_TEST_{''.join(random.sample(string.ascii_uppercase, 10))}"
val = "".join(random.sample(string.ascii_lowercase, 25))
monkeypatch.setenv(name, val)
return (name, val)
def test_home_dir(home_dir_str):
assert home_dir() == home_dir_str
@pytest.mark.on_posix
@pytest.mark.parametrize(
"path",
["~"],
)
def test_expand_path_actually_expands_mac_linux(path):
# makes sure that path isn't being returns as-is
assert expand_path(path) != path
@pytest.mark.on_win
@pytest.mark.parametrize(
"path",
["~", "%USERPROFILE%"],
)
def test_expand_path_actually_expands_windows(path):
# makes sure that path isn't being returns as-is
assert expand_path(path) != path
@pytest.mark.on_posix
@pytest.mark.parametrize(
"paths",
[
["~", "HOME"],
],
)
def test_expand_path_expands_into_correct_value_mac_linux(paths):
input_path, expected_path = paths[0], paths[1]
assert expand_path(input_path) == getenv(expected_path)
@pytest.mark.on_win
@pytest.mark.parametrize(
"paths",
[
["~", "USERPROFILE"],
["%USERPROFILE%", "USERPROFILE"],
],
)
def test_expand_path_expands_into_correct_value_windows(paths):
input_path, expected_path = paths[0], paths[1]
assert expand_path(input_path) == getenv(expected_path)
@pytest.mark.on_posix
@pytest.mark.parametrize("_", range(25))
def test_expand_path_expands_into_random_env_value_mac_linux(_, random_test_var):
var_name, var_value = random_test_var[0], random_test_var[1]
assert expand_path(var_name) == var_name
assert expand_path(f"${var_name}") == var_value # mac & linux
assert expand_path(f"${var_name}") == getenv(var_name)
@pytest.mark.on_win
@pytest.mark.parametrize("_", range(25))
def test_expand_path_expands_into_random_env_value_windows(_, random_test_var):
var_name, var_value = random_test_var[0], random_test_var[1]
assert expand_path(var_name) == var_name
assert expand_path(f"%{var_name}%") == var_value # windows
assert expand_path(f"%{var_name}%") == getenv(var_name)
@patch("jrnl.path.expand_path")
@patch("os.path.abspath")
def test_absolute_path(mock_abspath, mock_expand_path):
test_val = "test_value"
assert absolute_path(test_val) == mock_abspath.return_value
mock_expand_path.assert_called_with(test_val)
mock_abspath.assert_called_with(mock_expand_path.return_value)
| 2,967
|
Python
|
.py
| 82
| 32.695122
| 81
| 0.709193
|
jrnl-org/jrnl
| 6,434
| 519
| 140
|
GPL-3.0
|
9/5/2024, 5:11:58 PM (Europe/Amsterdam)
|
14,558
|
fixtures.py
|
jrnl-org_jrnl/tests/lib/fixtures.py
|
# Copyright © 2012-2023 jrnl contributors
# License: https://www.gnu.org/licenses/gpl-3.0.html
import os
import tempfile
from collections import defaultdict
from collections.abc import Iterable
from pathlib import Path
from unittest.mock import Mock
from unittest.mock import patch
import toml
from keyring import backend
from keyring import errors
from pytest import fixture
from rich.console import Console
from jrnl.config import load_config
from jrnl.os_compat import split_args
from tests.lib.helpers import get_fixture
# --- Keyring --- #
@fixture
def keyring():
return NoKeyring()
@fixture
def keyring_type():
return "default"
class TestKeyring(backend.KeyringBackend):
    """A test keyring that just stores its values in a hash"""
    # priority 1 so the keyring library can select this backend in tests
    priority = 1
    # Class-level store shared by all instances: service -> {user: password}
    keys = defaultdict(dict)
    def set_password(self, servicename, username, password):
        self.keys[servicename][username] = password
    def get_password(self, servicename, username):
        return self.keys[servicename].get(username)
    def delete_password(self, servicename, username):
        # NOTE(review): sets the entry to None instead of deleting the key, so
        # get_password afterwards returns None (same as never-set) — confirm intent.
        self.keys[servicename][username] = None
class NoKeyring(backend.KeyringBackend):
    """A keyring that simulated an environment with no keyring backend."""
    priority = 2
    keys = defaultdict(dict)
    # Every operation raises NoKeyringError, as when no backend is installed
    def set_password(self, servicename, username, password):
        raise errors.NoKeyringError
    def get_password(self, servicename, username):
        raise errors.NoKeyringError
    def delete_password(self, servicename, username):
        raise errors.NoKeyringError
class FailedKeyring(backend.KeyringBackend):
    """A keyring that cannot be retrieved."""
    priority = 2
    # Every operation raises KeyringError (backend exists but is broken)
    def set_password(self, servicename, username, password):
        raise errors.KeyringError
    def get_password(self, servicename, username):
        raise errors.KeyringError
    def delete_password(self, servicename, username):
        raise errors.KeyringError
# ----- Misc ----- #
@fixture
def cli_run(
    mock_factories,
    mock_args,
    mock_is_tty,
    mock_config_path,
    mock_editor,
    mock_user_input,
    mock_overrides,
    mock_default_journal_path,
    mock_default_templates_path,
):
    """Assemble the shared mock-factory registry and result holder for a CLI run.

    Merges every per-concern mock-factory dict into one registry; the
    returned dict's `status`/`stdout`/`stderr`/`mocks` slots are filled in
    later by the step that actually runs jrnl.
    """
    # Check if we need more mocks
    mock_factories.update(mock_args)
    mock_factories.update(mock_is_tty)
    mock_factories.update(mock_overrides)
    mock_factories.update(mock_editor)
    mock_factories.update(mock_config_path)
    mock_factories.update(mock_user_input)
    mock_factories.update(mock_default_journal_path)
    mock_factories.update(mock_default_templates_path)
    return {
        "status": 0,
        "stdout": None,
        "stderr": None,
        "mocks": {},
        "mock_factories": mock_factories,
    }
@fixture
def mock_factories():
return {}
@fixture
def mock_args(cache_dir, request):
def _mock_args():
command = get_fixture(request, "command", "")
if cache_dir["exists"]:
command = command.format(cache_dir=cache_dir["path"])
args = split_args(command)
return patch("sys.argv", ["jrnl"] + args)
return {"args": _mock_args}
@fixture
def mock_is_tty(is_tty):
return {"is_tty": lambda: patch("sys.stdin.isatty", return_value=is_tty)}
@fixture
def mock_overrides(config_in_memory):
from jrnl.override import apply_overrides
def my_overrides(*args, **kwargs):
result = apply_overrides(*args, **kwargs)
config_in_memory["overrides"] = result
return result
return {
"overrides": lambda: patch(
"jrnl.controller.apply_overrides", side_effect=my_overrides
)
}
@fixture
def mock_config_path(request):
config_path = get_fixture(request, "config_path")
if not config_path:
return {}
return {
"config_path_install": lambda: patch(
"jrnl.install.get_config_path", return_value=config_path
),
"config_path_config": lambda: patch(
"jrnl.config.get_config_path", return_value=config_path
),
}
@fixture
def mock_default_journal_path(temp_dir):
journal_path = os.path.join(temp_dir.name, "journal.txt")
return {
"default_journal_path_install": lambda: patch(
"jrnl.install.get_default_journal_path", return_value=journal_path
),
"default_journal_path_config": lambda: patch(
"jrnl.config.get_default_journal_path", return_value=journal_path
),
}
@fixture
def mock_default_templates_path(temp_dir):
templates_path = os.path.join(temp_dir.name, "templates")
return {
"get_templates_path": lambda: patch(
"jrnl.editor.get_templates_path", return_value=templates_path
),
}
@fixture
def temp_dir():
return tempfile.TemporaryDirectory()
@fixture
def working_dir(request):
return os.path.join(request.config.rootpath, "tests")
@fixture
def toml_version(working_dir):
pyproject = os.path.join(working_dir, "..", "pyproject.toml")
pyproject_contents = toml.load(pyproject)
return pyproject_contents["tool"]["poetry"]["version"]
@fixture
def input_method():
return ""
@fixture
def all_input():
return ""
@fixture
def command():
return ""
@fixture
def cache_dir():
return {"exists": False, "path": ""}
@fixture
def str_value():
return ""
@fixture
def should_not():
return False
@fixture
def mock_user_input(request, password_input, stdin_input):
def _mock_user_input():
# user_input needs to be here because we don't know it until cli_run starts
user_input = get_fixture(request, "all_input", None)
if user_input is None:
user_input = Exception("Unexpected call for user input")
else:
user_input = iter(user_input.splitlines())
def mock_console_input(**kwargs):
pw = kwargs.get("password", False)
if pw and not isinstance(password_input, Exception):
return password_input
if isinstance(user_input, Iterable):
input_line = next(user_input)
# A raw newline is used to indicate deliberate empty input
return "" if input_line == r"\n" else input_line
# exceptions
return user_input if not pw else password_input
mock_console = Mock(wraps=Console(stderr=True))
mock_console.input = Mock(side_effect=mock_console_input)
return patch("jrnl.output._get_console", return_value=mock_console)
return {
"user_input": _mock_user_input,
"stdin_input": lambda: patch("sys.stdin.read", side_effect=stdin_input),
}
@fixture
def password_input(request):
password_input = get_fixture(request, "password", None)
if password_input is None:
password_input = Exception("Unexpected call for password input")
return password_input
@fixture
def stdin_input(request, is_tty):
stdin_input = get_fixture(request, "all_input", None)
if stdin_input is None or is_tty:
stdin_input = Exception("Unexpected call for stdin input")
else:
stdin_input = [stdin_input]
return stdin_input
@fixture
def is_tty(input_method):
assert input_method in ["", "enter", "pipe", "type"]
return input_method not in ["pipe", "type"]
@fixture
def config_on_disk(config_path):
return load_config(config_path)
@fixture
def config_in_memory():
return dict()
@fixture
def journal_name():
return None
@fixture
def which_output_stream():
return None
@fixture
def editor_input():
return None
@fixture
def num_args():
return None
@fixture
def parsed_output():
return {"lang": None, "obj": None}
@fixture
def editor_state():
return {
"command": "",
"intent": {"method": "r", "input": None},
"tmpfile": {"name": None, "content": None},
}
@fixture
def mock_editor(editor_state):
    """Patch subprocess.call so "opening the editor" manipulates a temp file.

    The fake editor opens the file named by the last element of the editor
    command, applies the intent recorded in editor_state (write/append some
    input), and records the command, filename, and resulting read content.
    """
    def _mock_editor(editor_command):
        tmpfile = editor_command[-1]  # editor commands end with the file path
        editor_state["command"] = editor_command
        editor_state["tmpfile"]["name"] = tmpfile
        Path(tmpfile).touch()
        with open(tmpfile, editor_state["intent"]["method"]) as f:
            # Touch the file so jrnl knows it was edited
            if editor_state["intent"]["input"] is not None:
                f.write(editor_state["intent"]["input"])
            # NOTE(review): after a write the cursor sits at EOF, so this read()
            # returns "" in w+/a+ mode; content capture only reflects the file
            # when opened in "r" mode with no input — confirm intent.
            file_content = f.read()
        editor_state["tmpfile"]["content"] = file_content
    return {"editor": lambda: patch("subprocess.call", side_effect=_mock_editor)}
| 8,540
|
Python
|
.py
| 256
| 27.652344
| 83
| 0.673554
|
jrnl-org/jrnl
| 6,434
| 519
| 140
|
GPL-3.0
|
9/5/2024, 5:11:58 PM (Europe/Amsterdam)
|
14,559
|
type_builders.py
|
jrnl-org_jrnl/tests/lib/type_builders.py
|
# Copyright © 2012-2023 jrnl contributors
# License: https://www.gnu.org/licenses/gpl-3.0.html
from parse_type import TypeBuilder
should_choice = TypeBuilder.make_enum(
{
"should": True,
"should not": False,
}
)
| 239
|
Python
|
.py
| 9
| 22.666667
| 52
| 0.688596
|
jrnl-org/jrnl
| 6,434
| 519
| 140
|
GPL-3.0
|
9/5/2024, 5:11:58 PM (Europe/Amsterdam)
|
14,560
|
when_steps.py
|
jrnl-org_jrnl/tests/lib/when_steps.py
|
# Copyright © 2012-2023 jrnl contributors
# License: https://www.gnu.org/licenses/gpl-3.0.html
import os
from contextlib import ExitStack
from pytest_bdd import when
from pytest_bdd.parsers import parse
from pytest_bdd.parsers import re
# This is an undocumented and unsupported function:
# https://github.com/pytest-dev/pytest-bdd/issues/684
try:
from pytest_bdd.compat import inject_fixture # pytest_bdd 7.1.2 and later
except ImportError:
from pytest_bdd.steps import inject_fixture # pytest_bdd 7.1.1 and earlier
from jrnl.main import run
@when(parse('we change directory to "{directory_name}"'))
def when_we_change_directory(directory_name):
if not os.path.isdir(directory_name):
os.mkdir(directory_name)
os.chdir(directory_name)
# These variables are used in the `@when(re(...))` section below
command = '(?P<command>[^"]*)'
input_method = "(?P<input_method>enter|pipe|type)"
all_input = '("(?P<all_input>[^"]*)")'
# Note: A line with only a raw newline r'\n' is treated as
# an empty line of input internally for testing purposes.
@when(parse('we run "jrnl {command}" and {input_method}\n{all_input}'))
@when(re(f'we run "jrnl ?{command}" and {input_method} {all_input}'))
@when(re(f'we run "jrnl {command}"(?! and)'))
@when('we run "jrnl"')
def we_run_jrnl(capsys, keyring, request, command, input_method, all_input):
from keyring import set_keyring
set_keyring(keyring)
# fixture injection (pytest-bdd >=6.0)
inject_fixture(request, "command", command)
inject_fixture(request, "input_method", input_method)
inject_fixture(request, "all_input", all_input)
cli_run = request.getfixturevalue("cli_run")
with ExitStack() as stack:
mocks = cli_run["mocks"]
factories = cli_run["mock_factories"]
for id in factories:
mocks[id] = stack.enter_context(factories[id]())
try:
cli_run["status"] = run() or 0
except StopIteration:
# This happens when input is expected, but don't have any input left
pass
except SystemExit as e:
cli_run["status"] = e.code
captured = capsys.readouterr()
cli_run["stdout"] = captured.out
cli_run["stderr"] = captured.err
| 2,247
|
Python
|
.py
| 52
| 38.461538
| 80
| 0.690717
|
jrnl-org/jrnl
| 6,434
| 519
| 140
|
GPL-3.0
|
9/5/2024, 5:11:58 PM (Europe/Amsterdam)
|
14,561
|
given_steps.py
|
jrnl-org_jrnl/tests/lib/given_steps.py
|
# Copyright © 2012-2023 jrnl contributors
# License: https://www.gnu.org/licenses/gpl-3.0.html
import json
import os
import random
import shutil
import string
from datetime import datetime
from unittest.mock import MagicMock
from unittest.mock import patch
from xml.etree import ElementTree as ET
from pytest_bdd import given
from pytest_bdd.parsers import parse
from jrnl import __version__
from jrnl.time import __get_pdt_calendar
from tests.lib.fixtures import FailedKeyring
from tests.lib.fixtures import NoKeyring
from tests.lib.fixtures import TestKeyring
@given(parse("we {editor_method} to the editor if opened\n{editor_input}"))
@given(parse("we {editor_method} nothing to the editor if opened"))
def we_enter_editor(editor_method, editor_input, editor_state):
    """Record the intended editor interaction in editor_state for mock_editor.

    editor_method is "write" (overwrite) or "append"; editor_input is the text
    the fake editor will feed in (None for the "nothing" step variant).

    Fixes: replaced ``assert False`` with a raise (asserts are stripped under
    ``python -O``), and dropped the initial read of the previous intent method,
    which was always overwritten before use.
    """
    if editor_method == "write":
        file_method = "w+"
    elif editor_method == "append":
        file_method = "a+"
    else:
        raise ValueError(f"Method '{editor_method}' not supported")
    editor_state["intent"] = {"method": file_method, "input": editor_input}
@given(parse('now is "{date_str}"'))
def now_is_str(date_str, mock_factories):
class DatetimeMagicMock(MagicMock):
# needed because jrnl does some reflection on datetime
def __instancecheck__(self, subclass):
return isinstance(subclass, datetime)
def mocked_now(tz=None):
now = datetime.strptime(date_str, "%Y-%m-%d %I:%M:%S %p")
if tz:
time_zone = datetime.utcnow().astimezone().tzinfo
now = now.replace(tzinfo=time_zone)
return now
# jrnl uses two different classes to parse dates, so both must be mocked
datetime_mock = DatetimeMagicMock(wraps=datetime)
datetime_mock.now.side_effect = mocked_now
pdt = __get_pdt_calendar()
calendar_mock = MagicMock(wraps=pdt)
calendar_mock.parse.side_effect = lambda date_str_input: pdt.parse(
date_str_input, mocked_now()
)
mock_factories["datetime"] = lambda: patch("datetime.datetime", new=datetime_mock)
mock_factories["calendar_parse"] = lambda: patch(
"jrnl.time.__get_pdt_calendar", return_value=calendar_mock
)
@given("we don't have a keyring", target_fixture="keyring")
def we_dont_have_keyring(keyring_type):
return NoKeyring()
@given("we have a keyring", target_fixture="keyring")
@given(parse("we have a {keyring_type} keyring"), target_fixture="keyring")
def we_have_type_of_keyring(keyring_type):
match keyring_type:
case "failed":
return FailedKeyring()
case _:
return TestKeyring()
@given(parse("we use no config"), target_fixture="config_path")
def we_use_no_config(temp_dir):
os.chdir(temp_dir.name) # @todo move this step to a more universal place
return os.path.join(temp_dir.name, "non_existing_config.yaml")
@given(parse('we use the config "{config_file}"'), target_fixture="config_path")
def we_use_the_config(request, temp_dir, working_dir, config_file):
    """Install a config fixture plus all journals/templates into the temp dir.

    Returns the destination config path as the `config_path` fixture.
    """
    # Move into temp dir as cwd
    os.chdir(temp_dir.name)  # @todo move this step to a more universal place
    # Copy the config file over
    config_source = os.path.join(working_dir, "data", "configs", config_file)
    config_dest = os.path.join(temp_dir.name, config_file)
    shutil.copy2(config_source, config_dest)
    # @todo make this only copy some journals over
    # Copy all of the journals over
    journal_source = os.path.join(working_dir, "data", "journals")
    journal_dest = os.path.join(temp_dir.name, "features", "journals")
    shutil.copytree(journal_source, journal_dest)
    # @todo maybe only copy needed templates over?
    # Copy all of the templates over
    template_source = os.path.join(working_dir, "data", "templates")
    template_dest = os.path.join(temp_dir.name, "features", "templates")
    shutil.copytree(template_source, template_dest)
    # @todo get rid of this by using default config values
    # merge in version number
    if (
        config_file.endswith("yaml")
        and os.path.exists(config_dest)
        and os.path.getsize(config_dest) > 0
    ):
        # Add jrnl version to file for 2.x journals
        # NOTE(review): appends without a leading newline, so this relies on
        # the fixture file ending with a newline — confirm.
        with open(config_dest, "a") as cf:
            cf.write("version: {}".format(__version__))
    return config_dest
@given(
    parse('we copy the template "{template_file}" to the default templates folder'),
    target_fixture="default_templates_path",
)
def we_copy_the_template(request, temp_dir, working_dir, template_file):
    """Install one template into the temp dir and return its destination path."""
    os.chdir(temp_dir.name)  # @todo move this step to a more universal place
    source = os.path.join(working_dir, "data", "templates", template_file)
    destination = os.path.join(temp_dir.name, "templates", template_file)
    os.makedirs(os.path.dirname(destination), exist_ok=True)
    shutil.copy2(source, destination)
    return destination
@given(parse('the config "{config_file}" exists'), target_fixture="config_path")
def config_exists(config_file, temp_dir, working_dir):
    """Copy a config fixture into the temp dir and return its path.

    Fix: the step declares target_fixture="config_path" but previously
    returned None, so dependent steps received a None config path; the
    destination path is now returned, matching the other config steps.
    """
    config_source = os.path.join(working_dir, "data", "configs", config_file)
    config_dest = os.path.join(temp_dir.name, config_file)
    shutil.copy2(config_source, config_dest)
    return config_dest
@given(parse('we use the password "{password}" if prompted'), target_fixture="password")
def use_password_forever(password):
    # Expose the parsed password as the `password` fixture for any prompt.
    return password
@given("we create a cache directory", target_fixture="cache_dir")
def create_cache_dir(temp_dir):
random_str = "".join(random.choices(string.ascii_uppercase + string.digits, k=20))
dir_path = os.path.join(temp_dir.name, "cache_" + random_str)
os.mkdir(dir_path)
return {"exists": True, "path": dir_path}
@given(parse("we parse the output as {language_name}"), target_fixture="parsed_output")
def parse_output_as_language(cli_run, language_name):
language_name = language_name.upper()
actual_output = cli_run["stdout"]
if language_name == "XML":
parsed_output = ET.fromstring(actual_output)
elif language_name == "JSON":
parsed_output = json.loads(actual_output)
else:
assert False, f"Language name {language_name} not recognized"
return {"lang": language_name, "obj": parsed_output}
@given(parse('the home directory is called "{home_dir}"'))
def home_directory(temp_dir, home_dir, monkeypatch):
    """Redirect the user's home directory into the temp dir for this test."""
    fake_home = os.path.join(temp_dir.name, home_dir)
    monkeypatch.setenv("USERPROFILE", fake_home)  # for windows
    monkeypatch.setenv("HOME", fake_home)  # for *nix
| 6,574
|
Python
|
.py
| 140
| 42.014286
| 88
| 0.704538
|
jrnl-org/jrnl
| 6,434
| 519
| 140
|
GPL-3.0
|
9/5/2024, 5:11:58 PM (Europe/Amsterdam)
|
14,562
|
helpers.py
|
jrnl-org_jrnl/tests/lib/helpers.py
|
# Copyright © 2012-2023 jrnl contributors
# License: https://www.gnu.org/licenses/gpl-3.0.html
import functools
import os
def does_directory_contain_files(file_list, directory_path):
    """Return True if every newline-separated name in `file_list` exists as a
    file under `directory_path` (names may include subdirectories).

    Returns False when `directory_path` is not a directory or any entry is
    missing. Fix: paths are built with os.path.join instead of manual "/"
    concatenation, which is portable and handles trailing separators.
    """
    if not os.path.isdir(directory_path):
        return False
    return all(
        os.path.isfile(os.path.join(directory_path, name))
        for name in file_list.split("\n")
    )
def does_directory_contain_n_files(directory_path, number):
    """Return True if `directory_path` holds exactly `number` regular files.

    `number` may be a string (parsed from a feature file); it is coerced with
    int(). Returns False for a missing directory. (Also removes the dead
    `count = 0` assignment from the original.)
    """
    if not os.path.isdir(directory_path):
        return False
    file_count = sum(
        1
        for entry in os.listdir(directory_path)
        if os.path.isfile(os.path.join(directory_path, entry))
    )
    return int(number) == file_count
def assert_equal_tags_ignoring_order(
    actual_line, expected_line, actual_content, expected_content
):
    """Assert that two "tags: ..." lines carry the same tag set, regardless
    of ordering; the full contents are included in the failure payload."""
    prefix_len = len("tags: ")
    actual_tags = {tag.strip() for tag in actual_line[prefix_len:].split(",")}
    expected_tags = {tag.strip() for tag in expected_line[prefix_len:].split(",")}
    assert actual_tags == expected_tags, [
        [actual_tags, expected_tags],
        [expected_content, actual_content],
    ]
# @see: https://stackoverflow.com/a/65782539/569146
def get_nested_val(dictionary, path, *default):
    """Look up a dot-separated `path` in nested dicts.

    An optional single positional default is returned on a missing key;
    without one, the KeyError propagates.
    """
    node = dictionary
    try:
        for key in path.split("."):
            node = node[key]
    except KeyError:
        if default:
            return default[0]
        raise
    return node
# @see: https://stackoverflow.com/a/41599695/569146
def spy_wrapper(wrapped_function):
    """Wrap a method so each call is recorded on `wrapper.mock` before
    delegating to the real implementation.

    Fixes: the MagicMock instance no longer shadows the `unittest.mock`
    module name, and functools.wraps preserves the wrapped function's
    metadata (__name__, __doc__) on the wrapper.
    """
    from unittest.mock import MagicMock
    spy = MagicMock()
    @functools.wraps(wrapped_function)
    def wrapper(self, *args, **kwargs):
        spy(*args, **kwargs)  # record the call (excluding self)
        return wrapped_function(self, *args, **kwargs)
    wrapper.mock = spy
    return wrapper
def get_fixture(request, name, default=None):
    """Return the named pytest fixture's value, or `default` if unknown."""
    try:
        value = request.getfixturevalue(name)
    except LookupError:
        value = default
    return value
| 1,921
|
Python
|
.py
| 56
| 28.375
| 85
| 0.660705
|
jrnl-org/jrnl
| 6,434
| 519
| 140
|
GPL-3.0
|
9/5/2024, 5:11:58 PM (Europe/Amsterdam)
|
14,563
|
then_steps.py
|
jrnl-org_jrnl/tests/lib/then_steps.py
|
# Copyright © 2012-2023 jrnl contributors
# License: https://www.gnu.org/licenses/gpl-3.0.html
import json
import os
import re
from xml.etree import ElementTree as ET
from pytest_bdd import then
from pytest_bdd.parsers import parse
from ruamel.yaml import YAML
from jrnl.config import scope_config
from tests.lib.helpers import assert_equal_tags_ignoring_order
from tests.lib.helpers import does_directory_contain_files
from tests.lib.helpers import does_directory_contain_n_files
from tests.lib.helpers import get_nested_val
from tests.lib.type_builders import should_choice
SHOULD_DICT = {"Should": should_choice}
@then("we should get no error")
def should_get_no_error(cli_run):
assert cli_run["status"] == 0, cli_run["status"]
@then("we should get an error")
def should_get_an_error(cli_run):
assert cli_run["status"] != 0, cli_run["status"]
@then(parse("the output should match\n{regex}"))
@then(parse('the output should match "{regex}"'))
def output_should_match(regex, cli_run):
out = cli_run["stdout"]
matches = re.findall(regex, out)
assert matches, f"\nRegex didn't match:\n{regex}\n{str(out)}\n{str(matches)}"
@then(parse("the output {it_should:Should} contain\n{expected}", SHOULD_DICT))
@then(parse('the output {it_should:Should} contain "{expected}"', SHOULD_DICT))
@then(
parse(
"the {which_output_stream} output {it_should:Should} contain\n{expected}",
SHOULD_DICT,
)
)
@then(
parse(
'the {which_output_stream} output {it_should:Should} contain "{expected}"',
SHOULD_DICT,
)
)
def output_should_contain(expected, which_output_stream, cli_run, it_should):
output_str = (
f"\nEXPECTED:\n{expected}\n\n"
f"ACTUAL STDOUT:\n{cli_run['stdout']}\n\n"
f"ACTUAL STDERR:\n{cli_run['stderr']}"
)
assert expected
if which_output_stream is None:
assert ((expected in cli_run["stdout"]) == it_should) or (
(expected in cli_run["stderr"]) == it_should
), output_str
elif which_output_stream == "standard":
assert (expected in cli_run["stdout"]) == it_should, output_str
elif which_output_stream == "error":
assert (expected in cli_run["stderr"]) == it_should, output_str
else:
assert (expected in cli_run[which_output_stream]) == it_should, output_str
@then(parse("the output should not contain\n{expected_output}"))
@then(parse('the output should not contain "{expected_output}"'))
def output_should_not_contain(expected_output, cli_run):
assert expected_output not in cli_run["stdout"]
@then(parse("the output should be\n{expected_output}"))
@then(parse('the output should be "{expected_output}"'))
def output_should_be(expected_output, cli_run):
actual = cli_run["stdout"].strip()
expected = expected_output.strip()
assert actual == expected
@then("the output should be empty")
def output_should_be_empty(cli_run):
actual = cli_run["stdout"].strip()
assert actual == ""
@then(parse('the output should contain the date "{date}"'))
def output_should_contain_date(date, cli_run):
    """The parsed date must be non-empty and appear in stdout."""
    assert date
    assert date in cli_run["stdout"]
@then("the output should contain pyproject.toml version")
def output_should_contain_version(cli_run, toml_version):
out = cli_run["stdout"]
assert toml_version in out, toml_version
@then("the version in the config file should be up-to-date")
def config_file_version(config_on_disk, toml_version):
config_version = config_on_disk["version"]
assert config_version == toml_version
@then(parse("the output should be {width:d} columns wide"))
def output_should_be_columns_wide(cli_run, width):
out = cli_run["stdout"]
out_lines = out.splitlines()
for line in out_lines:
assert len(line) <= width
@then(
    parse(
        'the default journal "{journal_file}" '
        'should be in the "{journal_dir}" directory'
    )
)
def default_journal_location(journal_file, journal_dir, config_on_disk, temp_dir):
    """Assert the configured default journal resolves to the expected path."""
    default_journal_path = config_on_disk["journals"]["default"]["journal"]
    # A journal_dir of "." means directly inside the temp dir.
    expected_journal_path = (
        os.path.join(temp_dir.name, journal_file)
        if journal_dir == "."
        else os.path.join(temp_dir.name, journal_dir, journal_file)
    )
    # Use os.path.samefile here because both paths might not be fully expanded.
    assert os.path.samefile(default_journal_path, expected_journal_path)
@then(
    parse(
        'the config for journal "{journal_name}" '
        '{it_should:Should} contain "{some_yaml}"',
        SHOULD_DICT,
    )
)
@then(
    parse(
        'the config for journal "{journal_name}" '
        "{it_should:Should} contain\n{some_yaml}",
        SHOULD_DICT,
    )
)
@then(parse('the config {it_should:Should} contain "{some_yaml}"', SHOULD_DICT))
@then(parse("the config {it_should:Should} contain\n{some_yaml}", SHOULD_DICT))
def config_var_on_disk(config_on_disk, journal_name, it_should, some_yaml):
    """Assert the saved config (optionally one journal's section) contains
    the given YAML fragment; dict values compare top-level keys only."""
    actual = config_on_disk
    if journal_name:
        # Narrow the comparison to the named journal's section.
        actual = actual["journals"][journal_name]
    expected = YAML(typ="safe").load(some_yaml)
    actual_slice = actual
    if isinstance(actual, dict):
        # `expected` objects formatted in yaml only compare one level deep
        actual_slice = {key: actual.get(key) for key in expected.keys()}
    assert (expected == actual_slice) == it_should
@then(
    parse(
        'the config in memory for journal "{journal_name}" '
        '{it_should:Should} contain "{some_yaml}"',
        SHOULD_DICT,
    )
)
@then(
    parse(
        'the config in memory for journal "{journal_name}" '
        "{it_should:Should} contain\n{some_yaml}",
        SHOULD_DICT,
    )
)
@then(
    parse('the config in memory {it_should:Should} contain "{some_yaml}"', SHOULD_DICT)
)
@then(
    parse("the config in memory {it_should:Should} contain\n{some_yaml}", SHOULD_DICT)
)
def config_var_in_memory(config_in_memory, journal_name, it_should, some_yaml):
    """Assert the runtime ("overrides") config contains the YAML fragment.

    Unlike the on-disk variant, expected keys here may use dot-notation and
    are resolved through get_nested_val.
    """
    actual = config_in_memory["overrides"]
    if journal_name:
        # Narrow the comparison to the named journal's section.
        actual = actual["journals"][journal_name]
    expected = YAML(typ="safe").load(some_yaml)
    actual_slice = actual
    if isinstance(actual, dict):
        # `expected` objects formatted in yaml only compare one level deep
        actual_slice = {key: get_nested_val(actual, key) for key in expected.keys()}
    assert (expected == actual_slice) == it_should
@then("we should be prompted for a password")
def password_was_called(cli_run):
assert cli_run["mocks"]["user_input"].return_value.input.called
@then("we should not be prompted for a password")
def password_was_not_called(cli_run):
assert not cli_run["mocks"]["user_input"].return_value.input.called
@then(parse("the cache directory should contain the files\n{file_list}"))
def assert_dir_contains_files(file_list, cache_dir):
assert does_directory_contain_files(file_list, cache_dir["path"])
@then(parse("the cache directory should contain {number} files"))
def assert_dir_contains_n_files(cache_dir, number):
assert does_directory_contain_n_files(cache_dir["path"], number)
@then(parse("the journal directory should contain\n{file_list}"))
def journal_directory_should_contain(config_on_disk, file_list):
scoped_config = scope_config(config_on_disk, "default")
assert does_directory_contain_files(file_list, scoped_config["journal"])
@then(parse('journal "{journal_name}" should not exist'))
def journal_directory_should_not_exist(config_on_disk, journal_name):
    # Resolve the configured path for the named journal.
    scoped_config = scope_config(config_on_disk, journal_name)
    # NOTE(review): arguments look swapped versus the helper's
    # (file_list, directory_path) signature — this only works because the
    # journal path is treated as a single "file list" entry relative to the
    # cwd "."; confirm intent.
    assert not does_directory_contain_files(
        scoped_config["journal"], "."
    ), f'Journal "{journal_name}" does exist'
@then(parse("the journal {it_should:Should} exist", SHOULD_DICT))
def journal_should_not_exist(config_on_disk, it_should):
scoped_config = scope_config(config_on_disk, "default")
expected_path = scoped_config["journal"]
contains_files = does_directory_contain_files(expected_path, ".")
assert contains_files == it_should
@then(
    parse(
        'the journal "{journal_name}" directory {it_should:Should} exist', SHOULD_DICT
    )
)
def directory_should_not_exist(config_on_disk, it_should, journal_name):
    """Check whether the named journal's directory exists, per `it_should`."""
    journal_path = scope_config(config_on_disk, journal_name)["journal"]
    assert os.path.isdir(journal_path) == it_should
@then(parse('the content of file "{file_path}" in the cache should be\n{file_content}'))
def content_of_file_should_be(file_path, file_content, cache_dir):
    """Assert a cached file's content matches the expected text line-by-line.

    "tags:" lines are compared order-insensitively; other lines must be
    equal after stripping. Fix: zip() silently ignored extra lines on either
    side, so a file with missing or surplus lines could pass; the line
    counts are now asserted first.
    """
    assert cache_dir["exists"]
    expected_content = file_content.strip().splitlines()
    with open(os.path.join(cache_dir["path"], file_path), "r") as f:
        actual_content = f.read().strip().splitlines()
    assert len(actual_content) == len(expected_content), [
        actual_content,
        expected_content,
    ]
    for actual_line, expected_line in zip(actual_content, expected_content):
        if actual_line.startswith("tags: ") and expected_line.startswith("tags: "):
            assert_equal_tags_ignoring_order(
                actual_line, expected_line, actual_content, expected_content
            )
        else:
            assert actual_line.strip() == expected_line.strip(), [
                [actual_line.strip(), expected_line.strip()],
                [actual_content, expected_content],
            ]
@then(parse("the cache should contain the files\n{file_list}"))
def cache_dir_contains_files(file_list, cache_dir):
assert cache_dir["exists"]
actual_files = os.listdir(cache_dir["path"])
expected_files = file_list.split("\n")
# sort to deal with inconsistent default file ordering on different OS's
actual_files.sort()
expected_files.sort()
assert actual_files == expected_files, [actual_files, expected_files]
@then(parse("the output should be valid {language_name}"))
def assert_output_is_valid_language(cli_run, language_name):
language_name = language_name.upper()
if language_name == "XML":
xml_tree = ET.fromstring(cli_run["stdout"])
assert xml_tree, "Invalid XML"
elif language_name == "JSON":
assert json.loads(cli_run["stdout"]), "Invalid JSON"
else:
assert False, f"Language name {language_name} not recognized"
@then(parse('"{node_name}" in the parsed output should have {number:d} elements'))
def assert_parsed_output_item_count(node_name, number, parsed_output):
    """Assert a named node in previously parsed XML/JSON has N children."""
    lang = parsed_output["lang"]
    obj = parsed_output["obj"]
    if lang == "XML":
        # NOTE(review): the generator is partially consumed by `in`, so a
        # failure message only lists the remaining tags.
        xml_node_names = (node.tag for node in obj)
        assert node_name in xml_node_names, str(list(xml_node_names))
        actual_entry_count = len(obj.find(node_name))
        assert actual_entry_count == number, actual_entry_count
    elif lang == "JSON":
        # Walk the dot-separated path; numeric segments index into lists.
        my_obj = obj
        for node in node_name.split("."):
            try:
                my_obj = my_obj[int(node)]
            except ValueError:
                assert node in my_obj
                my_obj = my_obj[node]
        assert len(my_obj) == number, len(my_obj)
    else:
        assert False, f"Language name {lang} not recognized"
@then(parse('"{field_name}" in the parsed output should {comparison}\n{expected_keys}'))
def assert_output_field_content(field_name, comparison, expected_keys, parsed_output):
    """Assert a field in previously parsed XML/JSON output matches.

    `comparison` is "be" (exact/set equality) or "contain" (substring or
    subset). For XML only the "tags" field is supported.
    """
    lang = parsed_output["lang"]
    obj = parsed_output["obj"]
    expected_keys = expected_keys.split("\n")
    # A single expectation collapses to a bare string.
    if len(expected_keys) == 1:
        expected_keys = expected_keys[0]
    if lang == "XML":
        xml_node_names = (node.tag for node in obj)
        assert field_name in xml_node_names, str(list(xml_node_names))
        if field_name == "tags":
            # Tags compare as an unordered set of their "name" attributes.
            actual_tags = set(t.attrib["name"] for t in obj.find("tags"))
            assert set(actual_tags) == set(expected_keys), [
                actual_tags,
                set(expected_keys),
            ]
        else:
            assert False, "This test only works for tags in XML"
    elif lang == "JSON":
        # Walk the dot-separated path; numeric segments index into lists.
        my_obj = obj
        for node in field_name.split("."):
            try:
                my_obj = my_obj[int(node)]
            except ValueError:
                assert node in my_obj, [my_obj.keys(), node]
                my_obj = my_obj[node]
        if comparison == "be":
            # Strings compare directly; collections compare as sets.
            if isinstance(my_obj, str):
                assert expected_keys == my_obj, [my_obj, expected_keys]
            else:
                assert set(expected_keys) == set(my_obj), [
                    set(my_obj),
                    set(expected_keys),
                ]
        elif comparison == "contain":
            # Substring check for strings, subset check for collections.
            if isinstance(my_obj, str):
                assert expected_keys in my_obj, [my_obj, expected_keys]
            else:
                assert all(elem in my_obj for elem in expected_keys), [
                    my_obj,
                    expected_keys,
                ]
    else:
        assert False, f"Language name {lang} not recognized"
@then(parse('there should be {number:d} "{item}" elements'))
def count_elements(number, item, cli_run):
    """The XML in stdout must contain exactly `number` <item> elements."""
    tree = ET.fromstring(cli_run["stdout"])
    assert number == len(tree.findall(".//" + item))
@then(parse("the editor {it_should:Should} have been called", SHOULD_DICT))
@then(
parse(
"the editor {it_should:Should} have been called with {num_args} arguments",
SHOULD_DICT,
)
)
def count_editor_args(num_args, cli_run, editor_state, it_should):
assert cli_run["mocks"]["editor"].called == it_should
if isinstance(num_args, int):
assert len(editor_state["command"]) == int(num_args)
@then(parse("the stdin prompt {it_should:Should} have been called", SHOULD_DICT))
def stdin_prompt_called(cli_run, it_should):
assert cli_run["mocks"]["stdin_input"].called == it_should
@then(parse('the editor filename should end with "{suffix}"'))
def editor_filename_suffix(suffix, editor_state):
    """The temp file handed to the editor must carry the given suffix."""
    editor_filename = editor_state["tmpfile"]["name"]
    assert editor_filename.endswith(suffix), (editor_filename, suffix)
@then(parse('the editor file content should {comparison} "{str_value}"'))
@then(parse("the editor file content should {comparison} empty"))
@then(parse("the editor file content should {comparison}\n{str_value}"))
def contains_editor_file(comparison, str_value, editor_state):
    """Compare the editor temp file's content against the expected text."""
    content = editor_state["tmpfile"]["content"]
    if comparison == "be":
        assert str_value == content
    elif comparison == "contain":
        assert str_value in content
    else:
        assert False, f"Comparison '{comparison}' not supported"
| 14,565
|
Python
|
.py
| 333
| 37.666667
| 88
| 0.666195
|
jrnl-org/jrnl
| 6,434
| 519
| 140
|
GPL-3.0
|
9/5/2024, 5:11:58 PM (Europe/Amsterdam)
|
14,564
|
test_features.py
|
jrnl-org_jrnl/tests/bdd/test_features.py
|
# Copyright © 2012-2023 jrnl contributors
# License: https://www.gnu.org/licenses/gpl-3.0.html
from pytest_bdd import scenarios
scenarios("features/actions.feature")
scenarios("features/build.feature")
scenarios("features/config_file.feature")
scenarios("features/core.feature")
scenarios("features/datetime.feature")
scenarios("features/delete.feature")
scenarios("features/change_time.feature")
scenarios("features/encrypt.feature")
scenarios("features/file_storage.feature")
scenarios("features/format.feature")
scenarios("features/import.feature")
scenarios("features/install.feature")
scenarios("features/multiple_journals.feature")
scenarios("features/override.feature")
scenarios("features/password.feature")
scenarios("features/search.feature")
scenarios("features/star.feature")
scenarios("features/tag.feature")
scenarios("features/template.feature")
scenarios("features/upgrade.feature")
scenarios("features/write.feature")
| 938
|
Python
|
.py
| 24
| 38
| 52
| 0.828947
|
jrnl-org/jrnl
| 6,434
| 519
| 140
|
GPL-3.0
|
9/5/2024, 5:11:58 PM (Europe/Amsterdam)
|
14,565
|
allow_all_python_version.py
|
jrnl-org_jrnl/.build/allow_all_python_version.py
|
import toml
pyproject = toml.load("pyproject.toml")
pyproject["tool"]["poetry"]["dependencies"]["python"] = "*"
with open("pyproject.toml", "w") as toml_file:
toml.dump(pyproject, toml_file)
| 198
|
Python
|
.py
| 5
| 37.2
| 59
| 0.705263
|
jrnl-org/jrnl
| 6,434
| 519
| 140
|
GPL-3.0
|
9/5/2024, 5:11:58 PM (Europe/Amsterdam)
|
14,566
|
__main__.py
|
jrnl-org_jrnl/jrnl/__main__.py
|
# Copyright © 2012-2023 jrnl contributors
# License: https://www.gnu.org/licenses/gpl-3.0.html
import sys
from jrnl.main import run
if __name__ == "__main__":
sys.exit(run())
| 183
|
Python
|
.py
| 6
| 28.333333
| 52
| 0.701149
|
jrnl-org/jrnl
| 6,434
| 519
| 140
|
GPL-3.0
|
9/5/2024, 5:11:58 PM (Europe/Amsterdam)
|
14,567
|
path.py
|
jrnl-org_jrnl/jrnl/path.py
|
# Copyright © 2012-2023 jrnl contributors
# License: https://www.gnu.org/licenses/gpl-3.0.html
import os.path
from pathlib import Path
import xdg.BaseDirectory
from jrnl.exception import JrnlException
from jrnl.messages import Message
from jrnl.messages import MsgStyle
from jrnl.messages import MsgText
# Constants
XDG_RESOURCE = "jrnl"  # XDG resource name for jrnl's data/config directories
DEFAULT_CONFIG_NAME = "jrnl.yaml"  # config filename when none is specified
DEFAULT_JOURNAL_NAME = "journal.txt"  # filename for the default journal
def home_dir() -> str:
    """Return the current user's home directory."""
    return os.path.expanduser("~")


def expand_path(path: str) -> str:
    """Expand environment variables, then '~', in `path`."""
    with_vars = os.path.expandvars(path)
    return os.path.expanduser(with_vars)


def absolute_path(path: str) -> str:
    """Return `path` fully expanded and made absolute."""
    return os.path.abspath(expand_path(path))
def get_default_journal_path() -> str:
    """Return the path for the default journal in the XDG data dir (falling
    back to the home directory)."""
    data_dir = xdg.BaseDirectory.save_data_path(XDG_RESOURCE) or home_dir()
    return os.path.join(data_dir, DEFAULT_JOURNAL_NAME)
def get_templates_path() -> str:
    """
    Get the path to the XDG templates directory. Creates the directory if it
    doesn't exist.
    """
    # save_data_path creates the jrnl resource directory itself if needed.
    templates_dir = Path(xdg.BaseDirectory.save_data_path(XDG_RESOURCE)) / "templates"
    templates_dir.mkdir(exist_ok=True)
    return str(templates_dir)
def get_config_directory() -> str:
    """Return (and create) the XDG config directory for jrnl.

    Raises JrnlException if the path already exists as a file rather than a
    directory.
    """
    try:
        return xdg.BaseDirectory.save_config_path(XDG_RESOURCE)
    except FileExistsError:
        raise JrnlException(
            Message(
                MsgText.ConfigDirectoryIsFile,
                MsgStyle.ERROR,
                {
                    "config_directory_path": os.path.join(
                        xdg.BaseDirectory.xdg_config_home, XDG_RESOURCE
                    )
                },
            ),
        )
def get_config_path() -> str:
    """Return the config file path, falling back to the home directory when
    the XDG config directory is unusable."""
    try:
        return os.path.join(get_config_directory(), DEFAULT_CONFIG_NAME)
    except JrnlException:
        return os.path.join(home_dir(), DEFAULT_CONFIG_NAME)
| 2,066
|
Python
|
.py
| 54
| 31.888889
| 84
| 0.685557
|
jrnl-org/jrnl
| 6,434
| 519
| 140
|
GPL-3.0
|
9/5/2024, 5:11:58 PM (Europe/Amsterdam)
|
14,568
|
config.py
|
jrnl-org_jrnl/jrnl/config.py
|
# Copyright © 2012-2023 jrnl contributors
# License: https://www.gnu.org/licenses/gpl-3.0.html
import argparse
import logging
import os
from typing import Any
from typing import Callable
import colorama
from rich.pretty import pretty_repr
from ruamel.yaml import YAML
from ruamel.yaml import constructor
from jrnl import __version__
from jrnl.exception import JrnlException
from jrnl.messages import Message
from jrnl.messages import MsgStyle
from jrnl.messages import MsgText
from jrnl.output import list_journals
from jrnl.output import print_msg
from jrnl.path import get_config_path
from jrnl.path import get_default_journal_path
# Constants
DEFAULT_JOURNAL_KEY = "default"  # key of the default journal in config["journals"]
YAML_SEPARATOR = ": "  # joins a config key and value into one YAML line
YAML_FILE_ENCODING = "utf-8"  # encoding for reading/writing config files
def make_yaml_valid_dict(input: list) -> dict:
    """
    Convert a two-element list of configuration key-value pair into a flat dict.
    The dict is created through the yaml loader, with the assumption that
    "input[0]: input[1]" is valid yaml.
    :param input: list of configuration keys in dot-notation and their respective values
    :type input: list
    :return: A single level dict of the configuration keys in dot-notation and their
        respective desired values
    :rtype: dict
    """
    assert len(input) == 2
    # Build a "Key: Value" line and let the YAML loader type the value.
    yaml_line = YAML_SEPARATOR.join(input)
    return YAML(typ="safe").load(yaml_line)
def save_config(config: dict, alt_config_path: str | None = None) -> None:
    """Supply alt_config_path if using an alternate config through --config-file."""
    config["version"] = __version__
    yaml = YAML(typ="safe")
    yaml.default_flow_style = False  # prevents collapsing of tree structure
    target_path = alt_config_path if alt_config_path else get_config_path()
    with open(target_path, "w", encoding=YAML_FILE_ENCODING) as f:
        yaml.dump(config, f)
def get_default_config() -> dict[str, Any]:
    """Return a fresh copy of jrnl's factory-default configuration."""
    no_color = "none"
    return {
        "version": __version__,
        "journals": {"default": {"journal": get_default_journal_path()}},
        "editor": os.getenv("VISUAL") or os.getenv("EDITOR") or "",
        "encrypt": False,
        "template": False,
        "default_hour": 9,
        "default_minute": 0,
        "timeformat": "%F %r",
        "tagsymbols": "#@",
        "highlight": True,
        "linewrap": 79,
        "indent_character": "|",
        "colors": {
            "body": no_color,
            "date": no_color,
            "tags": no_color,
            "title": no_color,
        },
    }
def get_default_colors() -> dict[str, Any]:
    """Return the color scheme used when the user turns colors on."""
    colors = {"body": "none"}
    colors["date"] = "black"
    colors["tags"] = "yellow"
    colors["title"] = "cyan"
    return colors
def scope_config(config: dict, journal_name: str) -> dict:
    """Return a copy of `config` with the named journal's settings merged on
    top; an unknown journal name returns `config` unchanged.

    Note: only a shallow copy is made, so nested dicts are shared with the
    original config.
    """
    if journal_name not in config["journals"]:
        return config
    config = config.copy()
    journal_conf = config["journals"].get(journal_name)
    if isinstance(journal_conf, dict):
        # We can override the default config on a by-journal basis
        logging.debug(
            "Updating configuration with specific journal overrides:\n%s",
            pretty_repr(journal_conf),
        )
        config.update(journal_conf)
    else:
        # But also just give them a string to point to the journal file
        config["journal"] = journal_conf
    logging.debug("Scoped config:\n%s", pretty_repr(config))
    return config
def verify_config_colors(config: dict) -> bool:
    """
    Ensures the keys set for colors are valid colorama.Fore attributes, or "None"
    :return: True if all keys are set correctly, False otherwise
    """
    valid = True
    for key, color in config["colors"].items():
        if color.upper() == "NONE":
            continue
        if getattr(colorama.Fore, color.upper(), None):
            continue
        # Report the offending key/color pair, but keep checking the rest.
        print_msg(
            Message(
                MsgText.InvalidColor,
                MsgStyle.NORMAL,
                {
                    "key": key,
                    "color": color,
                },
            )
        )
        valid = False
    return valid
def load_config(config_path: str) -> dict:
    """Tries to load a config file from YAML.

    Duplicate keys first trigger a warning; the file is then re-read with
    duplicates allowed.
    """
    try:
        with open(config_path, encoding=YAML_FILE_ENCODING) as f:
            yaml = YAML(typ="safe")
            yaml.allow_duplicate_keys = False
            return yaml.load(f)
    except constructor.DuplicateKeyError as e:
        print_msg(
            Message(
                MsgText.ConfigDoubleKeys,
                MsgStyle.WARNING,
                {
                    "error_message": e,
                },
            )
        )
        # Second pass: tolerate the duplicates the strict pass rejected.
        with open(config_path, encoding=YAML_FILE_ENCODING) as f:
            yaml = YAML(typ="safe")
            yaml.allow_duplicate_keys = True
            return yaml.load(f)
def is_config_json(config_path: str) -> bool:
    """Heuristic: a config whose first non-space character is '{' is JSON."""
    with open(config_path, "r", encoding="utf-8") as f:
        return f.read().strip().startswith("{")
def update_config(
config: dict, new_config: dict, scope: str | None, force_local: bool = False
) -> None:
"""Updates a config dict with new values - either global if scope is None
or config['journals'][scope] is just a string pointing to a journal file,
or within the scope"""
if scope and isinstance(config["journals"][scope], dict):
config["journals"][scope].update(new_config)
elif scope and force_local: # Convert to dict
config["journals"][scope] = {"journal": config["journals"][scope]}
config["journals"][scope].update(new_config)
else:
config.update(new_config)
def get_journal_name(args: argparse.Namespace, config: dict) -> argparse.Namespace:
    """Pop a leading journal name (optionally ':'-suffixed) off args.text."""
    args.journal_name = DEFAULT_JOURNAL_KEY
    # The first arg might be a journal name
    if args.text:
        candidate = args.text[0]
        if candidate[-1] == ":":
            candidate = candidate[:-1]
        if candidate in config["journals"]:
            args.journal_name = candidate
            args.text = args.text[1:]
    logging.debug("Using journal name: %s", args.journal_name)
    return args
def cmd_requires_valid_journal_name(func: Callable) -> Callable:
    """Decorator: validate args.journal_name against config before running.

    Fix: the wrapped command's return value is now propagated instead of
    being discarded, so callers that forward the command's result (e.g. an
    exit status) actually receive it.
    """
    def wrapper(args: argparse.Namespace, config: dict, original_config: dict):
        validate_journal_name(args.journal_name, config)
        return func(args=args, config=config, original_config=original_config)
    return wrapper
def validate_journal_name(journal_name: str, config: dict) -> None:
    """Raise JrnlException unless `journal_name` is a configured journal."""
    if journal_name in config["journals"]:
        return
    raise JrnlException(
        Message(
            MsgText.NoNamedJournal,
            MsgStyle.ERROR,
            {
                "journal_name": journal_name,
                "journals": list_journals(config),
            },
        ),
    )
| 7,057
|
Python
|
.py
| 186
| 30
| 88
| 0.623169
|
jrnl-org/jrnl
| 6,434
| 519
| 140
|
GPL-3.0
|
9/5/2024, 5:11:58 PM (Europe/Amsterdam)
|
14,569
|
exception.py
|
jrnl-org_jrnl/jrnl/exception.py
|
# Copyright © 2012-2023 jrnl contributors
# License: https://www.gnu.org/licenses/gpl-3.0.html
from typing import TYPE_CHECKING
from jrnl.output import print_msg
if TYPE_CHECKING:
from jrnl.messages import Message
from jrnl.messages import MsgText
class JrnlException(Exception):
    """Common exceptions raised by jrnl."""

    def __init__(self, *messages: "Message"):
        # Keep every Message so callers can print or inspect them later.
        self.messages = messages

    def print(self) -> None:
        """Print each stored message through jrnl's output helper."""
        for message in self.messages:
            print_msg(message)

    def has_message_text(self, message_text: "MsgText"):
        """Return True if any stored message carries the given text."""
        return any(m.text == message_text for m in self.messages)
| 635
|
Python
|
.py
| 16
| 34.4375
| 67
| 0.703764
|
jrnl-org/jrnl
| 6,434
| 519
| 140
|
GPL-3.0
|
9/5/2024, 5:11:58 PM (Europe/Amsterdam)
|
14,570
|
prompt.py
|
jrnl-org_jrnl/jrnl/prompt.py
|
# Copyright © 2012-2023 jrnl contributors
# License: https://www.gnu.org/licenses/gpl-3.0.html
from jrnl.messages import Message
from jrnl.messages import MsgStyle
from jrnl.messages import MsgText
from jrnl.output import print_msg
from jrnl.output import print_msgs
def create_password(journal_name: str) -> str:
    """Prompt twice for a new password for `journal_name` until both entries
    match, optionally storing it in the system keyring; return the password.
    """
    kwargs = {
        "get_input": True,
        "hide_input": True,
    }
    while True:
        pw = print_msg(
            Message(
                MsgText.PasswordFirstEntry,
                MsgStyle.PROMPT,
                params={"journal_name": journal_name},
            ),
            **kwargs
        )
        if not pw:
            # Empty entry: warn and re-prompt.
            print_msg(Message(MsgText.PasswordCanNotBeEmpty, MsgStyle.WARNING))
            continue
        elif pw == print_msg(
            Message(MsgText.PasswordConfirmEntry, MsgStyle.PROMPT), **kwargs
        ):
            # Confirmation matched; stop prompting.
            break
        print_msg(Message(MsgText.PasswordDidNotMatch, MsgStyle.ERROR))
    if yesno(Message(MsgText.PasswordStoreInKeychain), default=True):
        # Imported here rather than at module top — presumably to defer the
        # keyring dependency until it is actually needed; confirm.
        from jrnl.keyring import set_keyring_password
        set_keyring_password(pw, journal_name)
    return pw
def prompt_password(first_try: bool = True) -> str:
    """Ask for a password; warn first on a retry. Returns "" for no input."""
    if not first_try:
        print_msg(Message(MsgText.WrongPasswordTryAgain, MsgStyle.WARNING))
    entered = print_msg(
        Message(MsgText.Password, MsgStyle.PROMPT),
        get_input=True,
        hide_input=True,
    )
    return entered or ""
def yesno(prompt: Message | str, default: bool = True) -> bool:
    """Ask a yes/no question; empty or unrecognized input returns `default`."""
    response = print_msgs(
        [
            prompt,
            Message(
                MsgText.YesOrNoPromptDefaultYes
                if default
                else MsgText.YesOrNoPromptDefaultNo
            ),
        ],
        style=MsgStyle.PROMPT,
        delimiter=" ",
        get_input=True,
    )
    # Map the localized one-character answers onto booleans.
    answers = {
        str(MsgText.OneCharacterYes): True,
        str(MsgText.OneCharacterNo): False,
    }
    # Does using `lower()` work in all languages?
    return answers.get(str(response).lower().strip(), default)
| 2,086
|
Python
|
.py
| 64
| 23.90625
| 79
| 0.610169
|
jrnl-org/jrnl
| 6,434
| 519
| 140
|
GPL-3.0
|
9/5/2024, 5:11:58 PM (Europe/Amsterdam)
|
14,571
|
os_compat.py
|
jrnl-org_jrnl/jrnl/os_compat.py
|
# Copyright © 2012-2023 jrnl contributors
# License: https://www.gnu.org/licenses/gpl-3.0.html
import shlex
from sys import platform
def on_windows() -> bool:
    """True when running on a Windows platform."""
    return "win32" in platform


def on_posix() -> bool:
    """True on any non-Windows (POSIX-ish) platform."""
    return not on_windows()


def split_args(args: str) -> list[str]:
    """Split arguments and add escape characters as appropriate for the OS"""
    posix_rules = on_posix()
    return shlex.split(args, posix=posix_rules)
| 415
|
Python
|
.py
| 11
| 34.636364
| 77
| 0.725441
|
jrnl-org/jrnl
| 6,434
| 519
| 140
|
GPL-3.0
|
9/5/2024, 5:11:58 PM (Europe/Amsterdam)
|
14,572
|
controller.py
|
jrnl-org_jrnl/jrnl/controller.py
|
# Copyright © 2012-2023 jrnl contributors
# License: https://www.gnu.org/licenses/gpl-3.0.html
import logging
import sys
from typing import TYPE_CHECKING
from jrnl import install
from jrnl import plugins
from jrnl import time
from jrnl.config import DEFAULT_JOURNAL_KEY
from jrnl.config import get_config_path
from jrnl.config import get_journal_name
from jrnl.config import scope_config
from jrnl.editor import get_text_from_editor
from jrnl.editor import get_text_from_stdin
from jrnl.editor import read_template_file
from jrnl.exception import JrnlException
from jrnl.journals import open_journal
from jrnl.messages import Message
from jrnl.messages import MsgStyle
from jrnl.messages import MsgText
from jrnl.output import print_msg
from jrnl.output import print_msgs
from jrnl.override import apply_overrides
if TYPE_CHECKING:
from argparse import Namespace
from jrnl.journals import Entry
from jrnl.journals import Journal
def run(args: "Namespace"):
    """
    Flow:
    1. Run standalone command if it doesn't need config (help, version, etc), then exit
    2. Load config
    3. Run standalone command if it does need config (encrypt, decrypt, etc), then exit
    4. Load specified journal
    5. Start append mode, or search mode
    6. Perform actions with results from search mode (if needed)
    7. Profit
    """
    # Run command if possible before config is available
    if callable(args.preconfig_cmd):
        return args.preconfig_cmd(args)
    # Load the config, and extract journal name
    config = install.load_or_install_jrnl(args.config_file_path)
    # Keep an unmodified copy: postconfig commands receive both versions
    original_config = config.copy()
    # Apply config overrides
    config = apply_overrides(args, config)
    args = get_journal_name(args, config)
    config = scope_config(config, args.journal_name)
    # Run post-config command now that config is ready
    if callable(args.postconfig_cmd):
        return args.postconfig_cmd(
            args=args, config=config, original_config=original_config
        )
    # --- All the standalone commands are now done --- #
    # Get the journal we're going to be working with
    journal = open_journal(args.journal_name, config)
    kwargs = {
        "args": args,
        "config": config,
        "journal": journal,
        # Snapshot of the unfiltered entry list; the action helpers
        # (delete / change-time) restore journal.entries from this.
        "old_entries": journal.entries,
    }
    if _is_append_mode(**kwargs):
        append_mode(**kwargs)
        return
    # If not append mode, then we're in search mode (only 2 modes exist)
    search_mode(**kwargs)
    entries_found_count = len(journal)
    _print_entries_found_count(entries_found_count, args)
    # Actions
    _perform_actions_on_search_results(**kwargs)
    if entries_found_count != 0 and _has_action_args(args):
        _print_changed_counts(journal)
    else:
        # display only occurs if no other action occurs
        _display_search_results(**kwargs)
def _perform_actions_on_search_results(**kwargs):
    """Run each requested modification (change-time, delete, edit) in order."""
    args = kwargs["args"]
    # Order matters: time changes and deletions happen before the editor opens.
    requested_actions = (
        (args.change_time, _change_time_search_results),
        (args.delete, _delete_search_results),
        (args.edit, _edit_search_results),
    )
    for requested, action in requested_actions:
        if requested:
            action(**kwargs)
def _is_append_mode(args: "Namespace", config: dict, **kwargs) -> bool:
    """Determines if we are in append mode (as opposed to search mode)"""
    # Any search, action, or display argument puts us in search mode.
    has_any_filter = (
        _has_search_args(args) or _has_action_args(args) or _has_display_args(args)
    )
    result = not has_any_filter
    # Might be writing and want to move to editor part of the way through
    if args.edit and args.text:
        result = True
    # If the text is entirely tags, then we are also searching (not writing)
    if result and args.text and _has_only_tags(config["tagsymbols"], args.text):
        result = False
    return result
def append_mode(args: "Namespace", config: dict, journal: "Journal", **kwargs) -> None:
    """
    Gets input from the user to write to the journal
    0. Check for a template passed as an argument, or in the global config
    1. Check for input from cli
    2. Check input being piped in
    3. Open editor if configured (prepopulated with template if available)
    4. Use stdin.read as last resort
    6. Write any found text to journal, or exit

    Raises JrnlException when the text is empty or identical to the template.
    """
    logging.debug("Append mode: starting")
    template_text = _get_template(args, config)
    # Input precedence: cli text > piped stdin > editor/interactive stdin
    if args.text:
        logging.debug(f"Append mode: cli text detected: {args.text}")
        raw = " ".join(args.text).strip()
        if args.edit:
            raw = _write_in_editor(config, raw)
    elif not sys.stdin.isatty():
        logging.debug("Append mode: receiving piped text")
        raw = sys.stdin.read()
    else:
        raw = _write_in_editor(config, template_text)
    # Refuse to save a template that was returned unchanged from the editor
    if template_text is not None and raw == template_text:
        logging.error("Append mode: raw text was the same as the template")
        raise JrnlException(Message(MsgText.NoChangesToTemplate, MsgStyle.NORMAL))
    if not raw or raw.isspace():
        logging.error("Append mode: couldn't get raw text or entry was empty")
        raise JrnlException(Message(MsgText.NoTextReceived, MsgStyle.NORMAL))
    logging.debug(
        f"Append mode: appending raw text to journal '{args.journal_name}': {raw}"
    )
    journal.new_entry(raw)
    # Only announce the journal name when writing somewhere non-default
    if args.journal_name != DEFAULT_JOURNAL_KEY:
        print_msg(
            Message(
                MsgText.JournalEntryAdded,
                MsgStyle.NORMAL,
                {"journal_name": args.journal_name},
            )
        )
    journal.write()
    logging.debug("Append mode: completed journal.write()")
def _get_template(args, config) -> str | None:
    """Return the template text to prepopulate a new entry, or None.

    A --template argument takes precedence over the config's "template" key.
    """
    # Read template file and pass as raw text into the composer
    logging.debug(
        "Get template:\n"
        f"--template: {args.template}\n"
        f"from config: {config.get('template')}"
    )
    template_path = args.template or config.get("template")
    template_text = None
    if template_path:
        template_text = read_template_file(template_path)
    return template_text
def search_mode(args: "Namespace", journal: "Journal", **kwargs) -> None:
    """
    Search for entries in a journal, and return the
    results. If no search args, then return all results
    """
    logging.debug("Search mode: starting")
    no_filters = (
        not _has_search_args(args)
        and not _has_display_args(args)
        and not args.text
    )
    # With no filters at all, leave the journal untouched (every entry matches)
    if no_filters:
        logging.debug("Search mode: has no search args")
        return
    logging.debug("Search mode: has search args")
    _filter_journal_entries(args, journal)
def _write_in_editor(config: dict, prepopulated_text: str | None = None) -> str:
    """Collect entry text via the configured editor, or stdin when none is set."""
    if not config["editor"]:
        return get_text_from_stdin()
    logging.debug("Append mode: opening editor")
    return get_text_from_editor(config, prepopulated_text)
def _filter_journal_entries(args: "Namespace", journal: "Journal", **kwargs) -> None:
    """Filter journal entries in-place based upon search args"""
    # --on is shorthand for an identical start and end date
    if args.on_date:
        args.start_date = args.end_date = args.on_date
    # --today-in-history matches today's day/month across all years
    if args.today_in_history:
        now = time.parse("now")
        args.day = now.day
        args.month = now.month
    journal.filter(
        tags=args.text,
        month=args.month,
        day=args.day,
        year=args.year,
        start_date=args.start_date,
        end_date=args.end_date,
        strict=args.strict,
        starred=args.starred,
        tagged=args.tagged,
        exclude=args.excluded,
        exclude_starred=args.exclude_starred,
        exclude_tagged=args.exclude_tagged,
        contains=args.contains,
    )
    # Cap the result count last, after all other filters have applied
    journal.limit(args.limit)
def _print_entries_found_count(count: int, args: "Namespace") -> None:
    """Report how many entries matched, unless --limit already implies the count."""
    logging.debug(f"count: {count}")
    if count == 0:
        # Tailor the "nothing found" message to the requested action
        if args.edit or args.change_time:
            zero_msg = Message(MsgText.NothingToModify, MsgStyle.WARNING)
        elif args.delete:
            zero_msg = Message(MsgText.NothingToDelete, MsgStyle.WARNING)
        else:
            zero_msg = Message(MsgText.NoEntriesFound, MsgStyle.NORMAL)
        print_msg(zero_msg)
        return
    if args.limit and args.limit == count:
        # Don't show count if the user expects a limited number of results
        logging.debug("args.limit is true-ish")
        return
    logging.debug("Printing general summary")
    if count == 1:
        my_msg = MsgText.EntryFoundCountSingular
    else:
        my_msg = MsgText.EntryFoundCountPlural
    print_msg(Message(my_msg, MsgStyle.NORMAL, {"num": count}))
def _other_entries(journal: "Journal", entries: list["Entry"]) -> list["Entry"]:
"""Find entries that are not in journal"""
return [e for e in entries if e not in journal.entries]
def _edit_search_results(
    config: dict, journal: "Journal", old_entries: list["Entry"], **kwargs
) -> None:
    """
    1. Send the given journal entries to the user-configured editor
    2. Print out stats on any modifications to journal
    3. Write modifications to journal

    Raises JrnlException when no editor is configured or the editor
    returns no text.
    """
    if not config["editor"]:
        raise JrnlException(
            Message(
                MsgText.EditorNotConfigured,
                MsgStyle.ERROR,
                {"config_file": get_config_path()},
            )
        )
    # separate entries we are not editing
    other_entries = _other_entries(journal, old_entries)
    # Send user to the editor
    try:
        edited = get_text_from_editor(config, journal.editable_str())
    except JrnlException as e:
        # An empty editor buffer means "abort", not "delete everything"
        if e.has_message_text(MsgText.NoTextReceived):
            raise JrnlException(
                Message(MsgText.NoEditsReceivedJournalNotDeleted, MsgStyle.WARNING)
            )
        else:
            raise e
    journal.parse_editable_str(edited)
    # Put back entries we separated earlier, sort, and write the journal
    journal.entries += other_entries
    journal.sort()
    journal.write()
def _print_changed_counts(journal: "Journal", **kwargs) -> None:
    """Summarize how many entries were added, deleted, and modified."""
    stats = journal.get_change_counts()
    # (stats key, singular message, plural message) — checked in this order
    variants = (
        ("added", MsgText.JournalCountAddedSingular, MsgText.JournalCountAddedPlural),
        (
            "deleted",
            MsgText.JournalCountDeletedSingular,
            MsgText.JournalCountDeletedPlural,
        ),
        (
            "modified",
            MsgText.JournalCountModifiedSingular,
            MsgText.JournalCountModifiedPlural,
        ),
    )
    msgs = []
    for key, singular, plural in variants:
        num = stats[key]
        if num > 0:
            text = singular if num == 1 else plural
            msgs.append(Message(text, MsgStyle.NORMAL, {"num": num}))
    if not msgs:
        msgs.append(Message(MsgText.NoEditsReceived, MsgStyle.NORMAL))
    print_msgs(msgs)
def _get_predit_stats(journal: "Journal") -> dict[str, int]:
return {"count": len(journal)}
def _delete_search_results(
    journal: "Journal", old_entries: list["Entry"], **kwargs
) -> None:
    """Interactively pick matched entries to delete, then persist the journal."""
    to_delete = journal.prompt_action_entries(MsgText.DeleteEntryQuestion)
    # Restore the full (unfiltered) entry list so unrelated entries survive
    journal.entries = old_entries
    if not to_delete:
        return
    journal.delete_entries(to_delete)
    journal.write()
def _change_time_search_results(
    args: "Namespace",
    journal: "Journal",
    old_entries: list["Entry"],
    no_prompt: bool = False,  # NOTE(review): unused in this function — confirm callers
    **kwargs,
) -> None:
    """Interactively change the timestamp of matched entries, then persist."""
    # separate entries we are not editing
    # @todo if there's only 1, don't prompt
    entries_to_change = journal.prompt_action_entries(MsgText.ChangeTimeEntryQuestion)
    if entries_to_change:
        date = time.parse(args.change_time)
        # Restore the unfiltered entry list before applying the new date
        journal.entries = old_entries
        journal.change_date_entries(date, entries_to_change)
        journal.write()
def _display_search_results(args: "Namespace", journal: "Journal", **kwargs) -> None:
    """Print the matched entries in the requested output format."""
    if len(journal) == 0:
        return
    # Get export format from config file if not provided at the command line
    args.export = args.export or kwargs["config"].get("display_format")
    if args.tags:
        print(plugins.get_exporter("tags").export(journal))
        return
    if args.short or args.export == "short":
        print(journal.pprint(short=True))
        return
    if args.export == "pretty":
        print(journal.pprint())
        return
    if args.export:
        exporter = plugins.get_exporter(args.export)
        print(exporter.export(journal, args.filename))
        return
    print(journal.pprint())
def _has_search_args(args: "Namespace") -> bool:
"""Looking for arguments that filter a journal"""
return any(
(
args.contains,
args.tagged,
args.excluded,
args.exclude_starred,
args.exclude_tagged,
args.end_date,
args.today_in_history,
args.month,
args.day,
args.year,
args.limit,
args.on_date,
args.starred,
args.start_date,
args.strict, # -and
)
)
def _has_action_args(args: "Namespace") -> bool:
return any(
(
args.change_time,
args.delete,
args.edit,
)
)
def _has_display_args(args: "Namespace") -> bool:
return any(
(
args.tags,
args.short,
args.export, # --format
)
)
def _has_only_tags(tag_symbols: str, args_text: list[str]) -> bool:
    """True when every whitespace-separated word starts with a tag symbol."""
    return all(word[0] in tag_symbols for word in " ".join(args_text).split())
| 13,799
|
Python
|
.py
| 360
| 31.3
| 88
| 0.656325
|
jrnl-org/jrnl
| 6,434
| 519
| 140
|
GPL-3.0
|
9/5/2024, 5:11:58 PM (Europe/Amsterdam)
|
14,573
|
output.py
|
jrnl-org_jrnl/jrnl/output.py
|
# Copyright © 2012-2023 jrnl contributors
# License: https://www.gnu.org/licenses/gpl-3.0.html
import textwrap
from typing import Callable
from rich.console import Console
from rich.text import Text
from jrnl.messages import Message
from jrnl.messages import MsgStyle
from jrnl.messages import MsgText
def deprecated_cmd(
    old_cmd: str, new_cmd: str, callback: Callable | None = None, **kwargs
) -> None:
    """Warn that *old_cmd* is deprecated in favor of *new_cmd*, then run *callback*."""
    warning = Message(
        MsgText.DeprecatedCommand,
        MsgStyle.WARNING,
        {"old_cmd": old_cmd, "new_cmd": new_cmd},
    )
    print_msg(warning)
    if callback is not None:
        callback(**kwargs)
def journal_list_to_json(journal_list: dict) -> str:
    """Serialize the journal listing to a JSON string."""
    from json import dumps

    return dumps(journal_list)
def journal_list_to_yaml(journal_list: dict) -> str:
    """Serialize the journal listing to a YAML string."""
    from io import StringIO

    from ruamel.yaml import YAML

    buffer = StringIO()
    writer = YAML()
    # Wide width keeps long journal paths on a single line
    writer.width = 1000
    writer.dump(journal_list, buffer)
    return buffer.getvalue()
def journal_list_to_stdout(journal_list: dict) -> str:
    """Render the journal listing as a human-readable table."""
    journals = journal_list["journals"]
    lines = [f"Journals defined in config ({journal_list['config_path']})"]
    # Pad names to the longest journal name, capped at 20 columns
    pad = min(max(len(name) for name in journals), 20)
    for name, cfg in journals.items():
        path = cfg["journal"] if isinstance(cfg, dict) else cfg
        lines.append(" * {:{}} -> {}".format(name, pad, path))
    return "\n".join(lines) + "\n"
def list_journals(configuration: dict, format: str | None = None) -> str:
    """List the journals specified in the configuration file.

    Renders as JSON or YAML when *format* is "json"/"yaml", otherwise as a
    plain-text table.
    """
    # NOTE: the docstring was previously placed after this import, where it was
    # a bare string statement instead of the function's __doc__.
    # Local import — presumably avoids a circular import at module load; confirm.
    from jrnl import config

    journal_list = {
        "config_path": config.get_config_path(),
        "journals": configuration["journals"],
    }
    if format == "json":
        return journal_list_to_json(journal_list)
    elif format == "yaml":
        return journal_list_to_yaml(journal_list)
    else:
        return journal_list_to_stdout(journal_list)
def print_msg(msg: Message, **kwargs) -> str | None:
    """Helper function to print a single message.

    When get_input=True is passed through, returns the user's input;
    otherwise returns None.
    """
    kwargs["style"] = msg.style
    return print_msgs([msg], **kwargs)
def print_msgs(
    msgs: list[Message],
    delimiter: str = "\n",
    style: MsgStyle = MsgStyle.NORMAL,
    get_input: bool = False,
    hide_input: bool = False,
) -> str | None:
    # Same as print_msg, but for a list.
    # Returns the user's input when get_input is True; otherwise returns
    # None implicitly after printing.
    text = Text("", end="")
    kwargs = style.decoration.args
    for i, msg in enumerate(msgs):
        kwargs = _add_extra_style_args_if_needed(kwargs, msg=msg)
        m = format_msg_text(msg)
        # Join messages with the delimiter, but not after the last one
        if i != len(msgs) - 1:
            m.append(delimiter)
        text.append(m)
    if style.append_space:
        text.append(" ")
    decorated_text = style.decoration.callback(text, **kwargs)
    # Always print messages to stderr
    console = _get_console(stderr=True)
    if get_input:
        return str(console.input(prompt=decorated_text, password=hide_input))
    console.print(decorated_text, new_line_start=style.prepend_newline)
def _get_console(stderr: bool = True) -> Console:
    """Build a Rich console; defaults to stderr so output doesn't pollute stdout."""
    console = Console(stderr=stderr)
    return console
def _add_extra_style_args_if_needed(args: dict, msg: Message):
args["border_style"] = msg.style.color
args["title"] = msg.style.box_title
return args
def format_msg_text(msg: Message) -> Text:
    """Dedent, interpolate params, and trim a message into a Rich Text object."""
    raw = textwrap.dedent(msg.text.value)
    raw = raw.format(**msg.params)
    # dedent again in case inserted text needs it
    raw = textwrap.dedent(raw).strip()
    return Text(raw)
def wrap_with_ansi_colors(text: str, width: int) -> str:
    """Re-wrap ANSI-colored text to *width* columns, preserving escape codes."""
    console = Console(width=width, force_terminal=True)
    rendered = Text.from_ansi(text, no_wrap=False, tab_size=None)
    with console.capture() as capture:
        console.print(rendered, sep="", end="")
    return capture.get()
| 3,768
|
Python
|
.py
| 100
| 32.06
| 77
| 0.665747
|
jrnl-org/jrnl
| 6,434
| 519
| 140
|
GPL-3.0
|
9/5/2024, 5:11:58 PM (Europe/Amsterdam)
|
14,574
|
upgrade.py
|
jrnl-org_jrnl/jrnl/upgrade.py
|
# Copyright © 2012-2023 jrnl contributors
# License: https://www.gnu.org/licenses/gpl-3.0.html
import logging
import os
from jrnl import __version__
from jrnl.config import is_config_json
from jrnl.config import load_config
from jrnl.config import scope_config
from jrnl.exception import JrnlException
from jrnl.journals import Journal
from jrnl.journals import open_journal
from jrnl.messages import Message
from jrnl.messages import MsgStyle
from jrnl.messages import MsgText
from jrnl.output import print_msg
from jrnl.output import print_msgs
from jrnl.path import expand_path
from jrnl.prompt import yesno
def backup(filename: str, binary: bool = False):
    """Copy *filename* to "<filename>.backup" before a destructive upgrade.

    If the file does not exist, warn and ask whether to continue anyway;
    raises JrnlException to abort the upgrade when the user declines.
    """
    filename = expand_path(filename)
    try:
        with open(filename, "rb" if binary else "r") as original:
            contents = original.read()
        # Renamed from `backup` to avoid shadowing this function's own name
        with open(filename + ".backup", "wb" if binary else "w") as backup_file:
            backup_file.write(contents)
        print_msg(
            Message(
                MsgText.BackupCreated,
                MsgStyle.NORMAL,
                # Bug fix: report the actual backup path instead of the
                # hard-coded literal string "filename.backup"
                {"filename": filename + ".backup"},
            )
        )
    except FileNotFoundError:
        print_msg(Message(MsgText.DoesNotExist, MsgStyle.WARNING, {"name": filename}))
        # Bug fix: the f-string had no placeholder; include the missing path
        cont = yesno(f"\nCreate {filename}?", default=False)
        if not cont:
            raise JrnlException(Message(MsgText.UpgradeAborted, MsgStyle.WARNING))
def check_exists(path: str) -> bool:
    """Return True when the given filesystem path exists."""
    path_exists = os.path.exists(path)
    return path_exists
def upgrade_jrnl(config_path: str) -> None:
    """Interactively upgrade all configured journals and the config file.

    Journals are grouped into encrypted / plaintext / ignored (directories),
    summarized, and — after user confirmation — backed up and re-written in
    the new format. Raises JrnlException when the user aborts or any journal
    fails to parse after conversion.
    """
    config = load_config(config_path)
    print_msg(Message(MsgText.WelcomeToJrnl, MsgStyle.NORMAL, {"version": __version__}))
    encrypted_journals = {}
    plain_journals = {}
    other_journals = {}
    all_journals = []
    # Classify each configured journal by its config shape and on-disk form
    for journal_name, journal_conf in config["journals"].items():
        if isinstance(journal_conf, dict):
            path = expand_path(journal_conf.get("journal"))
            encrypt = journal_conf.get("encrypt")
        else:
            # Scalar config entry: path only, encryption comes from the top level
            encrypt = config.get("encrypt")
            path = expand_path(journal_conf)
        if os.path.exists(path):
            path = os.path.expanduser(path)
        else:
            print_msg(Message(MsgText.DoesNotExist, MsgStyle.ERROR, {"name": path}))
            continue
        if encrypt:
            encrypted_journals[journal_name] = path
        elif os.path.isdir(path):
            # Directory-based journals are not upgraded
            other_journals[journal_name] = path
        else:
            plain_journals[journal_name] = path
    kwargs = {
        # longest journal name
        "pad": max([len(journal) for journal in config["journals"]]),
    }
    _print_journal_summary(
        journals=encrypted_journals,
        header=Message(
            MsgText.JournalsToUpgrade,
            params={
                "version": __version__,
            },
        ),
        **kwargs,
    )
    _print_journal_summary(
        journals=plain_journals,
        header=Message(
            MsgText.JournalsToUpgrade,
            params={
                "version": __version__,
            },
        ),
        **kwargs,
    )
    _print_journal_summary(
        journals=other_journals,
        header=Message(MsgText.JournalsToIgnore),
        **kwargs,
    )
    # Nothing destructive happens before this confirmation
    cont = yesno(Message(MsgText.ContinueUpgrade), default=False)
    if not cont:
        raise JrnlException(Message(MsgText.UpgradeAborted, MsgStyle.WARNING))
    for journal_name, path in encrypted_journals.items():
        print_msg(
            Message(
                MsgText.UpgradingJournal,
                params={
                    "journal_name": journal_name,
                    "path": path,
                },
            )
        )
        backup(path, binary=True)
        old_journal = open_journal(
            journal_name, scope_config(config, journal_name), legacy=True
        )
        logging.debug(f"Clearing encryption method for '{journal_name}' journal")
        # Update the encryption method
        new_journal = Journal.from_journal(old_journal)
        new_journal.config["encrypt"] = "jrnlv2"
        new_journal._get_encryption_method()
        # Copy over password (jrnlv1 only supported password-based encryption)
        new_journal.encryption_method.password = old_journal.encryption_method.password
        all_journals.append(new_journal)
    for journal_name, path in plain_journals.items():
        print_msg(
            Message(
                MsgText.UpgradingJournal,
                params={
                    "journal_name": journal_name,
                    "path": path,
                },
            )
        )
        backup(path)
        old_journal = open_journal(
            journal_name, scope_config(config, journal_name), legacy=True
        )
        all_journals.append(Journal.from_journal(old_journal))
    # loop through lists to validate
    failed_journals = [j for j in all_journals if not j.validate_parsing()]
    if len(failed_journals) > 0:
        raise JrnlException(
            Message(MsgText.AbortingUpgrade, MsgStyle.WARNING),
            Message(
                MsgText.JournalFailedUpgrade,
                MsgStyle.ERROR,
                {
                    "s": "s" if len(failed_journals) > 1 else "",
                    "failed_journals": "\n".join(j.name for j in failed_journals),
                },
            ),
        )
    # write all journals - or - don't
    for j in all_journals:
        j.write()
    print_msg(Message(MsgText.UpgradingConfig, MsgStyle.NORMAL))
    backup(config_path)
    print_msg(Message(MsgText.AllDoneUpgrade, MsgStyle.NORMAL))
def is_old_version(config_path: str) -> bool:
    """A JSON-format config file marks the old, pre-upgrade version of jrnl."""
    return is_config_json(config_path)
def _print_journal_summary(journals: dict, header: Message, pad: int) -> None:
    """Print a header followed by one padded name/path line per journal."""
    if not journals:
        return
    msgs = [header] + [
        Message(
            MsgText.PaddedJournalName,
            params={
                "journal_name": name,
                "path": path,
                "pad": pad,
            },
        )
        for name, path in journals.items()
    ]
    print_msgs(msgs)
| 6,164
|
Python
|
.py
| 172
| 26.465116
| 88
| 0.600806
|
jrnl-org/jrnl
| 6,434
| 519
| 140
|
GPL-3.0
|
9/5/2024, 5:11:58 PM (Europe/Amsterdam)
|
14,575
|
time.py
|
jrnl-org_jrnl/jrnl/time.py
|
# Copyright © 2012-2023 jrnl contributors
# License: https://www.gnu.org/licenses/gpl-3.0.html
import datetime
# Sentinel year passed as the dateutil default: if a parsed result still has
# FAKE_YEAR, the input contained no explicit year (see parse()).
FAKE_YEAR = 9999
DEFAULT_FUTURE = datetime.datetime(FAKE_YEAR, 12, 31, 23, 59, 59)
DEFAULT_PAST = datetime.datetime(FAKE_YEAR, 1, 1, 0, 0)
def __get_pdt_calendar():
    """Build a parsedatetime calendar with context-style results."""
    import parsedatetime as pdt

    constants = pdt.Constants(usePyICU=False)
    # "Monday" will be either today or the last Monday
    constants.DOWParseStyle = -1
    return pdt.Calendar(constants, version=pdt.VERSION_CONTEXT_STYLE)
def parse(
date_str: str | datetime.datetime,
inclusive: bool = False,
default_hour: int | None = None,
default_minute: int | None = None,
bracketed: bool = False,
) -> datetime.datetime | None:
"""Parses a string containing a fuzzy date and returns a datetime.datetime object"""
if not date_str:
return None
elif isinstance(date_str, datetime.datetime):
return date_str
# Don't try to parse anything with 6 or fewer characters and was parsed from the
# existing journal. It's probably a markdown footnote
if len(date_str) <= 6 and bracketed:
return None
default_date = DEFAULT_FUTURE if inclusive else DEFAULT_PAST
date = None
year_present = False
hasTime = False
hasDate = False
while not date:
try:
from dateutil.parser import parse as dateparse
date = dateparse(date_str, default=default_date)
if date.year == FAKE_YEAR:
date = datetime.datetime(
datetime.datetime.now().year, date.timetuple()[1:6]
)
else:
year_present = True
hasTime = not (date.hour == date.minute == 0)
hasDate = True
date = date.timetuple()
except Exception as e:
if e.args[0] == "day is out of range for month":
y, m, d, H, M, S = default_date.timetuple()[:6]
default_date = datetime.datetime(y, m, d - 1, H, M, S)
else:
calendar = __get_pdt_calendar()
date, parse_context = calendar.parse(date_str)
hasTime = parse_context.hasTime
hasDate = parse_context.hasDate
if not hasDate and not hasTime:
try: # Try and parse this as a single year
year = int(date_str)
return datetime.datetime(year, 1, 1)
except ValueError:
return None
except TypeError:
return None
if hasDate and not hasTime:
date = datetime.datetime( # Use the default time
*date[:3],
hour=23 if inclusive else default_hour or 0,
minute=59 if inclusive else default_minute or 0,
second=59 if inclusive else 0
)
else:
date = datetime.datetime(*date[:6])
# Ugly heuristic: if the date is more than 4 weeks in the future, we got the year
# wrong. Rather than this, we would like to see parsedatetime patched so we can
# tell it to prefer past dates
dt = datetime.datetime.now() - date
if dt.days < -28 and not year_present:
date = date.replace(date.year - 1)
return date
def is_valid_date(year: int, month: int, day: int) -> bool:
    """Return whether year/month/day form a real calendar date."""
    try:
        datetime.datetime(year, month, day)
    except ValueError:
        return False
    return True
| 3,395
|
Python
|
.py
| 85
| 31.435294
| 88
| 0.620595
|
jrnl-org/jrnl
| 6,434
| 519
| 140
|
GPL-3.0
|
9/5/2024, 5:11:58 PM (Europe/Amsterdam)
|
14,576
|
__init__.py
|
jrnl-org_jrnl/jrnl/__init__.py
|
# Copyright © 2012-2023 jrnl contributors
# License: https://www.gnu.org/licenses/gpl-3.0.html
# The build writes the real version to jrnl/__version__.py; fall back to a
# sentinel when running straight from a source checkout.
try:
    from jrnl.__version__ import __version__
except ImportError:
    __version__ = "source"
__title__ = "jrnl"
| 213
|
Python
|
.py
| 7
| 28.142857
| 52
| 0.692683
|
jrnl-org/jrnl
| 6,434
| 519
| 140
|
GPL-3.0
|
9/5/2024, 5:11:58 PM (Europe/Amsterdam)
|
14,577
|
editor.py
|
jrnl-org_jrnl/jrnl/editor.py
|
# Copyright © 2012-2023 jrnl contributors
# License: https://www.gnu.org/licenses/gpl-3.0.html
import logging
import os
import subprocess
import sys
import tempfile
from pathlib import Path
from jrnl.exception import JrnlException
from jrnl.messages import Message
from jrnl.messages import MsgStyle
from jrnl.messages import MsgText
from jrnl.os_compat import on_windows
from jrnl.os_compat import split_args
from jrnl.output import print_msg
from jrnl.path import absolute_path
from jrnl.path import get_templates_path
def get_text_from_editor(config: dict, template: str = "") -> str:
    """Open the configured editor on a temp file and return what the user wrote.

    The temp file is prepopulated with *template* (when given) and its suffix
    carries the template filename so editors can pick a mode.
    Raises JrnlException when the editor cannot be launched or no text is
    returned.
    """
    suffix = ".jrnl"
    if config["template"]:
        template_filename = Path(config["template"]).name
        suffix = "-" + template_filename
    filehandle, tmpfile = tempfile.mkstemp(prefix="jrnl", text=True, suffix=suffix)
    # Close the low-level handle; the file is re-opened by name below
    os.close(filehandle)
    with open(tmpfile, "w", encoding="utf-8") as f:
        if template:
            f.write(template)
    try:
        # Editor command may contain arguments, so split it shell-style
        subprocess.call(split_args(config["editor"]) + [tmpfile])
    except FileNotFoundError:
        # NOTE(review): tmpfile is not removed on this path — minor temp-file leak
        raise JrnlException(
            Message(
                MsgText.EditorMisconfigured,
                MsgStyle.ERROR,
                {"editor_key": config["editor"]},
            )
        )
    with open(tmpfile, "r", encoding="utf-8") as f:
        raw = f.read()
    os.remove(tmpfile)
    if not raw:
        raise JrnlException(Message(MsgText.NoTextReceived, MsgStyle.NORMAL))
    return raw
def get_text_from_stdin() -> str:
    """Read a journal entry interactively from stdin until EOF."""
    how_to_quit = (
        MsgText.HowToQuitWindows if on_windows() else MsgText.HowToQuitLinux
    )
    print_msg(
        Message(
            MsgText.WritingEntryStart,
            MsgStyle.TITLE,
            {"how_to_quit": how_to_quit},
        )
    )
    try:
        raw = sys.stdin.read()
    except KeyboardInterrupt:
        logging.error("Append mode: keyboard interrupt")
        raise JrnlException(
            Message(MsgText.KeyboardInterruptMsg, MsgStyle.ERROR_ON_NEW_LINE),
            Message(MsgText.JournalNotSaved, MsgStyle.WARNING),
        )
    return raw
def get_template_path(template_path: str, jrnl_template_dir: str) -> str:
    """Resolve a template path: prefer the jrnl template dir, else local/abs path."""
    candidate = os.path.join(jrnl_template_dir, template_path)
    if os.path.exists(candidate):
        return candidate
    logging.debug(
        f"Couldn't open {candidate}. "
        "Treating template path like a local / abs path."
    )
    return absolute_path(template_path)
def read_template_file(template_path: str) -> str:
    """
    Reads the template file given a template path in this order:
    * Check $XDG_DATA_HOME/jrnl/templates/template_path.
    * Check template_arg as an absolute / relative path.
    If a file is found, its contents are returned as a string.
    If not, a JrnlException is raised.
    """
    jrnl_template_dir = get_templates_path()
    resolved_path = get_template_path(template_path, jrnl_template_dir)
    try:
        with open(resolved_path, encoding="utf-8") as template_file:
            return template_file.read()
    except FileNotFoundError:
        raise JrnlException(
            Message(
                MsgText.CantReadTemplate,
                MsgStyle.ERROR,
                {
                    "template_path": template_path,
                    "actual_template_path": resolved_path,
                    "jrnl_template_dir": str(jrnl_template_dir) + os.sep,
                },
            )
        )
| 3,593
|
Python
|
.py
| 99
| 28.080808
| 88
| 0.635945
|
jrnl-org/jrnl
| 6,434
| 519
| 140
|
GPL-3.0
|
9/5/2024, 5:11:58 PM (Europe/Amsterdam)
|
14,578
|
color.py
|
jrnl-org_jrnl/jrnl/color.py
|
# Copyright © 2012-2023 jrnl contributors
# License: https://www.gnu.org/licenses/gpl-3.0.html
import re
from string import punctuation
from string import whitespace
from typing import TYPE_CHECKING
import colorama
from jrnl.os_compat import on_windows
if TYPE_CHECKING:
from jrnl.journals import Entry
if on_windows():
colorama.init()
def colorize(string: str, color: str, bold: bool = False) -> str:
    """Wrap *string* in colorama escape codes for *color*.

    Unknown colors (or "NONE") leave the string unmodified.
    """
    color_escape = getattr(colorama.Fore, color.upper(), None)
    if not color_escape:
        return string
    if bold:
        return colorama.Style.BRIGHT + color_escape + string + colorama.Style.RESET_ALL
    return color_escape + string + colorama.Fore.RESET
def highlight_tags_with_background_color(
    entry: "Entry", text: str, color: str, is_title: bool = False
) -> str:
    """
    Takes a string and colorizes the tags in it based upon the config value for
    color.tags, while colorizing the rest of the text based on `color`.
    :param entry: Entry object, for access to journal config
    :param text: Text to be colorized
    :param color: Color for non-tag text, passed to colorize()
    :param is_title: Boolean flag indicating if the text is a title or not
    :return: Colorized str
    """

    def colorized_text_generator(fragments):
        """Efficiently generate colorized tags / text from text fragments.
        Taken from @shobrook. Thanks, buddy :)
        :param fragments: List of strings representing parts of entry (tag or word).
        :rtype: List of tuples
        :returns [(colorized_str, original_str)]"""
        for part in fragments:
            # Fragments starting with a tag symbol get the tag color, bolded;
            # everything else gets the caller-supplied color.
            if part and part[0] not in config["tagsymbols"]:
                yield colorize(part, color, bold=is_title), part
            elif part:
                yield colorize(part, config["colors"]["tags"], bold=True), part

    config = entry.journal.config
    if config["highlight"]:  # highlight tags
        # Split on the tag regex so tags become standalone fragments
        text_fragments = re.split(entry.tag_regex(config["tagsymbols"]), text)
        # Colorizing tags inside of other blocks of text
        final_text = ""
        previous_piece = ""
        for colorized_piece, piece in colorized_text_generator(text_fragments):
            # If this piece is entirely punctuation or whitespace or the start
            # of a line or the previous piece was a tag or this piece is a tag,
            # then add it to the final text without a leading space.
            if (
                all(char in punctuation + whitespace for char in piece)
                or previous_piece.endswith("\n")
                or (previous_piece and previous_piece[0] in config["tagsymbols"])
                or piece[0] in config["tagsymbols"]
            ):
                final_text += colorized_piece
            else:
                # Otherwise add a leading space and then append the piece.
                final_text += " " + colorized_piece
            previous_piece = piece
        return final_text.lstrip()
    else:
        return text
| 3,227
|
Python
|
.py
| 70
| 38.171429
| 87
| 0.660623
|
jrnl-org/jrnl
| 6,434
| 519
| 140
|
GPL-3.0
|
9/5/2024, 5:11:58 PM (Europe/Amsterdam)
|
14,579
|
args.py
|
jrnl-org_jrnl/jrnl/args.py
|
# Copyright © 2012-2023 jrnl contributors
# License: https://www.gnu.org/licenses/gpl-3.0.html
import argparse
import re
import textwrap
from jrnl.commands import postconfig_decrypt
from jrnl.commands import postconfig_encrypt
from jrnl.commands import postconfig_import
from jrnl.commands import postconfig_list
from jrnl.commands import preconfig_diagnostic
from jrnl.commands import preconfig_version
from jrnl.output import deprecated_cmd
from jrnl.plugins import EXPORT_FORMATS
from jrnl.plugins import IMPORT_FORMATS
from jrnl.plugins import util
class WrappingFormatter(argparse.RawTextHelpFormatter):
    """Used in help screen"""

    def _split_lines(self, text: str, width: int) -> list[str]:
        # Treat blank-line-separated chunks as paragraphs, wrap each at 56
        # columns, and flatten the result into a single list of lines.
        paragraphs = text.split("\n\n")
        wrapped: list[str] = []
        for paragraph in paragraphs:
            cleaned = self._whitespace_matcher.sub(" ", paragraph).strip()
            wrapped.extend(textwrap.wrap(cleaned, width=56))
        return wrapped
class IgnoreNoneAppendAction(argparse._AppendAction):
    """
    Pass -not without a following string and avoid appending
    a None value to the excluded list
    """

    def __call__(self, parser, namespace, values, option_string=None):
        if values is None:
            # Bare "-not" with no argument: append nothing.
            return
        super().__call__(parser, namespace, values, option_string)
def parse_not_arg(
    args: list[str], parsed_args: argparse.Namespace, parser: argparse.ArgumentParser
) -> argparse.Namespace:
    """
    It's possible to use -not as a precursor to -starred and -tagged
    to reverse their behaviour, however this requires some extra logic
    to parse, and to ensure we still do not allow passing an empty -not
    """
    joined_args = "".join(args)
    parsed_args.exclude_starred = "-not-starred" in joined_args
    parsed_args.exclude_tagged = "-not-tagged" in joined_args
    if parsed_args.exclude_starred:
        parsed_args.starred = False
    if parsed_args.exclude_tagged:
        parsed_args.tagged = False
    # A bare "-not" with nothing to exclude is a usage error
    bare_not = "-not" in args
    has_exclusions = any(
        [parsed_args.exclude_starred, parsed_args.exclude_tagged, parsed_args.excluded]
    )
    if bare_not and not has_exclusions:
        parser.error("argument -not: expected 1 argument")
    return parsed_args
def parse_args(args: list[str] | None = None) -> argparse.Namespace:
    """
    Argument parsing that is doable before the config is available.
    Everything else goes into "text" for later parsing.

    :param args: raw CLI tokens; defaults to an empty list, NOT sys.argv
        (the caller passes sys.argv[1:] explicitly).
    """
    # A mutable default argument ([]) is an anti-pattern, so the empty-list
    # default is materialized here instead. Behavior is unchanged.
    if args is None:
        args = []
    parser = argparse.ArgumentParser(
        formatter_class=WrappingFormatter,
        add_help=False,
        description="Collect your thoughts and notes without leaving the command line",
        epilog=textwrap.dedent(
            """
        We gratefully thank all contributors!
        Come see the whole list of code and financial contributors at https://github.com/jrnl-org/jrnl
        And special thanks to Bad Lip Reading for the Yoda joke in the Writing section above :)"""  # noqa: E501
        ),
    )
    optional = parser.add_argument_group("Optional Arguments")
    optional.add_argument(
        "--debug",
        dest="debug",
        action="store_true",
        help="Print information useful for troubleshooting",
    )
    standalone = parser.add_argument_group(
        "Standalone Commands",
        "These commands will exit after they complete. You may only run one at a time.",
    )
    standalone.add_argument("--help", action="help", help="Show this help message")
    standalone.add_argument("-h", action="help", help=argparse.SUPPRESS)
    standalone.add_argument(
        "--version",
        action="store_const",
        const=preconfig_version,
        dest="preconfig_cmd",
        help="Print version information",
    )
    standalone.add_argument(
        "-v",
        action="store_const",
        const=preconfig_version,
        dest="preconfig_cmd",
        help=argparse.SUPPRESS,
    )
    standalone.add_argument(
        "--diagnostic",
        action="store_const",
        const=preconfig_diagnostic,
        dest="preconfig_cmd",
        help=argparse.SUPPRESS,
    )
    standalone.add_argument(
        "--list",
        action="store_const",
        const=postconfig_list,
        dest="postconfig_cmd",
        help="""
        List all configured journals.

        Optional parameters:
        --format [json or yaml]
        """,
    )
    standalone.add_argument(
        "--ls",
        action="store_const",
        const=postconfig_list,
        dest="postconfig_cmd",
        help=argparse.SUPPRESS,
    )
    standalone.add_argument(
        "-ls",
        action="store_const",
        const=lambda **kwargs: deprecated_cmd(
            "-ls", "--list or --ls", callback=postconfig_list, **kwargs
        ),
        dest="postconfig_cmd",
        help=argparse.SUPPRESS,
    )
    standalone.add_argument(
        "--encrypt",
        help="Encrypt selected journal with a password",
        action="store_const",
        metavar="TYPE",
        const=postconfig_encrypt,
        dest="postconfig_cmd",
    )
    standalone.add_argument(
        "--decrypt",
        help="Decrypt selected journal and store it in plain text",
        action="store_const",
        metavar="TYPE",
        const=postconfig_decrypt,
        dest="postconfig_cmd",
    )
    standalone.add_argument(
        "--import",
        action="store_const",
        metavar="TYPE",
        const=postconfig_import,
        dest="postconfig_cmd",
        help=f"""
        Import entries from another journal.

        Optional parameters:
        --file FILENAME (default: uses stdin)
        --format [{util.oxford_list(IMPORT_FORMATS)}] (default: jrnl)
        """,
    )
    standalone.add_argument(
        "--file",
        metavar="FILENAME",
        dest="filename",
        help=argparse.SUPPRESS,
        default=None,
    )
    standalone.add_argument("-i", dest="filename", help=argparse.SUPPRESS)
    compose_msg = """
    To add a new entry into your journal, simply write it on the command line:

        jrnl yesterday: I was walking and I found this big log.

    The date and the following colon ("yesterday:") are optional. If you leave
    them out, "now" will be used:

        jrnl Then I rolled the log over.

    Also, you can mark extra special entries ("star" them) with an asterisk:

        jrnl *And underneath was a tiny little stick.

    Please note that asterisks might be a special character in your shell, so you
    might have to escape them. When in doubt about escaping, put quotes around
    your entire entry:

        jrnl "saturday at 2am: *Then I was like 'That log had a child!'" """
    composing = parser.add_argument_group(
        "Writing", textwrap.dedent(compose_msg).strip()
    )
    composing.add_argument("text", metavar="", nargs="*")
    composing.add_argument(
        "--template",
        dest="template",
        help="Path to template file. Can be a local path, absolute path, or a path "
        "relative to $XDG_DATA_HOME/jrnl/templates/",
    )
    read_msg = (
        "To find entries from your journal, use any combination of the below filters."
    )
    reading = parser.add_argument_group("Searching", textwrap.dedent(read_msg))
    reading.add_argument(
        "-on", dest="on_date", metavar="DATE", help="Show entries on this date"
    )
    reading.add_argument(
        "-today-in-history",
        dest="today_in_history",
        action="store_true",
        help="Show entries of today over the years",
    )
    reading.add_argument(
        "-month",
        dest="month",
        metavar="DATE",
        help="Show entries on this month of any year",
    )
    reading.add_argument(
        "-day",
        dest="day",
        metavar="DATE",
        help="Show entries on this day of any month",
    )
    reading.add_argument(
        "-year",
        dest="year",
        metavar="DATE",
        help="Show entries of a specific year",
    )
    reading.add_argument(
        "-from",
        dest="start_date",
        metavar="DATE",
        help="Show entries after, or on, this date",
    )
    reading.add_argument(
        "-to",
        dest="end_date",
        metavar="DATE",
        help="Show entries before, or on, this date (alias: -until)",
    )
    reading.add_argument("-until", dest="end_date", help=argparse.SUPPRESS)
    reading.add_argument(
        "-contains",
        dest="contains",
        action="append",
        metavar="TEXT",
        help="Show entries containing specific text (put quotes around text with "
        "spaces)",
    )
    reading.add_argument(
        "-and",
        dest="strict",
        action="store_true",
        help='Show only entries that match all conditions, like saying "x AND y" '
        "(default: OR)",
    )
    reading.add_argument(
        "-starred",
        dest="starred",
        action="store_true",
        help="Show only starred entries (marked with *)",
    )
    reading.add_argument(
        "-tagged",
        dest="tagged",
        action="store_true",
        help="Show only entries that have at least one tag",
    )
    reading.add_argument(
        "-n",
        dest="limit",
        default=None,
        metavar="NUMBER",
        help="Show a maximum of NUMBER entries (note: '-n 3' and '-3' have the same "
        "effect)",
        nargs="?",
        type=int,
    )
    reading.add_argument(
        "-not",
        dest="excluded",
        nargs="?",
        default=[],
        metavar="TAG/FLAG",
        action=IgnoreNoneAppendAction,
        help=(
            "If passed a string, will exclude entries with that tag. "
            "Can be also used before -starred or -tagged flags, to exclude "
            "starred or tagged entries respectively."
        ),
    )
    search_options_msg = (
        "    "  # Preserves indentation
        """
    These help you do various tasks with the selected entries from your search.

    If used on their own (with no search), they will act on your entire journal"""
    )
    exporting = parser.add_argument_group(
        "Searching Options", textwrap.dedent(search_options_msg)
    )
    exporting.add_argument(
        "--edit",
        dest="edit",
        help="Opens the selected entries in your configured editor",
        action="store_true",
    )
    exporting.add_argument(
        "--delete",
        dest="delete",
        action="store_true",
        help="Interactively deletes selected entries",
    )
    exporting.add_argument(
        "--change-time",
        dest="change_time",
        nargs="?",
        metavar="DATE",
        const="now",
        help="Change timestamp for selected entries (default: now)",
    )
    exporting.add_argument(
        "--format",
        metavar="TYPE",
        dest="export",
        choices=EXPORT_FORMATS,
        help=f"""
        Display selected entries in an alternate format.

        TYPE can be: {util.oxford_list(EXPORT_FORMATS)}.

        Optional parameters:
        --file FILENAME Write output to file instead of stdout
        """,
        default=False,
    )
    exporting.add_argument(
        "--export",
        metavar="TYPE",
        dest="export",
        choices=EXPORT_FORMATS,
        help=argparse.SUPPRESS,
    )
    exporting.add_argument(
        "--tags",
        dest="tags",
        action="store_true",
        help="Alias for '--format tags'. Returns a list of all tags and number of "
        "occurrences",
    )
    exporting.add_argument(
        "--short",
        dest="short",
        action="store_true",
        help="Show only titles or line containing the search tags",
    )
    exporting.add_argument(
        "-s",
        dest="short",
        action="store_true",
        help=argparse.SUPPRESS,
    )
    exporting.add_argument(
        "-o",
        dest="filename",
        help=argparse.SUPPRESS,
    )
    config_overrides = parser.add_argument_group(
        "Config file override",
        textwrap.dedent("Apply a one-off override of the config file option"),
    )
    config_overrides.add_argument(
        "--config-override",
        dest="config_override",
        action="append",
        type=str,
        nargs=2,
        default=[],
        metavar="CONFIG_KV_PAIR",
        help="""
        Override configured key-value pair with CONFIG_KV_PAIR for this command invocation only.

        Examples: \n
        \t - Use a different editor for this jrnl entry, call: \n
        \t jrnl --config-override editor "nano" \n
        \t - Override color selections\n
        \t jrnl --config-override colors.body blue --config-override colors.title green
        """,  # noqa: E501
    )
    config_overrides.add_argument(
        "--co",
        dest="config_override",
        action="append",
        type=str,
        nargs=2,
        default=[],
        help=argparse.SUPPRESS,
    )
    alternate_config = parser.add_argument_group(
        "Specifies alternate config to be used",
        textwrap.dedent("Applies alternate config for current session"),
    )
    alternate_config.add_argument(
        "--config-file",
        dest="config_file_path",
        type=str,
        default="",
        help="""
        Overrides default (created when first installed) config file for this command only.

        Examples: \n
        \t - Use a work config file for this jrnl entry, call: \n
        \t jrnl --config-file /home/user1/work_config.yaml
        \t - Use a personal config file stored on a thumb drive: \n
        \t jrnl --config-file /media/user1/my-thumb-drive/personal_config.yaml
        """,  # noqa: E501
    )
    alternate_config.add_argument(
        "--cf", dest="config_file_path", type=str, default="", help=argparse.SUPPRESS
    )
    # Handle '-123' as a shortcut for '-n 123'
    num = re.compile(r"^-(\d+)$")
    args = [num.sub(r"-n \1", arg) for arg in args]
    parsed_args = parser.parse_intermixed_args(args)
    parsed_args = parse_not_arg(args, parsed_args, parser)
    return parsed_args
| 13,964
|
Python
|
.py
| 412
| 26.410194
| 112
| 0.61756
|
jrnl-org/jrnl
| 6,434
| 519
| 140
|
GPL-3.0
|
9/5/2024, 5:11:58 PM (Europe/Amsterdam)
|
14,580
|
override.py
|
jrnl-org_jrnl/jrnl/override.py
|
# Copyright © 2012-2023 jrnl contributors
# License: https://www.gnu.org/licenses/gpl-3.0.html
from typing import TYPE_CHECKING
from jrnl.config import make_yaml_valid_dict
from jrnl.config import update_config
if TYPE_CHECKING:
from argparse import Namespace
# import logging
def apply_overrides(args: "Namespace", base_config: dict) -> dict:
    """Apply --config-override key/value pairs on top of the loaded config.

    Each override is normalized into a YAML-valid single-entry dict, its
    dotted key is expanded into a path, and the value is written at the
    matching leaf of a working copy of the configuration. The merged result
    is folded back into ``base_config``, which is returned.
    """
    cli_overrides = vars(args).get("config_override")
    if not cli_overrides:
        # Nothing to apply; hand back the configuration untouched.
        return base_config
    working_config = base_config.copy()
    for raw_pair in cli_overrides:
        valid_pair = make_yaml_valid_dict(raw_pair)
        dotted_key, value = _get_key_and_value_from_pair(valid_pair)
        key_path = _convert_dots_to_list(dotted_key)
        working_config = _recursively_apply(working_config, key_path, value)
    update_config(base_config, working_config, None)
    return base_config
def _get_key_and_value_from_pair(pairs: dict) -> tuple:
key_as_dots, override_value = list(pairs.items())[0]
return key_as_dots, override_value
def _convert_dots_to_list(key_as_dots: str) -> list[str]:
keys = key_as_dots.split(".")
keys = [k for k in keys if k != ""] # remove empty elements
return keys
def _recursively_apply(tree: dict, nodes: list, override_value) -> dict:
    """Descend through the config tree and set the leaf to the override value.

    Credit to iJames on SO: https://stackoverflow.com/a/47276490 for algorithm

    :param tree: configuration (sub)tree, modified in place
    :param nodes: remaining key path; its length is the remaining tree depth
    :param override_value: runtime override passed from the command line
    :return: the mutated ``tree``
    """
    head = nodes[0]
    if len(nodes) > 1:
        # Not at the leaf yet: fetch the child node and recurse into it.
        _recursively_apply(_get_config_node(tree, head), nodes[1:], override_value)
    else:
        tree[head] = override_value
    return tree
def _get_config_node(config: dict, key: str):
if key in config:
pass
else:
config[key] = None
return config[key]
| 2,469
|
Python
|
.py
| 60
| 35.366667
| 87
| 0.690795
|
jrnl-org/jrnl
| 6,434
| 519
| 140
|
GPL-3.0
|
9/5/2024, 5:11:58 PM (Europe/Amsterdam)
|
14,581
|
main.py
|
jrnl-org_jrnl/jrnl/main.py
|
# Copyright © 2012-2023 jrnl contributors
# License: https://www.gnu.org/licenses/gpl-3.0.html
import logging
import sys
import traceback
from rich.logging import RichHandler
from jrnl import controller
from jrnl.args import parse_args
from jrnl.exception import JrnlException
from jrnl.messages import Message
from jrnl.messages import MsgStyle
from jrnl.messages import MsgText
from jrnl.output import print_msg
def configure_logger(debug: bool = False) -> None:
    """Set up root logging through rich, or silence logging entirely.

    When ``debug`` is falsy, all logging is disabled. Otherwise DEBUG-level
    records are rendered by a RichHandler, and the noisiest third-party
    loggers (parsedatetime, keyring.backend) are turned down.
    """
    if debug:
        logging.basicConfig(
            level=logging.DEBUG,
            datefmt="[%X]",
            format="%(message)s",
            handlers=[RichHandler()],
        )
        # Quiet chatty third-party loggers.
        logging.getLogger("parsedatetime").setLevel(logging.INFO)
        logging.getLogger("keyring.backend").setLevel(logging.ERROR)
        logging.debug("Logging start")
    else:
        logging.disable()
def run(manual_args: list[str] | None = None) -> int:
    """Parse arguments, run the controller, and map failures to exit codes.

    Returns 0 on success (whatever controller.run returns) and 1 on any
    handled failure: a JrnlException, a keyboard interrupt, or an uncaught
    exception. This function is the single exit point of the CLI.
    """
    try:
        if manual_args is None:
            manual_args = sys.argv[1:]
        args = parse_args(manual_args)
        configure_logger(args.debug)
        logging.debug("Parsed args:\n%s", args)
        status_code = controller.run(args)
    except JrnlException as e:
        # Expected, user-presentable failures carry their own message.
        status_code = 1
        e.print()
    except KeyboardInterrupt:
        status_code = 1
        print_msg(
            Message(
                MsgText.KeyboardInterruptMsg,
                MsgStyle.ERROR_ON_NEW_LINE,
            )
        )
    except Exception as e:
        # uncaught exception
        status_code = 1
        debug = False
        try:
            # ``args`` may be unbound if parse_args itself blew up; the
            # NameError branch below covers that case.
            if args.debug: # type: ignore
                debug = True
        except NameError:
            # This should only happen when the exception
            # happened before the args were parsed
            if "--debug" in sys.argv:
                debug = True
        if debug:
            from rich.console import Console
            traceback.print_tb(sys.exc_info()[2])
            Console(stderr=True).print_exception(extra_lines=1)
        print_msg(
            Message(
                MsgText.UncaughtException,
                MsgStyle.ERROR,
                {"name": type(e).__name__, "exception": e},
            )
        )
    # This should be the only exit point
    return status_code
| 2,263
|
Python
|
.py
| 70
| 23.985714
| 64
| 0.615632
|
jrnl-org/jrnl
| 6,434
| 519
| 140
|
GPL-3.0
|
9/5/2024, 5:11:58 PM (Europe/Amsterdam)
|
14,582
|
keyring.py
|
jrnl-org_jrnl/jrnl/keyring.py
|
# Copyright © 2012-2023 jrnl contributors
# License: https://www.gnu.org/licenses/gpl-3.0.html
import keyring
from jrnl.messages import Message
from jrnl.messages import MsgStyle
from jrnl.messages import MsgText
from jrnl.output import print_msg
def get_keyring_password(journal_name: str = "default") -> str | None:
    """Look up the stored password for a journal in the system keyring.

    Returns ``None`` when the lookup fails. A missing keyring backend is
    ignored silently; any other keyring error is reported to the user.
    """
    try:
        return keyring.get_password("jrnl", journal_name)
    except keyring.errors.KeyringError as err:
        backend_missing = isinstance(err, keyring.errors.NoKeyringError)
        if not backend_missing:
            print_msg(Message(MsgText.KeyringRetrievalFailure, MsgStyle.ERROR))
        return None
def set_keyring_password(password: str, journal_name: str = "default") -> None:
    """Store a journal password in the system keyring.

    Never raises: a missing backend yields a warning message, any other
    keyring error yields an error message.
    """
    try:
        return keyring.set_password("jrnl", journal_name, password)
    except keyring.errors.KeyringError as err:
        if isinstance(err, keyring.errors.NoKeyringError):
            failure_msg = Message(MsgText.KeyringBackendNotFound, MsgStyle.WARNING)
        else:
            failure_msg = Message(MsgText.KeyringRetrievalFailure, MsgStyle.ERROR)
        print_msg(failure_msg)
| 1,045
|
Python
|
.py
| 23
| 39.478261
| 79
| 0.728346
|
jrnl-org/jrnl
| 6,434
| 519
| 140
|
GPL-3.0
|
9/5/2024, 5:11:58 PM (Europe/Amsterdam)
|
14,583
|
install.py
|
jrnl-org_jrnl/jrnl/install.py
|
# Copyright © 2012-2023 jrnl contributors
# License: https://www.gnu.org/licenses/gpl-3.0.html
import contextlib
import glob
import logging
import os
import sys
from rich.pretty import pretty_repr
from jrnl import __version__
from jrnl.config import DEFAULT_JOURNAL_KEY
from jrnl.config import get_config_path
from jrnl.config import get_default_colors
from jrnl.config import get_default_config
from jrnl.config import get_default_journal_path
from jrnl.config import load_config
from jrnl.config import save_config
from jrnl.config import verify_config_colors
from jrnl.exception import JrnlException
from jrnl.messages import Message
from jrnl.messages import MsgStyle
from jrnl.messages import MsgText
from jrnl.output import print_msg
from jrnl.path import absolute_path
from jrnl.path import expand_path
from jrnl.path import home_dir
from jrnl.prompt import yesno
from jrnl.upgrade import is_old_version
def upgrade_config(config_data: dict, alt_config_path: str | None = None) -> None:
    """Backfill missing config keys and sync the stored version number.

    Keys absent from the saved config are copied in from the defaults, which
    automatically ports existing installations when new options are added in
    later versions. If anything changed (new keys or a version bump), the
    config file is rewritten and the user is told where it lives.
    Supply alt_config_path if using an alternate config through --config-file.
    """
    default_config = get_default_config()
    missing_keys = set(default_config).difference(config_data)
    for key in missing_keys:
        config_data[key] = default_config[key]
    version_changed = config_data["version"] != __version__
    if version_changed:
        config_data["version"] = __version__
    if not (missing_keys or version_changed):
        return
    save_config(config_data, alt_config_path)
    config_path = alt_config_path or get_config_path()
    print_msg(
        Message(MsgText.ConfigUpdated, MsgStyle.NORMAL, {"config_path": config_path})
    )
def find_default_config() -> str:
    """Return the standard config path, or the legacy ~/.jrnl_config fallback."""
    primary = get_config_path()
    if os.path.exists(primary):
        return primary
    return os.path.join(home_dir(), ".jrnl_config")
def find_alt_config(alt_config: str) -> str:
    """Validate a --config-file path; raise JrnlException when it is absent."""
    if os.path.exists(alt_config):
        return alt_config
    raise JrnlException(
        Message(MsgText.AltConfigNotFound, MsgStyle.ERROR, {"config_file": alt_config})
    )
def load_or_install_jrnl(alt_config_path: str) -> dict:
    """
    If jrnl is already installed, loads and returns a default config object.
    If alternate config is specified via --config-file flag, it will be used.
    Else, perform various prompts to install jrnl.
    """
    config_path = (
        find_alt_config(alt_config_path) if alt_config_path else find_default_config()
    )
    if os.path.exists(config_path):
        logging.debug("Reading configuration from file %s", config_path)
        config = load_config(config_path)
        if config is None:
            # The file exists but could not be parsed.
            raise JrnlException(
                Message(
                    MsgText.CantParseConfigFile,
                    MsgStyle.ERROR,
                    {
                        "config_path": config_path,
                    },
                )
            )
        if is_old_version(config_path):
            # Lazy import: the upgrade machinery is only needed for old installs.
            from jrnl import upgrade
            upgrade.upgrade_jrnl(config_path)
        upgrade_config(config, alt_config_path)
        verify_config_colors(config)
    else:
        logging.debug("Configuration file not found, installing jrnl...")
        config = install()
    logging.debug('Using configuration:\n"%s"', pretty_repr(config))
    return config
def install() -> dict:
    """Interactive first-run setup.

    Prompts for the journal location, optional encryption, and color output;
    writes the resulting configuration to disk and returns it.
    """
    _initialize_autocomplete()
    # Where to create the journal?
    default_journal_path = get_default_journal_path()
    user_given_path = print_msg(
        Message(
            MsgText.InstallJournalPathQuestion,
            MsgStyle.PROMPT,
            params={
                "default_journal_path": default_journal_path,
            },
        ),
        get_input=True,
    )
    # An empty answer keeps the suggested default path.
    journal_path = absolute_path(user_given_path or default_journal_path)
    default_config = get_default_config()
    default_config["journals"][DEFAULT_JOURNAL_KEY]["journal"] = journal_path
    # If the folder doesn't exist, create it
    path = os.path.split(journal_path)[0]
    with contextlib.suppress(OSError):
        os.makedirs(path)
    # Encrypt it?
    encrypt = yesno(Message(MsgText.EncryptJournalQuestion), default=False)
    if encrypt:
        default_config["encrypt"] = True
        print_msg(Message(MsgText.JournalEncrypted, MsgStyle.NORMAL))
    # Use colors?
    use_colors = yesno(Message(MsgText.UseColorsQuestion), default=True)
    if use_colors:
        default_config["colors"] = get_default_colors()
    save_config(default_config)
    print_msg(
        Message(
            MsgText.InstallComplete,
            MsgStyle.NORMAL,
            params={"config_path": get_config_path()},
        )
    )
    return default_config
def _initialize_autocomplete() -> None:
    """Enable tab-completion of filesystem paths for the install prompts."""
    # readline is unavailable on some platforms (e.g. Windows ActivePython),
    # so only configure it when it has already been loaded.
    if not sys.modules.get("readline"):
        return
    import readline
    readline.set_completer_delims(" \t\n;")
    readline.parse_and_bind("tab: complete")
    readline.set_completer(_autocomplete_path)
def _autocomplete_path(text: str, state: int) -> str | None:
    """readline completer: return the ``state``-th filesystem match for ``text``.

    Candidates are the glob expansions of the (user-expanded) prefix, with a
    trailing slash appended to directories. The final ``None`` sentinel tells
    readline the candidates are exhausted. The previous return annotation
    (``list[str | None]``) was wrong: a readline completer returns a single
    candidate per call, not the whole list.
    """
    expansions = glob.glob(expand_path(text) + "*")
    expansions = [e + "/" if os.path.isdir(e) else e for e in expansions]
    expansions.append(None)
    return expansions[state]
| 5,849
|
Python
|
.py
| 148
| 32.472973
| 86
| 0.675785
|
jrnl-org/jrnl
| 6,434
| 519
| 140
|
GPL-3.0
|
9/5/2024, 5:11:58 PM (Europe/Amsterdam)
|
14,584
|
commands.py
|
jrnl-org_jrnl/jrnl/commands.py
|
# Copyright © 2012-2023 jrnl contributors
# License: https://www.gnu.org/licenses/gpl-3.0.html
"""
Functions in this file are standalone commands. All standalone commands are split into
two categories depending on whether they require the config to be loaded to be able to
run.
1. "preconfig" commands don't require the config at all, and can be run before the
config has been loaded.
2. "postconfig" commands require to config to have already been loaded, parsed, and
scoped before they can be run.
Also, please note that all (non-builtin) imports should be scoped to each function to
avoid any possible overhead for these standalone commands.
"""
import argparse
import logging
import platform
import sys
from jrnl.config import cmd_requires_valid_journal_name
from jrnl.exception import JrnlException
from jrnl.messages import Message
from jrnl.messages import MsgStyle
from jrnl.messages import MsgText
from jrnl.output import print_msg
def preconfig_diagnostic(_) -> None:
    """Print version and platform details useful in bug reports."""
    from jrnl import __title__
    from jrnl import __version__

    diagnostic_lines = [
        f"{__title__}: {__version__}",
        f"Python: {sys.version}",
        f"OS: {platform.system()} {platform.release()}",
    ]
    print("\n".join(diagnostic_lines))
def preconfig_version(_) -> None:
    """Print the version banner and license notice."""
    import textwrap

    from jrnl import __title__
    from jrnl import __version__

    banner = f"""
    {__title__} {__version__}
    Copyright © 2012-2023 jrnl contributors
    This is free software, and you are welcome to redistribute it under certain
    conditions; for details, see: https://www.gnu.org/licenses/gpl-3.0.html
    """
    print(textwrap.dedent(banner).strip())
def postconfig_list(args: argparse.Namespace, config: dict, **_) -> int:
    """Print the configured journals; ``--format`` selects json/yaml output."""
    from jrnl.output import list_journals

    journal_listing = list_journals(config, args.export)
    print(journal_listing)
    return 0
@cmd_requires_valid_journal_name
def postconfig_import(args: argparse.Namespace, config: dict, **_) -> int:
    """Import entries into the selected journal with the chosen importer."""
    from jrnl.journals import open_journal
    from jrnl.plugins import get_importer

    # Importing requires the journal to be opened first.
    target_journal = open_journal(args.journal_name, config)
    import_format = args.export or "jrnl"
    get_importer(import_format).import_(target_journal, args.filename)
    return 0
@cmd_requires_valid_journal_name
def postconfig_encrypt(
    args: argparse.Namespace, config: dict, original_config: dict
) -> int:
    """
    Encrypt a journal in place, or optionally to a new file
    """
    from jrnl.config import update_config
    from jrnl.install import save_config
    from jrnl.journals import open_journal
    # Open the journal
    journal = open_journal(args.journal_name, config)
    # Journal types that opt out (can_be_encrypted is False, e.g. folder
    # journals) cannot be encrypted at all.
    if hasattr(journal, "can_be_encrypted") and not journal.can_be_encrypted:
        raise JrnlException(
            Message(
                MsgText.CannotEncryptJournalType,
                MsgStyle.ERROR,
                {
                    "journal_name": args.journal_name,
                    "journal_type": journal.__class__.__name__,
                },
            )
        )
    # If journal is encrypted, create new password
    logging.debug("Clearing encryption method...")
    if journal.config["encrypt"] is True:
        logging.debug("Journal already encrypted. Re-encrypting...")
        print(f"Journal {journal.name} is already encrypted. Create a new password.")
        journal.encryption_method.clear()
    else:
        journal.config["encrypt"] = True
        journal.encryption_method = None
    # NOTE(review): write() appears to apply the new encryption settings
    # (password prompt happens downstream) — confirm in Journal.write.
    journal.write(args.filename)
    print_msg(
        Message(
            MsgText.JournalEncryptedTo,
            MsgStyle.NORMAL,
            {"path": args.filename or journal.config["journal"]},
        )
    )
    # Update the config, if we encrypted in place
    if not args.filename:
        update_config(
            original_config, {"encrypt": True}, args.journal_name, force_local=True
        )
        save_config(original_config)
    return 0
@cmd_requires_valid_journal_name
def postconfig_decrypt(
    args: argparse.Namespace, config: dict, original_config: dict
) -> int:
    """Write the selected journal out unencrypted.

    With ``--file`` the plain-text copy goes to that file; otherwise the
    journal file itself is rewritten in place and the saved configuration is
    updated to mark it unencrypted.
    """
    from jrnl.config import update_config
    from jrnl.install import save_config
    from jrnl.journals import open_journal

    target = open_journal(args.journal_name, config)
    logging.debug("Clearing encryption method...")
    target.config["encrypt"] = False
    target.encryption_method = None
    target.write(args.filename)

    destination = args.filename or target.config["journal"]
    print_msg(
        Message(MsgText.JournalDecryptedTo, MsgStyle.NORMAL, {"path": destination})
    )

    # Only persist the change when the journal was decrypted in place.
    if not args.filename:
        update_config(
            original_config, {"encrypt": False}, args.journal_name, force_local=True
        )
        save_config(original_config)
    return 0
| 4,936
|
Python
|
.py
| 130
| 31.784615
| 87
| 0.687106
|
jrnl-org/jrnl
| 6,434
| 519
| 140
|
GPL-3.0
|
9/5/2024, 5:11:58 PM (Europe/Amsterdam)
|
14,585
|
Entry.py
|
jrnl-org_jrnl/jrnl/journals/Entry.py
|
# Copyright © 2012-2023 jrnl contributors
# License: https://www.gnu.org/licenses/gpl-3.0.html
import datetime
import logging
import os
import re
from typing import TYPE_CHECKING
from jrnl.color import colorize
from jrnl.color import highlight_tags_with_background_color
from jrnl.output import wrap_with_ansi_colors
if TYPE_CHECKING:
from .Journal import Journal
class Entry:
    """A single journal entry: a date, title/body text, and a starred flag.

    Title, body and tags are parsed lazily from ``text`` on first access and
    cached in ``_title``/``_body``/``_tags``.
    """
    def __init__(
        self,
        journal: "Journal",
        date: datetime.datetime | None = None,
        text: str = "",
        starred: bool = False,
    ):
        self.journal = journal # Reference to journal mainly to access its config
        self.date = date or datetime.datetime.now()
        self.text = text
        # Lazily-parsed caches; populated by _parse_text() on first access.
        self._title = None
        self._body = None
        self._tags = None
        self.starred = starred
        self.modified = False
    @property
    def fulltext(self) -> str:
        """Title and body joined by a single space (used for searching)."""
        return self.title + " " + self.body
    def _parse_text(self):
        """Split raw text into title/body and collect tags (cached)."""
        raw_text = self.text
        lines = raw_text.splitlines()
        # A trailing "*" on the first line marks the entry as starred; the
        # marker is stripped from the text before splitting off the title.
        if lines and lines[0].strip().endswith("*"):
            self.starred = True
            raw_text = lines[0].strip("\n *") + "\n" + "\n".join(lines[1:])
        self._title, self._body = split_title(raw_text)
        if self._tags is None:
            self._tags = list(self._parse_tags())
    @property
    def title(self) -> str:
        if self._title is None:
            self._parse_text()
        return self._title
    @title.setter
    def title(self, x: str):
        self._title = x
    @property
    def body(self) -> str:
        if self._body is None:
            self._parse_text()
        return self._body
    @body.setter
    def body(self, x: str):
        self._body = x
    @property
    def tags(self) -> list[str]:
        if self._tags is None:
            self._parse_text()
        return self._tags
    @tags.setter
    def tags(self, x: list[str]):
        self._tags = x
    @staticmethod
    def tag_regex(tagsymbols: str) -> re.Pattern:
        """Compile a regex matching a tag symbol followed by word characters."""
        pattern = rf"(?<!\S)([{tagsymbols}][-+*#/\w]+)"
        return re.compile(pattern)
    def _parse_tags(self) -> set[str]:
        """Return the lower-cased set of tags found anywhere in the raw text."""
        tagsymbols = self.journal.config["tagsymbols"]
        return {
            tag.lower() for tag in re.findall(Entry.tag_regex(tagsymbols), self.text)
        }
    def __str__(self):
        """Returns string representation of the entry to be written to journal file."""
        date_str = self.date.strftime(self.journal.config["timeformat"])
        title = "[{}] {}".format(date_str, self.title.rstrip("\n "))
        if self.starred:
            title += " *"
        return "{title}{sep}{body}\n".format(
            title=title,
            sep="\n" if self.body.rstrip("\n ") else "",
            body=self.body.rstrip("\n "),
        )
    def pprint(self, short: bool = False) -> str:
        """Returns a pretty-printed version of the entry.
        If short is true, only print the title."""
        # Handle indentation
        if self.journal.config["indent_character"]:
            indent = self.journal.config["indent_character"].rstrip() + " "
        else:
            indent = ""
        date_str = colorize(
            self.date.strftime(self.journal.config["timeformat"]),
            self.journal.config["colors"]["date"],
            bold=True,
        )
        if not short and self.journal.config["linewrap"]:
            # Wrapped output: "auto" probes the terminal width, falling back
            # to 79 columns when no terminal is attached.
            columns = self.journal.config["linewrap"]
            if columns == "auto":
                try:
                    columns = os.get_terminal_size().columns
                except OSError:
                    logging.debug(
                        "Can't determine terminal size automatically 'linewrap': '%s'",
                        self.journal.config["linewrap"],
                    )
                    columns = 79
            # Color date / title and bold title
            title = wrap_with_ansi_colors(
                date_str
                + " "
                + highlight_tags_with_background_color(
                    self,
                    self.title,
                    self.journal.config["colors"]["title"],
                    is_title=True,
                ),
                columns,
            )
            body = highlight_tags_with_background_color(
                self, self.body.rstrip(" \n"), self.journal.config["colors"]["body"]
            )
            body = wrap_with_ansi_colors(body, columns - len(indent))
            if indent:
                # Without explicitly colorizing the indent character, it will lose its
                # color after a tag appears.
                body = "\n".join(
                    colorize(indent, self.journal.config["colors"]["body"]) + line
                    for line in body.splitlines()
                )
            body = colorize(body, self.journal.config["colors"]["body"])
        else:
            # Unwrapped (or short) output: same coloring, no line wrapping.
            title = (
                date_str
                + " "
                + highlight_tags_with_background_color(
                    self,
                    self.title.rstrip("\n"),
                    self.journal.config["colors"]["title"],
                    is_title=True,
                )
            )
            body = highlight_tags_with_background_color(
                self, self.body.rstrip("\n "), self.journal.config["colors"]["body"]
            )
        # Suppress bodies that are just blanks and new lines.
        has_body = len(self.body) > 20 or not all(
            char in (" ", "\n") for char in self.body
        )
        if short:
            return title
        else:
            return "{title}{sep}{body}\n".format(
                title=title, sep="\n" if has_body else "", body=body if has_body else ""
            )
    def __repr__(self):
        return "<Entry '{}' on {}>".format(
            self.title.strip(), self.date.strftime("%Y-%m-%d %H:%M")
        )
    def __hash__(self):
        # NOTE(review): the hash is derived from __repr__, i.e. from the
        # mutable title and date — entries must not be re-dated/re-titled
        # while held in a set or dict.
        return hash(self.__repr__())
    def __eq__(self, other: "Entry"):
        # Compares title, body (ignoring trailing whitespace), date and
        # starred flag; non-Entry operands compare unequal (returns False
        # rather than NotImplemented).
        if (
            not isinstance(other, Entry)
            or self.title.strip() != other.title.strip()
            or self.body.rstrip() != other.body.rstrip()
            or self.date != other.date
            or self.starred != other.starred
        ):
            return False
        return True
    def __ne__(self, other: "Entry"):
        return not self.__eq__(other)
# Based on Segtok by Florian Leitner
# https://github.com/fnl/segtok
SENTENCE_SPLITTER = re.compile(
    r"""
(
[.!?\u2026\u203C\u203D\u2047\u2048\u2049\u22EF\uFE52\uFE57] # Sequence starting with a sentence terminal,
[\'\u2019\"\u201D]? # an optional right quote,
[\]\)]* # optional closing bracket
\s+ # AND a sequence of required spaces.
)
|[\uFF01\uFF0E\uFF1F\uFF61\u3002] # CJK full/half width terminals usually do not have following spaces.
""", # noqa: E501
    re.VERBOSE,
)
SENTENCE_SPLITTER_ONLY_NEWLINE = re.compile("\n")
def split_title(text: str) -> tuple[str, str]:
    """Split the leading sentence (the title) off from the rest of *text*.

    A newline takes precedence as the separator; otherwise the first
    sentence terminal found by SENTENCE_SPLITTER ends the title. When no
    separator exists, the whole text is the title and the body is empty.
    """
    separator = SENTENCE_SPLITTER_ONLY_NEWLINE.search(text.lstrip())
    if separator is None:
        separator = SENTENCE_SPLITTER.search(text)
        if separator is None:
            return text, ""
    cut = separator.end()
    return text[:cut].strip(), text[cut:].strip()
| 7,338
|
Python
|
.py
| 197
| 27.040609
| 109
| 0.540312
|
jrnl-org/jrnl
| 6,434
| 519
| 140
|
GPL-3.0
|
9/5/2024, 5:11:58 PM (Europe/Amsterdam)
|
14,586
|
FolderJournal.py
|
jrnl-org_jrnl/jrnl/journals/FolderJournal.py
|
# Copyright © 2012-2023 jrnl contributors
# License: https://www.gnu.org/licenses/gpl-3.0.html
import codecs
import os
import pathlib
from typing import TYPE_CHECKING
from jrnl import time
from .Journal import Journal
if TYPE_CHECKING:
from jrnl.journals import Entry
# glob search patterns for folder/file structure
DIGIT_PATTERN = "[0123456789]"
YEAR_PATTERN = DIGIT_PATTERN * 4  # four digits, e.g. "2023"
MONTH_PATTERN = "[01]" + DIGIT_PATTERN  # "00"-"19"; range-checked by _get_month_folders
DAY_PATTERN = "[0123]" + DIGIT_PATTERN + ".txt"  # "00.txt"-"39.txt" day files
class Folder(Journal):
"""A Journal handling multiple files in a folder"""
    def __init__(self, name: str = "default", **kwargs):
        # Per-file (folder) storage cannot be encrypted.
        self.entries = []
        # Dates whose day-files must be rewritten (or pruned) on the next write().
        self._diff_entry_dates = []
        self.can_be_encrypted = False
        super().__init__(name, **kwargs)
def open(self) -> "Folder":
filenames = []
self.entries = []
if os.path.exists(self.config["journal"]):
filenames = Folder._get_files(self.config["journal"])
for filename in filenames:
with codecs.open(filename, "r", "utf-8") as f:
journal = f.read()
self.entries.extend(self._parse(journal))
self.sort()
return self
def write(self) -> None:
"""Writes only the entries that have been modified into proper files."""
# Create a list of dates of modified entries. Start with diff_entry_dates
modified_dates = self._diff_entry_dates
seen_dates = set(self._diff_entry_dates)
for e in self.entries:
if e.modified:
if e.date not in modified_dates:
modified_dates.append(e.date)
if e.date not in seen_dates:
seen_dates.add(e.date)
# For every date that had a modified entry, write to a file
for d in modified_dates:
write_entries = []
filename = os.path.join(
self.config["journal"],
d.strftime("%Y"),
d.strftime("%m"),
d.strftime("%d") + ".txt",
)
dirname = os.path.dirname(filename)
# create directory if it doesn't exist
if not os.path.exists(dirname):
os.makedirs(dirname)
for e in self.entries:
if (
e.date.year == d.year
and e.date.month == d.month
and e.date.day == d.day
):
write_entries.append(e)
journal = "\n".join([e.__str__() for e in write_entries])
with codecs.open(filename, "w", "utf-8") as journal_file:
journal_file.write(journal)
# look for and delete empty files
filenames = []
filenames = Folder._get_files(self.config["journal"])
for filename in filenames:
if os.stat(filename).st_size <= 0:
os.remove(filename)
def delete_entries(self, entries_to_delete: list["Entry"]) -> None:
"""Deletes specific entries from a journal."""
for entry in entries_to_delete:
self.entries.remove(entry)
self._diff_entry_dates.append(entry.date)
self.deleted_entry_count += 1
def change_date_entries(self, date: str, entries_to_change: list["Entry"]) -> None:
"""Changes entry dates to given date."""
date = time.parse(date)
self._diff_entry_dates.append(date)
for entry in entries_to_change:
self._diff_entry_dates.append(entry.date)
entry.date = date
entry.modified = True
def parse_editable_str(self, edited: str) -> None:
"""Parses the output of self.editable_str and updates its entries."""
mod_entries = self._parse(edited)
diff_entries = set(self.entries) - set(mod_entries)
for e in diff_entries:
self._diff_entry_dates.append(e.date)
# Match those entries that can be found in self.entries and set
# these to modified, so we can get a count of how many entries got
# modified and how many got deleted later.
for entry in mod_entries:
entry.modified = not any(entry == old_entry for old_entry in self.entries)
self.increment_change_counts_by_edit(mod_entries)
self.entries = mod_entries
@staticmethod
def _get_files(journal_path: str) -> list[str]:
"""Searches through sub directories starting with journal_path and find all text
files that look like entries"""
for year_folder in Folder._get_year_folders(pathlib.Path(journal_path)):
for month_folder in Folder._get_month_folders(year_folder):
yield from Folder._get_day_files(month_folder)
@staticmethod
def _get_year_folders(path: pathlib.Path) -> list[pathlib.Path]:
for child in path.glob(YEAR_PATTERN):
if child.is_dir():
yield child
return
@staticmethod
def _get_month_folders(path: pathlib.Path) -> list[pathlib.Path]:
for child in path.glob(MONTH_PATTERN):
if int(child.name) > 0 and int(child.name) <= 12 and path.is_dir():
yield child
return
@staticmethod
def _get_day_files(path: pathlib.Path) -> list[str]:
for child in path.glob(DAY_PATTERN):
if (
int(child.stem) > 0
and int(child.stem) <= 31
and time.is_valid_date(
year=int(path.parent.name),
month=int(path.name),
day=int(child.stem),
)
and child.is_file()
):
yield str(child)
| 5,719
|
Python
|
.py
| 133
| 32.12782
| 88
| 0.579572
|
jrnl-org/jrnl
| 6,434
| 519
| 140
|
GPL-3.0
|
9/5/2024, 5:11:58 PM (Europe/Amsterdam)
|
14,587
|
Journal.py
|
jrnl-org_jrnl/jrnl/journals/Journal.py
|
# Copyright © 2012-2023 jrnl contributors
# License: https://www.gnu.org/licenses/gpl-3.0.html
import datetime
import logging
import os
import re
from jrnl import time
from jrnl.config import validate_journal_name
from jrnl.encryption import determine_encryption_method
from jrnl.messages import Message
from jrnl.messages import MsgStyle
from jrnl.messages import MsgText
from jrnl.output import print_msg
from jrnl.path import expand_path
from jrnl.prompt import yesno
from .Entry import Entry
class Tag:
    """A journal tag together with its occurrence count."""

    def __init__(self, name, count=0):
        self.name = name
        self.count = count

    def __str__(self):
        # Printing a tag shows only its name, never the count.
        return self.name

    def __repr__(self):
        return "<Tag '{}'>".format(self.name)
class Journal:
    """A collection of Entry objects parsed from, and saved to, a single file."""

    def __init__(self, name="default", **kwargs):
        # Defaults; any key may be overridden through **kwargs.
        self.config = {
            "journal": "journal.txt",
            "encrypt": False,
            "default_hour": 9,
            "default_minute": 0,
            "timeformat": "%Y-%m-%d %H:%M",
            "tagsymbols": "@",
            "highlight": True,
            "linewrap": 80,
            "indent_character": "|",
        }
        self.config.update(kwargs)
        # Set up date parser
        self.search_tags = None  # Store tags we're highlighting
        self.name = name
        self.entries = []
        # Lazily initialized on first _encrypt/_decrypt call.
        self.encryption_method = None
        # Track changes to journal in session. Modified is tracked in Entry
        self.added_entry_count = 0
        self.deleted_entry_count = 0

    def __len__(self):
        """Returns the number of entries"""
        return len(self.entries)

    def __iter__(self):
        """Iterates over the journal's entries."""
        return (entry for entry in self.entries)

    @classmethod
    def from_journal(cls, other: "Journal") -> "Journal":
        """Creates a new journal by copying configuration and entries from
        another journal object"""
        new_journal = cls(other.name, **other.config)
        new_journal.entries = other.entries
        logging.debug(
            "Imported %d entries from %s to %s",
            len(new_journal),
            other.__class__.__name__,
            cls.__name__,
        )
        return new_journal

    def import_(self, other_journal_txt: str) -> None:
        """Parses another journal's text and merges its entries into this one,
        marking them modified so they get written out."""
        imported_entries = self._parse(other_journal_txt)
        for entry in imported_entries:
            entry.modified = True
        # The frozenset union de-duplicates entries present in both journals.
        self.entries = list(frozenset(self.entries) | frozenset(imported_entries))
        self.sort()

    def _get_encryption_method(self) -> None:
        """Instantiates the encryption backend configured for this journal."""
        encryption_method = determine_encryption_method(self.config["encrypt"])
        self.encryption_method = encryption_method(self.name, self.config)

    def _decrypt(self, text: bytes) -> str:
        """Decrypts raw file bytes, initializing the backend on first use."""
        if self.encryption_method is None:
            self._get_encryption_method()
        return self.encryption_method.decrypt(text)

    def _encrypt(self, text: str) -> bytes:
        """Encrypts journal text, initializing the backend on first use."""
        if self.encryption_method is None:
            self._get_encryption_method()
        return self.encryption_method.encrypt(text)

    def open(self, filename: str | None = None) -> "Journal":
        """Opens the journal file and parses it into a list of Entries
        Entries have the form (date, title, body)."""
        filename = filename or self.config["journal"]
        dirname = os.path.dirname(filename)
        if not os.path.exists(filename):
            # First use: create the directory and an empty journal file,
            # informing the user of each step.
            if not os.path.isdir(dirname):
                os.makedirs(dirname)
                print_msg(
                    Message(
                        MsgText.DirectoryCreated,
                        MsgStyle.NORMAL,
                        {"directory_name": dirname},
                    )
                )
            self.create_file(filename)
            print_msg(
                Message(
                    MsgText.JournalCreated,
                    MsgStyle.NORMAL,
                    {
                        "journal_name": self.name,
                        "filename": filename,
                    },
                )
            )
            self.write()
        text = self._load(filename)
        text = self._decrypt(text)
        self.entries = self._parse(text)
        self.sort()
        logging.debug("opened %s with %d entries", self.__class__.__name__, len(self))
        return self

    def write(self, filename: str | None = None) -> None:
        """Dumps the journal into the config file, overwriting it"""
        filename = filename or self.config["journal"]
        text = self._to_text()
        text = self._encrypt(text)
        self._store(filename, text)

    def validate_parsing(self) -> bool:
        """Confirms that the jrnl is still parsed correctly after conversion to text."""
        new_entries = self._parse(self._to_text())
        return all(entry == new_entries[i] for i, entry in enumerate(self.entries))

    @staticmethod
    def create_file(filename: str) -> None:
        """Creates an empty file at the given path."""
        with open(filename, "w"):
            pass

    def _to_text(self) -> str:
        """Serializes all entries into a single newline-joined string."""
        return "\n".join([str(e) for e in self.entries])

    def _load(self, filename: str) -> bytes:
        """Reads the raw (possibly encrypted) journal file."""
        with open(filename, "rb") as f:
            return f.read()

    def _store(self, filename: str, text: bytes) -> None:
        """Writes raw (possibly encrypted) bytes to the journal file."""
        with open(filename, "wb") as f:
            f.write(text)

    def _parse(self, journal_txt: str) -> list[Entry]:
        """Parses a journal that's stored in a string and returns a list of entries"""
        # Return empty array if the journal is blank
        if not journal_txt:
            return []
        # Initialise our current entry
        entries = []
        # Matches a "[<timestamp>] " blob at the start of a line.
        date_blob_re = re.compile("(?:^|\n)\\[([^\\]]+)\\] ")
        last_entry_pos = 0
        for match in date_blob_re.finditer(journal_txt):
            date_blob = match.groups()[0]
            try:
                new_date = datetime.datetime.strptime(
                    date_blob, self.config["timeformat"]
                )
            except ValueError:
                # Passing in a date that had brackets around it
                new_date = time.parse(date_blob, bracketed=True)
            if new_date:
                # Everything since the previous timestamp is the body of the
                # previous entry.
                if entries:
                    entries[-1].text = journal_txt[last_entry_pos : match.start()]
                last_entry_pos = match.end()
                entries.append(Entry(self, date=new_date))
        # If no entries were found, treat all the existing text as an entry made now
        if not entries:
            entries.append(Entry(self, date=time.parse("now")))
        # Fill in the text of the last entry
        entries[-1].text = journal_txt[last_entry_pos:]
        for entry in entries:
            entry._parse_text()
        return entries

    def pprint(self, short: bool = False) -> str:
        """Prettyprints the journal's entries"""
        return "\n".join([e.pprint(short=short) for e in self.entries])

    def __str__(self):
        return self.pprint()

    def __repr__(self):
        return f"<Journal with {len(self.entries)} entries>"

    def sort(self) -> None:
        """Sorts the Journal's entries by date"""
        self.entries = sorted(self.entries, key=lambda entry: entry.date)

    def limit(self, n: int | None = None) -> None:
        """Removes all but the last n entries"""
        if n:
            self.entries = self.entries[-n:]

    @property
    def tags(self) -> list[Tag]:
        """Returns a list of Tag objects (tag plus occurrence count) for all
        tags present in the journal, sorted by count then name."""
        # Astute reader: should the following line leave you as puzzled as me the first
        # time I came across this construction, worry not and embrace the ensuing moment
        # of enlightment.
        tags = [tag for entry in self.entries for tag in set(entry.tags)]
        # To be read: [for entry in journal.entries: for tag in set(entry.tags): tag]
        tag_counts = {(tags.count(tag), tag) for tag in tags}
        return [Tag(tag, count=count) for count, tag in sorted(tag_counts)]

    def filter(
        self,
        tags=[],
        month=None,
        day=None,
        year=None,
        start_date=None,
        end_date=None,
        starred=False,
        tagged=False,
        exclude_starred=False,
        exclude_tagged=False,
        strict=False,
        contains=[],
        exclude=[],
    ):
        """Removes all entries from the journal that don't match the filter.

        tags is a list of tags, each being a string that starts with one of the
        tag symbols defined in the config, e.g. ["@John", "#WorldDomination"].
        start_date and end_date define a timespan by which to filter.
        starred limits journal to starred entries
        If strict is True, all tags must be present in an entry; if False, the
        entry is kept if any tag is present, unless they appear in exclude.
        exclude is a list of the tags which should not appear in the results.
        """
        # NOTE(review): the mutable defaults ([]) are never mutated here, so
        # they are currently harmless.
        self.search_tags = {tag.lower() for tag in tags}
        excluded_tags = {tag.lower() for tag in exclude}
        end_date = time.parse(end_date, inclusive=True)
        start_date = time.parse(start_date)
        # If strict mode is on, all tags have to be present in entry
        has_tags = (
            self.search_tags.issubset if strict else self.search_tags.intersection
        )

        def excluded(tags):
            # True if any of the entry's tags is in the excluded set.
            return 0 < len([tag for tag in tags if tag in excluded_tags])

        if contains:
            contains_lower = [substring.casefold() for substring in contains]
        # Create datetime object for comparison below
        # this approach allows various formats
        if month or day or year:
            compare_d = time.parse(f"{month or 1}.{day or 1}.{year or 1}")
        # compare_d / contains_lower are only referenced below when the
        # corresponding filter is set, so they are always defined when used.
        result = [
            entry
            for entry in self.entries
            if (not tags or has_tags(entry.tags))
            and (not (starred or exclude_starred) or entry.starred == starred)
            and (not (tagged or exclude_tagged) or bool(entry.tags) == tagged)
            and (not month or entry.date.month == compare_d.month)
            and (not day or entry.date.day == compare_d.day)
            and (not year or entry.date.year == compare_d.year)
            and (not start_date or entry.date >= start_date)
            and (not end_date or entry.date <= end_date)
            and (not exclude or not excluded(entry.tags))
            and (
                not contains
                or (
                    strict
                    and all(
                        substring in entry.title.casefold()
                        or substring in entry.body.casefold()
                        for substring in contains_lower
                    )
                )
                or (
                    not strict
                    and any(
                        substring in entry.title.casefold()
                        or substring in entry.body.casefold()
                        for substring in contains_lower
                    )
                )
            )
        ]
        self.entries = result

    def delete_entries(self, entries_to_delete: list[Entry]) -> None:
        """Deletes specific entries from a journal."""
        for entry in entries_to_delete:
            self.entries.remove(entry)
            self.deleted_entry_count += 1

    def change_date_entries(
        self, date: datetime.datetime, entries_to_change: list[Entry]
    ) -> None:
        """Changes entry dates to given date."""
        # NOTE(review): `date` is re-parsed here, suggesting callers actually
        # pass an unparsed string despite the annotation — confirm call sites.
        date = time.parse(date)
        for entry in entries_to_change:
            entry.date = date
            entry.modified = True

    def prompt_action_entries(self, msg: MsgText) -> list[Entry]:
        """Prompts for action for each entry in a journal, using given message.
        Returns the entries the user wishes to apply the action on."""
        to_act = []

        def ask_action(entry):
            return yesno(
                Message(
                    msg,
                    params={"entry_title": entry.pprint(short=True)},
                ),
                default=False,
            )

        for entry in self.entries:
            if ask_action(entry):
                to_act.append(entry)
        return to_act

    def new_entry(self, raw: str, date=None, sort: bool = True) -> Entry:
        """Constructs a new entry from some raw text input.
        If a date is given, it will parse and use this, otherwise scan for a date in
        the input first.
        """
        raw = raw.replace("\\n ", "\n").replace("\\n", "\n")
        # Split raw text into title and body
        sep = re.search(r"\n|[?!.]+ +\n?", raw)
        first_line = raw[: sep.end()].strip() if sep else raw
        starred = False
        if not date:
            # A leading "<date>: " prefix supplies the entry's date.
            colon_pos = first_line.find(": ")
            if colon_pos > 0:
                date = time.parse(
                    raw[:colon_pos],
                    default_hour=self.config["default_hour"],
                    default_minute=self.config["default_minute"],
                )
                if date:  # Parsed successfully, strip that from the raw text
                    starred = raw[:colon_pos].strip().endswith("*")
                    raw = raw[colon_pos + 1 :].strip()
        starred = (
            starred
            or first_line.startswith("*")
            or first_line.endswith("*")
            or raw.startswith("*")
        )
        if not date:  # Still nothing? Meh, just live in the moment.
            date = time.parse("now")
        entry = Entry(self, date, raw, starred=starred)
        entry.modified = True
        self.entries.append(entry)
        if sort:
            self.sort()
        return entry

    def editable_str(self) -> str:
        """Turns the journal into a string of entries that can be edited
        manually and later be parsed with self.parse_editable_str."""
        return "\n".join([str(e) for e in self.entries])

    def parse_editable_str(self, edited: str) -> None:
        """Parses the output of self.editable_str and updates its entries."""
        mod_entries = self._parse(edited)
        # Match those entries that can be found in self.entries and set
        # these to modified, so we can get a count of how many entries got
        # modified and how many got deleted later.
        for entry in mod_entries:
            entry.modified = not any(entry == old_entry for old_entry in self.entries)
        self.increment_change_counts_by_edit(mod_entries)
        self.entries = mod_entries

    def increment_change_counts_by_edit(self, mod_entries: list[Entry]) -> None:
        """Updates the added/deleted counters after an edit session."""
        if len(mod_entries) > len(self.entries):
            self.added_entry_count += len(mod_entries) - len(self.entries)
        else:
            self.deleted_entry_count += len(self.entries) - len(mod_entries)

    def get_change_counts(self) -> dict:
        """Returns the number of entries added, deleted and modified this session."""
        return {
            "added": self.added_entry_count,
            "deleted": self.deleted_entry_count,
            "modified": len([e for e in self.entries if e.modified]),
        }
class LegacyJournal(Journal):
    """Legacy class to support opening journals formatted with the jrnl 1.x
    standard. Main difference here is that in 1.x, timestamps were not cuddled
    by square brackets. You'll not be able to save these journals anymore."""

    def _parse(self, journal_txt: str) -> list[Entry]:
        """Parses a journal that's stored in a string and returns a list of entries"""
        # Entries start with a line that looks like 'date title' - let's figure out how
        # long the date will be by constructing one
        date_length = len(datetime.datetime.today().strftime(self.config["timeformat"]))
        # Initialise our current entry
        entries = []
        current_entry = None
        # Body lines that look like jrnl-2 "[date]" headers get escaped so
        # they can't be mistaken for entry starts later.
        new_date_format_regex = re.compile(r"(^\[[^\]]+\].*?$)")
        for line in journal_txt.splitlines():
            line = line.rstrip()
            try:
                # try to parse line as date => new entry begins
                new_date = datetime.datetime.strptime(
                    line[:date_length], self.config["timeformat"]
                )
                # parsing successful => save old entry and create new one
                if new_date and current_entry:
                    entries.append(current_entry)
                # A trailing "*" on the header line marks a starred entry.
                if line.endswith("*"):
                    starred = True
                    line = line[:-1]
                else:
                    starred = False
                current_entry = Entry(
                    self, date=new_date, text=line[date_length + 1 :], starred=starred
                )
            except ValueError:
                # Happens when we can't parse the start of the line as an date.
                # In this case, just append line to our body (after some
                # escaping for the new format).
                line = new_date_format_regex.sub(r" \1", line)
                if current_entry:
                    current_entry.text += line + "\n"
        # Append last entry
        if current_entry:
            entries.append(current_entry)
        for entry in entries:
            entry._parse_text()
        return entries
def open_journal(journal_name: str, config: dict, legacy: bool = False) -> Journal:
    """
    Creates a normal, encrypted or DayOne journal based on the passed config.
    If legacy is True, it will open Journals with legacy classes build for
    backwards compatibility with jrnl 1.x
    """
    logging.debug(f"open_journal '{journal_name}'")
    validate_journal_name(journal_name, config)
    # Work on a copy so the caller's config dict is left untouched.
    config = config.copy()
    config["journal"] = expand_path(config["journal"])
    if os.path.isdir(config["journal"]):
        # Directory-based journals (DayOne/Folder) can't be encrypted; warn
        # instead of failing if the config asks for it.
        if config["encrypt"]:
            print_msg(
                Message(
                    MsgText.ConfigEncryptedForUnencryptableJournalType,
                    MsgStyle.WARNING,
                    {
                        "journal_name": journal_name,
                    },
                )
            )
        # A ".dayone" suffix or an "entries" subfolder marks a DayOne journal.
        if config["journal"].strip("/").endswith(".dayone") or "entries" in os.listdir(
            config["journal"]
        ):
            from jrnl.journals import DayOne

            return DayOne(**config).open()
        else:
            from jrnl.journals import Folder

            return Folder(journal_name, **config).open()
    if not config["encrypt"]:
        if legacy:
            return LegacyJournal(journal_name, **config).open()
        # A trailing path separator also selects a Folder journal.
        if config["journal"].endswith(os.sep):
            from jrnl.journals import Folder

            return Folder(journal_name, **config).open()
        return Journal(journal_name, **config).open()
    if legacy:
        # jrnl 1.x used its own encryption scheme.
        config["encrypt"] = "jrnlv1"
        return LegacyJournal(journal_name, **config).open()
    return Journal(journal_name, **config).open()
| 18,836
|
Python
|
.py
| 438
| 31.860731
| 88
| 0.572224
|
jrnl-org/jrnl
| 6,434
| 519
| 140
|
GPL-3.0
|
9/5/2024, 5:11:58 PM (Europe/Amsterdam)
|
14,588
|
__init__.py
|
jrnl-org_jrnl/jrnl/journals/__init__.py
|
from .DayOneJournal import DayOne
from .Entry import Entry
from .FolderJournal import Folder
from .Journal import Journal
from .Journal import open_journal
| 156
|
Python
|
.py
| 5
| 30.2
| 33
| 0.860927
|
jrnl-org/jrnl
| 6,434
| 519
| 140
|
GPL-3.0
|
9/5/2024, 5:11:58 PM (Europe/Amsterdam)
|
14,589
|
DayOneJournal.py
|
jrnl-org_jrnl/jrnl/journals/DayOneJournal.py
|
# Copyright © 2012-2023 jrnl contributors
# License: https://www.gnu.org/licenses/gpl-3.0.html
import contextlib
import datetime
import fnmatch
import os
import platform
import plistlib
import re
import socket
import time
import uuid
import zoneinfo
from pathlib import Path
from xml.parsers.expat import ExpatError
import tzlocal
from jrnl import __title__
from jrnl import __version__
from .Entry import Entry
from .Journal import Journal
class DayOne(Journal):
    """A special Journal handling DayOne files"""

    # InvalidFileException was added to plistlib in Python3.4
    PLIST_EXCEPTIONS = (
        (ExpatError, plistlib.InvalidFileException)
        if hasattr(plistlib, "InvalidFileException")
        else ExpatError
    )

    def __init__(self, **kwargs):
        self.entries = []
        # Entries removed during an edit session; their plist files are
        # deleted on the next write().
        self._deleted_entries = []
        self.can_be_encrypted = False
        super().__init__(**kwargs)

    def open(self) -> "DayOne":
        """Loads all *.doentry plist files below the journal folder."""
        filenames = []
        for root, dirnames, f in os.walk(self.config["journal"]):
            for filename in fnmatch.filter(f, "*.doentry"):
                filenames.append(os.path.join(root, filename))
        self.entries = []
        for filename in filenames:
            with open(filename, "rb") as plist_entry:
                try:
                    dict_entry = plistlib.load(plist_entry, fmt=plistlib.FMT_XML)
                except self.PLIST_EXCEPTIONS:
                    # Silently skip files that aren't valid XML plists.
                    pass
                else:
                    try:
                        timezone = zoneinfo.ZoneInfo(dict_entry["Time Zone"])
                    except KeyError:
                        timezone_name = str(tzlocal.get_localzone())
                        timezone = zoneinfo.ZoneInfo(timezone_name)
                    date = dict_entry["Creation Date"]
                    # convert the date to UTC rather than keep messing with
                    # timezones
                    if timezone.key != "UTC":
                        date = date.replace(fold=1) + timezone.utcoffset(date)
                    entry = Entry(
                        self,
                        date,
                        text=dict_entry["Entry Text"],
                        starred=dict_entry["Starred"],
                    )
                    entry.uuid = dict_entry["UUID"]
                    entry._tags = [
                        self.config["tagsymbols"][0] + tag.lower()
                        for tag in dict_entry.get("Tags", [])
                    ]
                    if entry._tags:
                        entry._tags.sort()
                    # Extended DayOne attributes: just ignore them if the
                    # keys don't exist.
                    with contextlib.suppress(KeyError):
                        entry.creator_device_agent = dict_entry["Creator"][
                            "Device Agent"
                        ]
                        entry.creator_host_name = dict_entry["Creator"]["Host Name"]
                        entry.creator_os_agent = dict_entry["Creator"]["OS Agent"]
                        entry.creator_software_agent = dict_entry["Creator"][
                            "Software Agent"
                        ]
                        entry.location = dict_entry["Location"]
                        entry.weather = dict_entry["Weather"]
                    entry.creator_generation_date = dict_entry.get("Creator", {}).get(
                        "Generation Date", date
                    )
                    self.entries.append(entry)
        self.sort()
        return self

    def write(self) -> None:
        """Writes only the entries that have been modified into plist files."""
        for entry in self.entries:
            if entry.modified:
                utc_time = datetime.datetime.utcfromtimestamp(
                    time.mktime(entry.date.timetuple())
                )
                # Fill in the DayOne metadata a brand-new entry lacks.
                if not hasattr(entry, "uuid"):
                    entry.uuid = uuid.uuid1().hex
                if not hasattr(entry, "creator_device_agent"):
                    entry.creator_device_agent = ""  # iPhone/iPhone5,3
                if not hasattr(entry, "creator_generation_date"):
                    entry.creator_generation_date = utc_time
                if not hasattr(entry, "creator_host_name"):
                    entry.creator_host_name = socket.gethostname()
                if not hasattr(entry, "creator_os_agent"):
                    entry.creator_os_agent = "{}/{}".format(
                        platform.system(), platform.release()
                    )
                if not hasattr(entry, "creator_software_agent"):
                    entry.creator_software_agent = "{}/{}".format(
                        __title__, __version__
                    )
                fn = (
                    Path(self.config["journal"])
                    / "entries"
                    / (entry.uuid.upper() + ".doentry")
                )
                entry_plist = {
                    "Creation Date": utc_time,
                    "Starred": entry.starred if hasattr(entry, "starred") else False,
                    "Entry Text": entry.title + "\n" + entry.body,
                    "Time Zone": str(tzlocal.get_localzone()),
                    "UUID": entry.uuid.upper(),
                    "Tags": [
                        tag.strip(self.config["tagsymbols"]).replace("_", " ")
                        for tag in entry.tags
                    ],
                    "Creator": {
                        "Device Agent": entry.creator_device_agent,
                        "Generation Date": entry.creator_generation_date,
                        "Host Name": entry.creator_host_name,
                        "OS Agent": entry.creator_os_agent,
                        "Software Agent": entry.creator_software_agent,
                    },
                }
                if hasattr(entry, "location"):
                    entry_plist["Location"] = entry.location
                if hasattr(entry, "weather"):
                    entry_plist["Weather"] = entry.weather
                # plistlib expects a binary object
                with fn.open(mode="wb") as f:
                    plistlib.dump(entry_plist, f, fmt=plistlib.FMT_XML, sort_keys=False)
        for entry in self._deleted_entries:
            filename = os.path.join(
                self.config["journal"], "entries", entry.uuid + ".doentry"
            )
            os.remove(filename)

    def editable_str(self) -> str:
        """Turns the journal into a string of entries that can be edited
        manually and later be parsed with self.parse_editable_str."""
        return "\n".join([f"{str(e)}\n# {e.uuid}\n" for e in self.entries])

    def _update_old_entry(self, entry: Entry, new_entry: Entry) -> None:
        """Copies changed fields from new_entry onto entry, marking it modified."""
        for attr in ("title", "body", "date", "tags"):
            old_attr = getattr(entry, attr)
            new_attr = getattr(new_entry, attr)
            if old_attr != new_attr:
                entry.modified = True
                setattr(entry, attr, new_attr)

    def _get_and_remove_uuid_from_entry(self, entry: Entry) -> Entry:
        """Extracts the '# <uuid>' marker line from an edited entry's body."""
        uuid_regex = "^ *?# ([a-zA-Z0-9]+) *?$"
        m = re.search(uuid_regex, entry.body, re.MULTILINE)
        entry.uuid = m.group(1) if m else None
        # remove the uuid from the body
        entry.body = re.sub(uuid_regex, "", entry.body, flags=re.MULTILINE, count=1)
        entry.body = entry.body.rstrip()
        return entry

    def parse_editable_str(self, edited: str) -> None:
        """Parses the output of self.editable_str and updates its entries."""
        # Method: create a new list of entries from the edited text, then match
        # UUIDs of the new entries against self.entries, updating the entries
        # if the edited entries differ, and deleting entries from self.entries
        # if they don't show up in the edited entries anymore.
        entries_from_editor = self._parse(edited)
        for entry in entries_from_editor:
            entry = self._get_and_remove_uuid_from_entry(entry)
            if entry._tags:
                entry._tags.sort()
        # Remove deleted entries
        edited_uuids = [e.uuid for e in entries_from_editor]
        self._deleted_entries = [e for e in self.entries if e.uuid not in edited_uuids]
        self.entries[:] = [e for e in self.entries if e.uuid in edited_uuids]
        for entry in entries_from_editor:
            for old_entry in self.entries:
                if entry.uuid == old_entry.uuid:
                    if old_entry._tags:
                        # Keep tags that were stripped from the edited body.
                        tags_not_in_body = [
                            tag for tag in old_entry._tags if (tag not in entry._body)
                        ]
                        if tags_not_in_body:
                            # BUG FIX: list.sort() returns None, so the old
                            # `extend(tags_not_in_body.sort())` raised
                            # TypeError. Sort in place, then extend.
                            tags_not_in_body.sort()
                            entry._tags.extend(tags_not_in_body)
                    self._update_old_entry(old_entry, entry)
                    break
| 8,969
|
Python
|
.py
| 191
| 31.570681
| 88
| 0.517143
|
jrnl-org/jrnl
| 6,434
| 519
| 140
|
GPL-3.0
|
9/5/2024, 5:11:58 PM (Europe/Amsterdam)
|
14,590
|
MsgStyle.py
|
jrnl-org_jrnl/jrnl/messages/MsgStyle.py
|
# Copyright © 2012-2023 jrnl contributors
# License: https://www.gnu.org/licenses/gpl-3.0.html
from enum import Enum
from typing import Callable
from typing import NamedTuple
from rich import box
from rich.panel import Panel
from jrnl.messages.MsgText import MsgText
class MsgStyle(Enum):
    """How a Message is rendered: decoration, color and layout flags."""

    class _Color(NamedTuple):
        """
        String representing a standard color to display
        see: https://rich.readthedocs.io/en/stable/appendix/colors.html
        """

        color: str

    class _Decoration(Enum):
        # Renders the text unchanged.
        NONE = {
            "callback": lambda x, **_: x,
            "args": {},
        }
        # Wraps the text in a rich Panel (heavy box, left-aligned title).
        BOX = {
            "callback": Panel,
            "args": {
                "expand": False,
                "padding": (0, 2),
                "title_align": "left",
                "box": box.HEAVY,
            },
        }

        @property
        def callback(self) -> Callable:
            # The callable applied to the text (identity or Panel).
            return self.value["callback"]

        @property
        def args(self) -> dict:
            # Keyword arguments passed to the callback.
            return self.value["args"]

    PROMPT = {
        "decoration": _Decoration.NONE,
        "color": _Color("white"),
        "append_space": True,
    }
    TITLE = {
        "decoration": _Decoration.BOX,
        "color": _Color("cyan"),
    }
    NORMAL = {
        "decoration": _Decoration.BOX,
        "color": _Color("white"),
    }
    WARNING = {
        "decoration": _Decoration.BOX,
        "color": _Color("yellow"),
    }
    ERROR = {
        "decoration": _Decoration.BOX,
        "color": _Color("red"),
        "box_title": str(MsgText.Error),
    }
    ERROR_ON_NEW_LINE = {
        "decoration": _Decoration.BOX,
        "color": _Color("red"),
        "prepend_newline": True,
        "box_title": str(MsgText.Error),
    }

    @property
    def decoration(self) -> _Decoration:
        return self.value["decoration"]

    @property
    def color(self) -> str:
        # Unwraps the _Color NamedTuple to its raw color string.
        return self.value["color"].color

    @property
    def prepend_newline(self) -> bool:
        # Whether a newline is printed before the message.
        return self.value.get("prepend_newline", False)

    @property
    def append_space(self) -> bool:
        # Whether a space is appended after the message (used for prompts).
        return self.value.get("append_space", False)

    @property
    def box_title(self) -> MsgText:
        # Title shown on the box decoration; None for most styles.
        return self.value.get("box_title")
| 2,253
|
Python
|
.py
| 78
| 21.076923
| 71
| 0.55787
|
jrnl-org/jrnl
| 6,434
| 519
| 140
|
GPL-3.0
|
9/5/2024, 5:11:58 PM (Europe/Amsterdam)
|
14,591
|
MsgText.py
|
jrnl-org_jrnl/jrnl/messages/MsgText.py
|
# Copyright © 2012-2023 jrnl contributors
# License: https://www.gnu.org/licenses/gpl-3.0.html
from enum import Enum
class MsgText(Enum):
    """All user-facing message templates; values are str.format templates."""

    def __str__(self) -> str:
        return self.value

    # -- Welcome --- #
    WelcomeToJrnl = """
    Welcome to jrnl {version}!
    It looks like you've been using an older version of jrnl until now. That's
    okay - jrnl will now upgrade your configuration and journal files. Afterwards
    you can enjoy all of the great new features that come with jrnl 2:
    - Support for storing your journal in multiple files
    - Faster reading and writing for large journals
    - New encryption back-end that makes installing jrnl much easier
    - Tons of bug fixes
    Please note that jrnl 1.x is NOT forward compatible with this version of jrnl.
    If you choose to proceed, you will not be able to use your journals with
    older versions of jrnl anymore.
    """
    AllDoneUpgrade = "We're all done here and you can start enjoying jrnl 2"
    InstallComplete = """
    jrnl configuration created at {config_path}
    For advanced features, read the docs at https://jrnl.sh
    """

    # --- Prompts --- #
    InstallJournalPathQuestion = """
    Path to your journal file (leave blank for {default_journal_path}):
    """
    DeleteEntryQuestion = "Delete entry '{entry_title}'?"
    ChangeTimeEntryQuestion = "Change time for '{entry_title}'?"
    EncryptJournalQuestion = """
    Do you want to encrypt your journal? (You can always change this later)
    """
    UseColorsQuestion = """
    Do you want jrnl to use colors to display entries? (You can always change this later)
    """  # noqa: E501 - the line is still under 88 when dedented
    YesOrNoPromptDefaultYes = "[Y/n]"
    YesOrNoPromptDefaultNo = "[y/N]"
    ContinueUpgrade = "Continue upgrading jrnl?"

    # these should be lowercase, if possible in language
    # "lowercase" means whatever `.lower()` returns
    OneCharacterYes = "y"
    OneCharacterNo = "n"

    # --- Exceptions ---#
    Error = "Error"
    UncaughtException = """
    {name}
    {exception}
    This is probably a bug. Please file an issue at:
    https://github.com/jrnl-org/jrnl/issues/new/choose
    """
    ConfigDirectoryIsFile = """
    Problem with config file!
    The path to your jrnl configuration directory is a file, not a directory:
    {config_directory_path}
    Removing this file will allow jrnl to save its configuration.
    """
    CantParseConfigFile = """
    Unable to parse config file at:
    {config_path}
    """
    LineWrapTooSmallForDateFormat = """
    The provided linewrap value of {config_linewrap} is too small by
    {columns} columns to display the timestamps in the configured time
    format for journal {journal}.
    You can avoid this error by specifying a linewrap value that is larger
    by at least {columns} in the configuration file or by using
    --config-override at the command line
    """
    CannotEncryptJournalType = """
    The journal {journal_name} can't be encrypted because it is a
    {journal_type} journal.
    To encrypt it, create a new journal referencing a file, export
    this journal to the new journal, then encrypt the new journal.
    """
    ConfigEncryptedForUnencryptableJournalType = """
    The config for journal "{journal_name}" has 'encrypt' set to true, but this type
    of journal can't be encrypted. Please fix your config file.
    """
    DecryptionFailedGeneric = "The decryption of journal data failed."
    KeyboardInterruptMsg = "Aborted by user"
    CantReadTemplate = """
    Unable to find a template file {template_path}.
    The following paths were checked:
    * {jrnl_template_dir}{template_path}
    * {actual_template_path}
    """
    NoNamedJournal = "No '{journal_name}' journal configured\n{journals}"
    DoesNotExist = "{name} does not exist"

    # --- Journal status ---#
    JournalNotSaved = "Entry NOT saved to journal"
    JournalEntryAdded = "Entry added to {journal_name} journal"
    JournalCountAddedSingular = "{num} entry added"
    JournalCountModifiedSingular = "{num} entry modified"
    JournalCountDeletedSingular = "{num} entry deleted"
    JournalCountAddedPlural = "{num} entries added"
    JournalCountModifiedPlural = "{num} entries modified"
    JournalCountDeletedPlural = "{num} entries deleted"
    # FIX: the template previously read "created at (unknown)" — a redaction
    # artifact. Journal.open() passes "journal_name" and "filename" params
    # with this message, so the path placeholder is {filename}.
    JournalCreated = "Journal '{journal_name}' created at {filename}"
    DirectoryCreated = "Directory {directory_name} created"
    JournalEncrypted = "Journal will be encrypted"
    JournalEncryptedTo = "Journal encrypted to {path}"
    JournalDecryptedTo = "Journal decrypted to {path}"
    # NOTE(review): "(unknown)" below also looks like a redacted path
    # placeholder, but no caller is visible here to confirm the param name.
    BackupCreated = "Created a backup at (unknown)"

    # --- Editor ---#
    WritingEntryStart = """
    Writing Entry
    To finish writing, press {how_to_quit} on a blank line.
    """
    HowToQuitWindows = "Ctrl+z and then Enter"
    HowToQuitLinux = "Ctrl+d"
    EditorMisconfigured = """
    No such file or directory: '{editor_key}'
    Please check the 'editor' key in your config file for errors:
    editor: '{editor_key}'
    """
    EditorNotConfigured = """
    There is no editor configured
    To use the --edit option, please specify an editor your config file:
    {config_file}
    For examples of how to configure an external editor, see:
    https://jrnl.sh/en/stable/external-editors/
    """
    NoEditsReceivedJournalNotDeleted = """
    No text received from editor. Were you trying to delete all the entries?
    This seems a bit drastic, so the operation was cancelled.
    To delete all entries, use the --delete option.
    """
    NoEditsReceived = "No edits to save, because nothing was changed"
    NoTextReceived = """
    No entry to save, because no text was received
    """
    NoChangesToTemplate = """
    No entry to save, because the template was not changed
    """

    # --- Upgrade --- #
    JournalFailedUpgrade = """
    The following journal{s} failed to upgrade:
    {failed_journals}
    Please tell us about this problem at the following URL:
    https://github.com/jrnl-org/jrnl/issues/new?title=JournalFailedUpgrade
    """
    UpgradeAborted = "jrnl was NOT upgraded"
    AbortingUpgrade = "Aborting upgrade..."
    ImportAborted = "Entries were NOT imported"
    JournalsToUpgrade = """
    The following journals will be upgraded to jrnl {version}:
    """
    JournalsToIgnore = """
    The following journals will not be touched:
    """
    UpgradingJournal = """
    Upgrading '{journal_name}' journal stored in {path}...
    """
    UpgradingConfig = "Upgrading config..."
    PaddedJournalName = "{journal_name:{pad}} -> {path}"

    # -- Config --- #
    AltConfigNotFound = """
    Alternate configuration file not found at the given path:
    {config_file}
    """
    ConfigUpdated = """
    Configuration updated to newest version at {config_path}
    """
    ConfigDoubleKeys = """
    There is at least one duplicate key in your configuration file.
    Details:
    {error_message}
    """

    # --- Password --- #
    Password = "Password:"
    PasswordFirstEntry = "Enter password for journal '{journal_name}': "
    PasswordConfirmEntry = "Enter password again: "
    PasswordMaxTriesExceeded = "Too many attempts with wrong password"
    PasswordCanNotBeEmpty = "Password can't be empty!"
    PasswordDidNotMatch = "Passwords did not match, please try again"
    WrongPasswordTryAgain = "Wrong password, try again"
    PasswordStoreInKeychain = "Do you want to store the password in your keychain?"

    # --- Search --- #
    NothingToDelete = """
    No entries to delete, because the search returned no results
    """
    NothingToModify = """
    No entries to modify, because the search returned no results
    """
    NoEntriesFound = "no entries found"
    EntryFoundCountSingular = "{num} entry found"
    EntryFoundCountPlural = "{num} entries found"

    # --- Formats --- #
    HeadingsPastH6 = """
    Headings increased past H6 on export - {date} {title}
    """
    YamlMustBeDirectory = """
    YAML export must be to a directory, not a single file
    """
    JournalExportedTo = "Journal exported to {path}"

    # --- Import --- #
    ImportSummary = """
    {count} imported to {journal_name} journal
    """

    # --- Color --- #
    InvalidColor = "{key} set to invalid color: {color}"

    # --- Keyring --- #
    KeyringBackendNotFound = """
    Keyring backend not found.
    Please install one of the supported backends by visiting:
    https://pypi.org/project/keyring/
    """
    KeyringRetrievalFailure = "Failed to retrieve keyring"

    # --- Deprecation --- #
    DeprecatedCommand = """
    The command {old_cmd} is deprecated and will be removed from jrnl soon.
    Please use {new_cmd} instead.
    """
| 9,246
|
Python
|
.py
| 215
| 35.590698
| 93
| 0.655692
|
jrnl-org/jrnl
| 6,434
| 519
| 140
|
GPL-3.0
|
9/5/2024, 5:11:58 PM (Europe/Amsterdam)
|
14,592
|
Message.py
|
jrnl-org_jrnl/jrnl/messages/Message.py
|
# Copyright © 2012-2023 jrnl contributors
# License: https://www.gnu.org/licenses/gpl-3.0.html
from typing import TYPE_CHECKING
from typing import Mapping
from typing import NamedTuple
from jrnl.messages.MsgStyle import MsgStyle
if TYPE_CHECKING:
from jrnl.messages.MsgText import MsgText
class Message(NamedTuple):
    """An immutable user-facing message: text template, style, and format params."""

    # MsgText member whose value is the message template (may contain {placeholders})
    text: "MsgText"
    # Controls how the message is decorated when printed
    style: MsgStyle = MsgStyle.NORMAL
    # Values interpolated into the template. The default is a single shared
    # empty mapping -- safe only because NamedTuple fields are never mutated.
    params: Mapping = {}
| 409
|
Python
|
.py
| 12
| 31.333333
| 52
| 0.790816
|
jrnl-org/jrnl
| 6,434
| 519
| 140
|
GPL-3.0
|
9/5/2024, 5:11:58 PM (Europe/Amsterdam)
|
14,593
|
__init__.py
|
jrnl-org_jrnl/jrnl/messages/__init__.py
|
# Copyright © 2012-2023 jrnl contributors
# License: https://www.gnu.org/licenses/gpl-3.0.html
from jrnl.messages import Message
from jrnl.messages import MsgStyle
from jrnl.messages import MsgText
# Re-export the primary class of each submodule under the package name, so
# callers can write `from jrnl.messages import Message` instead of
# `from jrnl.messages.Message import Message`. Note that this deliberately
# rebinds the module names imported above to the classes they contain.
Message = Message.Message
MsgStyle = MsgStyle.MsgStyle
MsgText = MsgText.MsgText
| 282
|
Python
|
.py
| 8
| 34
| 52
| 0.830882
|
jrnl-org/jrnl
| 6,434
| 519
| 140
|
GPL-3.0
|
9/5/2024, 5:11:58 PM (Europe/Amsterdam)
|
14,594
|
util.py
|
jrnl-org_jrnl/jrnl/plugins/util.py
|
# Copyright © 2012-2023 jrnl contributors
# License: https://www.gnu.org/licenses/gpl-3.0.html
from collections import Counter
from typing import TYPE_CHECKING
if TYPE_CHECKING:
from jrnl.journals import Journal
class NestedDict(dict):
    """A dict that auto-creates nested dicts on access to missing keys.

    Adapted from https://stackoverflow.com/a/74873621/8740440
    """

    def __missing__(self, key):
        # Auto-vivify: store a fresh nested level and hand it back, so
        # chained lookups like d[a][b][c] never raise KeyError.
        value = self[key] = NestedDict()
        return value
def get_tags_count(journal: "Journal") -> set[tuple[int, str]]:
    """Return a set of (count, tag) tuples for all tags present in the journal.

    A tag is counted at most once per entry (duplicates within a single
    entry's tag list are collapsed by the inner ``set``).
    """
    # Flatten: one occurrence per (entry, tag) pair.
    tags = [tag for entry in journal.entries for tag in set(entry.tags)]
    # Counter gives each tag's frequency in one O(n) pass, replacing the
    # previous O(n^2) `tags.count(tag)` inside a comprehension.
    return {(count, tag) for tag, count in Counter(tags).items()}
def oxford_list(lst: list) -> str:
    """Return a human-readable "or" listing of *lst*, with an Oxford comma."""
    items = sorted(lst)
    if not items:
        return "(nothing)"
    if len(items) == 1:
        return items[0]
    if len(items) == 2:
        return f"{items[0]} or {items[1]}"
    return ", ".join(items[:-1]) + ", or " + items[-1]
def get_journal_frequency_nested(journal: "Journal") -> NestedDict:
    """Return entry counts nested as {year: {month: {day: count}}}."""
    frequency = NestedDict()
    for entry in journal.entries:
        entry_date = entry.date.date()  # drop the time component
        # NestedDict auto-creates the year and month levels on access
        day_counts = frequency[entry_date.year][entry_date.month]
        # dict.get() does not trigger __missing__, so absent days start at 0
        day_counts[entry_date.day] = day_counts.get(entry_date.day, 0) + 1
    return frequency
def get_journal_frequency_one_level(journal: "Journal") -> Counter:
    """Returns a Counter of the form {date (YYYY-MM-DD): count}"""
    # entry.date.date() drops the time; str() yields the ISO "YYYY-MM-DD" form
    return Counter(str(entry.date.date()) for entry in journal.entries)
| 2,144
|
Python
|
.py
| 49
| 37.979592
| 88
| 0.661221
|
jrnl-org/jrnl
| 6,434
| 519
| 140
|
GPL-3.0
|
9/5/2024, 5:11:58 PM (Europe/Amsterdam)
|
14,595
|
markdown_exporter.py
|
jrnl-org_jrnl/jrnl/plugins/markdown_exporter.py
|
# Copyright © 2012-2023 jrnl contributors
# License: https://www.gnu.org/licenses/gpl-3.0.html
import os
import re
from typing import TYPE_CHECKING
from jrnl.messages import Message
from jrnl.messages import MsgStyle
from jrnl.messages import MsgText
from jrnl.output import print_msg
from jrnl.plugins.text_exporter import TextExporter
if TYPE_CHECKING:
from jrnl.journals import Entry
from jrnl.journals import Journal
class MarkdownExporter(TextExporter):
    """This Exporter can convert entries and journals into Markdown."""

    names = ["md", "markdown"]
    extension = "md"

    @classmethod
    def export_entry(cls, entry: "Entry", to_multifile: bool = True) -> str:
        """Returns a markdown representation of a single entry.

        The entry heading is H1 when exporting one file per entry, or H3 when
        the whole journal goes into a single document (H1/H2 are then reserved
        for year/month headings). Headings inside the body are demoted by the
        same amount; a warning is printed if that pushes any heading past H6.
        """
        date_str = entry.date.strftime(entry.journal.config["timeformat"])
        body_wrapper = "\n" if entry.body else ""
        body = body_wrapper + entry.body

        if to_multifile is True:
            heading = "#"
        else:
            heading = "###"

        """Increase heading levels in body text"""
        newbody = ""
        previous_line = ""
        warn_on_heading_level = False
        # Lines are emitted one behind, so a Setext underline (===/---) can
        # consume the line it decorates.
        for line in body.splitlines(True):
            if re.match(r"^#+ ", line):
                """ATX style headings"""
                newbody = newbody + previous_line + heading + line
                # Seven or more leading '#' after demotion means we went past H6
                if re.match(r"^#######+ ", heading + line):
                    warn_on_heading_level = True
                line = ""
            elif re.match(r"^=+$", line.rstrip()) and not re.match(
                r"^$", previous_line.strip()
            ):
                """Setext style H1"""
                newbody = newbody + heading + "# " + previous_line
                line = ""
            elif re.match(r"^-+$", line.rstrip()) and not re.match(
                r"^$", previous_line.strip()
            ):
                """Setext style H2"""
                newbody = newbody + heading + "## " + previous_line
                line = ""
            else:
                newbody = newbody + previous_line
            previous_line = line
        newbody = newbody + previous_line  # add very last line

        # make sure the export ends with a blank line
        if previous_line not in ["\r", "\n", "\r\n", "\n\r"]:
            newbody = newbody + os.linesep

        if warn_on_heading_level is True:
            print_msg(
                Message(
                    MsgText.HeadingsPastH6,
                    MsgStyle.WARNING,
                    {"date": date_str, "title": entry.title},
                )
            )

        return f"{heading} {date_str} {entry.title}\n{newbody} "

    @classmethod
    def export_journal(cls, journal: "Journal") -> str:
        """Returns a Markdown representation of an entire journal.

        Entries are grouped under H1 year headings and H2 month headings.
        """
        out = []
        year, month = -1, -1
        for e in journal.entries:
            if e.date.year != year:
                year = e.date.year
                # Reset the month tracker so the first entry of a new year
                # always gets a month heading -- previously an entry in the
                # same month-number as the prior year's last entry (e.g.
                # Dec 2021 -> Dec 2022) silently lost its "##" heading.
                month = -1
                out.append("# " + str(year))
                out.append("")
            if e.date.month != month:
                month = e.date.month
                out.append("## " + e.date.strftime("%B"))
                out.append("")
            out.append(cls.export_entry(e, False))
        result = "\n".join(out)
        return result
| 3,309
|
Python
|
.py
| 83
| 28.975904
| 76
| 0.539683
|
jrnl-org/jrnl
| 6,434
| 519
| 140
|
GPL-3.0
|
9/5/2024, 5:11:58 PM (Europe/Amsterdam)
|
14,596
|
fancy_exporter.py
|
jrnl-org_jrnl/jrnl/plugins/fancy_exporter.py
|
# Copyright © 2012-2023 jrnl contributors
# License: https://www.gnu.org/licenses/gpl-3.0.html
import logging
import os
from textwrap import TextWrapper
from typing import TYPE_CHECKING
from jrnl.exception import JrnlException
from jrnl.messages import Message
from jrnl.messages import MsgStyle
from jrnl.messages import MsgText
from jrnl.plugins.text_exporter import TextExporter
if TYPE_CHECKING:
from jrnl.journals import Entry
from jrnl.journals import Journal
class FancyExporter(TextExporter):
    """This Exporter converts entries and journals into text with unicode boxes."""

    names = ["fancy", "boxed"]
    extension = "txt"

    # Box-drawing characters used to assemble each entry's "card".
    # Top border of the card
    border_a = "┎"
    border_b = "─"
    border_c = "╮"
    border_d = "╘"
    border_e = "═"
    border_f = "╕"
    border_g = "┃"
    border_h = "│"
    border_i = "┠"
    border_j = "╌"
    border_k = "┤"
    border_l = "┖"
    border_m = "┘"

    @classmethod
    def export_entry(cls, entry: "Entry") -> str:
        """Returns a fancy unicode representation of a single entry."""
        date_str = entry.date.strftime(entry.journal.config["timeformat"])

        # Resolve the target width: explicit config value, the terminal width
        # for "auto" (falling back to 79 with no terminal), or 79 by default.
        if entry.journal.config["linewrap"]:
            linewrap = entry.journal.config["linewrap"]
            if linewrap == "auto":
                try:
                    linewrap = os.get_terminal_size().columns
                except OSError:
                    logging.debug(
                        "Can't determine terminal size automatically 'linewrap': '%s'",
                        entry.journal.config["linewrap"],
                    )
                    linewrap = 79
        else:
            linewrap = 79

        # Width available to the first title line (it shares the top row with
        # the date box); clamped to at least 1.
        initial_linewrap = max((1, linewrap - len(date_str) - 2))
        # Width available to subsequent title/body lines
        body_linewrap = linewrap - 2
        card = [
            cls.border_a + cls.border_b * (initial_linewrap) + cls.border_c + date_str
        ]
        # Raises JrnlException if the configured linewrap can't fit the date
        check_provided_linewrap_viability(linewrap, card, entry.journal.name)

        w = TextWrapper(
            width=initial_linewrap,
            initial_indent=cls.border_g + " ",
            subsequent_indent=cls.border_g + " ",
        )

        title_lines = w.wrap(entry.title) or [""]
        # First title line sits beside the bottom edge of the date box
        card.append(
            title_lines[0].ljust(initial_linewrap + 1)
            + cls.border_d
            + cls.border_e * (len(date_str) - 1)
            + cls.border_f
        )
        w.width = body_linewrap
        if len(title_lines) > 1:
            # Re-wrap the remaining title text to the full body width,
            # stripping the indent that the first wrap already added
            for line in w.wrap(
                " ".join(
                    [
                        title_line[len(w.subsequent_indent) :]
                        for title_line in title_lines[1:]
                    ]
                )
            ):
                card.append(line.ljust(body_linewrap + 1) + cls.border_h)
        if entry.body:
            # Dashed divider between the title block and the body
            card.append(cls.border_i + cls.border_j * body_linewrap + cls.border_k)
            for line in entry.body.splitlines():
                # An empty wrapped line still needs a left border
                body_lines = w.wrap(line) or [cls.border_g]
                for body_line in body_lines:
                    card.append(body_line.ljust(body_linewrap + 1) + cls.border_h)
        # Bottom border closes the card
        card.append(cls.border_l + cls.border_b * body_linewrap + cls.border_m)
        return "\n".join(card)

    @classmethod
    def export_journal(cls, journal) -> str:
        """Returns a unicode representation of an entire journal."""
        # Iterates the journal directly (its entries), one card per entry
        return "\n".join(cls.export_entry(entry) for entry in journal)
def check_provided_linewrap_viability(
    linewrap: int, card: list[str], journal: str
):
    """Abort if the card's top border cannot fit within the configured linewrap.

    ``card[0]`` already contains the top border plus the date string; when it
    is wider than ``linewrap``, the configured width cannot accommodate the
    journal's date format, so exporting is aborted with a helpful message.

    Args:
        linewrap: Maximum line width from the journal's configuration.
        card: Card lines rendered so far; only ``card[0]`` is checked.
        journal: Name of the journal, used in the error message. (The caller
            passes ``entry.journal.name``, so the correct annotation is
            ``str``, not ``Journal``.)

    Raises:
        JrnlException: If the top border is wider than ``linewrap``.
    """
    if len(card[0]) > linewrap:
        # How many columns the date format overflows the configured width by
        width_violation = len(card[0]) - linewrap
        raise JrnlException(
            Message(
                MsgText.LineWrapTooSmallForDateFormat,
                MsgStyle.NORMAL,
                {
                    "config_linewrap": linewrap,
                    "columns": width_violation,
                    "journal": journal,
                },
            )
        )
| 3,977
|
Python
|
.py
| 106
| 26.971698
| 87
| 0.559185
|
jrnl-org/jrnl
| 6,434
| 519
| 140
|
GPL-3.0
|
9/5/2024, 5:11:58 PM (Europe/Amsterdam)
|
14,597
|
text_exporter.py
|
jrnl-org_jrnl/jrnl/plugins/text_exporter.py
|
# Copyright © 2012-2023 jrnl contributors
# License: https://www.gnu.org/licenses/gpl-3.0.html
import errno
import os
import re
import unicodedata
from typing import TYPE_CHECKING
from jrnl.messages import Message
from jrnl.messages import MsgStyle
from jrnl.messages import MsgText
from jrnl.output import print_msg
if TYPE_CHECKING:
from jrnl.journals import Entry
from jrnl.journals import Journal
class TextExporter:
    """This Exporter can convert entries and journals into text files.

    Base class for all exporters: subclasses override ``names``/``extension``
    and the two ``export_*`` methods to produce other formats.
    """

    # CLI format names this exporter answers to
    names = ["text", "txt"]
    # File extension used by make_filename()
    extension = "txt"

    @classmethod
    def export_entry(cls, entry: "Entry") -> str:
        """Returns a string representation of a single entry."""
        return str(entry)

    @classmethod
    def export_journal(cls, journal: "Journal") -> str:
        """Returns a string representation of an entire journal."""
        return "\n".join(cls.export_entry(entry) for entry in journal)

    @classmethod
    def write_file(cls, journal: "Journal", path: str) -> str:
        """Exports a journal into a single file."""
        export_str = cls.export_journal(journal)
        with open(path, "w", encoding="utf-8") as f:
            f.write(export_str)
        print_msg(
            Message(
                MsgText.JournalExportedTo,
                MsgStyle.NORMAL,
                {
                    "path": path,
                },
            )
        )
        # Callers print the returned string; the message above already
        # reported success, so return nothing to print
        return ""

    @classmethod
    def make_filename(cls, entry: "Entry") -> str:
        # Filename pattern: "<YYYY-MM-DD>_<slugified title>.<extension>"
        return entry.date.strftime("%Y-%m-%d") + "_{}.{}".format(
            cls._slugify(str(entry.title)), cls.extension
        )

    @classmethod
    def write_files(cls, journal: "Journal", path: str) -> str:
        """Exports a journal into individual files for each entry."""
        for entry in journal.entries:
            entry_is_written = False
            while not entry_is_written:
                full_path = os.path.join(path, cls.make_filename(entry))
                try:
                    with open(full_path, "w", encoding="utf-8") as f:
                        f.write(cls.export_entry(entry))
                    entry_is_written = True
                except OSError as oserr:
                    # If the OS rejects the filename (too long or otherwise
                    # invalid), halve the entry title and retry until it fits
                    title_length = len(str(entry.title))
                    if (
                        oserr.errno == errno.ENAMETOOLONG
                        or oserr.errno == errno.ENOENT
                        or oserr.errno == errno.EINVAL
                    ) and title_length > 1:
                        shorter_file_length = title_length // 2
                        entry.title = str(entry.title)[:shorter_file_length]
                    else:
                        raise
        print_msg(
            Message(
                MsgText.JournalExportedTo,
                MsgStyle.NORMAL,
                {"path": path},
            )
        )
        return ""

    def _slugify(string: str) -> str:
        """Slugifies a string.

        Based on public domain code from https://github.com/zacharyvoase/slugify
        """
        # NOTE: deliberately no @staticmethod decorator -- this is only ever
        # accessed as ``cls._slugify(...)``, which resolves to the plain
        # function, so ``string`` receives the argument (not an instance).
        normalized_string = str(unicodedata.normalize("NFKD", string))
        no_punctuation = re.sub(r"[^\w\s-]", "", normalized_string).strip().lower()
        slug = re.sub(r"[-\s]+", "-", no_punctuation)
        return slug

    @classmethod
    def export(cls, journal: "Journal", output: str | None = None) -> str:
        """Exports to individual files if output is an existing path, or into
        a single file if output is a file name, or returns the exporter's
        representation as string if output is None."""
        if output and os.path.isdir(output):  # multiple files
            return cls.write_files(journal, output)
        elif output:  # single file
            return cls.write_file(journal, output)
        else:
            return cls.export_journal(journal)
| 3,844
|
Python
|
.py
| 96
| 29.364583
| 83
| 0.572423
|
jrnl-org/jrnl
| 6,434
| 519
| 140
|
GPL-3.0
|
9/5/2024, 5:11:58 PM (Europe/Amsterdam)
|
14,598
|
jrnl_importer.py
|
jrnl-org_jrnl/jrnl/plugins/jrnl_importer.py
|
# Copyright © 2012-2023 jrnl contributors
# License: https://www.gnu.org/licenses/gpl-3.0.html
import sys
from typing import TYPE_CHECKING
from jrnl.exception import JrnlException
from jrnl.messages import Message
from jrnl.messages import MsgStyle
from jrnl.messages import MsgText
from jrnl.output import print_msg
if TYPE_CHECKING:
from jrnl.journals import Journal
class JRNLImporter:
    """This plugin imports entries from other jrnl files."""

    # CLI format names this importer answers to
    names = ["jrnl"]

    @staticmethod
    def import_(journal: "Journal", input: str | None = None) -> None:
        """Imports from an existing file if input is specified, and
        standard input otherwise.

        Merges the imported entries into ``journal``, writes the journal to
        disk, and prints a summary of how many entries were added.
        """
        # Entry count before the merge, to report how many were imported
        old_cnt = len(journal.entries)
        if input:
            with open(input, "r", encoding="utf-8") as f:
                other_journal_txt = f.read()
        else:
            try:
                other_journal_txt = sys.stdin.read()
            except KeyboardInterrupt:
                # Ctrl-C while reading stdin aborts the import cleanly
                raise JrnlException(
                    Message(MsgText.KeyboardInterruptMsg, MsgStyle.ERROR_ON_NEW_LINE),
                    Message(MsgText.ImportAborted, MsgStyle.WARNING),
                )
        journal.import_(other_journal_txt)
        new_cnt = len(journal.entries)
        # Persist the merged journal before reporting the result
        journal.write()
        print_msg(
            Message(
                MsgText.ImportSummary,
                MsgStyle.NORMAL,
                {
                    "count": new_cnt - old_cnt,
                    "journal_name": journal.name,
                },
            )
        )
| 1,549
|
Python
|
.py
| 43
| 26.27907
| 86
| 0.599466
|
jrnl-org/jrnl
| 6,434
| 519
| 140
|
GPL-3.0
|
9/5/2024, 5:11:58 PM (Europe/Amsterdam)
|
14,599
|
json_exporter.py
|
jrnl-org_jrnl/jrnl/plugins/json_exporter.py
|
# Copyright © 2012-2023 jrnl contributors
# License: https://www.gnu.org/licenses/gpl-3.0.html
import json
from typing import TYPE_CHECKING
from jrnl.plugins.text_exporter import TextExporter
from jrnl.plugins.util import get_tags_count
if TYPE_CHECKING:
from jrnl.journals import Entry
from jrnl.journals import Journal
class JSONExporter(TextExporter):
    """This Exporter can convert entries and journals into json."""

    names = ["json"]
    extension = "json"

    @classmethod
    def entry_to_dict(cls, entry: "Entry") -> dict:
        """Build a JSON-serializable dict for a single entry."""
        entry_dict = {
            "title": entry.title,
            "body": entry.body,
            "date": entry.date.strftime("%Y-%m-%d"),
            "time": entry.date.strftime("%H:%M"),
            "tags": entry.tags,
            "starred": entry.starred,
        }
        if hasattr(entry, "uuid"):
            entry_dict["uuid"] = entry.uuid

        # Optional creator metadata: copy whichever of these attributes the
        # entry carries, in this fixed key order. generation_date is
        # stringified so it serializes cleanly.
        creator_fields = (
            ("device_agent", "creator_device_agent", False),
            ("generation_date", "creator_generation_date", True),
            ("host_name", "creator_host_name", False),
            ("os_agent", "creator_os_agent", False),
            ("software_agent", "creator_software_agent", False),
        )
        creator = {}
        for key, attr, stringify in creator_fields:
            if hasattr(entry, attr):
                value = getattr(entry, attr)
                creator[key] = str(value) if stringify else value
        if creator:
            entry_dict["creator"] = creator

        return entry_dict

    @classmethod
    def export_entry(cls, entry: "Entry") -> str:
        """Returns a json representation of a single entry."""
        return json.dumps(cls.entry_to_dict(entry), indent=2) + "\n"

    @classmethod
    def export_journal(cls, journal: "Journal") -> str:
        """Returns a json representation of an entire journal."""
        tag_counts = get_tags_count(journal)
        return json.dumps(
            {
                "tags": {tag: count for count, tag in tag_counts},
                "entries": [cls.entry_to_dict(entry) for entry in journal.entries],
            },
            indent=2,
        )
| 2,566
|
Python
|
.py
| 59
| 33.779661
| 86
| 0.599119
|
jrnl-org/jrnl
| 6,434
| 519
| 140
|
GPL-3.0
|
9/5/2024, 5:11:58 PM (Europe/Amsterdam)
|