id
int64 0
458k
| file_name
stringlengths 4
119
| file_path
stringlengths 14
227
| content
stringlengths 24
9.96M
| size
int64 24
9.96M
| language
stringclasses 1
value | extension
stringclasses 14
values | total_lines
int64 1
219k
| avg_line_length
float64 2.52
4.63M
| max_line_length
int64 5
9.91M
| alphanum_fraction
float64 0
1
| repo_name
stringlengths 7
101
| repo_stars
int64 100
139k
| repo_forks
int64 0
26.4k
| repo_open_issues
int64 0
2.27k
| repo_license
stringclasses 12
values | repo_extraction_date
stringclasses 433
values |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
18,600
|
start.py
|
rafalp_Misago/misago/posting/forms/start.py
|
from typing import TYPE_CHECKING
from django import forms
from django.contrib.auth import get_user_model
from django.http import HttpRequest
from django.utils.translation import npgettext, pgettext
from ...core.utils import slugify
from ..state.start import StartThreadState
from .base import PostingForm
from .formset import PostingFormset
if TYPE_CHECKING:
from ...users.models import User
else:
User = get_user_model()
class StartThreadFormset(PostingFormset):
pass
class StartThreadForm(PostingForm):
template_name = "misago/posting/start_thread_form.html"
title = forms.CharField(max_length=200)
post = forms.CharField(max_length=2000, widget=forms.Textarea)
def update_state(self, state: StartThreadState):
state.set_thread_title(self.cleaned_data["title"])
state.set_post_message(self.cleaned_data["post"])
class StartPrivateThreadForm(PostingForm):
request: HttpRequest
invite_users: list["User"]
template_name = "misago/posting/start_private_thread_form.html"
users = forms.CharField(max_length=200)
def __init__(self, *args, **kwargs):
self.request = kwargs.pop("request")
self.invite_users: list["User"] = []
super().__init__(*args, **kwargs)
def clean_users(self):
uniques: dict[str, str] = {}
for username in self.cleaned_data["users"].split():
slug = slugify(username)
if slug not in uniques:
uniques[slug] = username
if self.request.user.slug in uniques and len(uniques) == 1:
raise forms.ValidationError(
pgettext("posting form", "You can't invite yourself.")
)
uniques.pop(self.request.user.slug, None)
if not uniques:
raise forms.ValidationError(
pgettext("posting form", "Enter at least one username.")
)
limit = self.request.user_permissions.private_thread_users_limit
if len(uniques) > limit:
raise forms.ValidationError(
npgettext(
"posting form",
"You can't invite more than %(limit)s user.",
"You can't invite more than %(limit)s users.",
len(uniques),
),
params={"limit": limit},
)
users = list(User.objects.filter(slug__in=uniques, is_active=True))
if len(users) != len(uniques):
found_users: set[str] = set([u.slug for u in users])
missing_users: list[str] = set(uniques).difference(found_users)
missing_usernames: list[str] = [uniques[slug] for slug in missing_users]
if len(missing_usernames) == 1:
raise forms.ValidationError(
pgettext(
"posting form",
"One user could not be found: %(username)s",
),
params={"username": missing_usernames[0]},
)
raise forms.ValidationError(
pgettext(
"posting form",
"Users could not be found: %(usernames)s",
),
params={"usernames": ", ".join(missing_usernames)},
)
self.invite_users = users
return " ".join([u.username for u in users])
def update_state(self, state: StartThreadState):
state.set_invite_users(self.invite_users)
| 3,483
|
Python
|
.py
| 81
| 32.246914
| 84
| 0.601007
|
rafalp/Misago
| 2,519
| 524
| 136
|
GPL-2.0
|
9/5/2024, 5:12:22 PM (Europe/Amsterdam)
|
18,601
|
base.py
|
rafalp_Misago/misago/posting/forms/base.py
|
from django import forms
from ..state.base import PostingState
class PostingForm(forms.Form):
def update_state(self, state: PostingState):
pass
| 159
|
Python
|
.py
| 5
| 27.8
| 48
| 0.768212
|
rafalp/Misago
| 2,519
| 524
| 136
|
GPL-2.0
|
9/5/2024, 5:12:22 PM (Europe/Amsterdam)
|
18,602
|
formset.py
|
rafalp_Misago/misago/posting/forms/formset.py
|
from ...forms.formset import Formset
from ..state.base import PostingState
class PostingFormset(Formset):
def update_state(self, state: PostingState):
for form in self.forms.values():
if form.is_valid():
form.update_state(state)
| 271
|
Python
|
.py
| 7
| 31.714286
| 48
| 0.679389
|
rafalp/Misago
| 2,519
| 524
| 136
|
GPL-2.0
|
9/5/2024, 5:12:22 PM (Europe/Amsterdam)
|
18,603
|
test_start_private_thread_view.py
|
rafalp_Misago/misago/posting/tests/test_start_private_thread_view.py
|
from django.urls import reverse
from ...test import assert_contains
from ...threads.models import Thread
def test_start_private_thread_view_displays_login_page_to_guests(db, client):
response = client.get(reverse("misago:start-private-thread"))
assert_contains(response, "Sign in to start new thread")
def test_start_private_thread_view_displays_error_page_to_users_without_private_threads_permission(
user, user_client
):
user.group.can_use_private_threads = False
user.group.save()
response = user_client.get(reverse("misago:start-private-thread"))
assert_contains(
response,
"You can't use private threads.",
status_code=403,
)
def test_start_private_thread_view_displays_error_page_to_users_without_start_threads_permission(
user, user_client
):
user.group.can_start_private_threads = False
user.group.save()
response = user_client.get(reverse("misago:start-private-thread"))
assert_contains(
response,
"You can't start new private threads.",
status_code=403,
)
def test_start_private_thread_view_displays_form_page_to_users(user_client):
response = user_client.get(reverse("misago:start-private-thread"))
assert_contains(response, "Start new private thread")
def test_start_private_thread_view_posts_new_thread(user_client, other_user):
response = user_client.post(
reverse("misago:start-private-thread"),
{
"users-users": other_user.username,
"thread-title": "Hello world",
"thread-post": "How's going?",
},
)
assert response.status_code == 302
thread = Thread.objects.get(slug="hello-world")
assert response["location"] == reverse(
"misago:private-thread", kwargs={"id": thread.id, "slug": thread.slug}
)
def test_start_private_thread_view_previews_message(user_client, other_user):
response = user_client.post(
reverse("misago:start-private-thread"),
{
"thread-title": "Hello world",
"thread-post": "How's going?",
"preview": "true",
},
)
assert_contains(response, "Start new private thread")
assert_contains(response, "Message preview")
| 2,252
|
Python
|
.py
| 56
| 33.928571
| 99
| 0.68211
|
rafalp/Misago
| 2,519
| 524
| 136
|
GPL-2.0
|
9/5/2024, 5:12:22 PM (Europe/Amsterdam)
|
18,604
|
test_start_category_thread_view.py
|
rafalp_Misago/misago/posting/tests/test_start_category_thread_view.py
|
from django.urls import reverse
from ...permissions.enums import CategoryPermission
from ...permissions.models import CategoryGroupPermission
from ...test import assert_contains
from ...threads.models import Thread
def test_start_thread_view_displays_login_page_to_guests(client, default_category):
response = client.get(
reverse(
"misago:start-thread",
kwargs={"id": default_category.id, "slug": default_category.slug},
)
)
assert_contains(response, "Sign in to start new thread")
def test_start_thread_view_displays_error_page_to_users_without_see_category_permission(
user_client, user, default_category
):
CategoryGroupPermission.objects.filter(
group=user.group,
permission=CategoryPermission.SEE,
).delete()
response = user_client.get(
reverse(
"misago:start-thread",
kwargs={"id": default_category.id, "slug": default_category.slug},
)
)
assert response.status_code == 404
def test_start_thread_view_displays_error_page_to_users_without_browse_category_permission(
user_client, user, default_category
):
CategoryGroupPermission.objects.filter(
group=user.group,
permission=CategoryPermission.BROWSE,
).delete()
response = user_client.get(
reverse(
"misago:start-thread",
kwargs={"id": default_category.id, "slug": default_category.slug},
)
)
assert_contains(
response,
"You can't browse the contents of this category.",
status_code=403,
)
def test_start_thread_view_displays_error_page_to_users_without_start_threads_permission(
user_client, user, default_category
):
CategoryGroupPermission.objects.filter(
group=user.group,
permission=CategoryPermission.START,
).delete()
response = user_client.get(
reverse(
"misago:start-thread",
kwargs={"id": default_category.id, "slug": default_category.slug},
)
)
assert_contains(
response,
"You can't start new threads in this category.",
status_code=403,
)
def test_start_thread_view_displays_error_page_to_users_without_post_in_closed_category_permission(
user_client, default_category
):
default_category.is_closed = True
default_category.save()
response = user_client.get(
reverse(
"misago:start-thread",
kwargs={"id": default_category.id, "slug": default_category.slug},
)
)
assert_contains(
response,
"This category is closed.",
status_code=403,
)
def test_start_thread_view_displays_form_page_to_users(user_client, default_category):
response = user_client.get(
reverse(
"misago:start-thread",
kwargs={"id": default_category.id, "slug": default_category.slug},
)
)
assert_contains(response, "Start new thread")
def test_start_thread_view_displays_form_page_to_users_with_permission_to_post_in_closed_category(
user, user_client, default_category, members_group, moderators_group
):
default_category.is_closed = True
default_category.save()
user.set_groups(members_group, [moderators_group])
user.save()
response = user_client.get(
reverse(
"misago:start-thread",
kwargs={"id": default_category.id, "slug": default_category.slug},
)
)
assert_contains(response, "Start new thread")
def test_start_thread_view_posts_new_thread(user_client, default_category):
response = user_client.post(
reverse(
"misago:start-thread",
kwargs={"id": default_category.id, "slug": default_category.slug},
),
{
"thread-title": "Hello world",
"thread-post": "How's going?",
},
)
assert response.status_code == 302
thread = Thread.objects.get(slug="hello-world")
assert response["location"] == reverse(
"misago:thread", kwargs={"id": thread.pk, "slug": thread.slug}
)
def test_start_thread_view_previews_message(user_client, default_category):
response = user_client.post(
reverse(
"misago:start-thread",
kwargs={"id": default_category.id, "slug": default_category.slug},
),
{
"thread-title": "Hello world",
"thread-post": "How's going?",
"preview": "true",
},
)
assert_contains(response, "Start new thread")
assert_contains(response, "Message preview")
| 4,606
|
Python
|
.py
| 131
| 27.977099
| 99
| 0.648011
|
rafalp/Misago
| 2,519
| 524
| 136
|
GPL-2.0
|
9/5/2024, 5:12:22 PM (Europe/Amsterdam)
|
18,605
|
test_select_category_view.py
|
rafalp_Misago/misago/posting/tests/test_select_category_view.py
|
import pytest
from django.urls import reverse
from ...categories.models import Category
from ...permissions.enums import CategoryPermission
from ...permissions.models import CategoryGroupPermission
from ...test import assert_contains, assert_not_contains
from ...testutils import grant_category_group_permissions
@pytest.fixture
def sibling_category(root_category, guests_group, members_group):
category = Category(name="Sibling Category", slug="sibling-category")
category.insert_at(root_category, position="last-child", save=True)
grant_category_group_permissions(
category,
guests_group,
CategoryPermission.SEE,
CategoryPermission.BROWSE,
CategoryPermission.START,
)
grant_category_group_permissions(
category,
members_group,
CategoryPermission.SEE,
CategoryPermission.BROWSE,
CategoryPermission.START,
)
return category
@pytest.fixture
def child_category(default_category, guests_group, members_group):
category = Category(name="Sibling Category", slug="sibling-category")
category.insert_at(default_category, position="last-child", save=True)
grant_category_group_permissions(
category,
guests_group,
CategoryPermission.SEE,
CategoryPermission.BROWSE,
CategoryPermission.START,
)
grant_category_group_permissions(
category,
members_group,
CategoryPermission.SEE,
CategoryPermission.BROWSE,
CategoryPermission.START,
)
return category
def test_select_category_view_displays_error_page_if_guest_cant_start_thread_in_any_category(
client, default_category
):
CategoryGroupPermission.objects.filter(
category=default_category, permission=CategoryPermission.START
).delete()
response = client.get(reverse("misago:start-thread"))
assert_contains(response, "You can't start new threads.", status_code=403)
def test_select_category_view_displays_error_page_if_user_cant_start_thread_in_any_category(
user_client, default_category
):
CategoryGroupPermission.objects.filter(
category=default_category, permission=CategoryPermission.START
).delete()
response = user_client.get(reverse("misago:start-thread"))
assert_contains(response, "You can't start new threads.", status_code=403)
def test_select_category_view_displays_error_message_in_htmx_if_guest_cant_start_thread_in_any_category(
client, default_category
):
CategoryGroupPermission.objects.filter(
category=default_category, permission=CategoryPermission.START
).delete()
response = client.get(
reverse("misago:start-thread"),
headers={"hx-request": "true"},
)
assert_contains(response, "Start new thread in")
assert_contains(response, "You can't start new threads.")
def test_select_category_view_displays_error_message_in_htmx_if_user_cant_start_thread_in_any_category(
user_client, default_category
):
CategoryGroupPermission.objects.filter(
category=default_category, permission=CategoryPermission.START
).delete()
response = user_client.get(
reverse("misago:start-thread"),
headers={"hx-request": "true"},
)
assert_contains(response, "Start new thread in")
assert_contains(response, "You can't start new threads.")
def test_select_category_view_displays_category_if_guest_can_start_thread_in_it(
client, default_category
):
response = client.get(reverse("misago:start-thread"))
assert_contains(response, "Start new thread in")
assert_contains(
response,
reverse(
"misago:start-thread",
kwargs={"id": default_category.id, "slug": default_category.slug},
),
)
def test_select_category_view_displays_category_if_user_can_start_thread_in_it(
user_client, default_category
):
response = user_client.get(reverse("misago:start-thread"))
assert_contains(response, "Start new thread in")
assert_contains(
response,
reverse(
"misago:start-thread",
kwargs={"id": default_category.id, "slug": default_category.slug},
),
)
def test_select_category_view_displays_category_in_htmx_if_guest_can_start_thread_in_it(
client, guests_group, default_category
):
response = client.get(
reverse("misago:start-thread"),
headers={"hx-request": "true"},
)
assert_contains(response, "Start new thread in")
assert_contains(
response,
reverse(
"misago:start-thread",
kwargs={"id": default_category.id, "slug": default_category.slug},
),
)
def test_select_category_view_displays_category_in_htmx_if_user_can_start_thread_in_it(
user_client, default_category
):
response = user_client.get(
reverse("misago:start-thread"),
headers={"hx-request": "true"},
)
assert_contains(response, "Start new thread in")
assert_contains(
response,
reverse(
"misago:start-thread",
kwargs={"id": default_category.id, "slug": default_category.slug},
),
)
def test_select_category_view_excludes_category_if_guest_cant_start_thread_in_it(
client, default_category, sibling_category
):
CategoryGroupPermission.objects.filter(
category=default_category, permission=CategoryPermission.START
).delete()
response = client.get(reverse("misago:start-thread"))
assert_contains(response, "Start new thread in")
assert_contains(
response,
reverse(
"misago:start-thread",
kwargs={"id": sibling_category.id, "slug": sibling_category.slug},
),
)
assert_not_contains(
response,
reverse(
"misago:start-thread",
kwargs={"id": default_category.id, "slug": default_category.slug},
),
)
def test_select_category_view_excludes_category_in_htmx_if_guest_cant_start_thread_in_it(
client, default_category, sibling_category
):
CategoryGroupPermission.objects.filter(
category=default_category, permission=CategoryPermission.START
).delete()
response = client.get(
reverse("misago:start-thread"),
headers={"hx-request": "true"},
)
assert_contains(response, "Start new thread in")
assert_contains(
response,
reverse(
"misago:start-thread",
kwargs={"id": sibling_category.id, "slug": sibling_category.slug},
),
)
assert_not_contains(
response,
reverse(
"misago:start-thread",
kwargs={"id": default_category.id, "slug": default_category.slug},
),
)
def test_select_category_view_excludes_category_if_user_cant_start_thread_in_it(
user_client, default_category, sibling_category
):
CategoryGroupPermission.objects.filter(
category=default_category, permission=CategoryPermission.START
).delete()
response = user_client.get(reverse("misago:start-thread"))
assert_contains(response, "Start new thread in")
assert_contains(
response,
reverse(
"misago:start-thread",
kwargs={"id": sibling_category.id, "slug": sibling_category.slug},
),
)
assert_not_contains(
response,
reverse(
"misago:start-thread",
kwargs={"id": default_category.id, "slug": default_category.slug},
),
)
def test_select_category_view_excludes_category_in_htmx_if_user_cant_start_thread_in_it(
user_client, default_category, sibling_category
):
CategoryGroupPermission.objects.filter(
category=default_category, permission=CategoryPermission.START
).delete()
response = user_client.get(
reverse("misago:start-thread"),
headers={"hx-request": "true"},
)
assert_contains(response, "Start new thread in")
assert_contains(
response,
reverse(
"misago:start-thread",
kwargs={"id": sibling_category.id, "slug": sibling_category.slug},
),
)
assert_not_contains(
response,
reverse(
"misago:start-thread",
kwargs={"id": default_category.id, "slug": default_category.slug},
),
)
def test_select_category_view_excludes_empty_vanilla_category(
user_client, default_category, sibling_category
):
default_category.is_vanilla = True
default_category.save()
response = user_client.get(reverse("misago:start-thread"))
assert_contains(response, "Start new thread in")
assert_contains(
response,
reverse(
"misago:start-thread",
kwargs={"id": sibling_category.id, "slug": sibling_category.slug},
),
)
assert_not_contains(
response,
reverse(
"misago:start-thread",
kwargs={"id": default_category.id, "slug": default_category.slug},
),
)
def test_select_category_view_excludes_empty_vanilla_category_in_htmx(
user_client, default_category, sibling_category
):
default_category.is_vanilla = True
default_category.save()
response = user_client.get(
reverse("misago:start-thread"),
headers={"hx-request": "true"},
)
assert_contains(response, "Start new thread in")
assert_contains(
response,
reverse(
"misago:start-thread",
kwargs={"id": sibling_category.id, "slug": sibling_category.slug},
),
)
assert_not_contains(
response,
reverse(
"misago:start-thread",
kwargs={"id": default_category.id, "slug": default_category.slug},
),
)
def test_select_category_view_includes_vanilla_category_with_children(
user_client, default_category, child_category
):
default_category.is_vanilla = True
default_category.save()
response = user_client.get(reverse("misago:start-thread"))
assert_contains(response, "Start new thread in")
assert_contains(
response,
reverse(
"misago:start-thread",
kwargs={"id": child_category.id, "slug": child_category.slug},
),
)
assert_not_contains(
response,
reverse(
"misago:start-thread",
kwargs={"id": default_category.id, "slug": default_category.slug},
),
)
def test_select_category_view_includes_vanilla_category_with_children_in_htmx(
user_client, default_category, child_category
):
default_category.is_vanilla = True
default_category.save()
response = user_client.get(
reverse("misago:start-thread"),
headers={"hx-request": "true"},
)
assert_contains(response, "Start new thread in")
assert_contains(
response,
reverse(
"misago:start-thread",
kwargs={"id": child_category.id, "slug": child_category.slug},
),
)
assert_not_contains(
response,
reverse(
"misago:start-thread",
kwargs={"id": default_category.id, "slug": default_category.slug},
),
)
def test_select_category_view_excludes_child_category_if_user_cant_start_thread_in_it(
user_client, default_category, child_category
):
CategoryGroupPermission.objects.filter(
category=child_category, permission=CategoryPermission.START
).delete()
response = user_client.get(reverse("misago:start-thread"))
assert_contains(response, "Start new thread in")
assert_contains(
response,
reverse(
"misago:start-thread",
kwargs={"id": default_category.id, "slug": default_category.slug},
),
)
assert_not_contains(
response,
reverse(
"misago:start-thread",
kwargs={"id": child_category.id, "slug": child_category.slug},
),
)
def test_select_category_view_excludes_child_category_in_htmx_if_user_cant_start_thread_in_it(
user_client, default_category, child_category
):
CategoryGroupPermission.objects.filter(
category=child_category, permission=CategoryPermission.START
).delete()
response = user_client.get(
reverse("misago:start-thread"),
headers={"hx-request": "true"},
)
assert_contains(response, "Start new thread in")
assert_contains(
response,
reverse(
"misago:start-thread",
kwargs={"id": default_category.id, "slug": default_category.slug},
),
)
assert_not_contains(
response,
reverse(
"misago:start-thread",
kwargs={"id": child_category.id, "slug": child_category.slug},
),
)
def test_select_category_view_includes_closed_category_if_user_can_post_in_it(
user, user_client, default_category, members_group, moderators_group
):
default_category.is_closed = True
default_category.save()
user.set_groups(members_group, [moderators_group])
user.save()
response = user_client.get(reverse("misago:start-thread"))
assert_contains(response, "Start new thread in")
assert_contains(
response,
reverse(
"misago:start-thread",
kwargs={"id": default_category.id, "slug": default_category.slug},
),
)
def test_select_category_view_includes_closed_category_in_htmx_if_user_can_post_in_it(
user, user_client, default_category, members_group, moderators_group
):
default_category.is_closed = True
default_category.save()
user.set_groups(members_group, [moderators_group])
user.save()
response = user_client.get(
reverse("misago:start-thread"),
headers={"hx-request": "true"},
)
assert_contains(response, "Start new thread in")
assert_contains(
response,
reverse(
"misago:start-thread",
kwargs={"id": default_category.id, "slug": default_category.slug},
),
)
def test_select_category_view_excludes_closed_category_if_user_cant_post_in_it(
user_client, default_category, sibling_category
):
sibling_category.is_closed = True
sibling_category.save()
response = user_client.get(reverse("misago:start-thread"))
assert_contains(response, "Start new thread in")
assert_contains(
response,
reverse(
"misago:start-thread",
kwargs={"id": default_category.id, "slug": default_category.slug},
),
)
assert_not_contains(
response,
reverse(
"misago:start-thread",
kwargs={"id": sibling_category.id, "slug": sibling_category.slug},
),
)
def test_select_category_view_excludes_closed_category_in_htmx_if_user_cant_post_in_it(
user_client, default_category, sibling_category
):
sibling_category.is_closed = True
sibling_category.save()
response = user_client.get(
reverse("misago:start-thread"),
headers={"hx-request": "true"},
)
assert_contains(response, "Start new thread in")
assert_contains(
response,
reverse(
"misago:start-thread",
kwargs={"id": default_category.id, "slug": default_category.slug},
),
)
assert_not_contains(
response,
reverse(
"misago:start-thread",
kwargs={"id": sibling_category.id, "slug": sibling_category.slug},
),
)
| 15,513
|
Python
|
.py
| 450
| 27.471111
| 104
| 0.656771
|
rafalp/Misago
| 2,519
| 524
| 136
|
GPL-2.0
|
9/5/2024, 5:12:22 PM (Europe/Amsterdam)
|
18,606
|
apps.py
|
rafalp_Misago/misago/components/apps.py
|
from django.apps import AppConfig
class MisagoComponentsConfig(AppConfig):
name = "misago.components"
label = "misago_components"
verbose_name = "Misago Components"
| 179
|
Python
|
.py
| 5
| 32
| 40
| 0.773256
|
rafalp/Misago
| 2,519
| 524
| 136
|
GPL-2.0
|
9/5/2024, 5:12:22 PM (Europe/Amsterdam)
|
18,607
|
misago_component.py
|
rafalp_Misago/misago/components/templatetags/misago_component.py
|
from django import template
from django.template.loader import get_template
from django.utils.safestring import mark_safe
register = template.Library()
@register.simple_tag(takes_context=True)
def includecomponent(context, data):
component_context = context.flatten()
component_context.update(data)
template = get_template(data["template_name"])
return mark_safe(template.render(component_context))
| 418
|
Python
|
.py
| 10
| 38.9
| 56
| 0.802469
|
rafalp/Misago
| 2,519
| 524
| 136
|
GPL-2.0
|
9/5/2024, 5:12:22 PM (Europe/Amsterdam)
|
18,608
|
celery.py
|
rafalp_Misago/devproject/celery.py
|
import os
from celery import Celery
# set the default Django settings module for the 'celery' program.
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "devproject.settings")
app = Celery("devproject")
# Using a string here means the worker doesn't have to serialize
# the configuration object to child processes.
# - namespace='CELERY' means all celery-related configuration keys
# should have a `CELERY_` prefix.
app.config_from_object("django.conf:settings", namespace="CELERY")
# Load task modules from all registered Django app configs.
app.autodiscover_tasks()
| 573
|
Python
|
.py
| 12
| 46.333333
| 70
| 0.796763
|
rafalp/Misago
| 2,519
| 524
| 136
|
GPL-2.0
|
9/5/2024, 5:12:22 PM (Europe/Amsterdam)
|
18,609
|
settings.py
|
rafalp_Misago/devproject/settings.py
|
# pylint: disable=line-too-long
"""
Django settings for dev project.
Generated by 'django-admin startproject' using Django 1.11.15.
For more information on this file, see
https://docs.djangoproject.com/en/1.11/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.11/ref/settings/
"""
import os
from misago import discover_plugins
from misago.settings import *
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
# Define placeholder gettext function
# This function will mark strings in settings visible to makemessages
# without need for Django's i18n features be initialized first.
_ = lambda s: s
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.11/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = "1znyfpwp*_#!r0#l248lht*6)_0b+504n*2-8cxf(2u)fhi0f^"
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
# A list of strings representing the host/domain names that this Django site can serve.
# If you are unsure, just enter here your domain name, eg. ['mysite.com', 'www.mysite.com']
ALLOWED_HOSTS = ["*"]
# Database
# https://docs.djangoproject.com/en/1.11/ref/settings/#databases
DATABASES = {
"default": {
# Misago requires PostgreSQL to run
"ENGINE": "django.db.backends.postgresql",
"NAME": os.environ.get("POSTGRES_DB"),
"USER": os.environ.get("POSTGRES_USER"),
"PASSWORD": os.environ.get("POSTGRES_PASSWORD"),
"HOST": os.environ.get("POSTGRES_HOST"),
"PORT": 5432,
}
}
DEFAULT_AUTO_FIELD = "django.db.models.AutoField"
# Caching
# https://docs.djangoproject.com/en/1.11/topics/cache/#setting-up-the-cache
CACHES = {
"default": {
# Misago doesn't run well with LocMemCache in production environments
"BACKEND": "django.core.cache.backends.locmem.LocMemCache"
}
}
# Password validation
# https://docs.djangoproject.com/en/1.11/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
"NAME": "django.contrib.auth.password_validation.UserAttributeSimilarityValidator",
"OPTIONS": {"user_attributes": ["username", "email"]},
},
{
"NAME": "django.contrib.auth.password_validation.MinimumLengthValidator",
"OPTIONS": {"min_length": 7},
},
{"NAME": "django.contrib.auth.password_validation.CommonPasswordValidator"},
{"NAME": "django.contrib.auth.password_validation.NumericPasswordValidator"},
]
# Internationalization
# https://docs.djangoproject.com/en/1.11/topics/i18n/
LANGUAGE_CODE = os.environ.get("LANGUAGE_CODE", "") or "en-us"
TIME_ZONE = "UTC"
USE_I18N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.11/howto/static-files/
STATIC_URL = "/static/"
# User uploads (Avatars, Attachments, files uploaded in other Django apps, ect.)
# https://docs.djangoproject.com/en/1.11/howto/static-files/
MEDIA_URL = "/media/"
# The absolute path to the directory where collectstatic will collect static files for deployment.
# https://docs.djangoproject.com/en/1.11/ref/settings/#static-root
STATIC_ROOT = os.path.join(BASE_DIR, "static")
# Absolute filesystem path to the directory that will hold user-uploaded files.
# https://docs.djangoproject.com/en/1.11/ref/settings/#media-root
MEDIA_ROOT = os.path.join(BASE_DIR, "media")
# This setting defines the additional locations the staticfiles app will traverse if the FileSystemFinder finder
# is enabled, e.g. if you use the collectstatic or findstatic management command or use the static file serving view.
# https://docs.djangoproject.com/en/1.10/ref/settings/#staticfiles-dirs
STATICFILES_DIRS = []
# Email configuration
# https://docs.djangoproject.com/en/1.11/ref/settings/#email-backend
EMAIL_HOST = "localhost"
EMAIL_PORT = 25
# If either of these settings is empty, Django won't attempt authentication.
EMAIL_HOST_USER = ""
EMAIL_HOST_PASSWORD = ""
# Default email address to use for various automated correspondence from the site manager(s).
DEFAULT_FROM_EMAIL = "Forums <%s>" % EMAIL_HOST_USER
# Application definition
AUTH_USER_MODEL = "misago_users.User"
AUTHENTICATION_BACKENDS = ["misago.users.authbackends.MisagoBackend"]
CSRF_FAILURE_VIEW = "misago.core.errorpages.csrf_failure"
PLUGINS_DIRECTORY = os.environ.get("MISAGO_PLUGINS")
INSTALLED_PLUGINS = discover_plugins(PLUGINS_DIRECTORY)
# Combine Misago's default installed apps with plugins
INSTALLED_APPS = [
*INSTALLED_PLUGINS,
*INSTALLED_APPS,
]
INTERNAL_IPS = ["127.0.0.1"]
# Named URL patterns used by Django's auth redirects.
LOGIN_REDIRECT_URL = "misago:index"
LOGIN_URL = "misago:login"
LOGOUT_URL = "misago:logout"
# Debug toolbar middleware is prepended so it wraps all Misago middleware.
MIDDLEWARE = [
    "debug_toolbar.middleware.DebugToolbarMiddleware",
] + MISAGO_MIDDLEWARE
ROOT_URLCONF = "devproject.urls"
SECURE_REFERRER_POLICY = "strict-origin-when-cross-origin"
SOCIAL_AUTH_STRATEGY = "misago.socialauth.strategy.MisagoStrategy"
SOCIAL_AUTH_PIPELINE = (
    # Steps required by social pipeline to work - don't delete those!
    "social_core.pipeline.social_auth.social_details",
    "social_core.pipeline.social_auth.social_uid",
    "social_core.pipeline.social_auth.social_user",
    # If enabled in admin panel, lets your users to associate their old forum account
    # with social one, if both have same e-mail address.
    "misago.socialauth.pipeline.associate_by_email",
    # Those steps make sure banned users may not join your site or use banned name or email.
    "misago.socialauth.pipeline.validate_ip_not_banned",
    "misago.socialauth.pipeline.validate_user_not_banned",
    # Reads user data received from social site and tries to create valid and available username
    # Required if you want automatic account creation to work. Otherwise optional.
    "misago.socialauth.pipeline.get_username",
    # Uncomment next line to enable automatic account creation if data from social site is valid
    # and get_username found valid name for new user account:
    # 'misago.socialauth.pipeline.create_user',
    # This step asks user to complete simple, pre filled registration form containing username,
    # email, legal note if you remove it without adding custom one, users will have no fallback
    # for joining your site using their social account.
    "misago.socialauth.pipeline.create_user_with_form",
    # Steps finalizing social authentication flow - don't delete those!
    "social_core.pipeline.social_auth.associate_user",
    "social_core.pipeline.social_auth.load_extra_data",
    "misago.socialauth.pipeline.require_activation",
)
SOCIAL_AUTH_JSONFIELD_ENABLED = True
TEMPLATES = [
    {
        "BACKEND": "django.template.backends.django.DjangoTemplates",
        "DIRS": [],
        "APP_DIRS": True,
        "OPTIONS": {
            "context_processors": TEMPLATE_CONTEXT_PROCESSORS,
        },
    }
]
WSGI_APPLICATION = "devproject.wsgi.application"
# Django Debug Toolbar
# http://django-debug-toolbar.readthedocs.io/en/stable/configuration.html
DEBUG_TOOLBAR_PANELS = [
    "debug_toolbar.panels.versions.VersionsPanel",
    "debug_toolbar.panels.timer.TimerPanel",
    "debug_toolbar.panels.settings.SettingsPanel",
    "debug_toolbar.panels.headers.HeadersPanel",
    "debug_toolbar.panels.request.RequestPanel",
    "debug_toolbar.panels.sql.SQLPanel",
    # Misago-specific debug panels for permissions and legacy ACLs.
    "misago.permissions.panels.MisagoUserPermissionsPanel",
    "misago.acl.panels.MisagoACLPanel",
    "debug_toolbar.panels.staticfiles.StaticFilesPanel",
    "debug_toolbar.panels.templates.TemplatesPanel",
    "debug_toolbar.panels.cache.CachePanel",
    "debug_toolbar.panels.signals.SignalsPanel",
]
# Django Rest Framework
# http://www.django-rest-framework.org/api-guide/settings/
REST_FRAMEWORK = {
    "DEFAULT_PERMISSION_CLASSES": [
        "misago.core.rest_permissions.IsAuthenticatedOrReadOnly"
    ],
    "DEFAULT_RENDERER_CLASSES": ["rest_framework.renderers.JSONRenderer"],
    "EXCEPTION_HANDLER": "misago.core.exceptionhandler.handle_api_exception",
    "UNAUTHENTICATED_USER": "misago.users.models.AnonymousUser",
    "URL_FORMAT_OVERRIDE": None,
}
# Celery - Distributed Task Queue
# http://docs.celeryproject.org/en/latest/userguide/configuration.html
# Configure Celery to use Redis as message broker.
CELERY_BROKER_URL = "redis://redis:6379/0"
# Celery workers may leak the memory, eventually depriving the instance of resources.
# This setting forces celery to stop worker, clean after it and create new one
# after worker has processed 10 tasks.
CELERY_WORKER_MAX_TASKS_PER_CHILD = 10
# Misago specific settings
# https://misago.readthedocs.io/en/latest/developers/settings.html
# On dev instance, generate only three sizes of avatars instead of default 6 sizes.
MISAGO_AVATARS_SIZES = [400, 200, 100]
# PostgreSQL text search configuration to use in searches
# Defaults to "simple", for list of installed configurations run "\dF" in "psql".
# Standard configs as of PostgreSQL 9.5 are: dutch, english, finnish, french,
# german, hungarian, italian, norwegian, portuguese, romanian, russian, simple,
# spanish, swedish and turkish
# Example on adding custom language can be found here: https://github.com/lemonskyjwt/plpstgrssearch
MISAGO_SEARCH_CONFIG = "simple"
# Path to the directory that Misago should use to prepare user data downloads.
# Should not be accessible from internet.
MISAGO_USER_DATA_DOWNLOADS_WORKING_DIR = os.path.join(BASE_DIR, "userdata")
# Path to directory containing avatar galleries
# Those galleries can be loaded by running loadavatargallery command
MISAGO_AVATAR_GALLERY = os.path.join(BASE_DIR, "avatargallery")
# Profile fields
MISAGO_PROFILE_FIELDS = [
    {
        "name": _("Personal"),
        "fields": [
            "misago.users.profilefields.default.RealNameField",
            "misago.users.profilefields.default.GenderField",
            "misago.users.profilefields.default.BioField",
            "misago.users.profilefields.default.LocationField",
        ],
    },
    {
        "name": _("Contact"),
        "fields": [
            "misago.users.profilefields.default.TwitterHandleField",
            "misago.users.profilefields.default.SkypeIdField",
            "misago.users.profilefields.default.WebsiteField",
        ],
    },
    {
        "name": _("IP address"),
        "fields": ["misago.users.profilefields.default.JoinIpField"],
    },
]
# Set dev instance to send e-mails to the mailpit
EMAIL_BACKEND = "django.core.mail.backends.smtp.EmailBackend"
EMAIL_HOST = "mailpit"
# NOTE(review): Django's EMAIL_PORT is conventionally an int; confirm the
# SMTP backend tolerates the string value before changing it.
EMAIL_PORT = "1025"
DEFAULT_FROM_EMAIL = "Misago <misago@example.com>"
# Display debug toolbar if IN_MISAGO_DOCKER environment var is set to "1"
DEBUG_TOOLBAR_CONFIG = {
    "SHOW_TOOLBAR_CALLBACK": "misago.conf.debugtoolbar.enable_debug_toolbar"
}
| 10,909
|
Python
|
.py
| 238
| 41.983193
| 117
| 0.746686
|
rafalp/Misago
| 2,519
| 524
| 136
|
GPL-2.0
|
9/5/2024, 5:12:22 PM (Europe/Amsterdam)
|
18,610
|
urls.py
|
rafalp_Misago/devproject/urls.py
|
"""devproject URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.10/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.conf.urls import url, include
2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
"""
from django.conf import settings
from django.conf.urls.static import static
from django.contrib import admin
from django.urls import include, path
from django.utils import timezone
from django.views.decorators.cache import cache_page
from django.views.decorators.http import last_modified
from django.views.i18n import JavaScriptCatalog
from misago import __released__, __version__
from misago.plugins.urlpatterns import discover_plugins_urlpatterns
from misago.users.forms.auth import AdminAuthenticationForm
# Cache key for django-i18n.js view that invalidates cache when
# Misago version, release status or language code changes
misago_i18n_cache_key = (
(f"misagojsi18n_{settings.LANGUAGE_CODE}_{__version__}_{__released__}")
.replace(".", "_")
.replace("-", "_")
.lower()
)
admin.autodiscover()
admin.site.login_form = AdminAuthenticationForm
urlpatterns = discover_plugins_urlpatterns(settings.INSTALLED_PLUGINS) + [
path("", include("misago.urls", namespace="misago")),
# Javascript translations
path(
"django-i18n.js",
last_modified(lambda req, **kw: timezone.now())(
cache_page(86400 * 21, key_prefix=misago_i18n_cache_key)(
JavaScriptCatalog.as_view(packages=["misago"])
)
),
name="django-i18n",
),
# Uncomment next line if you plan to use Django admin for 3rd party apps
path("django-admin/", admin.site.urls),
]
# If debug mode is enabled, include debug toolbar
if settings.DEBUG:
import debug_toolbar
urlpatterns += [path("__debug__/", include(debug_toolbar.urls))]
# Use static file server for static and media files (debug only)
urlpatterns += static(settings.STATIC_URL, document_root=settings.STATIC_ROOT)
urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
# Error Handlers
# Misago needs those handlers to deal with errors raised by it's middlewares
# If you replace those handlers with custom ones, make sure you decorate them
# with shared_403_exception_handler or shared_404_exception_handler
# decorators that are defined in misago.views.errorpages module!
handler403 = "misago.core.errorpages.permission_denied"
handler404 = "misago.core.errorpages.page_not_found"
| 2,893
|
Python
|
.py
| 64
| 41.640625
| 79
| 0.744401
|
rafalp/Misago
| 2,519
| 524
| 136
|
GPL-2.0
|
9/5/2024, 5:12:22 PM (Europe/Amsterdam)
|
18,611
|
test_settings.py
|
rafalp_Misago/devproject/test_settings.py
|
from .settings import *  # pylint: disable-all
# Use in-memory cache
CACHES = {"default": {"BACKEND": "django.core.cache.backends.dummy.DummyCache"}}
# Disable Celery backend
CELERY_BROKER_URL = None
# Disable Debug Toolbar
DEBUG_TOOLBAR_CONFIG = {}
INTERNAL_IPS = []
# Store mails in memory
EMAIL_BACKEND = "django.core.mail.backends.locmem.EmailBackend"
# Disable account validation via Stop Forum Spam
# NOTE(review): this replaces the default validators list wholesale, leaving
# only the Gmail e-mail validator registered — confirm that is intended.
MISAGO_NEW_REGISTRATIONS_VALIDATORS = ("misago.users.validators.validate_gmail_email",)
# Use MD5 password hashing to speed up test suite
PASSWORD_HASHERS = ("django.contrib.auth.hashers.MD5PasswordHasher",)
# Simplify password validation to ease writing test assertions
AUTH_PASSWORD_VALIDATORS = [
    {
        "NAME": "django.contrib.auth.password_validation.UserAttributeSimilarityValidator",
        "OPTIONS": {"user_attributes": ["username", "email"]},
    },
    {
        "NAME": "django.contrib.auth.password_validation.MinimumLengthValidator",
        "OPTIONS": {"min_length": 7},
    },
    {"NAME": "django.contrib.auth.password_validation.NumericPasswordValidator"},
]
# Use english search config
MISAGO_SEARCH_CONFIG = "english"
# Test assertions expect english locale
LANGUAGE_CODE = "en-us"
# Test assertions expect specific TZ
TIME_ZONE = "UTC"
# Register test post validator
MISAGO_POST_VALIDATORS = ["misago.core.testproject.validators.test_post_validator"]
# Register test post search filter
MISAGO_POST_SEARCH_FILTERS = ["misago.core.testproject.searchfilters.test_filter"]
# Default test name
# NOTE(review): "miasago" looks like a typo for "misago" — confirm nothing
# depends on this exact database/test name before renaming it.
TEST_NAME = "miasago_test"
| 1,563
|
Python
|
.py
| 38
| 38.421053
| 91
| 0.761243
|
rafalp/Misago
| 2,519
| 524
| 136
|
GPL-2.0
|
9/5/2024, 5:12:22 PM (Europe/Amsterdam)
|
18,612
|
wsgi.py
|
rafalp_Misago/devproject/wsgi.py
|
"""
WSGI config for devproject project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.11/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "devproject.settings")
application = get_wsgi_application()
| 399
|
Python
|
.py
| 10
| 38.2
| 78
| 0.806283
|
rafalp/Misago
| 2,519
| 524
| 136
|
GPL-2.0
|
9/5/2024, 5:12:22 PM (Europe/Amsterdam)
|
18,613
|
misago_plugin.py
|
rafalp_Misago/plugins/full-manifest-plugin/full_manifest_plugin/misago_plugin.py
|
from misago import MisagoPlugin
# Example plugin manifest exercising every available metadata field.
manifest = MisagoPlugin(
    name="Example plugin with complete manifest",
    description="This plugin has all fields in its manifest filled in.",
    license="GNU GPL v2",
    icon="fa fa-wrench",
    color="#9b59b6",
    version="0.1DEV",
    author="Rafał Pitoń",
    homepage="https://misago-project.org",
    sponsor="https://github.com/sponsors/rafalp",
    help="https://misago-project.org/c/support/30/",
    bugs="https://misago-project.org/c/bug-reports/29/",
    repo="https://github.com/rafalp/misago",
)
| 554
|
Python
|
.py
| 15
| 32.466667
| 72
| 0.699065
|
rafalp/Misago
| 2,519
| 524
| 136
|
GPL-2.0
|
9/5/2024, 5:12:22 PM (Europe/Amsterdam)
|
18,614
|
misago_plugin.py
|
rafalp_Misago/plugins/misago-dev-site-fixture/misago_dev_site_fixture/misago_plugin.py
|
from misago import MisagoPlugin
# Manifest for the plugin that seeds development sites with fixture data.
manifest = MisagoPlugin(
    name="Dev Site Fixture",
    description="Populates development sites with some initial data.",
    license="GNU GPL v2",
    icon="fas fa-rocket",
    color="#3b82f6",
    version="1.0",
    author="Rafał Pitoń",
    homepage="https://misago-project.org",
    sponsor="https://github.com/sponsors/rafalp",
    help="https://misago-project.org/c/support/30/",
    bugs="https://misago-project.org/c/bug-reports/29/",
    repo="https://github.com/rafalp/misago",
)
| 529
|
Python
|
.py
| 15
| 30.8
| 70
| 0.694118
|
rafalp/Misago
| 2,519
| 524
| 136
|
GPL-2.0
|
9/5/2024, 5:12:22 PM (Europe/Amsterdam)
|
18,615
|
loaddevfixture.py
|
rafalp_Misago/plugins/misago-dev-site-fixture/misago_dev_site_fixture/management/commands/loaddevfixture.py
|
from datetime import timedelta
from random import randint
from textwrap import dedent
from django.contrib.auth import get_user_model
from django.core.management.base import BaseCommand
from django.utils import timezone
from misago.cache.enums import CacheName
from misago.cache.versions import get_cache_versions, invalidate_cache
from misago.categories.models import Category
from misago.conf.dynamicsettings import DynamicSettings
from misago.conf.models import Setting
from misago.permissions.enums import CategoryPermission
from misago.permissions.models import CategoryGroupPermission
from misago.threads.checksums import update_post_checksum
from misago.threads.models import Post, Thread
from misago.users.enums import DefaultGroupId
from misago.users.models import Ban, Group, Rank
from misago.users.setupnewuser import setup_new_user
User = get_user_model()
class Command(BaseCommand):
    """Populates a development database with fixture data.

    Creates an example categories tree, extra user accounts, and demo
    threads that exercise different timestamps, post states, and
    multi-page pagination.
    """

    help = "Populates the database with test data."

    def handle(self, *args, **options):
        Setting.objects.change_setting("forum_address", "http://localhost:8000")
        invalidate_cache(CacheName.SETTINGS)

        root = Category.objects.root_category()
        Category.objects.filter(slug="first-category").update(color="#0ea5e9")

        second_category = self._create_category(
            root, "Second category", "second-category", "#a855f7"
        )
        third_category = self._create_category(
            root, "Third category", "third-category", "#84cc16"
        )
        child_category = self._create_category(
            third_category, "Child category", "child-category", "#fbbf24"
        )
        deep_category = self._create_category(
            child_category, "Deep child", "deep-child", "#f43f5e"
        )
        second_child_category = self._create_category(
            third_category, "Second child", "second-child", "#818cf8"
        )
        vanilla_category = self._create_category(
            root, "Vanilla category", "vanilla-category", "#e879f9", is_vanilla=True
        )
        # BUGFIX: this category previously reused its parent's
        # "vanilla-category" slug instead of having its own.
        vanilla_child_category = self._create_category(
            vanilla_category, "Vanilla child", "vanilla-child", "#2dd4bf"
        )

        new_categories = (
            second_category,
            third_category,
            child_category,
            second_child_category,
            deep_category,
            vanilla_category,
            vanilla_child_category,
        )

        # Grant every group every category permission for the new categories.
        for category in new_categories:
            for group in Group.objects.all():
                for permission in CategoryPermission:
                    CategoryGroupPermission.objects.create(
                        category=category,
                        group=group,
                        permission=permission,
                    )

        invalidate_cache(CacheName.CATEGORIES)
        invalidate_cache(CacheName.PERMISSIONS)

        self.stdout.write("Created new categories hierarchy.")

        cache_versions = get_cache_versions()
        settings = DynamicSettings(cache_versions)

        default_rank = Rank.objects.get_default()
        members_group = Group.objects.get(id=DefaultGroupId.MEMBERS)

        moderator = self._create_user(
            settings,
            "Moderator",
            "moderator@example.com",
            group=Group.objects.get(id=DefaultGroupId.MODERATORS),
            title="Moderator",
            rank=default_rank,
        )
        user = self._create_user(
            settings,
            "User",
            "user@example.com",
            group=members_group,
            rank=default_rank,
        )
        other_user = self._create_user(
            settings,
            "OtherUser",
            "other@example.com",
            group=members_group,
            rank=default_rank,
        )
        banned_user = self._create_user(
            settings,
            "Banned",
            "banned@example.com",
            group=members_group,
            rank=default_rank,
        )

        Ban.objects.create(check_type=Ban.USERNAME, banned_value="Banned")
        invalidate_cache(CacheName.BANS)

        self.stdout.write("Created user accounts.")

        timestamp = timezone.now()
        # First (leftmost) non-root category in the default tree.
        first_category = (
            Category.objects.filter(tree_id=root.tree_id, level__gt=root.level)
            .order_by("lft")
            .first()
        )

        # Threads whose last activity falls into different date buckets, so
        # the UI's relative-timestamp formatting can be inspected manually.
        previous_year_timestamp = timestamp.replace(
            year=timestamp.year - 1,
            month=randint(1, 10),
            day=randint(1, 28),
            hour=randint(0, 23),
            minute=randint(0, 59),
            second=randint(0, 59),
        )
        self._create_single_post_thread(
            first_category,
            "Thread with last activity from previous year",
            "thread-with-last-activity-from-previous-year",
            previous_year_timestamp,
            "This thread shows timestamps for content from another year.",
        )

        current_year_timestamp = timestamp.replace(
            month=randint(1, timestamp.month),
            day=randint(1, 28),
            hour=randint(0, 23),
            minute=randint(0, 59),
            second=randint(0, 59),
        )
        self._create_single_post_thread(
            first_category,
            "Thread with last activity from current year",
            "thread-with-last-activity-from-current-year",
            current_year_timestamp,
            "This thread shows timestamps for content from current year.",
        )

        current_week_timestamp = timestamp.replace(
            hour=randint(0, 23),
            minute=randint(0, 59),
            second=randint(0, 59),
        ) - timedelta(days=3)
        self._create_single_post_thread(
            first_category,
            "Thread with last activity from current week",
            "thread-with-last-activity-from-current-week",
            current_week_timestamp,
            "This thread shows timestamps for content from current week.",
        )

        yesterday_timestamp = timestamp.replace(
            hour=randint(0, 15),
            minute=randint(0, 59),
            second=randint(0, 59),
        ) - timedelta(days=1)
        # BUGFIX: the slug previously carried a stray "-week" suffix
        # copy-pasted from the current-week thread.
        _, yesterday_post = self._create_single_post_thread(
            first_category,
            "Thread with last activity from yesterday",
            "thread-with-last-activity-from-yesterday",
            yesterday_timestamp,
            "This thread shows timestamps for content from yesterday.",
        )

        # One thread containing a post in each possible display state.
        timestamp = timezone.now() - timedelta(minutes=randint(50, 60))
        thread_with_states = self._create_misago_thread(
            first_category, "Different post states", "different-post-states", timestamp
        )

        default_post = self._create_post(
            thread_with_states, timestamp, poster=user, poster_name=user.username
        )
        self._finalize_post(default_post, "Post in a default state.")

        timestamp = timezone.now() - timedelta(minutes=randint(40, 50))
        guest_post = self._create_post(
            thread_with_states, timestamp, poster_name="DeletedUser"
        )
        self._finalize_post(guest_post, "Post by a deleted user.")

        timestamp = timezone.now() - timedelta(minutes=randint(30, 40))
        hidden_post = self._create_post(
            thread_with_states,
            timestamp,
            poster=other_user,
            poster_name=other_user.username,
            is_hidden=True,
        )
        self._finalize_post(hidden_post, "Hidden post.")

        timestamp = timezone.now() - timedelta(minutes=randint(20, 30))
        # BUGFIX: was is_hidden=True (copy-pasted from the hidden post);
        # an unapproved post should carry the is_unapproved flag.
        unapproved_post = self._create_post(
            thread_with_states,
            timestamp,
            poster=user,
            poster_name=user.username,
            is_unapproved=True,
        )
        self._finalize_post(unapproved_post, "Unapproved post.")

        timestamp = timezone.now() - timedelta(minutes=randint(10, 20))
        locked_post = self._create_post(
            thread_with_states,
            timestamp,
            poster=banned_user,
            poster_name=banned_user.username,
            is_protected=True,
        )
        self._finalize_post(locked_post, "Locked post by a banned user.")

        # Edited post has distinct posted/updated times and edit metadata,
        # so it doesn't fit the shared helper's uniform-timestamp shape.
        edited_post = Post.objects.create(
            category=first_category,
            thread=thread_with_states,
            poster=user,
            poster_name=user.username,
            posted_on=timezone.now() - timedelta(minutes=randint(5, 10)),
            updated_on=timezone.now() - timedelta(minutes=randint(1, 5)),
            edits=42,
            last_editor=moderator,
            last_editor_name=moderator.username,
            last_editor_slug=moderator.slug,
        )
        self._finalize_post(edited_post, "Edited post.")

        timestamp = timezone.now() - timedelta(minutes=randint(1, 5))
        moderator_post = self._create_post(
            thread_with_states,
            timestamp,
            poster=moderator,
            poster_name=moderator.username,
        )
        self._finalize_post(moderator_post, "Post by a moderator.")

        thread_with_states.synchronize()
        thread_with_states.save()

        # Filler threads sized around the pagination settings; each call
        # returns the advanced timestamp so the threads follow one another.
        timestamp = timezone.now() - timedelta(minutes=50)
        timestamp = self._create_filler_thread(
            first_category,
            "Thread with max-length single page",
            "thread-with-max-length-single-page",
            timestamp,
            settings.posts_per_page + settings.posts_per_page_orphans,
        )
        timestamp = self._create_filler_thread(
            first_category,
            "Thread with two pages",
            "thread-with-two-pages",
            timestamp,
            settings.posts_per_page + settings.posts_per_page_orphans + 1,
        )
        self._create_filler_thread(
            first_category,
            "Thread with three pages",
            "thread-with-three-pages",
            timestamp,
            (settings.posts_per_page * 2) + settings.posts_per_page_orphans + 1,
        )

        # Re-date the filler posts backwards from "now" so the newest posts
        # appear freshly made, preserving any posted/updated offset.
        timestamp = timezone.now()
        posts_to_update = Post.objects.filter(id__gt=yesterday_post.id).order_by("-id")
        for post in posts_to_update.iterator():
            if post.updated_on == post.posted_on:
                post.posted_on = timestamp
                post.updated_on = post.posted_on
            else:
                post.posted_on = timestamp
                post.updated_on = timestamp + timedelta(minutes=randint(1, 10))
            post.save()
            timestamp -= timedelta(minutes=randint(1, 10))

        for thread in Thread.objects.iterator():
            thread.synchronize()
            thread.save()

        # Pinned-style README thread describing the fixture's contents.
        timestamp = timezone.now()
        readme_thread = self._create_misago_thread(
            first_category,
            "Welcome to the Misago Dev Fixture! Read me first!",
            "welcome-to-the-misago-dev-fixture-read-me-first",
            timestamp,
        )
        readme_post = self._create_post(readme_thread, timestamp)
        readme_thread.first_post = readme_thread.last_post = readme_post
        readme_thread.save()
        self._finalize_post(
            readme_post,
            dedent(
                """
                This Misago site was pre-populated with some initial data to make starting development easier:
                - Example categories hierarchy
                - Threads with activity from different dates
                - Moderator account
                - Two regular user accounts
                - Banned user account
                ## Extra user accounts
                In addition to the default "Admin" account, the following accounts are available:
                ### Moderator
                Global moderator.
                - Username: `Moderator`
                - Email: `moderator@example.com`
                - Password: `password`
                ### User
                Regular user.
                - Username: `User`
                - Email: `user@example.com`
                - Password: `password`
                ### Other user
                Another regular user.
                - Username: `OtherUser`
                - Email: `other@example.com`
                - Password: `password`
                ### Banned user
                Permanently banned user.
                - Username: `Banned`
                - Email: `banned@example.com`
                - Password: `password`
                """
            ).strip(),
            parsed=(
                "<p>This Misago site was pre-populated with some initial data to make starting development easier:</p>"
                "<ul>"
                "<li>Example categories hierarchy</li>"
                "<li>Threads with activity from different dates</li>"
                "<li>Moderator account</li>"
                "<li>Two regular user accounts</li>"
                "<li>Banned user account</li>"
                "</ul>"
                "<h2>Extra user accounts</h2>"
                '<p>In addition to the default "Admin" account, the following accounts are available:</p>'
                "<h3>Moderator</h3>"
                "<p>Global moderator.</p>"
                "<ul>"
                "<li>Username: <code>Moderator</code></li>"
                "<li>Email: <code>moderator@example.com</code></li>"
                "<li>Password: <code>password</code></li>"
                "</ul>"
                "<h3>User</h3>"
                "<p>Regular user.</p>"
                "<ul>"
                "<li>Username: <code>User</code></li>"
                "<li>Email: <code>user@example.com</code></li>"
                "<li>Password: <code>password</code></li>"
                "</ul>"
                "<h3>Other user</h3>"
                "<p>Another regular user.</p>"
                "<ul>"
                "<li>Username: <code>OtherUser</code></li>"
                "<li>Email: <code>other@example.com</code></li>"
                "<li>Password: <code>password</code></li>"
                "</ul>"
                "<h3>Banned user</h3>"
                "<p>Permanently banned user.</p>"
                "<ul>"
                "<li>Username: <code>Banned</code></li>"
                "<li>Email: <code>banned@example.com</code></li>"
                "<li>Password: <code>password</code></li>"
                "</ul>"
            ).strip(),
            search_document=(
                "This Misago site was pre-populated with some initial data to make starting development easier: "
                "- Example categories hierarchy "
                "- Threads with activity from different dates "
                "- Moderator account "
                "- Two regular user accounts "
                "- Banned user account "
                "## Extra user accounts "
                'In addition to the default "Admin" account, the following accounts are available: '
                "### Moderator "
                "Global moderator. "
                "- Username: `Moderator` "
                "- Email: `moderator@example.com` "
                "- Password: `password` "
                "### User "
                "Regular user. "
                "- Username: `User` "
                "- Email: `user@example.com` "
                "- Password: `password` "
                "### Other user "
                "Another regular user. "
                "- Username: `OtherUser` "
                "- Email: `other@example.com` "
                "- Password: `password` "
                "### Banned user "
                "Permanently banned user. "
                "- Username: `Banned` "
                "- Email: `banned@example.com` "
                "- Password: `password`"
            ).strip(),
        )

        first_category.synchronize()
        first_category.save()

        self.stdout.write("Created demo threads.")

        # Recount per-user thread/post counters after bulk inserts.
        for user in User.objects.order_by("id").iterator():
            user.threads = user.thread_set.count()
            user.posts = user.post_set.count()
            user.save()

        self.stdout.write("Synchronized user accounts.")
        self.stdout.write(
            self.style.SUCCESS("Database has been populated with development data.")
        )

    def _create_category(self, parent, name, slug, color, **extra):
        """Create a category as the last child of parent in the MPTT tree."""
        category = Category(name=name, slug=slug, color=color, **extra)
        category.insert_at(parent, position="last-child", save=True)
        return category

    def _create_user(self, settings, username, email, **extra):
        """Create an activated user with the shared dev password."""
        new_user = User.objects.create_user(username, email, "password", **extra)
        new_user.update_acl_key()
        setup_new_user(settings, new_user)
        return new_user

    def _create_misago_thread(self, category, title, slug, timestamp):
        """Create a thread attributed to the deleted "Misago" virtual user."""
        return Thread.objects.create(
            category=category,
            title=title,
            slug=slug,
            started_on=timestamp,
            last_post_on=timestamp,
            starter=None,
            starter_name="Misago",
            starter_slug="misago",
            last_poster=None,
            last_poster_name="Misago",
            last_poster_slug="misago",
        )

    def _create_post(
        self, thread, timestamp, poster=None, poster_name="Misago", **flags
    ):
        """Create a post in thread with identical posted/updated timestamps."""
        return Post.objects.create(
            category=thread.category,
            thread=thread,
            poster=poster,
            poster_name=poster_name,
            posted_on=timestamp,
            updated_on=timestamp,
            **flags,
        )

    def _finalize_post(self, post, original, parsed=None, search_document=None):
        """Set the post's contents, checksum, and search vector, then save."""
        post.original = original
        # Simple one-paragraph posts derive parsed HTML from the source text.
        post.parsed = parsed if parsed is not None else f"<p>{original}</p>"
        post.search_document = (
            search_document if search_document is not None else original
        )
        update_post_checksum(post)
        post.update_search_vector()
        post.save()

    def _create_single_post_thread(self, category, title, slug, timestamp, message):
        """Create a thread containing a single "Misago" post with message."""
        thread = self._create_misago_thread(category, title, slug, timestamp)
        post = self._create_post(thread, timestamp)
        thread.first_post = thread.last_post = post
        thread.save()
        self._finalize_post(post, message)
        return thread, post

    def _create_filler_thread(self, category, title, slug, timestamp, length):
        """Create a thread with length numbered posts; return last timestamp."""
        thread = self._create_misago_thread(category, title, slug, timestamp)
        for i in range(length):
            timestamp += timedelta(minutes=randint(0, 3))
            post = self._create_post(thread, timestamp, poster_name="Poster")
            self._finalize_post(post, f"Post no. {i + 1}")
        thread.synchronize()
        thread.save()
        return timestamp
| 27,408
|
Python
|
.py
| 685
| 28.363504
| 115
| 0.574734
|
rafalp/Misago
| 2,519
| 524
| 136
|
GPL-2.0
|
9/5/2024, 5:12:22 PM (Europe/Amsterdam)
|
18,616
|
attachment.js
|
rafalp_Misago/frontend/src/components/posts-list/post/attachments/attachment.js
|
import React from "react"
import misago from "misago"
import escapeHtml from "misago/utils/escape-html"
import formatFilesize from "misago/utils/file-size"
const DATE_ABBR = '<abbr title="%(absolute)s">%(relative)s</abbr>'
const USER_SPAN = '<span class="item-title">%(user)s</span>'
const USER_URL = '<a href="%(url)s" class="item-title">%(user)s</a>'
export default function (props) {
return (
<div className="col-xs-12 col-md-6">
<AttachmentPreview {...props} />
<div className="post-attachment">
<a
href={props.attachment.url.index}
className="attachment-name item-title"
target="_blank"
>
{props.attachment.filename}
</a>
<AttachmentDetails {...props} />
</div>
</div>
)
}
export function AttachmentPreview(props) {
if (props.attachment.is_image) {
return (
<div className="post-attachment-preview">
<AttachmentThumbnail {...props} />
</div>
)
} else {
return (
<div className="post-attachment-preview">
<AttachmentIcon {...props} />
</div>
)
}
}
export function AttachmentIcon(props) {
return (
<a href={props.attachment.url.index} className="material-icon">
insert_drive_file
</a>
)
}
export function AttachmentThumbnail(props) {
const url = props.attachment.url.thumb || props.attachment.url.index
return (
<a
className="post-thumbnail"
href={props.attachment.url.index}
target="_blank"
style={{ backgroundImage: 'url("' + escapeHtml(url) + '")' }}
/>
)
}
export function AttachmentDetails(props) {
let user = null
if (props.attachment.url.uploader) {
user = interpolate(
USER_URL,
{
url: escapeHtml(props.attachment.url.uploader),
user: escapeHtml(props.attachment.uploader_name),
},
true
)
} else {
user = interpolate(
USER_SPAN,
{
user: escapeHtml(props.attachment.uploader_name),
},
true
)
}
const date = interpolate(
DATE_ABBR,
{
absolute: escapeHtml(props.attachment.uploaded_on.format("LLL")),
relative: escapeHtml(props.attachment.uploaded_on.fromNow()),
},
true
)
const message = interpolate(
escapeHtml(
pgettext(
"post attachment",
"%(filetype)s, %(size)s, uploaded by %(uploader)s %(uploaded_on)s."
)
),
{
filetype: props.attachment.filetype,
size: formatFilesize(props.attachment.size),
uploader: user,
uploaded_on: date,
},
true
)
return (
<p
className="post-attachment-description"
dangerouslySetInnerHTML={{ __html: message }}
/>
)
}
| 2,715
|
Python
|
.tac
| 107
| 20.102804
| 75
| 0.626395
|
rafalp/Misago
| 2,519
| 524
| 136
|
GPL-2.0
|
9/5/2024, 5:12:22 PM (Europe/Amsterdam)
|
18,617
|
MarkupEditorAttachment.jsx
|
rafalp_Misago/frontend/src/components/MarkupEditor/MarkupEditorAttachment.jsx
|
import React from "react"
import modal from "../../services/modal"
import snackbar from "../../services/snackbar"
import formatFilesize from "../../utils/file-size"
import MarkupAttachmentModal from "./MarkupAttachmentModal"
import { getSelection, replaceSelection } from "./operations"
const MarkupEditorAttachment = ({
attachment,
disabled,
element,
setState,
update,
}) => (
<div className="markup-editor-attachments-item">
<div className="markup-editor-attachment">
<div className="markup-editor-attachment-details">
{attachment.id ? (
<a
className="item-title"
href={attachment.url.index + "?shva=1"}
target="_blank"
onClick={(event) => {
event.preventDefault()
modal.show(<MarkupAttachmentModal attachment={attachment} />)
}}
>
{attachment.filename}
</a>
) : (
<strong className="item-title">{attachment.filename}</strong>
)}
<div className="text-muted">
<ul className="list-unstyled list-inline">
{!attachment.id && <li>{attachment.progress + "%"}</li>}
{!!attachment.filetype && <li>{attachment.filetype}</li>}
{attachment.size > 0 && <li>{formatFilesize(attachment.size)}</li>}
</ul>
</div>
</div>
{!!attachment.id && (
<div className="markup-editor-attachment-buttons">
<button
className="btn btn-markup-editor-attachment btn-icon"
title={pgettext("markup editor", "Insert into message")}
type="button"
disabled={disabled}
onClick={() => {
const markup = getAttachmentMarkup(attachment)
const selection = getSelection(element)
replaceSelection(selection, update, markup)
}}
>
<span className="material-icon">flip_to_front</span>
</button>
<button
className="btn btn-markup-editor-attachment btn-icon"
title={pgettext("markup editor", "Remove attachment")}
type="button"
disabled={disabled}
onClick={() => {
setState(({ attachments }) => {
const confirm = window.confirm(
pgettext("markup editor", "Remove this attachment?")
)
if (confirm) {
return {
attachments: attachments.filter(
({ id }) => id !== attachment.id
),
}
}
})
}}
>
<span className="material-icon">close</span>
</button>
</div>
)}
{!attachment.id && !!attachment.key && (
<div className="markup-editor-attachment-buttons">
{attachment.error && (
<button
className="btn btn-markup-editor-attachment btn-icon"
title={pgettext("markup editor", "See error")}
type="button"
onClick={() => {
snackbar.error(
interpolate(
pgettext("markup editor", "%(filename)s: %(error)s"),
{ filename: attachment.filename, error: attachment.error },
true
)
)
}}
>
<span className="material-icon">warning</span>
</button>
)}
<button
className="btn btn-markup-editor-attachment btn-icon"
title={pgettext("markup editor", "Remove attachment")}
type="button"
disabled={disabled}
onClick={() => {
setState(({ attachments }) => {
return {
attachments: attachments.filter(
({ key }) => key !== attachment.key
),
}
})
}}
>
<span className="material-icon">close</span>
</button>
</div>
)}
</div>
</div>
)
export default MarkupEditorAttachment
function getAttachmentMarkup(attachment) {
let markup = "["
if (attachment.is_image) {
markup += "![" + attachment.filename + "]"
markup += "(" + (attachment.url.thumb || attachment.url.index) + "?shva=1)"
} else {
markup += attachment.filename
}
markup += "](" + attachment.url.index + "?shva=1)"
return markup
}
| 4,534
|
Python
|
.tac
| 132
| 23.257576
| 79
| 0.514104
|
rafalp/Misago
| 2,519
| 524
| 136
|
GPL-2.0
|
9/5/2024, 5:12:22 PM (Europe/Amsterdam)
|
18,618
|
MarkupEditorAttachments.jsx
|
rafalp_Misago/frontend/src/components/MarkupEditor/MarkupEditorAttachments.jsx
|
import React from "react"
import MarkupEditorAttachment from "./MarkupEditorAttachment"
const MarkupEditorAttachments = ({
attachments,
disabled,
element,
setState,
update,
}) => (
<div className="markup-editor-attachments">
<div className="markup-editor-attachments-container">
{attachments.map((attachment) => (
<MarkupEditorAttachment
key={attachment.key || attachment.id}
attachment={attachment}
disabled={disabled}
element={element}
setState={setState}
update={update}
/>
))}
</div>
</div>
)
export default MarkupEditorAttachments
| 648
|
Python
|
.tac
| 25
| 20.44
| 61
| 0.668277
|
rafalp/Misago
| 2,519
| 524
| 136
|
GPL-2.0
|
9/5/2024, 5:12:22 PM (Europe/Amsterdam)
|
18,619
|
MarkupAttachmentModal.jsx
|
rafalp_Misago/frontend/src/components/MarkupEditor/MarkupAttachmentModal.jsx
|
import React from "react"
import formatFilesize from "../../utils/file-size"
export default function MarkupAttachmentModal({ attachment }) {
return (
<div className="modal-dialog modal-lg" role="document">
<div className="modal-content">
<div className="modal-header">
<button
aria-label={pgettext("modal", "Close")}
className="close"
data-dismiss="modal"
type="button"
>
<span aria-hidden="true">×</span>
</button>
<h4 className="modal-title">
{pgettext("markup editor", "Attachment details")}
</h4>
</div>
<div className="modal-body">
{!!attachment.is_image && (
<div className="markup-editor-attachment-modal-preview">
<a href={attachment.url.index + "?shva=1"} target="_blank">
<img src={attachment.url.index + "?shva=1"} alt="" />
</a>
</div>
)}
<div className="markup-editor-attachment-modal-filename">
{attachment.filename}
</div>
<div className="row markup-editor-attachment-modal-details">
<div className="col-xs-12 col-md-3">
<strong>
{attachment.filetype + ", " + formatFilesize(attachment.size)}
</strong>
<div className="text-muted">
<small>{pgettext("markup editor", "Type and size")}</small>
</div>
</div>
<div className="col-xs-12 col-md-4">
<strong>
<abbr title={attachment.uploaded_on.format("LLL")}>
{attachment.uploaded_on.fromNow()}
</abbr>
</strong>
<div className="text-muted">
<small>{pgettext("markup editor", "Uploaded at")}</small>
</div>
</div>
<div className="col-xs-12 col-md-3">
{attachment.url.uploader ? (
<a
href={attachment.url.uploader}
target="_blank"
className="item-title"
>
{attachment.uploader_name}
</a>
) : (
<span className="item-title">{attachment.uploader_name}</span>
)}
<div className="text-muted">
<small>{pgettext("markup editor", "Uploader")}</small>
</div>
</div>
</div>
</div>
<div className="modal-footer">
<button
className="btn btn-default"
data-dismiss="modal"
type="button"
>
{pgettext("modal", "Close")}
</button>
</div>
</div>
</div>
)
}
| 2,821
|
Python
|
.tac
| 80
| 22.8
| 78
| 0.483942
|
rafalp/Misago
| 2,519
| 524
| 136
|
GPL-2.0
|
9/5/2024, 5:12:22 PM (Europe/Amsterdam)
|
18,620
|
attachments.js
|
rafalp_Misago/frontend/src/components/posting/utils/attachments.js
|
import moment from "moment"
export function clean(attachments) {
return attachments
.filter((attachment) => {
return attachment.id && !attachment.isRemoved
})
.map((a) => {
return a.id
})
}
export function hydrate(attachments) {
return attachments.map((attachment) => {
return Object.assign({}, attachment, {
uploaded_on: moment(attachment.uploaded_on),
})
})
}
| 412
|
Python
|
.tac
| 17
| 20.294118
| 51
| 0.664122
|
rafalp/Misago
| 2,519
| 524
| 136
|
GPL-2.0
|
9/5/2024, 5:12:22 PM (Europe/Amsterdam)
|
18,621
|
attachments.html
|
rafalp_Misago/misago/templates/misago/thread_old/posts/post/attachments.html
|
{% load i18n misago_batch misago_capture %}
<div class="post-attachments">
{% for row in post.attachments|batch:2 %}
<div class="row">
{% for attachment in row %}
<div class="col-xs-12 col-md-6">
<div class="post-attachment-preview">
{% if attachment.is_image %}
{% if attachment.url.thumb %}
<a href="{{ attachment.url.index }}" target="_blank" class="post-thumbnail" style='background-image: url("{{ attachment.url.thumb }}")'></a>
{% else %}
<a href="{{ attachment.url.index }}" target="_blank" class="post-thumbnail" style='background-image: url("{{ attachment.url.index }}")'></a>
{% endif %}
{% else %}
<a href="{{ attachment.url.index }}" target="_blank" class="material-icon">
insert_drive_file
</a>
{% endif %}
</div>
<div class="post-attachment">
<a href="{{ attachment.url.index }}" target="_blank" class="attachment-name item-title">{{ attachment.filename }}</a>
<p class="post-attachment-description">
{% capture trimmed as uploader %}
{% if attachment.url.uploader %}
<a href="{{ attachment.url.uploader }}" class="item-title">{{ attachment.uploader_name }}</a>
{% else %}
<span class="item-title">{{ attachment.uploader_name }}</span>
{% endif %}
{% endcapture %}
{% capture trimmed as uploaded_on %}
<abbr title="{{ attachment.uploaded_on }}">{{ attachment.uploaded_on|date }}</abbr>
{% endcapture %}
{% blocktrans trimmed with filetype=attachment.filetype size=attachment.size|filesizeformat uploader=uploader|safe uploaded_on=uploaded_on|safe context "post attachment" %}
{{ filetype }}, {{ size }}, uploaded by {{ uploader }} on {{ uploaded_on }}.
{% endblocktrans %}
</p>
</div>
</div>
{% endfor %}
</div>
{% endfor %}
</div>
| 2,115
|
Python
|
.tac
| 42
| 38.190476
| 186
| 0.53134
|
rafalp/Misago
| 2,519
| 524
| 136
|
GPL-2.0
|
9/5/2024, 5:12:22 PM (Europe/Amsterdam)
|
18,622
|
attachment.py
|
rafalp_Misago/misago/threads/models/attachment.py
|
import os
from hashlib import md5
from io import BytesIO
from django.core.files import File
from django.core.files.base import ContentFile
from django.db import models
from django.urls import reverse
from django.utils import timezone
from django.utils.crypto import get_random_string
from PIL import Image
from ...conf import settings
from ...core.utils import slugify
from ...plugins.models import PluginDataModel
def upload_to(instance, filename):
# pylint: disable=undefined-loop-variable
spread_path = md5(str(instance.secret[:16]).encode()).hexdigest()
secret = Attachment.generate_new_secret()
filename_lowered = filename.lower().strip()
for extension in instance.filetype.extensions_list:
if filename_lowered.endswith(extension):
break
filename_clean = ".".join(
(slugify(filename[: (len(extension) + 1) * -1])[:16], extension)
)
return os.path.join(
"attachments", spread_path[:2], spread_path[2:4], secret, filename_clean
)
class Attachment(PluginDataModel):
secret = models.CharField(max_length=64)
filetype = models.ForeignKey("AttachmentType", on_delete=models.CASCADE)
post = models.ForeignKey("Post", blank=True, null=True, on_delete=models.SET_NULL)
uploaded_on = models.DateTimeField(default=timezone.now, db_index=True)
uploader = models.ForeignKey(
settings.AUTH_USER_MODEL, blank=True, null=True, on_delete=models.SET_NULL
)
uploader_name = models.CharField(max_length=255)
uploader_slug = models.CharField(max_length=255, db_index=True)
filename = models.CharField(max_length=255, db_index=True)
size = models.PositiveIntegerField(default=0, db_index=True)
thumbnail = models.ImageField(
max_length=255, blank=True, null=True, upload_to=upload_to
)
image = models.ImageField(
max_length=255, blank=True, null=True, upload_to=upload_to
)
file = models.FileField(max_length=255, blank=True, null=True, upload_to=upload_to)
def __str__(self):
return self.filename
def delete(self, *args, **kwargs):
self.delete_files()
return super().delete(*args, **kwargs)
def delete_files(self):
if self.thumbnail:
self.thumbnail.delete(save=False)
if self.image:
self.image.delete(save=False)
if self.file:
self.file.delete(save=False)
@classmethod
def generate_new_secret(cls):
return get_random_string(settings.MISAGO_ATTACHMENT_SECRET_LENGTH)
@property
def is_image(self):
return bool(self.image)
@property
def is_file(self):
return not self.is_image
def get_absolute_url(self):
return reverse(
"misago:attachment", kwargs={"pk": self.pk, "secret": self.secret}
)
def get_thumbnail_url(self):
if self.thumbnail:
return reverse(
"misago:attachment-thumbnail",
kwargs={"pk": self.pk, "secret": self.secret},
)
def set_file(self, upload):
self.file = File(upload, upload.name)
def set_image(self, upload):
fileformat = self.filetype.extensions_list[0]
self.image = File(upload, upload.name)
thumbnail = Image.open(upload)
downscale_image = (
thumbnail.size[0] > settings.MISAGO_ATTACHMENT_IMAGE_SIZE_LIMIT[0]
or thumbnail.size[1] > settings.MISAGO_ATTACHMENT_IMAGE_SIZE_LIMIT[1]
)
strip_animation = fileformat == "gif"
thumb_stream = BytesIO()
if downscale_image:
thumbnail.thumbnail(settings.MISAGO_ATTACHMENT_IMAGE_SIZE_LIMIT)
if fileformat == "jpg":
# normalize jpg to jpeg for Pillow
thumbnail.save(thumb_stream, "jpeg")
else:
thumbnail.save(thumb_stream, "png")
elif strip_animation:
thumbnail.save(thumb_stream, "png")
if downscale_image or strip_animation:
self.thumbnail = ContentFile(thumb_stream.getvalue(), upload.name)
| 4,090
|
Python
|
.tac
| 100
| 33.35
| 87
| 0.666667
|
rafalp/Misago
| 2,519
| 524
| 136
|
GPL-2.0
|
9/5/2024, 5:12:22 PM (Europe/Amsterdam)
|
18,623
|
attachmenttype.py
|
rafalp_Misago/misago/threads/models/attachmenttype.py
|
from django.db import models
from django.utils.translation import pgettext_lazy
from ...plugins.models import PluginDataModel
class AttachmentType(PluginDataModel):
ENABLED = 0
LOCKED = 1
DISABLED = 2
name = models.CharField(max_length=255)
extensions = models.CharField(max_length=255)
mimetypes = models.CharField(null=True, blank=True, max_length=255)
size_limit = models.PositiveIntegerField(default=1024)
status = models.PositiveIntegerField(
default=ENABLED,
choices=[
(
ENABLED,
pgettext_lazy(
"attachment availability choice",
"Allow uploads and downloads",
),
),
(
LOCKED,
pgettext_lazy(
"attachment availability choice",
"Allow downloads only",
),
),
(
DISABLED,
pgettext_lazy(
"attachment availability choice",
"Disallow both uploading and downloading",
),
),
],
)
limit_uploads_to = models.ManyToManyField(
"misago_acl.Role", related_name="+", blank=True
)
limit_downloads_to = models.ManyToManyField(
"misago_acl.Role", related_name="+", blank=True
)
def __str__(self):
return self.name
@property
def is_enabled(self):
return self.status == AttachmentType.ENABLED
@property
def is_locked(self):
return self.status == AttachmentType.LOCKED
@property
def extensions_list(self):
if self.extensions:
return self.extensions.split(",")
return []
@property
def mimetypes_list(self):
if self.mimetypes:
return self.mimetypes.split(",")
return []
| 1,905
|
Python
|
.tac
| 61
| 21.147541
| 71
| 0.56325
|
rafalp/Misago
| 2,519
| 524
| 136
|
GPL-2.0
|
9/5/2024, 5:12:22 PM (Europe/Amsterdam)
|
18,624
|
0003_attachment_types.py
|
rafalp_Misago/misago/threads/migrations/0003_attachment_types.py
|
# Generated by Django 1.9.7 on 2016-10-04 21:41
from django.db import migrations
ATTACHMENTS = [
{
"name": "GIF",
"extensions": ("gif",),
"mimetypes": ("image/gif",),
"size_limit": 5 * 1024,
},
{
"name": "JPG",
"extensions": ("jpg", "jpeg"),
"mimetypes": ("image/jpeg",),
"size_limit": 3 * 1024,
},
{
"name": "PNG",
"extensions": ("png",),
"mimetypes": ("image/png",),
"size_limit": 3 * 1024,
},
{
"name": "WEBP",
"extensions": ("webp",),
"mimetypes": ("image/webp",),
"size_limit": 3 * 1024,
},
{
"name": "PDF",
"extensions": ("pdf",),
"mimetypes": (
"application/pdf",
"application/x-pdf",
"application/x-bzpdf",
"application/x-gzpdf",
),
"size_limit": 4 * 1024,
},
{
"name": "Text",
"extensions": ("txt",),
"mimetypes": ("text/plain",),
"size_limit": 4 * 1024,
},
{
"name": "Markdown",
"extensions": ("md",),
"mimetypes": ("text/markdown",),
"size_limit": 4 * 1024,
},
{
"name": "reStructuredText",
"extensions": ("rst",),
"mimetypes": ("text/x-rst",),
"size_limit": 4 * 1024,
},
{
"name": "7Z",
"extensions": ("7z",),
"mimetypes": ("application/x-7z-compressed",),
"size_limit": 4 * 1024,
},
{
"name": "RAR",
"extensions": ("rar",),
"mimetypes": ("application/vnd.rar",),
"size_limit": 4 * 1024,
},
{
"name": "TAR",
"extensions": ("tar",),
"mimetypes": ("application/x-tar",),
"size_limit": 4 * 1024,
},
{
"name": "GZ",
"extensions": ("gz",),
"mimetypes": ("application/gzip",),
"size_limit": 4 * 1024,
},
{
"name": "ZIP",
"extensions": ("zip", "zipx"),
"mimetypes": ("application/zip",),
"size_limit": 4 * 1024,
},
]
def create_attachment_types(apps, schema_editor):
AttachmentType = apps.get_model("misago_threads", "AttachmentType")
for attachment in ATTACHMENTS:
kwargs = attachment
kwargs["extensions"] = ",".join(kwargs["extensions"])
kwargs["mimetypes"] = ",".join(kwargs["mimetypes"])
AttachmentType.objects.create(**kwargs)
class Migration(migrations.Migration):
dependencies = [("misago_threads", "0002_threads_settings")]
operations = [migrations.RunPython(create_attachment_types)]
| 2,628
|
Python
|
.tac
| 97
| 19.597938
| 71
| 0.487921
|
rafalp/Misago
| 2,519
| 524
| 136
|
GPL-2.0
|
9/5/2024, 5:12:22 PM (Europe/Amsterdam)
|
18,625
|
attachment.py
|
rafalp_Misago/misago/threads/views/attachment.py
|
from django.templatetags.static import static
from django.core.exceptions import PermissionDenied
from django.http import Http404
from django.shortcuts import get_object_or_404, redirect
from ...conf import settings
from ..models import Attachment, AttachmentType
def attachment_server(request, pk, secret, thumbnail=False):
try:
url = serve_file(request, pk, secret, thumbnail)
return redirect(url)
except PermissionDenied:
error_image = request.settings.attachment_403_image
if not error_image:
error_image = static(settings.MISAGO_ATTACHMENT_403_IMAGE)
return redirect(error_image)
except Http404:
error_image = request.settings.attachment_404_image
if not error_image:
error_image = static(settings.MISAGO_ATTACHMENT_404_IMAGE)
return redirect(error_image)
def serve_file(request, pk, secret, thumbnail):
queryset = Attachment.objects.select_related("filetype")
attachment = get_object_or_404(queryset, pk=pk, secret=secret)
if not attachment.post_id and request.GET.get("shva") != "1":
# if attachment is orphaned, don't run acl test unless explicitly told so
# this saves user suprise of deleted attachment still showing in posts/quotes
raise Http404()
if not request.user.is_misago_admin:
allow_file_download(request, attachment)
if attachment.is_image:
if thumbnail and attachment.thumbnail:
return attachment.thumbnail.url
return attachment.image.url
if thumbnail:
raise Http404()
return attachment.file.url
def allow_file_download(request, attachment):
is_authenticated = request.user.is_authenticated
if not is_authenticated or request.user.id != attachment.uploader_id:
if not attachment.post_id:
raise Http404()
if not request.user_acl["can_download_other_users_attachments"]:
raise PermissionDenied()
allowed_roles = set(r.pk for r in attachment.filetype.limit_downloads_to.all())
if allowed_roles:
user_roles = set(r.pk for r in request.user.get_roles())
if not user_roles & allowed_roles:
raise PermissionDenied()
if attachment.filetype.status == AttachmentType.DISABLED:
raise PermissionDenied()
| 2,319
|
Python
|
.tac
| 50
| 39.26
| 85
| 0.713525
|
rafalp/Misago
| 2,519
| 524
| 136
|
GPL-2.0
|
9/5/2024, 5:12:22 PM (Europe/Amsterdam)
|
18,626
|
attachments.py
|
rafalp_Misago/misago/threads/api/attachments.py
|
from django.core.exceptions import PermissionDenied, ValidationError
from django.template.defaultfilters import filesizeformat
from django.utils.translation import pgettext
from rest_framework import viewsets
from rest_framework.response import Response
from ...acl.objectacl import add_acl_to_obj
from ...users.audittrail import create_audit_trail
from ..models import Attachment, AttachmentType
from ..serializers import AttachmentSerializer
IMAGE_EXTENSIONS = ("jpg", "jpeg", "png", "gif", "webp")
class AttachmentViewSet(viewsets.ViewSet):
def create(self, request):
if not request.user_acl["max_attachment_size"]:
raise PermissionDenied(
pgettext(
"attachments api", "You don't have permission to upload new files."
)
)
try:
return self.create_attachment(request)
except ValidationError as e:
return Response({"detail": e.args[0]}, status=400)
def create_attachment(self, request):
upload = request.FILES.get("upload")
if not upload:
raise ValidationError(
pgettext("attachments api", "No file has been uploaded.")
)
user_roles = set(r.pk for r in request.user.get_roles())
filetype = validate_filetype(upload, user_roles)
validate_filesize(upload, request.user_acl["max_attachment_size"])
attachment = Attachment(
secret=Attachment.generate_new_secret(),
filetype=filetype,
size=upload.size,
uploader=request.user,
uploader_name=request.user.username,
uploader_slug=request.user.slug,
filename=upload.name,
)
if is_upload_image(upload):
try:
attachment.set_image(upload)
except IOError:
raise ValidationError(
pgettext(
"attachments api", "Uploaded image is unsupported or invalid."
)
)
else:
attachment.set_file(upload)
attachment.save()
add_acl_to_obj(request.user_acl, attachment)
create_audit_trail(request, attachment)
return Response(
AttachmentSerializer(attachment, context={"user": request.user}).data
)
def validate_filetype(upload, user_roles):
filename = upload.name.strip().lower()
queryset = AttachmentType.objects.filter(status=AttachmentType.ENABLED)
for filetype in queryset.prefetch_related("limit_uploads_to"):
for extension in filetype.extensions_list:
if filename.endswith(".%s" % extension):
break
else:
continue
if (
filetype.mimetypes_list
and upload.content_type not in filetype.mimetypes_list
):
continue
if filetype.limit_uploads_to.exists():
allowed_roles = set(r.pk for r in filetype.limit_uploads_to.all())
if not user_roles & allowed_roles:
continue
return filetype
raise ValidationError(
pgettext("attachments api", "You can't upload files of this type.")
)
def validate_filesize(upload, upload_limit):
if upload.size > upload_limit * 1024:
message = pgettext(
"attachments api",
"You can't upload files larger than %(limit)s (your file has %(upload)s).",
)
raise ValidationError(
message
% {
"upload": filesizeformat(upload.size).rstrip(".0"),
"limit": filesizeformat(upload_limit * 1024).rstrip(".0"),
}
)
def is_upload_image(upload):
filename = upload.name.strip().lower()
for extension in IMAGE_EXTENSIONS:
if filename.endswith(".%s" % extension):
return True
return False
| 3,919
|
Python
|
.tac
| 98
| 29.765306
| 87
| 0.616803
|
rafalp/Misago
| 2,519
| 524
| 136
|
GPL-2.0
|
9/5/2024, 5:12:22 PM (Europe/Amsterdam)
|
18,627
|
attachments.py
|
rafalp_Misago/misago/threads/api/postingendpoint/attachments.py
|
from django.utils.translation import npgettext, pgettext
from rest_framework import serializers
from rest_framework.fields import empty
from . import PostingEndpoint, PostingMiddleware
from ....acl.objectacl import add_acl_to_obj
from ...serializers import AttachmentSerializer
class AttachmentsMiddleware(PostingMiddleware):
def use_this_middleware(self):
return bool(self.user_acl["max_attachment_size"])
def get_serializer(self):
return AttachmentsSerializer(
data=self.request.data,
context={
"mode": self.mode,
"user": self.user,
"user_acl": self.user_acl,
"post": self.post,
"settings": self.settings,
},
)
def save(self, serializer):
serializer.save()
class AttachmentsSerializer(serializers.Serializer):
attachments = serializers.ListField(
child=serializers.IntegerField(), required=False
)
def __init__(self, *args, **kwargs):
self.update_attachments = False
self.removed_attachments = []
self.final_attachments = []
super().__init__(*args, **kwargs)
def validate_attachments(self, ids):
ids = list(set(ids))
validate_attachments_count(ids, self.context["settings"])
attachments = self.get_initial_attachments()
new_attachments = self.get_new_attachments(ids)
if not attachments and not new_attachments:
return [] # no attachments
# clean existing attachments
for attachment in attachments:
if attachment.pk in ids:
self.final_attachments.append(attachment)
else:
if attachment.acl["can_delete"]:
self.update_attachments = True
self.removed_attachments.append(attachment)
else:
message = pgettext(
"posting api",
'You don\'t have permission to remove "%(attachment)s" attachment.',
)
raise serializers.ValidationError(
message % {"attachment": attachment.filename}
)
if new_attachments:
self.update_attachments = True
self.final_attachments += new_attachments
self.final_attachments.sort(key=lambda a: a.pk, reverse=True)
def get_initial_attachments(self):
attachments = []
if self.context["mode"] == PostingEndpoint.EDIT:
queryset = self.context["post"].attachment_set.select_related("filetype")
attachments = list(queryset)
add_acl_to_obj(self.context["user_acl"], attachments)
return attachments
def get_new_attachments(self, ids):
if not ids:
return []
queryset = (
self.context["user"]
.attachment_set.select_related("filetype")
.filter(post__isnull=True, id__in=ids)
)
return list(queryset)
def save(self):
if not self.update_attachments:
return
if self.removed_attachments:
for attachment in self.removed_attachments:
attachment.delete_files()
self.context["post"].attachment_set.filter(
id__in=[a.id for a in self.removed_attachments]
).delete()
if self.final_attachments:
# sort final attachments by id, descending
self.final_attachments.sort(key=lambda a: a.pk, reverse=True)
self.context["user"].attachment_set.filter(
id__in=[a.id for a in self.final_attachments]
).update(post=self.context["post"])
self.sync_attachments_cache(self.context["post"], self.final_attachments)
def sync_attachments_cache(self, post, attachments):
if attachments:
post.attachments_cache = AttachmentSerializer(attachments, many=True).data
for attachment in post.attachments_cache:
del attachment["acl"]
del attachment["post"]
else:
post.attachments_cache = None
post.update_fields.append("attachments_cache")
def validate_attachments_count(data, settings):
total_attachments = len(data)
if total_attachments > settings.post_attachments_limit:
# pylint: disable=line-too-long
message = npgettext(
"posting api",
"You can't attach more than %(limit_value)s file to single post (added %(show_value)s).",
"You can't attach more than %(limit_value)s flies to single post (added %(show_value)s).",
settings.post_attachments_limit,
)
raise serializers.ValidationError(
message
% {
"limit_value": settings.post_attachments_limit,
"show_value": total_attachments,
}
)
| 4,967
|
Python
|
.tac
| 116
| 31.422414
| 102
| 0.601865
|
rafalp/Misago
| 2,519
| 524
| 136
|
GPL-2.0
|
9/5/2024, 5:12:22 PM (Europe/Amsterdam)
|
18,628
|
test_attachments_middleware.py
|
rafalp_Misago/misago/threads/tests/test_attachments_middleware.py
|
from unittest.mock import Mock
import pytest
from rest_framework import serializers
from .. import test
from ...conf.test import override_dynamic_settings
from ..api.postingendpoint import PostingEndpoint
from ..api.postingendpoint.attachments import (
AttachmentsMiddleware,
validate_attachments_count,
)
from ..models import Attachment, AttachmentType
@pytest.fixture
def context(default_category, dynamic_settings, user, user_acl):
thread = test.post_thread(category=default_category)
post = thread.first_post
post.update_fields = []
return {
"category": default_category,
"thread": thread,
"post": post,
"settings": dynamic_settings,
"user": user,
"user_acl": user_acl,
}
def create_attachment(*, post=None, user=None):
return Attachment.objects.create(
secret=Attachment.generate_new_secret(),
filetype=AttachmentType.objects.order_by("id").last(),
post=post,
size=1000,
uploader=user if user else None,
uploader_name=user.username if user else "testuser",
uploader_slug=user.slug if user else "testuser",
filename="testfile_%s.zip" % (Attachment.objects.count() + 1),
)
def test_middleware_is_used_if_user_has_permission_to_upload_attachments(context):
context["user_acl"]["max_attachment_size"] = 1024
middleware = AttachmentsMiddleware(**context)
assert middleware.use_this_middleware()
def test_middleware_is_not_used_if_user_has_no_permission_to_upload_attachments(
context,
):
context["user_acl"]["max_attachment_size"] = 0
middleware = AttachmentsMiddleware(**context)
assert not middleware.use_this_middleware()
def test_middleware_handles_no_data(context):
middleware = AttachmentsMiddleware(
request=Mock(data={}), mode=PostingEndpoint.START, **context
)
serializer = middleware.get_serializer()
assert serializer.is_valid()
def test_middleware_handles_empty_data(context):
middleware = AttachmentsMiddleware(
request=Mock(data={"attachments": []}), mode=PostingEndpoint.START, **context
)
serializer = middleware.get_serializer()
assert serializer.is_valid()
def test_data_validation_fails_if_attachments_data_is_not_iterable(context):
middleware = AttachmentsMiddleware(
request=Mock(data={"attachments": "none"}),
mode=PostingEndpoint.START,
**context
)
serializer = middleware.get_serializer()
assert not serializer.is_valid()
def test_data_validation_fails_if_attachments_data_has_non_int_values(context):
middleware = AttachmentsMiddleware(
request=Mock(data={"attachments": [1, "b"]}),
mode=PostingEndpoint.START,
**context
)
serializer = middleware.get_serializer()
assert not serializer.is_valid()
@override_dynamic_settings(post_attachments_limit=2)
def test_data_validation_fails_if_attachments_data_is_longer_than_allowed(context):
middleware = AttachmentsMiddleware(
request=Mock(data={"attachments": range(5)}),
mode=PostingEndpoint.START,
**context
)
serializer = middleware.get_serializer()
assert not serializer.is_valid()
def test_middleware_adds_attachment_to_new_post(context):
new_attachment = create_attachment(user=context["user"])
middleware = AttachmentsMiddleware(
request=Mock(data={"attachments": [new_attachment.id]}),
mode=PostingEndpoint.START,
**context
)
serializer = middleware.get_serializer()
serializer.is_valid()
middleware.save(serializer)
new_attachment.refresh_from_db()
assert new_attachment.post == context["post"]
def test_middleware_adds_attachment_to_attachments_cache(context):
new_attachment = create_attachment(user=context["user"])
middleware = AttachmentsMiddleware(
request=Mock(data={"attachments": [new_attachment.id]}),
mode=PostingEndpoint.START,
**context
)
serializer = middleware.get_serializer()
serializer.is_valid()
middleware.save(serializer)
attachments_cache = context["post"].attachments_cache
assert len(attachments_cache) == 1
assert attachments_cache[0]["id"] == new_attachment.id
def test_middleware_adds_attachment_to_existing_post(context):
new_attachment = create_attachment(user=context["user"])
middleware = AttachmentsMiddleware(
request=Mock(data={"attachments": [new_attachment.id]}),
mode=PostingEndpoint.EDIT,
**context
)
serializer = middleware.get_serializer()
serializer.is_valid()
middleware.save(serializer)
new_attachment.refresh_from_db()
assert new_attachment.post == context["post"]
def test_middleware_adds_attachment_to_post_with_existing_attachment(context):
old_attachment = create_attachment(post=context["post"])
new_attachment = create_attachment(user=context["user"])
middleware = AttachmentsMiddleware(
request=Mock(data={"attachments": [old_attachment.id, new_attachment.id]}),
mode=PostingEndpoint.EDIT,
**context
)
serializer = middleware.get_serializer()
serializer.is_valid()
middleware.save(serializer)
new_attachment.refresh_from_db()
assert new_attachment.post == context["post"]
old_attachment.refresh_from_db()
assert old_attachment.post == context["post"]
def test_middleware_adds_attachment_to_existing_attachments_cache(context):
    """EDIT save places the new upload before the old entry in the cache."""
    existing = create_attachment(post=context["post"])
    uploaded = create_attachment(user=context["user"])
    request = Mock(data={"attachments": [existing.id, uploaded.id]})
    middleware = AttachmentsMiddleware(
        request=request, mode=PostingEndpoint.EDIT, **context
    )

    serializer = middleware.get_serializer()
    serializer.is_valid()
    middleware.save(serializer)

    cache = context["post"].attachments_cache
    assert len(cache) == 2
    # Newest attachment is listed first.
    assert cache[0]["id"] == uploaded.id
    assert cache[1]["id"] == existing.id
def test_other_user_attachment_cant_be_added_to_post(context):
    """An attachment uploaded by someone else is not bound to the edited post."""
    attachment = create_attachment()
    request = Mock(data={"attachments": [attachment.id]})
    middleware = AttachmentsMiddleware(
        request=request, mode=PostingEndpoint.EDIT, **context
    )

    serializer = middleware.get_serializer()
    serializer.is_valid()
    middleware.save(serializer)

    attachment.refresh_from_db()
    assert not attachment.post
def test_other_post_attachment_cant_be_added_to_new_post(context, default_category):
    """An attachment already bound to another post stays on that post."""
    other_post = test.post_thread(category=default_category).first_post
    attachment = create_attachment(post=other_post, user=context["user"])
    request = Mock(data={"attachments": [attachment.id]})
    middleware = AttachmentsMiddleware(
        request=request, mode=PostingEndpoint.EDIT, **context
    )

    serializer = middleware.get_serializer()
    serializer.is_valid()
    middleware.save(serializer)

    attachment.refresh_from_db()
    assert attachment.post == other_post
def test_middleware_removes_attachment_from_post(context):
    """Submitting an empty attachments list detaches the post's attachment."""
    create_attachment(post=context["post"], user=context["user"])
    request = Mock(data={"attachments": []})
    middleware = AttachmentsMiddleware(
        request=request, mode=PostingEndpoint.EDIT, **context
    )

    serializer = middleware.get_serializer()
    serializer.is_valid()
    middleware.save(serializer)

    context["post"].refresh_from_db()
    assert not context["post"].attachment_set.exists()
def test_middleware_removes_attachment_from_attachments_cache(context):
    """Submitting an empty attachments list clears the post's cache."""
    create_attachment(post=context["post"], user=context["user"])
    request = Mock(data={"attachments": []})
    middleware = AttachmentsMiddleware(
        request=request, mode=PostingEndpoint.EDIT, **context
    )

    serializer = middleware.get_serializer()
    serializer.is_valid()
    middleware.save(serializer)

    assert not context["post"].attachments_cache
def test_middleware_deletes_attachment_removed_from_post(context):
    """An attachment removed from its post is deleted outright."""
    attachment = create_attachment(post=context["post"], user=context["user"])
    request = Mock(data={"attachments": []})
    middleware = AttachmentsMiddleware(
        request=request, mode=PostingEndpoint.EDIT, **context
    )

    serializer = middleware.get_serializer()
    serializer.is_valid()
    middleware.save(serializer)

    with pytest.raises(Attachment.DoesNotExist):
        attachment.refresh_from_db()
def test_middleware_blocks_user_from_removing_other_user_attachment_without_permission(
    context,
):
    """Without the ACL, removing another user's attachment fails validation."""
    attachment = create_attachment(post=context["post"])
    request = Mock(data={"attachments": []})
    middleware = AttachmentsMiddleware(
        request=request, mode=PostingEndpoint.EDIT, **context
    )

    serializer = middleware.get_serializer()
    assert not serializer.is_valid()
    middleware.save(serializer)

    # Attachment is still bound to the post.
    attachment.refresh_from_db()
    assert attachment.post == context["post"]
def test_middleware_allows_user_with_permission_to_remove_other_user_attachment(
    context,
):
    """can_delete_other_users_attachments permits detaching another user's file."""
    context["user_acl"]["can_delete_other_users_attachments"] = True
    create_attachment(post=context["post"])
    request = Mock(data={"attachments": []})
    middleware = AttachmentsMiddleware(
        request=request, mode=PostingEndpoint.EDIT, **context
    )

    serializer = middleware.get_serializer()
    serializer.is_valid()
    middleware.save(serializer)

    context["post"].refresh_from_db()
    assert not context["post"].attachment_set.exists()
def test_attachments_count_validator_allows_attachments_within_limit():
    """No error is raised when the count is within post_attachments_limit."""
    mock_settings = Mock(post_attachments_limit=5)
    validate_attachments_count(range(5), mock_settings)
def test_attachments_count_validator_raises_validation_error_on_too_many_attachmes():
    """ValidationError is raised when the count exceeds post_attachments_limit."""
    mock_settings = Mock(post_attachments_limit=2)
    with pytest.raises(serializers.ValidationError):
        validate_attachments_count(range(5), mock_settings)
| 9,916
|
Python
|
.tac
| 235
| 36.638298
| 87
| 0.722592
|
rafalp/Misago
| 2,519
| 524
| 136
|
GPL-2.0
|
9/5/2024, 5:12:22 PM (Europe/Amsterdam)
|
18,629
|
test_attachments_proxy.py
|
rafalp_Misago/misago/threads/tests/test_attachments_proxy.py
|
import pytest
from django.urls import reverse
from ...acl.models import Role
from ...acl.test import patch_user_acl
from ...conf import settings
from ...conf.test import override_dynamic_settings
from ..models import Attachment, AttachmentType
@pytest.fixture
def attachment_type(db):
    """Return the first (oldest) attachment type from the default data."""
    types = AttachmentType.objects.order_by("id")
    return types.first()
@pytest.fixture
def attachment(attachment_type, post, user):
    """Plain file attachment uploaded by `user` and bound to `post`."""
    uploader_kwargs = {
        "uploader": user,
        "uploader_name": user.username,
        "uploader_slug": user.slug,
    }
    return Attachment.objects.create(
        secret="secret",
        filetype=attachment_type,
        post=post,
        filename="test.txt",
        file="test.txt",
        size=1000,
        **uploader_kwargs,
    )
@pytest.fixture
def image(post, user):
    """PNG image attachment without a thumbnail."""
    png_type = AttachmentType.objects.get(mimetypes="image/png")
    return Attachment.objects.create(
        secret="secret",
        filetype=png_type,
        post=post,
        uploader=user,
        uploader_name=user.username,
        uploader_slug=user.slug,
        filename="test.png",
        image="test.png",
        size=1000,
    )
@pytest.fixture
def image_with_thumbnail(post, user):
    """PNG image attachment that also has a thumbnail file."""
    png_type = AttachmentType.objects.get(mimetypes="image/png")
    return Attachment.objects.create(
        secret="secret",
        filetype=png_type,
        post=post,
        uploader=user,
        uploader_name=user.username,
        uploader_slug=user.slug,
        filename="test.png",
        image="test.png",
        thumbnail="test-thumbnail.png",
        size=1000,
    )
@pytest.fixture
def other_users_attachment(attachment, other_user):
    # Reassign the base attachment to a different uploader.
    attachment.uploader = other_user
    attachment.save()
    return attachment
@pytest.fixture
def orphaned_attachment(attachment):
    # Detach the attachment from its post, making it "orphaned".
    attachment.post = None
    attachment.save()
    return attachment
@pytest.fixture
def other_users_orphaned_attachment(other_users_attachment):
    # Orphaned attachment whose uploader is a different user.
    other_users_attachment.post = None
    other_users_attachment.save()
    return other_users_attachment
def assert_403(response):
    """Assert the response redirects to the "permission denied" image."""
    location = response["location"]
    assert response.status_code == 302
    assert location.endswith(settings.MISAGO_ATTACHMENT_403_IMAGE)
def assert_404(response):
    """Assert the response redirects to the "not found" image."""
    location = response["location"]
    assert response.status_code == 302
    assert location.endswith(settings.MISAGO_ATTACHMENT_404_IMAGE)
def test_proxy_redirects_client_to_attachment_file(client, attachment):
    """Plain file attachments are served as a redirect to the stored file."""
    url = attachment.get_absolute_url()
    response = client.get(url)
    assert response.status_code == 302
    assert response["location"].endswith("test.txt")
def test_proxy_redirects_client_to_attachment_image(client, image):
    """Image attachments are served as a redirect to the stored image."""
    url = image.get_absolute_url()
    response = client.get(url)
    assert response.status_code == 302
    assert response["location"].endswith("test.png")
def test_proxy_redirects_client_to_attachment_thumbnail(client, image_with_thumbnail):
    """Thumbnail urls redirect to the stored thumbnail file."""
    url = image_with_thumbnail.get_thumbnail_url()
    response = client.get(url)
    assert response.status_code == 302
    assert response["location"].endswith("test-thumbnail.png")
def test_proxy_redirects_to_404_image_for_nonexistant_attachment(db, client):
    """Unknown attachment id redirects to the "not found" image."""
    url = reverse("misago:attachment", kwargs={"pk": 1, "secret": "secret"})
    assert_404(client.get(url))
def test_proxy_redirects_to_404_image_for_url_with_invalid_attachment_secret(
    client, attachment
):
    """Wrong secret in the url redirects to the "not found" image."""
    url = reverse(
        "misago:attachment", kwargs={"pk": attachment.id, "secret": "invalid"}
    )
    assert_404(client.get(url))
@patch_user_acl({"can_download_other_users_attachments": False})
def test_proxy_redirects_to_403_image_for_user_without_permission_to_see_attachment(
    user_client, other_users_attachment
):
    """Without the download ACL, other users' files show the 403 image."""
    url = other_users_attachment.get_absolute_url()
    assert_403(user_client.get(url))
def test_thumbnail_proxy_redirects_to_404_for_non_image_attachment(client, attachment):
    """The thumbnail url of a non-image attachment shows the 404 image."""
    url = reverse(
        "misago:attachment-thumbnail",
        kwargs={"pk": attachment.pk, "secret": attachment.secret},
    )
    assert_404(client.get(url))
def test_thumbnail_proxy_redirects_to_regular_image_for_image_without_thumbnail(
    client, image
):
    """Without a thumbnail, the thumbnail url falls back to the full image."""
    url = reverse(
        "misago:attachment-thumbnail",
        kwargs={"pk": image.pk, "secret": image.secret},
    )
    response = client.get(url)
    assert response.status_code == 302
    assert response["location"].endswith("test.png")
def test_thumbnail_proxy_redirects_to_thumbnail_image(client, image_with_thumbnail):
    """Thumbnail url redirects to the stored thumbnail when one exists."""
    url = reverse(
        "misago:attachment-thumbnail",
        kwargs={
            "pk": image_with_thumbnail.pk,
            "secret": image_with_thumbnail.secret,
        },
    )
    response = client.get(url)
    assert response.status_code == 302
    assert response["location"].endswith("test-thumbnail.png")
def test_proxy_blocks_user_from_their_orphaned_attachment(
    user_client, orphaned_attachment
):
    """Orphaned attachments 404 without the shva key, even for their uploader."""
    url = orphaned_attachment.get_absolute_url()
    assert_404(user_client.get(url))
def test_proxy_redirects_user_to_their_orphaned_attachment_if_link_has_shva_key(
    user_client, orphaned_attachment
):
    """The shva key lets the uploader download their orphaned attachment."""
    url = f"{orphaned_attachment.get_absolute_url()}?shva=1"
    response = user_client.get(url)
    assert response.status_code == 302
    assert response["location"].endswith("test.txt")
def test_proxy_blocks_user_from_other_users_orphaned_attachment(
    user_client, other_users_orphaned_attachment
):
    """Other users' orphaned attachments show the 404 image."""
    url = other_users_orphaned_attachment.get_absolute_url()
    assert_404(user_client.get(url))
def test_proxy_blocks_user_from_other_users_orphaned_attachment_if_link_has_shva_key(
    user_client, other_users_orphaned_attachment
):
    """The shva key doesn't grant access to other users' orphaned attachments."""
    url = f"{other_users_orphaned_attachment.get_absolute_url()}?shva=1"
    assert_404(user_client.get(url))
def test_proxy_redirects_admin_to_other_users_orphaned_attachment(
    admin_client, orphaned_attachment
):
    """Admins may download orphaned attachments via the shva key."""
    url = f"{orphaned_attachment.get_absolute_url()}?shva=1"
    response = admin_client.get(url)
    assert response.status_code == 302
    assert response["location"].endswith("test.txt")
def test_proxy_blocks_user_from_attachment_with_disabled_type(
    user_client, attachment, attachment_type
):
    """Attachments of a disabled type show the 403 image."""
    attachment_type.status = AttachmentType.DISABLED
    attachment_type.save()

    url = attachment.get_absolute_url()
    assert_403(user_client.get(url))
@pytest.fixture
def role(db):
    # Extra role; tests grant it to users explicitly where needed.
    return Role.objects.create(name="Test")
@pytest.fixture
def limited_attachment_type(attachment_type, role):
    # Restrict downloads of this type to members of `role`.
    attachment_type.limit_downloads_to.add(role)
    return attachment_type
def test_proxy_blocks_user_without_role_from_attachment_with_limited_type(
    user_client, attachment, limited_attachment_type
):
    """Users outside the allowed role get the 403 image."""
    url = attachment.get_absolute_url()
    assert_403(user_client.get(url))
def test_proxy_allows_user_with_role_to_download_attachment_with_limited_type(
    user, user_client, role, attachment, limited_attachment_type
):
    """Users holding the allowed role may download the limited-type file."""
    user.roles.add(role)

    url = attachment.get_absolute_url()
    response = user_client.get(url)
    assert response.status_code == 302
    assert response["location"].endswith("test.txt")
def test_proxy_allows_admin_without_role_to_download_attachment_with_limited_type(
    admin_client, attachment, limited_attachment_type
):
    """Admins bypass the download role limit."""
    url = attachment.get_absolute_url()
    response = admin_client.get(url)
    assert response.status_code == 302
    assert response["location"].endswith("test.txt")
@override_dynamic_settings(attachment_403_image="custom-403-image.png")
@patch_user_acl({"can_download_other_users_attachments": False})
def test_proxy_uses_custom_permission_denied_image_if_one_is_set(
    user_client, other_users_attachment
):
    """The custom 403 image setting overrides the bundled one."""
    url = other_users_attachment.get_absolute_url()
    response = user_client.get(url)
    assert response.status_code == 302
    assert response["location"].endswith("custom-403-image.png")
@override_dynamic_settings(attachment_404_image="custom-404-image.png")
def test_proxy_uses_custom_not_found_image_if_one_is_set(db, client):
    """The custom 404 image setting overrides the bundled one."""
    url = reverse("misago:attachment", kwargs={"pk": 1, "secret": "secret"})
    response = client.get(url)
    assert response.status_code == 302
    assert response["location"].endswith("custom-404-image.png")
| 8,186
|
Python
|
.tac
| 208
| 34.163462
| 87
| 0.722264
|
rafalp/Misago
| 2,519
| 524
| 136
|
GPL-2.0
|
9/5/2024, 5:12:22 PM (Europe/Amsterdam)
|
18,630
|
test_clearattachments.py
|
rafalp_Misago/misago/threads/tests/test_clearattachments.py
|
from datetime import timedelta
from io import StringIO
import pytest
from django.core import management
from django.utils import timezone
from ...conf.test import override_dynamic_settings
from ..management.commands import clearattachments
from ..models import Attachment, AttachmentType
@pytest.fixture
def attachment_type(db):
    """Return the last (newest) attachment type from the default data."""
    types = AttachmentType.objects.order_by("id")
    return types.last()
def create_attachment(attachment_type, uploaded_on, post=None):
    """Create a minimal attachment uploaded at `uploaded_on`, optionally bound to `post`."""
    filename = f"testfile_{Attachment.objects.count() + 1}.zip"
    return Attachment.objects.create(
        secret=Attachment.generate_new_secret(),
        post=post,
        filetype=attachment_type,
        size=1000,
        uploaded_on=uploaded_on,
        uploader_name="User",
        uploader_slug="user",
        filename=filename,
    )
def call_command():
    """Run clearattachments and return the last non-empty line of its output."""
    stdout = StringIO()
    management.call_command(clearattachments.Command(), stdout=stdout)
    output_lines = stdout.getvalue().strip().splitlines()
    return output_lines[-1].strip()
def test_command_works_if_there_are_no_attachments(db):
    """The command exits cleanly on an empty attachments table."""
    assert call_command() == "No unused attachments were cleared"
@override_dynamic_settings(unused_attachments_lifetime=2)
def test_recent_attachment_is_not_cleared(attachment_type):
    """Unused attachments younger than the lifetime are kept."""
    create_attachment(attachment_type, timezone.now())
    assert call_command() == "No unused attachments were cleared"
@override_dynamic_settings(unused_attachments_lifetime=2)
def test_old_used_attachment_is_not_cleared(attachment_type, post):
    """Attachments bound to a post are kept regardless of age."""
    uploaded_on = timezone.now() - timedelta(hours=3)
    create_attachment(attachment_type, uploaded_on, post)
    assert call_command() == "No unused attachments were cleared"
@override_dynamic_settings(unused_attachments_lifetime=2)
def test_old_unused_attachment_is_cleared(attachment_type):
    """Unused attachments older than the lifetime are deleted."""
    uploaded_on = timezone.now() - timedelta(hours=3)
    attachment = create_attachment(attachment_type, uploaded_on)

    assert call_command() == "Cleared 1 attachments"
    with pytest.raises(Attachment.DoesNotExist):
        attachment.refresh_from_db()
| 2,197
|
Python
|
.tac
| 49
| 40.285714
| 70
| 0.752582
|
rafalp/Misago
| 2,519
| 524
| 136
|
GPL-2.0
|
9/5/2024, 5:12:22 PM (Europe/Amsterdam)
|
18,631
|
test_attachments_api.py
|
rafalp_Misago/misago/threads/tests/test_attachments_api.py
|
import os
from django.urls import reverse
from PIL import Image
from ...acl.models import Role
from ...acl.test import patch_user_acl
from ...conf import settings
from ...users.test import AuthenticatedUserTestCase
from ..models import Attachment, AttachmentType
# Directory with the binary fixtures used by the upload tests below.
TESTFILES_DIR = os.path.join(os.path.dirname(os.path.abspath(__file__)), "testfiles")

# Individual test uploads of known type, size and validity.
TEST_DOCUMENT_PATH = os.path.join(TESTFILES_DIR, "document.pdf")
TEST_LARGEPNG_PATH = os.path.join(TESTFILES_DIR, "large.png")
TEST_SMALLJPG_PATH = os.path.join(TESTFILES_DIR, "small.jpg")
TEST_ANIMATEDGIF_PATH = os.path.join(TESTFILES_DIR, "animated.gif")
TEST_CORRUPTEDIMG_PATH = os.path.join(TESTFILES_DIR, "corrupted.gif")
class AttachmentsApiTestCase(AuthenticatedUserTestCase):
    """Integration tests for the attachment upload API endpoint."""

    def setUp(self):
        super().setUp()

        # Start each test with no allowed filetypes; each test adds what it needs.
        AttachmentType.objects.all().delete()

        self.api_link = reverse("misago:api:attachment-list")

    def test_anonymous(self):
        """user has to be authenticated to be able to upload files"""
        self.logout_user()

        response = self.client.post(self.api_link)
        self.assertEqual(response.status_code, 403)

    @patch_user_acl({"max_attachment_size": 0})
    def test_no_permission(self):
        """user needs permission to upload files"""
        response = self.client.post(self.api_link)
        self.assertEqual(response.status_code, 403)
        self.assertEqual(
            response.json(),
            {"detail": "You don't have permission to upload new files."},
        )

    def test_no_file_uploaded(self):
        """no file uploaded scenario is handled"""
        response = self.client.post(self.api_link)
        self.assertEqual(response.status_code, 400)
        self.assertEqual(response.json(), {"detail": "No file has been uploaded."})

    def test_invalid_extension(self):
        """uploaded file's extension is rejected as invalid"""
        AttachmentType.objects.create(
            name="Test extension", extensions="jpg,jpeg", mimetypes=None
        )

        with open(TEST_DOCUMENT_PATH, "rb") as upload:
            response = self.client.post(self.api_link, data={"upload": upload})
        self.assertEqual(response.status_code, 400)
        self.assertEqual(
            response.json(), {"detail": "You can't upload files of this type."}
        )

    def test_invalid_mime(self):
        """uploaded file's mimetype is rejected as invalid"""
        AttachmentType.objects.create(
            name="Test extension", extensions="png", mimetypes="loremipsum"
        )

        with open(TEST_DOCUMENT_PATH, "rb") as upload:
            response = self.client.post(self.api_link, data={"upload": upload})
        self.assertEqual(response.status_code, 400)
        self.assertEqual(
            response.json(), {"detail": "You can't upload files of this type."}
        )

    def test_no_perm_to_type(self):
        """user needs permission to upload files of this type"""
        attachment_type = AttachmentType.objects.create(
            name="Test extension", extensions="png", mimetypes="application/pdf"
        )

        # Limit uploads to every role the user does NOT have.
        user_roles = (r.pk for r in self.user.get_roles())
        attachment_type.limit_uploads_to.set(Role.objects.exclude(id__in=user_roles))

        with open(TEST_DOCUMENT_PATH, "rb") as upload:
            response = self.client.post(self.api_link, data={"upload": upload})
        self.assertEqual(response.status_code, 400)
        self.assertEqual(
            response.json(), {"detail": "You can't upload files of this type."}
        )

    def test_type_is_locked(self):
        """new uploads for this filetype are locked"""
        AttachmentType.objects.create(
            name="Test extension",
            extensions="png",
            mimetypes="application/pdf",
            status=AttachmentType.LOCKED,
        )

        with open(TEST_DOCUMENT_PATH, "rb") as upload:
            response = self.client.post(self.api_link, data={"upload": upload})
        self.assertEqual(response.status_code, 400)
        self.assertEqual(
            response.json(), {"detail": "You can't upload files of this type."}
        )

    def test_type_is_disabled(self):
        """new uploads for this filetype are disabled"""
        AttachmentType.objects.create(
            name="Test extension",
            extensions="png",
            mimetypes="application/pdf",
            status=AttachmentType.DISABLED,
        )

        with open(TEST_DOCUMENT_PATH, "rb") as upload:
            response = self.client.post(self.api_link, data={"upload": upload})
        self.assertEqual(response.status_code, 400)
        self.assertEqual(
            response.json(), {"detail": "You can't upload files of this type."}
        )

    @patch_user_acl({"max_attachment_size": 100})
    def test_upload_too_big_for_user(self):
        """too big uploads are rejected"""
        AttachmentType.objects.create(
            name="Test extension", extensions="png", mimetypes="image/png"
        )

        with open(TEST_LARGEPNG_PATH, "rb") as upload:
            response = self.client.post(self.api_link, data={"upload": upload})
        self.assertEqual(response.status_code, 400)
        # \xa0 is a non-breaking space in the expected message.
        self.assertEqual(
            response.json(),
            {
                "detail": (
                    "You can't upload files larger than 100.0\xa0KB "
                    "(your file has 253.9\xa0KB)."
                )
            },
        )

    def test_corrupted_image_upload(self):
        """corrupted image upload is handled"""
        AttachmentType.objects.create(name="Test extension", extensions="gif")

        with open(TEST_CORRUPTEDIMG_PATH, "rb") as upload:
            response = self.client.post(self.api_link, data={"upload": upload})
        self.assertEqual(response.status_code, 400)
        self.assertEqual(
            response.json(), {"detail": "Uploaded image is unsupported or invalid."}
        )

    def test_document_upload(self):
        """successful upload creates orphan attachment"""
        AttachmentType.objects.create(
            name="Test extension", extensions="pdf", mimetypes="application/pdf"
        )

        with open(TEST_DOCUMENT_PATH, "rb") as upload:
            response = self.client.post(self.api_link, data={"upload": upload})
        self.assertEqual(response.status_code, 200)

        response_json = response.json()

        attachment = Attachment.objects.get(id=response_json["id"])

        self.assertEqual(attachment.filename, "document.pdf")
        self.assertTrue(attachment.is_file)
        self.assertFalse(attachment.is_image)

        self.assertIsNotNone(attachment.file)
        self.assertTrue(not attachment.image)
        self.assertTrue(not attachment.thumbnail)

        self.assertTrue(str(attachment.file).endswith("document.pdf"))

        # New upload is orphaned (post is None) until a post claims it.
        self.assertIsNone(response_json["post"])
        self.assertEqual(response_json["uploader_name"], self.user.username)
        self.assertEqual(response_json["url"]["index"], attachment.get_absolute_url())
        self.assertIsNone(response_json["url"]["thumb"])
        self.assertEqual(response_json["url"]["uploader"], self.user.get_absolute_url())

        self.assertEqual(self.user.audittrail_set.count(), 1)

        # files associated with attachment are deleted on its deletion
        file_path = attachment.file.path
        self.assertTrue(os.path.exists(file_path))

        attachment.delete()
        self.assertFalse(os.path.exists(file_path))

    def test_small_image_upload(self):
        """successful small image upload creates orphan attachment without thumbnail"""
        AttachmentType.objects.create(
            name="Test extension", extensions="jpeg,jpg", mimetypes="image/jpeg"
        )

        with open(TEST_SMALLJPG_PATH, "rb") as upload:
            response = self.client.post(self.api_link, data={"upload": upload})
        self.assertEqual(response.status_code, 200)

        response_json = response.json()

        attachment = Attachment.objects.get(id=response_json["id"])

        self.assertEqual(attachment.filename, "small.jpg")
        self.assertFalse(attachment.is_file)
        self.assertTrue(attachment.is_image)

        self.assertTrue(not attachment.file)
        self.assertIsNotNone(attachment.image)
        self.assertTrue(not attachment.thumbnail)

        self.assertTrue(str(attachment.image).endswith("small.jpg"))

        self.assertIsNone(response_json["post"])
        self.assertEqual(response_json["uploader_name"], self.user.username)
        self.assertEqual(response_json["url"]["index"], attachment.get_absolute_url())
        self.assertIsNone(response_json["url"]["thumb"])
        self.assertEqual(response_json["url"]["uploader"], self.user.get_absolute_url())

        self.assertEqual(self.user.audittrail_set.count(), 1)

    @patch_user_acl({"max_attachment_size": 10 * 1024})
    def test_large_image_upload(self):
        """successful large image upload creates orphan attachment with thumbnail"""
        AttachmentType.objects.create(
            name="Test extension", extensions="png", mimetypes="image/png"
        )

        with open(TEST_LARGEPNG_PATH, "rb") as upload:
            response = self.client.post(self.api_link, data={"upload": upload})
        self.assertEqual(response.status_code, 200)

        response_json = response.json()

        attachment = Attachment.objects.get(id=response_json["id"])

        self.assertEqual(attachment.filename, "large.png")
        self.assertFalse(attachment.is_file)
        self.assertTrue(attachment.is_image)

        self.assertTrue(not attachment.file)
        self.assertIsNotNone(attachment.image)
        self.assertIsNotNone(attachment.thumbnail)

        self.assertTrue(str(attachment.image).endswith("large.png"))
        self.assertTrue(str(attachment.thumbnail).endswith("large.png"))

        self.assertIsNone(response_json["post"])
        self.assertEqual(response_json["uploader_name"], self.user.username)
        self.assertEqual(response_json["url"]["index"], attachment.get_absolute_url())
        self.assertEqual(response_json["url"]["thumb"], attachment.get_thumbnail_url())
        self.assertEqual(response_json["url"]["uploader"], self.user.get_absolute_url())

        self.assertEqual(self.user.audittrail_set.count(), 1)

        # thumbnail was scaled down
        thumbnail = Image.open(attachment.thumbnail.path)
        self.assertEqual(
            thumbnail.size[0], settings.MISAGO_ATTACHMENT_IMAGE_SIZE_LIMIT[0]
        )
        self.assertLess(
            thumbnail.size[1], settings.MISAGO_ATTACHMENT_IMAGE_SIZE_LIMIT[1]
        )

        # files associated with attachment are deleted on its deletion
        image_path = attachment.image.path
        thumbnail_path = attachment.thumbnail.path

        self.assertTrue(os.path.exists(image_path))
        self.assertTrue(os.path.exists(thumbnail_path))

        attachment.delete()

        self.assertFalse(os.path.exists(image_path))
        self.assertFalse(os.path.exists(thumbnail_path))

    def test_animated_image_upload(self):
        """successful gif upload creates orphan attachment with thumbnail"""
        AttachmentType.objects.create(
            name="Test extension", extensions="gif", mimetypes="image/gif"
        )

        with open(TEST_ANIMATEDGIF_PATH, "rb") as upload:
            response = self.client.post(self.api_link, data={"upload": upload})
        self.assertEqual(response.status_code, 200)

        response_json = response.json()

        attachment = Attachment.objects.get(id=response_json["id"])

        self.assertEqual(attachment.filename, "animated.gif")
        self.assertFalse(attachment.is_file)
        self.assertTrue(attachment.is_image)

        self.assertTrue(not attachment.file)
        self.assertIsNotNone(attachment.image)
        self.assertIsNotNone(attachment.thumbnail)

        self.assertTrue(str(attachment.image).endswith("animated.gif"))
        self.assertTrue(str(attachment.thumbnail).endswith("animated.gif"))

        self.assertIsNone(response_json["post"])
        self.assertEqual(response_json["uploader_name"], self.user.username)
        self.assertEqual(response_json["url"]["index"], attachment.get_absolute_url())
        self.assertEqual(response_json["url"]["thumb"], attachment.get_thumbnail_url())
        self.assertEqual(response_json["url"]["uploader"], self.user.get_absolute_url())

        self.assertEqual(self.user.audittrail_set.count(), 1)
| 12,624
|
Python
|
.tac
| 244
| 41.885246
| 88
| 0.653378
|
rafalp/Misago
| 2,519
| 524
| 136
|
GPL-2.0
|
9/5/2024, 5:12:22 PM (Europe/Amsterdam)
|
18,632
|
clearattachments.py
|
rafalp_Misago/misago/threads/management/commands/clearattachments.py
|
import time
from datetime import timedelta
from django.core.management.base import BaseCommand
from django.utils import timezone
from ....conf.shortcuts import get_dynamic_settings
from ....core.management.progressbar import show_progress
from ...models import Attachment
class Command(BaseCommand):
    """Management command deleting orphaned attachments past their lifetime."""

    help = "Deletes attachments not associated with any posts"

    def handle(self, *args, **options):
        settings = get_dynamic_settings()
        # unused_attachments_lifetime is expressed in hours.
        cutoff = timezone.now() - timedelta(hours=settings.unused_attachments_lifetime)
        queryset = Attachment.objects.filter(post__isnull=True, uploaded_on__lt=cutoff)

        attachments_to_sync = queryset.count()

        if not attachments_to_sync:
            self.stdout.write("\n\nNo unused attachments were cleared")
        else:
            self.sync_attachments(queryset, attachments_to_sync)

    def sync_attachments(self, queryset, attachments_to_sync):
        """Delete queryset's attachments one by one, showing a progress bar."""
        self.stdout.write("Clearing %s attachments...\n" % attachments_to_sync)

        cleared_count = 0
        show_progress(self, cleared_count, attachments_to_sync)
        start_time = time.time()

        # Deleted individually (not bulk) so per-instance delete() cleanup runs.
        for attachment in queryset.iterator(chunk_size=50):
            attachment.delete()
            cleared_count += 1
            show_progress(self, cleared_count, attachments_to_sync, start_time)

        self.stdout.write("\n\nCleared %s attachments" % cleared_count)
| 1,409
|
Python
|
.tac
| 28
| 42.857143
| 87
| 0.706871
|
rafalp/Misago
| 2,519
| 524
| 136
|
GPL-2.0
|
9/5/2024, 5:12:22 PM (Europe/Amsterdam)
|
18,633
|
attachmenttypes.py
|
rafalp_Misago/misago/threads/admin/views/attachmenttypes.py
|
from django.contrib import messages
from django.db.models import Count
from django.utils.translation import pgettext, pgettext_lazy
from ....admin.views import generic
from ...models import AttachmentType
from ..forms import AttachmentTypeForm
class AttachmentTypeAdmin(generic.AdminBaseMixin):
    """Shared configuration for the attachment types admin views."""

    root_link = "misago:admin:settings:attachment-types:index"
    model = AttachmentType
    form_class = AttachmentTypeForm
    templates_dir = "misago/admin/attachmenttypes"
    message_404 = pgettext_lazy(
        "admin attachments types", "Requested attachment type does not exist."
    )

    def update_roles(self, target, roles):
        # Replace the target's role set wholesale with the submitted roles.
        target.roles.clear()
        if roles:
            target.roles.add(*roles)

    def handle_form(self, form, request, target):
        super().handle_form(form, request, target)
        form.save()
class AttachmentTypesList(AttachmentTypeAdmin, generic.ListView):
    """Attachment types listing, ordered by name."""

    ordering = (("name", None),)

    def get_queryset(self):
        # Expose per-type attachment counts to the list template.
        return super().get_queryset().annotate(num_files=Count("attachment"))
class NewAttachmentType(AttachmentTypeAdmin, generic.ModelFormView):
    # Success message shown after a new attachment type is created.
    message_submit = pgettext_lazy(
        "admin attachments types", 'New type "%(name)s" has been saved.'
    )
class EditAttachmentType(AttachmentTypeAdmin, generic.ModelFormView):
    # Success message shown after an existing attachment type is updated.
    message_submit = pgettext_lazy(
        "admin attachments types", 'Attachment type "%(name)s" has been edited.'
    )
class DeleteAttachmentType(AttachmentTypeAdmin, generic.ButtonView):
    def check_permissions(self, request, target):
        # Returning an error message blocks the action; returning None allows it.
        if target.attachment_set.exists():
            message = pgettext(
                "admin attachments types",
                'Attachment type "%(name)s" has associated attachments and can\'t be deleted.',
            )
            return message % {"name": target.name}

    def button_action(self, request, target):
        target.delete()
        message = pgettext(
            "admin attachments types", 'Attachment type "%(name)s" has been deleted.'
        )
        messages.success(request, message % {"name": target.name})
| 2,122
|
Python
|
.tac
| 48
| 37.3125
| 95
| 0.69694
|
rafalp/Misago
| 2,519
| 524
| 136
|
GPL-2.0
|
9/5/2024, 5:12:22 PM (Europe/Amsterdam)
|
18,634
|
attachments.py
|
rafalp_Misago/misago/threads/admin/views/attachments.py
|
from django.contrib import messages
from django.db import transaction
from django.utils.translation import pgettext, pgettext_lazy
from ....admin.views import generic
from ...models import Attachment, Post
from ..forms import FilterAttachmentsForm
class AttachmentAdmin(generic.AdminBaseMixin):
    """Shared configuration for the attachments admin views."""

    root_link = "misago:admin:attachments:index"
    model = Attachment
    templates_dir = "misago/admin/attachments"
    message_404 = pgettext_lazy(
        "admin attachments", "Requested attachment does not exist."
    )

    def get_queryset(self):
        # Join the relations rendered on the attachments list in one query.
        related = ("filetype", "uploader", "post", "post__thread", "post__category")
        return super().get_queryset().select_related(*related)
class AttachmentsList(AttachmentAdmin, generic.ListView):
    """Paginated attachments list with ordering, filtering and mass delete."""

    items_per_page = 20
    # (ordering key, user-facing label) pairs for the list's sort options.
    ordering = [
        ("-id", pgettext_lazy("admin attachments ordering choice", "From newest")),
        ("id", pgettext_lazy("admin attachments ordering choice", "From oldest")),
        ("filename", pgettext_lazy("admin attachments ordering choice", "A to z")),
        ("-filename", pgettext_lazy("admin attachments ordering choice", "Z to a")),
        ("size", pgettext_lazy("admin attachments ordering choice", "Smallest files")),
        ("-size", pgettext_lazy("admin attachments ordering choice", "Largest files")),
    ]
    selection_label = pgettext_lazy("admin attachments", "With attachments: 0")
    empty_selection_label = pgettext_lazy("admin attachments", "Select attachments")
    mass_actions = [
        {
            "action": "delete",
            "name": pgettext_lazy("admin attachments", "Delete attachments"),
            "confirmation": pgettext_lazy(
                "admin attachments",
                "Are you sure you want to delete selected attachments?",
            ),
            # Only the cache rewrite below runs in a transaction; see action_delete.
            "is_atomic": False,
        }
    ]
    filter_form = FilterAttachmentsForm

    def action_delete(self, request, attachments):
        """Mass action: delete attachments, rewriting their posts' caches first."""
        deleted_attachments = []
        desynced_posts = []
        # Collect attachments bound to posts; their posts' attachments_cache
        # must be rewritten before the attachments are deleted.
        for attachment in attachments:
            if attachment.post:
                deleted_attachments.append(attachment.pk)
                desynced_posts.append(attachment.post_id)

        if desynced_posts:
            with transaction.atomic():
                for post in Post.objects.filter(id__in=desynced_posts):
                    self.delete_from_cache(post, deleted_attachments)

        for attachment in attachments:
            attachment.delete()

        message = pgettext(
            "admin attachments", "Selected attachments have been deleted."
        )
        messages.success(request, message)

    def delete_from_cache(self, post, attachments):
        """Drop entries for the given attachment ids from the post's cache."""
        if not post.attachments_cache:
            return  # admin action may be taken due to desynced state

        clean_cache = []
        for a in post.attachments_cache:
            if a["id"] not in attachments:
                clean_cache.append(a)

        # An emptied cache is stored as None, not as an empty list.
        post.attachments_cache = clean_cache or None
        post.save(update_fields=["attachments_cache"])
class DeleteAttachment(AttachmentAdmin, generic.ButtonView):
    def button_action(self, request, target):
        """Delete a single attachment, syncing its post's cache first."""
        if target.post:
            self.delete_from_cache(target)

        target.delete()

        message = pgettext(
            "admin attachments", 'Attachment "%(filename)s" has been deleted.'
        )
        messages.success(request, message % {"filename": target.filename})

    def delete_from_cache(self, attachment):
        """Drop the attachment's entry from its post's attachments_cache."""
        if not attachment.post.attachments_cache:
            return  # admin action may be taken due to desynced state

        clean_cache = []
        for a in attachment.post.attachments_cache:
            if a["id"] != attachment.id:
                clean_cache.append(a)

        # An emptied cache is stored as None, not as an empty list.
        attachment.post.attachments_cache = clean_cache or None
        attachment.post.save(update_fields=["attachments_cache"])
| 3,895
|
Python
|
.tac
| 86
| 36.209302
| 87
| 0.64591
|
rafalp/Misago
| 2,519
| 524
| 136
|
GPL-2.0
|
9/5/2024, 5:12:22 PM (Europe/Amsterdam)
|
18,635
|
test_attachments_views.py
|
rafalp_Misago/misago/threads/admin/tests/test_attachments_views.py
|
from django.urls import reverse
from ... import test
from ....admin.test import AdminTestCase
from ....categories.models import Category
from ...models import Attachment, AttachmentType
class AttachmentAdminViewsTests(AdminTestCase):
    """Tests for the attachments admin list view and its delete actions."""

    def setUp(self):
        super().setUp()

        self.category = Category.objects.get(slug="first-category")
        self.post = test.post_thread(category=self.category).first_post

        self.filetype = AttachmentType.objects.order_by("id").first()

        self.admin_link = reverse("misago:admin:attachments:index")

    def mock_attachment(self, post=None, file=None, image=None, thumbnail=None):
        """Create a test attachment, optionally bound to a post.

        Fix: `file`, `image` and `thumbnail` were previously hardcoded to
        None in the `create()` call, silently discarding the values passed
        by the tests — eg. the `if attachment.thumbnail:` branch in
        test_list_view could never execute.
        """
        return Attachment.objects.create(
            secret=Attachment.generate_new_secret(),
            filetype=self.filetype,
            post=post,
            size=1000,
            uploader=self.user,
            uploader_name=self.user.username,
            uploader_slug=self.user.slug,
            filename="testfile_%s.zip" % (Attachment.objects.count() + 1),
            file=file,
            image=image,
            thumbnail=thumbnail,
        )

    def test_link_registered(self):
        """admin nav contains attachments link"""
        response = self.client.get(reverse("misago:admin:settings:index"))
        self.assertContains(response, self.admin_link)

    def test_list_view(self):
        """attachments list returns 200 and renders all attachments"""
        final_link = self.client.get(self.admin_link)["location"]

        response = self.client.get(final_link)
        self.assertEqual(response.status_code, 200)

        attachments = [
            self.mock_attachment(self.post, file="somefile.pdf"),
            self.mock_attachment(image="someimage.jpg"),
            self.mock_attachment(
                self.post, image="somelargeimage.png", thumbnail="somethumb.png"
            ),
        ]

        response = self.client.get(final_link)
        self.assertEqual(response.status_code, 200)

        for attachment in attachments:
            delete_link = reverse(
                "misago:admin:attachments:delete", kwargs={"pk": attachment.pk}
            )

            self.assertContains(response, attachment.filename)
            self.assertContains(response, delete_link)
            self.assertContains(response, attachment.get_absolute_url())
            self.assertContains(response, attachment.uploader.username)
            self.assertContains(response, attachment.uploader.get_absolute_url())

            if attachment.thumbnail:
                self.assertContains(response, attachment.get_thumbnail_url())

    def test_delete_multiple(self):
        """mass delete tool on list works"""
        attachments = [
            self.mock_attachment(self.post, file="somefile.pdf"),
            self.mock_attachment(image="someimage.jpg"),
            self.mock_attachment(
                self.post, image="somelargeimage.png", thumbnail="somethumb.png"
            ),
        ]

        self.post.attachments_cache = [{"id": attachments[-1].pk}]
        self.post.save()

        response = self.client.post(
            self.admin_link,
            data={"action": "delete", "selected_items": [a.pk for a in attachments]},
        )
        self.assertEqual(response.status_code, 302)
        self.assertEqual(Attachment.objects.count(), 0)

        # assert attachments were removed from post's cache
        attachments_cache = self.category.post_set.get(
            pk=self.post.pk
        ).attachments_cache
        self.assertIsNone(attachments_cache)

    def test_delete_view(self):
        """delete attachment view has no showstoppers"""
        attachment = self.mock_attachment(self.post)
        self.post.attachments_cache = [
            {"id": attachment.pk + 1},
            {"id": attachment.pk},
            {"id": attachment.pk + 2},
        ]
        self.post.save()

        action_link = reverse(
            "misago:admin:attachments:delete", kwargs={"pk": attachment.pk}
        )

        response = self.client.post(action_link)
        self.assertEqual(response.status_code, 302)

        # clean alert about item, grab final list url
        final_link = self.client.get(self.admin_link)["location"]

        response = self.client.get(final_link)
        self.assertEqual(response.status_code, 200)
        self.assertNotContains(response, action_link)

        # assert it was removed from post's attachments cache
        attachments_cache = self.category.post_set.get(
            pk=self.post.pk
        ).attachments_cache
        self.assertEqual(
            attachments_cache, [{"id": attachment.pk + 1}, {"id": attachment.pk + 2}]
        )
| 4,680
|
Python
|
.tac
| 103
| 35.184466
| 85
| 0.629833
|
rafalp/Misago
| 2,519
| 524
| 136
|
GPL-2.0
|
9/5/2024, 5:12:22 PM (Europe/Amsterdam)
|
18,636
|
test_attachment_types_views.py
|
rafalp_Misago/misago/threads/admin/tests/test_attachment_types_views.py
|
from django.urls import reverse
from ....acl.models import Role
from ....admin.test import AdminTestCase
from ...models import AttachmentType
class AttachmentTypeAdminViewsTests(AdminTestCase):
    """Tests for the attachment types admin CRUD views."""

    def setUp(self):
        super().setUp()
        self.admin_link = reverse("misago:admin:settings:attachment-types:index")

    def test_link_registered(self):
        """admin nav contains attachment types link"""
        response = self.client.get(reverse("misago:admin:settings:index"))
        self.assertContains(response, self.admin_link)

    def test_list_view(self):
        """attachment types list returns 200 and renders all attachment types"""
        response = self.client.get(self.admin_link)
        self.assertEqual(response.status_code, 200)

        for attachment_type in AttachmentType.objects.all():
            self.assertContains(response, attachment_type.name)
            for file_extension in attachment_type.extensions_list:
                self.assertContains(response, file_extension)
            for mimename in attachment_type.mimetypes_list:
                self.assertContains(response, mimename)

    def test_new_view(self):
        """new attachment type view has no showstoppers"""
        form_link = reverse("misago:admin:settings:attachment-types:new")

        response = self.client.get(form_link)
        self.assertEqual(response.status_code, 200)

        # empty submission re-renders the form with errors (200, not redirect)
        response = self.client.post(form_link, data={})
        self.assertEqual(response.status_code, 200)

        response = self.client.post(
            form_link,
            data={
                "name": "Test type",
                "extensions": ".test",
                "size_limit": 0,
                "status": AttachmentType.ENABLED,
            },
        )
        self.assertEqual(response.status_code, 302)

        # clean alert about new item created
        self.client.get(self.admin_link)

        response = self.client.get(self.admin_link)
        self.assertEqual(response.status_code, 200)
        self.assertContains(response, "Test type")
        self.assertContains(response, "test")

    def test_edit_view(self):
        """edit attachment type view has no showstoppers"""
        self.client.post(
            reverse("misago:admin:settings:attachment-types:new"),
            data={
                "name": "Test type",
                "extensions": ".test",
                "size_limit": 0,
                "status": AttachmentType.ENABLED,
            },
        )

        test_type = AttachmentType.objects.order_by("id").last()
        self.assertEqual(test_type.name, "Test type")

        form_link = reverse(
            "misago:admin:settings:attachment-types:edit", kwargs={"pk": test_type.pk}
        )

        response = self.client.get(form_link)
        self.assertEqual(response.status_code, 200)

        response = self.client.post(form_link, data={})
        self.assertEqual(response.status_code, 200)

        response = self.client.post(
            form_link,
            data={
                "name": "Test type edited",
                "extensions": ".test.extension",
                "mimetypes": "test/edited-mime",
                "size_limit": 512,
                "status": AttachmentType.DISABLED,
                "limit_uploads_to": [r.pk for r in Role.objects.all()],
                "limit_downloads_to": [r.pk for r in Role.objects.all()],
            },
        )
        self.assertEqual(response.status_code, 302)

        test_type = AttachmentType.objects.order_by("id").last()
        self.assertEqual(test_type.name, "Test type edited")

        # clean alert about new item created
        self.client.get(self.admin_link)

        response = self.client.get(self.admin_link)
        self.assertEqual(response.status_code, 200)
        self.assertContains(response, test_type.name)
        self.assertContains(response, test_type.extensions)
        self.assertContains(response, test_type.mimetypes)

        self.assertEqual(test_type.limit_uploads_to.count(), Role.objects.count())
        self.assertEqual(test_type.limit_downloads_to.count(), Role.objects.count())

        # remove limits from type
        response = self.client.post(
            form_link,
            data={
                "name": "Test type edited",
                "extensions": ".test.extension",
                "mimetypes": "test/edited-mime",
                "size_limit": 512,
                "status": AttachmentType.DISABLED,
                "limit_uploads_to": [],
                "limit_downloads_to": [],
            },
        )
        self.assertEqual(response.status_code, 302)

        # m2m managers hit the database, so the stale test_type object is fine
        self.assertEqual(test_type.limit_uploads_to.count(), 0)
        self.assertEqual(test_type.limit_downloads_to.count(), 0)

    def test_clean_params_view(self):
        """admin form nicely cleans lists of extensions/mimetypes"""
        # pairs of (raw form input, expected cleaned extensions list)
        TEST_CASES = [
            ("test", ["test"]),
            (".test", ["test"]),
            (".tar.gz", ["tar.gz"]),
            (". test", ["test"]),
            ("test, test", ["test"]),
            ("test, tEst", ["test"]),
            ("test, other, tEst", ["test", "other"]),
            ("test, other, tEst,OTher", ["test", "other"]),
        ]

        for raw, final in TEST_CASES:
            response = self.client.post(
                reverse("misago:admin:settings:attachment-types:new"),
                data={
                    "name": "Test type",
                    "extensions": raw,
                    "size_limit": 0,
                    "status": AttachmentType.ENABLED,
                },
            )
            self.assertEqual(response.status_code, 302)

            test_type = AttachmentType.objects.order_by("id").last()
            self.assertEqual(set(test_type.extensions_list), set(final))

    def test_delete_view(self):
        """delete attachment type view has no showstoppers"""
        self.client.post(
            reverse("misago:admin:settings:attachment-types:new"),
            data={
                "name": "Test type",
                "extensions": ".test",
                "size_limit": 0,
                "status": AttachmentType.ENABLED,
            },
        )

        test_type = AttachmentType.objects.order_by("id").last()
        self.assertEqual(test_type.name, "Test type")

        action_link = reverse(
            "misago:admin:settings:attachment-types:delete", kwargs={"pk": test_type.pk}
        )

        response = self.client.post(action_link)
        self.assertEqual(response.status_code, 302)

        # clean alert about item deleted
        self.client.get(self.admin_link)

        response = self.client.get(self.admin_link)
        self.assertEqual(response.status_code, 200)
        self.assertNotContains(response, test_type.name)

    def test_cant_delete_type_with_attachments_view(self):
        """delete attachment type is not allowed if it has attachments associated"""
        self.client.post(
            reverse("misago:admin:settings:attachment-types:new"),
            data={
                "name": "Test type",
                "extensions": ".test",
                "size_limit": 0,
                "status": AttachmentType.ENABLED,
            },
        )

        test_type = AttachmentType.objects.order_by("id").last()
        self.assertEqual(test_type.name, "Test type")

        test_type.attachment_set.create(
            secret="loremipsum",
            filetype=test_type,
            uploader_name="User",
            uploader_slug="user",
            filename="test.zip",
            file="sad76asd678as687sa.zip",
        )

        action_link = reverse(
            "misago:admin:settings:attachment-types:delete", kwargs={"pk": test_type.pk}
        )

        response = self.client.post(action_link)
        self.assertEqual(response.status_code, 302)

        # fetch from database to assert the type still exists (delete refused)
        AttachmentType.objects.get(pk=test_type.pk)
| 7,950
|
Python
|
.tac
| 181
| 32.342541
| 88
| 0.582406
|
rafalp/Misago
| 2,519
| 524
| 136
|
GPL-2.0
|
9/5/2024, 5:12:22 PM (Europe/Amsterdam)
|
18,637
|
attachments.py
|
rafalp_Misago/misago/threads/permissions/attachments.py
|
from django import forms
from django.utils.translation import pgettext_lazy
from ...acl import algebra
from ...acl.models import Role
from ...admin.forms import YesNoSwitch
from ..models import Attachment
# Admin Permissions Forms
class PermissionsForm(forms.Form):
    """Admin form for attachment permissions of a regular role."""

    legend = pgettext_lazy("attachments permission", "Attachments")

    max_attachment_size = forms.IntegerField(
        label=pgettext_lazy("attachments permission", "Max attached file size (in kb)"),
        # NOTE(review): the help text grammar is broken ("to don't ... end
        # deleting"), but the string is a translatable msgid — rewording it
        # would orphan existing translations, so it is flagged rather than
        # changed here.
        help_text=pgettext_lazy(
            "permissions", "Enter 0 to don't allow uploading end deleting attachments."
        ),
        initial=500,
        min_value=0,
    )
    can_download_other_users_attachments = YesNoSwitch(
        label=pgettext_lazy(
            "attachments permission", "Can download other users attachments"
        )
    )
    can_delete_other_users_attachments = YesNoSwitch(
        label=pgettext_lazy(
            "attachments permission", "Can delete other users attachments"
        )
    )
class AnonymousPermissionsForm(forms.Form):
    """Admin form for attachment permissions of the anonymous special role."""

    legend = pgettext_lazy("attachments permission", "Attachments")

    can_download_other_users_attachments = YesNoSwitch(
        label=pgettext_lazy("attachments permission", "Can download attachments")
    )
def change_permissions_form(role):
    """Return the permissions form class for given role.

    Returns None (implicitly) for objects that aren't Role instances.
    """
    if not isinstance(role, Role):
        return None
    if role.special_role == "anonymous":
        return AnonymousPermissionsForm
    return PermissionsForm
def build_acl(acl, roles, key_name):
    """Sum attachment permissions from roles on top of defaults and given acl."""
    defaults = {
        "max_attachment_size": 0,
        "can_download_other_users_attachments": False,
        "can_delete_other_users_attachments": False,
    }

    return algebra.sum_acls(
        # values from acl override the defaults
        {**defaults, **acl},
        roles=roles,
        key=key_name,
        max_attachment_size=algebra.greater,
        can_download_other_users_attachments=algebra.greater,
        can_delete_other_users_attachments=algebra.greater,
    )
def add_acl_to_attachment(user_acl, attachment):
    """Annotate attachment's acl dict with the user's "can_delete" permission."""
    is_authenticated = user_acl["is_authenticated"]
    if is_authenticated and user_acl["user_id"] == attachment.uploader_id:
        # Users may always delete their own attachments.
        can_delete = True
    else:
        can_delete = (
            is_authenticated and user_acl["can_delete_other_users_attachments"]
        )
    attachment.acl.update({"can_delete": can_delete})
def register_with(registry):
    """Register the attachment ACL annotator in the ACL providers registry."""
    registry.acl_annotator(Attachment, add_acl_to_attachment)
| 2,427
|
Python
|
.tac
| 62
| 32.451613
| 88
| 0.690375
|
rafalp/Misago
| 2,519
| 524
| 136
|
GPL-2.0
|
9/5/2024, 5:12:22 PM (Europe/Amsterdam)
|
18,638
|
attachment.py
|
rafalp_Misago/misago/threads/serializers/attachment.py
|
from django.urls import reverse
from rest_framework import serializers
from ..models import Attachment
__all__ = ["AttachmentSerializer"]
class AttachmentSerializer(serializers.ModelSerializer):
    """Serializes attachments for the threads API."""

    post = serializers.PrimaryKeyRelatedField(read_only=True)
    acl = serializers.SerializerMethodField()
    is_image = serializers.SerializerMethodField()
    filetype = serializers.SerializerMethodField()
    url = serializers.SerializerMethodField()

    class Meta:
        model = Attachment
        fields = [
            "id",
            "filetype",
            "post",
            "uploaded_on",
            "uploader_name",
            "filename",
            "size",
            "acl",
            "is_image",
            "url",
        ]

    def get_acl(self, obj):
        # The acl attribute is annotated on the instance elsewhere; it may
        # be missing entirely, in which case None is serialized.
        return getattr(obj, "acl", None)

    def get_is_image(self, obj):
        return obj.is_image

    def get_filetype(self, obj):
        return obj.filetype.name

    def get_url(self, obj):
        return {
            "index": obj.get_absolute_url(),
            "thumb": obj.get_thumbnail_url(),
            "uploader": self.get_uploader_url(obj),
        }

    def get_uploader_url(self, obj):
        # Attachments keep only the uploader's name/slug snapshot when the
        # uploader account is gone — no profile url in that case.
        if not obj.uploader_id:
            return None
        return reverse(
            "misago:user", kwargs={"slug": obj.uploader_slug, "pk": obj.uploader_id}
        )
| 1,403
|
Python
|
.tac
| 44
| 22.795455
| 88
| 0.584261
|
rafalp/Misago
| 2,519
| 524
| 136
|
GPL-2.0
|
9/5/2024, 5:12:22 PM (Europe/Amsterdam)
|
18,639
|
objectacl.py
|
rafalp_Misago/misago/acl/objectacl.py
|
from .providers import providers
def add_acl_to_obj(user_acl, obj):
    """add valid ACL to obj (iterable of objects or single object)"""
    # Normalize the argument to an iterable, then annotate every item.
    targets = obj if hasattr(obj, "__iter__") else [obj]
    for target in targets:
        _add_acl_to_obj(user_acl, target)
def _add_acl_to_obj(user_acl, obj):
    """add valid ACL to single obj, helper for add_acl function"""
    # Reset the annotation, then let every registered annotator for this
    # object's type fill it in.
    obj.acl = {}

    for annotator in providers.get_obj_type_annotators(obj):
        annotator(user_acl, obj)
| 508
|
Python
|
.tac
| 13
| 33.076923
| 69
| 0.640816
|
rafalp/Misago
| 2,519
| 524
| 136
|
GPL-2.0
|
9/5/2024, 5:12:22 PM (Europe/Amsterdam)
|
18,640
|
setup.py
|
openstenoproject_plover/setup.py
|
#!/usr/bin/env python3
# Copyright (c) 2010 Joshua Harlan Lifton.
# See LICENSE.txt for details.

"""Setup script for Plover: wires custom build/distribution commands into setuptools."""

import os
import re
import subprocess
import sys

from setuptools import setup

try:
    from setuptools.extern.packaging.version import Version
except ImportError:
    # Handle broken unvendored version of setuptools...
    from packaging.version import Version

sys.path.insert(0, os.path.dirname(__file__))

__software_name__ = 'plover'

# Execute plover/__init__.py to pick up __version__, __description__, etc.
with open(os.path.join(__software_name__, '__init__.py')) as fp:
    exec(fp.read())

from plover_build_utils.setup import (
    BuildPy, BuildUi, Command, Develop, babel_options
)

# Build the UI before Python sources; run build_py on `develop` too.
BuildPy.build_dependencies.append('build_ui')
Develop.build_dependencies.append('build_py')
cmdclass = {
    'build_py': BuildPy,
    'build_ui': BuildUi,
    'develop': Develop,
}
options = {}

# Base name for distribution artifacts, e.g. "plover-4.0.0".
PACKAGE = '%s-%s' % (
    __software_name__,
    __version__,
)
# Helpers. {{{
def get_version():
    """Derive a version string from `git describe`, or None outside a checkout.

    Commits past the last tag are encoded as a `+N.gHASH` local version part.
    """
    if not os.path.exists('.git'):
        return None
    described = subprocess.check_output(
        'git describe --tags --match=v[0-9]*'.split()
    ).strip().decode()
    match = re.match(
        r'^v(\d[\d.]*(?:(?:\.dev|rc)\d+)?)(-\d+-g[a-f0-9]*)?$', described
    )
    assert match is not None, described
    version = match.group(1)
    extra = match.group(2)
    if extra is not None:
        # "-3-gabc123" -> "+3.gabc123"
        version += '+' + extra[1:].replace('-', '.')
    return version
# }}}
# `bdist_win` command. {{{
class BinaryDistWin(Command):
    """`bdist_win`: build the Windows distribution via windows/dist_build.sh."""

    description = 'create distribution(s) for MS Windows'
    user_options = [
        ('trim', 't',
         'trim the resulting distribution to reduce size'),
        ('zipdir', 'z',
         'create a zip of the resulting directory'),
        ('installer', 'i',
         'create an executable installer for the resulting distribution'),
        ('bash=', None,
         'bash executable to use for running the build script'),
    ]
    boolean_options = ['installer', 'trim', 'zipdir']
    extra_args = []

    def initialize_options(self):
        self.bash = None
        self.installer = False
        self.trim = False
        self.zipdir = False

    def finalize_options(self):
        pass

    def run(self):
        # Delegate the build to the shell script, forwarding selected flags.
        cmd = [self.bash or 'bash', 'windows/dist_build.sh']
        if self.installer:
            cmd.append('--installer')
        if self.trim:
            cmd.append('--trim')
        if self.zipdir:
            cmd.append('--zipdir')
        # self.bdist_wheel() comes from the plover_build_utils Command base;
        # presumably it builds the wheel and returns its path — confirm there.
        cmd.extend((__software_name__, __version__, self.bdist_wheel()))
        if self.verbose:
            print('running', ' '.join(cmd))
        subprocess.check_call(cmd)

if sys.platform.startswith('win32'):
    cmdclass['bdist_win'] = BinaryDistWin
# }}}
# `launch` command. {{{
class Launch(Command):
    """`launch`: run Plover directly from the source checkout."""

    description = 'run %s from source' % __software_name__.capitalize()
    command_consumes_arguments = True
    user_options = []

    def initialize_options(self):
        self.args = None

    def finalize_options(self):
        pass

    def run(self):
        with self.project_on_sys_path():
            # Ensure any subprocess also sees the project first on PYTHONPATH.
            python_path = os.environ.get('PYTHONPATH', '').split(os.pathsep)
            python_path.insert(0, sys.path[0])
            os.environ['PYTHONPATH'] = os.pathsep.join(python_path)
            cmd = [sys.executable, '-m', 'plover.scripts.main'] + self.args
            if sys.platform.startswith('win32'):
                # Workaround https://bugs.python.org/issue19066
                subprocess.Popen(cmd, cwd=os.getcwd())
                sys.exit(0)
            # Replace the current process with Plover.
            os.execv(cmd[0], cmd)

cmdclass['launch'] = Launch
# }}}
# `patch_version` command. {{{
class PatchVersion(Command):
    """`patch_version`: rewrite `__version__` in plover/__init__.py.

    With one argument, use it as the version (validated with packaging's
    Version); with no argument, derive the version from git via get_version().
    """

    description = 'patch package version from VCS'
    command_consumes_arguments = True
    user_options = []

    def initialize_options(self):
        self.args = []

    def finalize_options(self):
        assert 0 <= len(self.args) <= 1

    def run(self):
        if self.args:
            version = self.args[0]
            # Ensure it's valid.
            Version(version)
        else:
            version = get_version()
            if version is None:
                sys.exit(1)
        if self.verbose:
            print('patching version to', version)
        version_file = os.path.join('plover', '__init__.py')
        with open(version_file, 'r') as fp:
            contents = fp.read().split('\n')
        contents = [re.sub(r'^__version__ = .*$', "__version__ = '%s'" % version, line)
                    for line in contents]
        with open(version_file, 'w') as fp:
            fp.write('\n'.join(contents))

cmdclass['patch_version'] = PatchVersion
# }}}
# `bdist_app` and `bdist_dmg` commands. {{{
class BinaryDistApp(Command):
    """`bdist_app`: build the macOS application bundle via osx/make_app.sh."""

    description = 'create an application bundle for Mac'
    user_options = []
    extra_args = []

    def initialize_options(self):
        pass

    def finalize_options(self):
        pass

    def run(self):
        cmd = ['bash', 'osx/make_app.sh', self.bdist_wheel()]
        if self.verbose:
            print('running', ' '.join(cmd))
        subprocess.check_call(cmd)


class BinaryDistDmg(Command):
    """`bdist_dmg`: wrap the bundle produced by `bdist_app` in a dmg image."""

    user_options = []
    extra_args = []

    def initialize_options(self):
        pass

    def finalize_options(self):
        pass

    def run(self):
        self.run_command('bdist_app')
        # Encode targeted macOS platform in the filename.
        from wheel.bdist_wheel import get_platform
        platform = get_platform('dist/Plover.app')
        args = '{out!r}, {name!r}, {settings!r}, lookForHiDPI=True'.format(
            out='dist/%s-%s.dmg' % (PACKAGE, platform),
            name=__software_name__.capitalize(),
            settings='osx/dmg_resources/settings.py',
        )
        if self.verbose:
            print('running dmgbuild(%s)' % args)
        # Run dmgbuild in a subprocess so importing it is only needed here.
        script = "__import__('dmgbuild').build_dmg(" + args + ')'
        subprocess.check_call((sys.executable, '-u', '-c', script))

if sys.platform.startswith('darwin'):
    cmdclass['bdist_app'] = BinaryDistApp
    cmdclass['bdist_dmg'] = BinaryDistDmg
# }}}
# `bdist_appimage` command. {{{
class BinaryDistAppImage(Command):
    """`bdist_appimage`: build the Linux AppImage via linux/appimage/build.sh."""

    description = 'create AppImage distribution for Linux'
    user_options = [
        ('docker', None,
         'use docker to run the build script'),
        ('no-update-tools', None,
         'don\'t try to update AppImage tools, only fetch missing ones'),
    ]
    boolean_options = ['docker', 'no-update-tools']

    def initialize_options(self):
        self.docker = False
        self.no_update_tools = False

    def finalize_options(self):
        pass

    def run(self):
        cmd = ['./linux/appimage/build.sh']
        if self.docker:
            cmd.append('--docker')
        else:
            # Without docker, build against the current interpreter.
            cmd.extend(('--python', sys.executable))
        if self.no_update_tools:
            cmd.append('--no-update-tools')
        cmd.extend(('--wheel', self.bdist_wheel()))
        if self.verbose:
            print('running', ' '.join(cmd))
        subprocess.check_call(cmd)

if sys.platform.startswith('linux'):
    cmdclass['bdist_appimage'] = BinaryDistAppImage
# }}}
# i18n support. {{{
# Register Babel catalog options and hook gettext into the UI build.
options.update(babel_options(__software_name__))
BuildPy.build_dependencies.append('compile_catalog')
BuildUi.hooks.append('plover_build_utils.pyqt:gettext')
# }}}
def reqs(name):
    """Return the raw contents of the requirements file reqs/<name>.txt."""
    path = os.path.join('reqs', '%s.txt' % name)
    with open(path) as fp:
        return fp.read()
# Metadata dunders (__version__, __description__, ...) were loaded above by
# exec()ing plover/__init__.py.
setup(
    name=__software_name__,
    version=__version__,
    description=__description__,
    url=__url__,
    download_url=__download_url__,
    license=__license__,
    options=options,
    cmdclass=cmdclass,
    install_requires=reqs('dist'),
    extras_require={
        'gui_qt': reqs('dist_extra_gui_qt'),
        'log': reqs('dist_extra_log'),
    },
    tests_require=reqs('test'),
)

# vim: foldmethod=marker
| 7,731
|
Python
|
.py
| 226
| 27.442478
| 101
| 0.606048
|
openstenoproject/plover
| 2,318
| 281
| 179
|
GPL-2.0
|
9/5/2024, 5:12:22 PM (Europe/Amsterdam)
|
18,641
|
conftest.py
|
openstenoproject_plover/test/conftest.py
|
import os
import pytest
# Ensure i18n support does not mess us up.
os.environ['LANGUAGE'] = 'C'

from plover import system
from plover.config import DEFAULT_SYSTEM_NAME
from plover.registry import registry


@pytest.fixture(scope='session', autouse=True)
def setup_plover():
    """Initialize the plugin registry and default steno system once per session."""
    registry.update()
    system.setup(DEFAULT_SYSTEM_NAME)


# Rewrite asserts in the shared testing helpers for detailed failure messages.
pytest.register_assert_rewrite('plover_build_utils.testing')
def pytest_collection_modifyitems(items):
    """Tag every test collected under a `gui_qt` directory with the gui_qt marker."""
    for item in items:
        path_parts = item.location[0].split(os.path.sep)
        if 'gui_qt' in path_parts:
            item.add_marker(pytest.mark.gui_qt)
| 604
|
Python
|
.py
| 17
| 31.764706
| 60
| 0.744828
|
openstenoproject/plover
| 2,318
| 281
| 179
|
GPL-2.0
|
9/5/2024, 5:12:22 PM (Europe/Amsterdam)
|
18,642
|
test_machine.py
|
openstenoproject_plover/test/test_machine.py
|
from unittest.mock import Mock
from plover.machine.base import ThreadedStenotypeBase
class MyMachine(ThreadedStenotypeBase):
    """Machine whose worker thread dies with an unhandled exception."""

    def run(self):
        # Raising a non-exception object raises TypeError — any unhandled
        # exception in the worker thread serves this test's purpose.
        raise "some unexpected error"
def test_update_machine_staten_on_unhandled_exception():
    # NOTE(review): "staten" is a typo for "state"; kept so the test id
    # stays stable.
    machine = MyMachine()
    callback = Mock()
    machine.add_state_callback(callback)
    machine.start_capture()
    machine.join()
    # A crashed worker thread must leave the machine reporting 'disconnected'.
    callback.assert_called_with('disconnected')
| 425
|
Python
|
.py
| 12
| 31.25
| 56
| 0.76399
|
openstenoproject/plover
| 2,318
| 281
| 179
|
GPL-2.0
|
9/5/2024, 5:12:22 PM (Europe/Amsterdam)
|
18,643
|
test_blackbox.py
|
openstenoproject_plover/test/test_blackbox.py
|
# -*- coding: utf-8 -*-
import pytest
from plover import system
from plover.registry import Registry
from plover.system import english_stenotype
from plover_build_utils.testing import blackbox_test
@pytest.fixture
def with_melani_system(monkeypatch, request):
    """Patch the system registry to also offer a custom "Melani" layout.

    NOTE(review): `request` is unused — presumably kept for fixture-signature
    parity; confirm before removing.
    """

    class Melani:
        KEYS = (
            '#',
            'S-', 'P-', 'C-', 'T-', 'H-', 'V-', 'R-',
            'I-', 'A-',
            '-E', '-O',
            '-c', '-s', '-t', '-h', '-p', '-r',
            '*',
            '-i', '-e', '-a', '-o',
        )
        IMPLICIT_HYPHEN_KEYS = KEYS
        SUFFIX_KEYS = ()
        NUMBER_KEY = '#'
        NUMBERS = {
            'S-': '1-',
            'P-': '2-',
            'T-': '3-',
            'V-': '4-',
            'I-': '5-',
            '-O': '0-',
            '-c': '-6',
            '-t': '-7',
            '-p': '-8',
            '-i': '-9',
        }
        UNDO_STROKE_STENO = '*'
        ORTHOGRAPHY_RULES = []
        ORTHOGRAPHY_RULES_ALIASES = {}
        ORTHOGRAPHY_WORDLIST = None
        KEYMAPS = {}
        DICTIONARIES_ROOT = None
        DEFAULT_DICTIONARIES = ()

    registry = Registry()
    registry.register_plugin('system', 'English Stenotype', english_stenotype)
    registry.register_plugin('system', 'Melani', Melani)
    old_system_name = system.NAME
    with monkeypatch.context() as mp:
        mp.setattr('plover.system.registry', registry)
        yield
    # Restore whatever system was active before the test ran.
    system.setup(old_system_name)
@pytest.fixture
def with_korean_system(monkeypatch, request):
    """Patch the system registry to also offer a "Korean Modern C" layout.

    NOTE(review): `request` is unused — presumably kept for fixture-signature
    parity; confirm before removing.
    """

    class KoreanCAS:
        KEYS = (
            '1-', '2-', '3-', '4-', '5-',
            'ㅎ-', 'ㅁ-', 'ㄱ-', 'ㅈ-', 'ㄴ-',
            'ㄷ-', 'ㅇ-', 'ㅅ-', 'ㅂ-', 'ㄹ-',
            'ㅗ-', 'ㅏ-', 'ㅜ-',
            '-*', '-ㅓ', '-ㅣ',
            '-6', '-7', '-8', '-9', '-0',
            '-ㅎ', '-ㅇ', '-ㄹ', '-ㄱ', '-ㄷ',
            '-ㅂ', '-ㄴ', '-ㅅ', '-ㅈ', '-ㅁ',
        )
        IMPLICIT_HYPHEN_KEYS = (
            'ㅗ-', 'ㅏ-', 'ㅜ-',
            '-*', '-ㅓ', '-ㅣ',
        )
        SUFFIX_KEYS = ()
        NUMBER_KEY = None
        NUMBERS = {}
        UNDO_STROKE_STENO = '-ㅂㄴ'
        ORTHOGRAPHY_RULES = []
        ORTHOGRAPHY_RULES_ALIASES = {}
        ORTHOGRAPHY_WORDLIST = None
        KEYMAPS = {}
        DICTIONARIES_ROOT = None
        DEFAULT_DICTIONARIES = ()

    registry = Registry()
    registry.register_plugin('system', 'English Stenotype', english_stenotype)
    registry.register_plugin('system', 'Korean Modern C', KoreanCAS)
    old_system_name = system.NAME
    with monkeypatch.context() as mp:
        mp.setattr('plover.system.registry', registry)
        yield
    # Restore whatever system was active before the test ran.
    system.setup(old_system_name)
@blackbox_test
class TestsBlackbox:
def test_translator_state_handling(self):
# Check if the translator curtailing the list of last translations
# according to its dictionary longest key does no affect things
# like the restrospective repeat-last-stroke command.
r'''
"TEFT": "test",
"R*S": "{*+}",
TEFT/R*S " test test"
'''
def test_force_lowercase_title(self):
r'''
"T-LT": "{MODE:title}",
"TEFT": "{>}test",
T-LT/TEFT " test"
'''
def test_bug471(self):
# Repeat-last-stroke after typing two numbers outputs the numbers
# reversed for some combos.
r'''
"R*S": "{*+}",
12/R*S " 1212"
'''
def test_bug535(self):
# Currency formatting a number with a decimal fails by not erasing
# the previous output.
r'''
"P-P": "{^.^}",
"KR*UR": "{*($c)}",
1/P-P/2/KR*UR " $1.20"
'''
def test_bug606(self):
r'''
"KWEGS": "question",
"-S": "{^s}",
"TP-PL": "{.}",
:spaces_after
KWEGS/-S/TP-PL "questions. "
'''
def test_bug535_spaces_after(self):
# Currency formatting a number with a decimal fails by not erasing
# the previous output.
r'''
"P-P": "{^.^}",
"KR*UR": "{*($c)}",
:spaces_after
1/P-P/2/KR*UR "$1.20 "
'''
def test_undo_not_defined(self):
r'''
'TEFT': 'test',
TEFT ' test'
* ''
'''
def test_undo_overriden(self):
r'''
'TEFT': 'test',
'*': 'not undo',
TEFT ' test'
* ' test not undo'
'''
def test_undo_macro(self):
r'''
'TEFT': 'test',
'TPH-D': '=undo',
TEFT ' test'
TPH-D ''
'''
def test_undo_fingerspelling_1(self):
r'''
'T*': '{>}{&t}',
'*E': '{>}{&e}',
'S*': '{>}{&s}',
'T*': '{>}{&t}',
T*/*E/S*/T* ' test'
*/* ' te'
*/* ''
'''
def test_undo_fingerspelling_2(self):
r'''
'T*': '{>}{&t}',
'*E': '{>}{&e}',
'S*': '{>}{&s}',
'T*': '{>}{&t}',
:spaces_after
T*/*E/S*/T* 'test '
*/* 'te '
*/* ''
'''
def test_undo_replaced_1(self):
r'''
"TEFT": "test",
"TEFT/TKPWOEFT": "{}",
TEFT ' test'
TKPWOEFT ''
* ' test'
'''
def test_undo_replaced_2(self):
r'''
"HROS": "loss",
"HRO*S": "lost",
"*P": "{*}",
HROS ' loss'
*P ' lost'
* ''
'''
def test_undo_replaced_3(self):
r'''
'PER': 'perfect',
'SWAEUGS': 'situation',
'PER/SWAEUGS': 'persuasion',
'SP*': '{*?}',
PER ' perfect'
SWAEUGS ' persuasion'
SP* ' perfect situation'
* ' persuasion'
'''
def test_bug557(self):
# Using the asterisk key to delete letters in fingerspelled words
# occasionally causes problems when the space placement is set to
# "After Output".
r'''
"EU": "I",
"HRAOEUBG": "like",
"T*": "{>}{&t}",
"A*": "{>}{&a}",
"KR*": "{>}{&c}",
"O*": "{>}{&o}",
"S*": "{>}{&s}",
:spaces_after
EU/HRAOEUBG/T*/A*/KR*/O*/S*/*/*/* "I like ta "
'''
def test_bug557_resumed(self):
# Using the asterisk key to delete letters in fingerspelled words
# occasionally causes problems when the space placement is set to
# "After Output".
r'''
"EU": "I",
"HRAOEUBG": "like",
"T*": "{>}{&t}",
"A*": "{>}{&a}",
"KR*": "{>}{&c}",
"O*": "{>}{&o}",
"S*": "{>}{&s}",
:spaces_after
EU/HRAOEUBG/T*/A*/KR*/O*/S*/*/*/*/*/*/HRAOEUBG "I like like "
'''
def test_bug557_capitalized(self):
# Using the asterisk key to delete letters in fingerspelled words
# occasionally causes problems when the space placement is set to
# "After Output".
r'''
"EU": "I",
"HRAOEUBG": "like",
"T*": "{-|}{&t}",
"A*": "{-|}{&a}",
"KR*": "{-|}{&c}",
"O*": "{-|}{&o}",
"S*": "{-|}{&s}",
:spaces_after
EU/HRAOEUBG/T*/A*/KR*/O*/S*/*/*/* "I like TA "
'''
def test_capitalized_fingerspelling_spaces_after(self):
r'''
"HRAOEUBG": "like",
"T*": "{&T}",
"A*": "{&A}",
"KR*": "{&C}",
"O*": "{&O}",
"S*": "{&S}",
:spaces_after
HRAOEUBG/T*/A*/KR*/O*/S* "like TACOS "
'''
def test_special_characters(self):
r'''
"R-R": "{^}\n{^}",
"TAB": "\t",
R-R/TAB "\n\t"
'''
def test_automatic_suffix_keys_1(self):
r'''
"RAEUS": "race",
"RAEUZ": "raise",
"-S": "{^s}",
"-Z": "{^s}",
RAEUSZ " races"
'''
def test_bug719(self):
# Glue (&) does not work with "Spaces After".
r'''
"P*": "{&P}"
:spaces_after
P*/P*/P*/P*/P*/P* 'PPPPPP '
'''
def test_bug741(self):
# Uppercase last word also uppercases next word's prefix.
r'''
"KPA*TS": "{*<}",
"TPAO": "foo",
"KAUPB": "{con^}",
TPAO/KPA*TS/KAUPB/TPAO " FOO confoo"
'''
def test_carry_capitalization_spacing1(self):
r'''
'S-P': '{^ ^}',
'R-R': '{^~|\n^}',
S-P/R-R ' \n'
'''
def test_carry_capitalization_spacing2(self):
r'''
'S-P': '{^ ^}',
'R-R': '{^~|\n^}',
:spaces_after
S-P/R-R ' \n'
'''
def test_orthography1(self):
r'''
'TEFT': 'test',
'-G': '{^ing}',
TEFT/-G ' testing'
'''
def test_orthography2(self):
r'''
'SKEL': 'cancel',
'-G': '{^ing}',
SKEL/-G " canceling"
'''
def test_orthography3(self):
r'''
'PREPB': '{(^}',
'SKEL': 'cancel',
'-G': '{^ing}',
PREPB/SKEL/-G " (canceling"
'''
def test_orthography4(self):
r'''
'SKEL': '{&c}{&a}{&n}{&c}{&e}{&l}',
'-G': '{^ing}',
SKEL/-G " canceling"
'''
def test_orthography5(self):
r'''
'TPAOEUPL': 'time',
'-G': '{^ing}',
TPAOEUPL/-G ' timing'
'''
def test_orthography6(self):
r'''
'PRE': '{prefix-^}',
'SKEL': 'cancel',
'-G': '{^ing}',
PRE/SKEL/-G " prefix-canceling"
'''
def test_orthography7(self):
r'''
'PRE': '{prefix^}',
'SKEL': 'cancel',
'-G': '{^ing}',
PRE/SKEL/-G " prefixcanceling"
'''
def test_orthography8(self):
r'''
'PRE': '{&p}{&r}{&e}{&f}{&i}{&x}',
"TK-LS": "{^}",
'SKEL': 'cancel',
'-G': '{^ing}',
PRE/TK-LS/SKEL/-G " prefixcanceling"
'''
def test_orthography9(self):
r'''
'PRE': '{prefix^}',
'SKEL': '{&c}{&a}{&n}{&c}{&e}{&l}',
'-G': '{^ing}',
PRE/SKEL/-G " prefixcanceling"
'''
def test_orthography10(self):
r'''
'PHO*D': "{MODE:CAMEL}",
'PRE': '{prefix^}',
'SKEL': 'cancel',
'-G': '{^ing}',
PHO*D/PRE/SKEL/-G "prefixcanceling"
'''
def test_orthography11(self):
r'''
'PHO*D': "{MODE:SNAKE}",
'TEFT': 'test',
'SKEL': 'cancel',
'-G': '{^ing}',
PHO*D/TEFT "_test"
SKEL/-G "_test_canceling"
'''
def test_orthography12(self):
r'''
'PHO*D': "{MODE:CAMEL}",
'TEFT': 'test',
'SKEL': 'cancel',
'-G': '{^ing}',
PHO*D/TEFT "test"
SKEL/-G "testCanceling"
'''
def test_orthography13(self):
r'''
'PHO*D': "{MODE:SNAKE}",
'PRE': '{prefix^}',
'TEFT': 'test',
'SKEL': 'cancel',
'-G': '{^ing}',
PHO*D/TEFT "_test"
PRE/SKEL/-G "_test_prefixcanceling"
'''
def test_orthography14(self):
r'''
'PHO*D': "{MODE:CAMEL}",
'PRE': '{prefix^}',
'TEFT': 'test',
'SKEL': 'cancel',
'-G': '{^ing}',
PHO*D/TEFT "test"
PRE/SKEL/-G "testPrefixcanceling"
'''
def test_orthography15(self):
r'''
'PHO*D': "{MODE:CAPS}",
'SKEL': 'cancel',
'-G': '{^ing}',
PHO*D/SKEL/-G " CANCELING"
'''
def test_orthography16(self):
r'''
'SKEL': '{&C}{&A}{&N}{&C}{&E}{&L}',
'-G': '{^ing}',
SKEL/-G " CANCELLing"
'''
def test_orthography17(self):
r'''
'SKEL': 'CANCEL',
'-G': '{^ing}',
SKEL/-G " CANCELLing"
'''
def test_orthography18(self):
r'''
'TPAOEUPL': 'TIME',
'-G': '{^ing}',
TPAOEUPL/-G ' TIMing'
'''
def test_orthography19(self):
r'''
'TPAOEUPL': '{&T}{&I}{&M}{&E}',
'-G': '{^ing}',
TPAOEUPL/-G ' TIMing'
'''
def test_after_initial_being(self):
r'''
'-B': 'be',
'-B/-G': 'being',
:spaces_after
-B/-G 'being '
'''
def test_after_period(self):
r'''
'TEFT': 'test',
'P-P': '{.}',
:spaces_after
TEFT/P-P/TEFT 'test. Test '
'''
def test_spaces_after1(self):
r'''
'TEFT': 'test',
'TEFTD': 'tested',
'-G': '{^ing}',
'P-P': '{.}',
:spaces_after
TEFT 'test '
-G 'testing '
P-P 'testing. '
TEFTD 'testing. Tested '
'''
def test_start_attached1(self):
r'''
'TEFT': 'test',
:start_attached
TEFT 'test'
'''
def test_start_attached2(self):
r'''
'TEFT': 'test',
:start_attached
:spaces_after
TEFT 'test '
'''
def test_space_placement_change1(self):
r'''
'TEFT': 'test',
'TEFTD': 'tested',
'TEFTS': 'tests',
:spaces_before
TEFT/TEFTD ' test tested'
:spaces_after
TEFTS/TEFT ' test tested tests test '
'''
def test_space_placement_change2(self):
r'''
'TEFT': 'test',
'TEFTD': 'tested',
'TEFTS': 'tests',
:spaces_after
TEFT/TEFTD 'test tested '
:spaces_before
TEFTS/TEFT 'test tested tests test'
'''
def test_undo_after_space_placement_change1(self):
r'''
'TEFT': 'test',
'TEFTD': 'tested',
:spaces_before
TEFT/TEFTD ' test tested'
:spaces_after
* ' test '
* ''
'''
def test_undo_after_space_placement_change2(self):
r'''
'TEFT': 'test',
'TEFTD': 'tested',
:spaces_after
TEFT/TEFTD 'test tested '
:spaces_before
* 'test'
* ''
'''
def test_undo_after_space_placement_change3(self):
r'''
'KPA': '{}{-|}',
'TEFT': 'test',
'TEFTD': 'tested',
:spaces_before
KPA/TEFT/TEFTD ' Test tested'
:spaces_after
* ' Test '
* ''
'''
def test_undo_after_space_placement_change4(self):
r'''
'KPA': '{}{-|}',
'TEFT': 'test',
'TEFTD': 'tested',
:spaces_after
KPA/TEFT/TEFTD ' Test tested '
:spaces_before
* ' Test'
* ''
'''
def test_undo_with_space_placement_changes(self):
r'''
'TEFT': 'test',
TEFT/TEFT/TEFT ' test test test'
:spaces_after
* ' test test '
:spaces_before
* ' test'
:spaces_after
* ''
'''
def test_carry_capitalization1(self):
r'''
'KA*US': "{~|'^}cause",
'TEFT': 'test',
'P-P': '{.}',
P-P/KA*US/TEFT ". 'Cause test"
'''
def test_carry_capitalization2(self):
r'''
"KR-GS": "{^~|\"}",
KR-GS '"'
'''
def test_carry_capitalization3(self):
r'''
'TP*U': '{<}',
"TK-LS": "{^}",
'TEFT': 'test',
'S-FBGS': '{^suffix}',
TP*U/TEFT/S-FBGS/TK-LS/S-FBGS ' TESTSUFFIXSUFFIX'
'''
def test_carry_capitalization4(self):
r'''
'TP*U': '{<}',
"TK-LS": "{^}",
'TEFT': 'test',
"S-P": "{^ ^}"
TP*U/TEFT/S-P/TEFT ' TEST test'
'''
def test_carry_capitalization5(self):
r'''
'TP*U': '{<}',
"TK-LS": "{^}",
'TEFT': 'test',
'S-FBGS': '{^suffix}',
"S-P": "{^ ^}"
TP*U/TEFT/S-FBGS/S-P/S-FBGS ' TESTSUFFIX suffix'
'''
def test_capitalize1(self):
r'''
'KPA': '{}{-|}',
'TEFT': 'test',
KPA/TEFT ' Test'
'''
def test_capitalize2(self):
r'''
'KPA': '{}{-|}',
'TEFT': 'test',
:start_attached
KPA/TEFT ' Test'
'''
def test_capitalize3(self):
r'''
'KPA': '{}{-|}',
'TEFT': 'test',
:spaces_after
KPA/TEFT ' Test '
'''
def test_capitalize4(self):
r'''
'KPA': '{}{-|}',
'TEFT': 'test',
:start_attached
:spaces_after
KPA/TEFT ' Test '
'''
def test_retro_capitalize1(self):
r'''
'KWEUP': 'equip',
'RUP': '{*-|}',
KWEUP/RUP ' Equip'
'''
def test_retro_capitalize2(self):
r'''
'KWEUP': 'equip',
'RUP': '{*-|}',
:spaces_after
KWEUP/RUP 'Equip '
'''
def test_retro_capitalize3(self):
r'''
'TEFT': 'tèśtîñg',
'RUP': '{*-|}',
TEFT/RUP ' Tèśtîñg'
'''
def test_retro_capitalize4(self):
r'''
'PRE': '{pre^}',
'TPEUBG': 'fix',
'RUP': '{*-|}',
PRE/TPEUBG/RUP ' Prefix'
'''
def test_retro_capitalize5(self):
r'''
'KWEUP': 'equip',
'RUP': '{:retro_case:cap_first_word}',
KWEUP/RUP ' Equip'
'''
def test_retro_currency1(self):
r'''
'TPHAPB': 'notanumber',
'R-BG': '{*($c)}',
TPHAPB/R-BG ' notanumber'
'''
def test_retro_currency2(self):
r'''
'TPHAPB': 'notanumber',
'R-BG': '{*($c)}',
:spaces_after
TPHAPB/R-BG 'notanumber '
'''
def test_retro_currency3(self):
r'''
'R-BG': '{*($c)}',
0/R-BG ' $0'
'''
def test_retro_currency4(self):
r'''
'R-BG': '{*($c)}',
:spaces_after
0/R-BG '$0 '
'''
def test_retro_currency5(self):
r'''
'R-BG': '{:retro_currency:$c}',
0/R-BG ' $0'
'''
def test_retro_currency6(self):
r'''
'R-BG': '{:retro_currency:$c}',
'THO*U': '{^},000'
23/THO*U/R-BG ' $23,000'
'''
def test_retro_currency7(self):
r'''
'R-BG': '{:retro_currency:$c}',
'P-P': '{^}.{^}',
'THO*U': '{^},000'
23/THO*U/P-P/15/R-BG ' $23,000.15'
'''
def test_retro_currency8(self):
r'''
'R-BG': '{:retro_currency:$c}',
'P-P': '{^}.{^}',
'TPR*UPBT': '{^},500,000'
4/3/1/TPR*UPBT/P-P/69/R-BG ' $431,500,000.69'
'''
def test_retro_upper1(self):
r'''
'TEFT': 'test',
'-G': '{^ing}',
'PRE': '{pre^}',
'R*U': '{*<}',
TEFT/-G/R*U/PRE " TESTING pre"
'''
def test_retro_upper2(self):
r'''
'TEFT': 'test',
'-G': '{^ing}',
'PRE': '{pre^}',
'R*U': '{*<}',
TEFT/R*U/-G/PRE " TESTing pre"
'''
def test_retro_upper3(self):
r'''
'PRE': '{prefix^}',
'WORD': 'word',
'R*U': '{*<}',
PRE/WORD/R*U " PREFIXWORD"
'''
def test_retro_upper4(self):
r'''
'S-G': 'something',
'R*U': '{*<}',
S-G/S-G/R*U/R*U " something SOMETHING"
'''
def test_retro_upper5(self):
r'''
'TEFT': 'tèśtîñg',
'RUP': '{*<}',
TEFT/RUP ' TÈŚTÎÑG'
'''
def test_retro_upper6(self):
r'''
'ST': 'it is',
'RUP': '{*<}',
ST/RUP " it IS"
'''
def test_retro_upper7(self):
r'''
'TEFT': 'test',
"W-G": "{^ing with}",
'RUP': '{*<}',
TEFT/RUP/W-G " TESTing with"
'''
def test_retro_upper8(self):
r'''
'TEFT': 'test',
"W-G": "{^ing with}",
'RUP': '{*<}',
TEFT/W-G/RUP " testing WITH"
'''
def test_retro_upper9(self):
r'''
'TEFT': 'test',
"W-G": "{^ing with}",
'RUP': '{*<}',
TEFT/RUP/W-G/W-G " TESTing withing with"
'''
def test_retro_upper10(self):
r'''
'TEFT': 'test',
"W": "with",
'RUP': '{*<}',
TEFT/RUP/W/W " TEST with with"
'''
def test_retro_upper11(self):
r'''
"PREPB": "{~|(^}",
'TEFT': 'test',
'RUP': '{*<}',
PREPB/TEFT/RUP " (TEST"
'''
def test_retro_upper12(self):
r'''
'PRE': '{pre^}',
"PREPB": "{~|(^}",
'TEFT': 'test',
'RUP': '{*<}',
PRE/PREPB/TEFT/RUP " pre(TEST"
'''
def test_retro_upper13(self):
r'''
"PEUD": "pid",
"TPAOEUL": "file",
"H*PB": "{^-^}",
'RUP': '{*<}',
PEUD/RUP/H*PB/TPAOEUL ' PID-file'
'''
def test_retro_upper14(self):
r'''
"PEUD": "pid",
"TPAOEUL": "file",
"H*PB": "{^-^}",
'RUP': '{*<}',
PEUD/H*PB/TPAOEUL/RUP ' PID-FILE'
'''
def test_retro_upper15(self):
r'''
"OEU": "{^/^}",
"T*": "{>}{&t}",
"A*": "{>}{&a}",
"KR*": "{>}{&c}",
"O*": "{>}{&o}",
"S*": "{>}{&s}",
'RUP': '{*<}',
T*/A*/OEU/KR*/O*/S*/RUP ' ta/COS'
'''
def test_retro_upper16(self):
r'''
"S*": "{>}{&s}",
"T*": "{>}{&t}",
"O*": "{>}{&o}",
"STA*R": "{^*^}",
"*E": "{>}{&e}",
"*U": "{>}{&u}",
"P*": "{>}{&p}",
"PW*": "{>}{&b}",
'RUP': '{*<}',
S*/T*/O*/STA*R/*E/*U/P*/PW*/RUP ' sto*EUPB'
'''
def test_retro_upper17(self):
r'''
"*U": "{>}{&u}",
"S*": "{>}{&s}",
"A*": "{>}{&a}",
"P-P": "{^.^}",
'RUP': '{*<}',
*U/P-P/S*/P-P/A*/P-P/RUP ' u.s.a.'
'''
def test_retro_upper18(self):
r'''
"TPAO": "foo",
"KPATS": "{*-|}",
"AES": "{^'s}",
TPAO/AES/KPATS " Foo's"
'''
def test_retro_upper19(self):
r'''
"TPAO": "foo",
"KPATS": "{*<}",
TPAO " foo"
KPATS " FOO"
* " foo"
'''
def test_retro_upper20(self):
r'''
"TPAO": "foo",
"KPATS": "{*<}",
:spaces_after
TPAO "foo "
KPATS "FOO "
* "foo "
'''
def test_retro_upper21(self):
r'''
'TEFT': 'test',
'RUP': '{:retro_case:upper_FIRST_word}',
TEFT/RUP " TEST"
'''
def test_lower_first_char_1(self):
r'''
'TEFT': '{:CASE:Lower_First_Char}TEST',
TEFT ' tEST'
'''
def test_upper1(self):
r'''
'TP*U': '{<}',
'TEFT': 'test',
'-G': '{^ing}',
'PRE': '{pre^}',
TP*U/TEFT/-G/PRE " TESTING pre"
'''
def test_upper2(self):
r'''
'TP*U': '{<}',
'TEFT': 'test',
'-G': '{^ing}',
TP*U/TEFT/-G/TEFT " TESTING test"
'''
def test_upper3(self):
r'''
'TP*U': '{<}',
'ST': 'it is',
TP*U/ST " IT is"
'''
def test_upper4(self):
r'''
'TP*U': '{<}',
'ST': 'it{ }is',
TP*U/ST " IT is"
'''
def test_upper5(self):
r'''
'TP*U': '{<}',
'TEFT': 'test',
"W-G": "{^ing with}",
TP*U/TEFT/W-G " TESTING with"
'''
def test_upper6(self):
r'''
'TP*U': '{<}',
'P-': '{foo^}',
'-S': '{^bar}',
TP*U/P-/-S " FOOBAR"
'''
def test_upper7(self):
r'''
'TP*U': '{<}',
'PRE': '{pre^}',
'TEFT': 'test',
'-G': '{^ing}',
TP*U/PRE/TEFT/-G " PRETESTING"
'''
def test_upper8(self):
r'''
'TP*U': '{<}',
'TEFT': 'test',
"W-G": "{^ing with}",
TP*U/TEFT/W-G/W-G " TESTING withing with"
'''
def test_upper9(self):
r'''
'TP*U': '{<}',
'TEFT': 'test',
"W": "with",
TP*U/TEFT/W/W " TEST with with"
'''
def test_upper10(self):
r'''
'TEFT': '{:case:upper_first_word}test',
'-G': '{^ing}',
TEFT/-G ' TESTING'
'''
def test_attach_glue_and_carry_capitalize(self):
r'''
'PH*': '{&m}',
'KW-GS': '{~|"}',
PH*/KW-GS ' m "'
'''
def test_fingerspelling_retro_meta1(self):
r'''
'K': '{&c}',
'A': '{&a}',
'T': '{&t}',
'UP': '{*<}',
K/A/T ' cat'
UP ' CAT'
* ' cat'
'''
def test_fingerspelling_retro_meta2(self):
r'''
'TPH': '{>}{&n}',
'O': '{>}{&o}',
'UP': '{*<}',
TPH/O/UP ' NO'
'''
def test_fingerspelling_retro_meta3(self):
r'''
'TEFT': '{>}{&n}{>}{&o}{*<}',
TEFT ' NO'
'''
def test_word_1(self):
r'''
"KA*PS": "{MODE:CAPS}",
"TEFT": "test",
"-G": "{^ing}",
'RUP': '{*-|}',
KA*PS/TEFT/-G ' TESTING'
RUP ' TESTING'
'''
def test_word_2(self):
r'''
"KA*PS": "{MODE:CAPS}",
"R*FT": "{MODE:RESET}",
"TEFT": "test",
"-G": "{^ing}",
'RUL': '{*>}',
KA*PS/TEFT/-G ' TESTING'
R*FT/RUL ' tESTING'
'''
def test_cat_burger_1(self):
r'''
"KAT": "cat",
"O*PB": "{^on}",
"PWURG": "{*-|}{^burg}",
"*ER": "{^er}",
"PWURG/*ER": "burger",
KAT/O*PB/PWURG ' Cattonburg'
*ER ' catton burger'
'''
def test_cat_burger_2(self):
r'''
"KAT": "cat",
"O*PB": "{^on}",
"PWURG": "{*-|}{^burg}",
"*ER": "{^er}",
KAT/O*PB/PWURG ' Cattonburg'
*ER ' Cattonburger'
'''
def test_mirrored_capitalize(self):
r'''
'KPA': '{}{-|}',
'TEFT': '{~|na^}{~|no^}{~|wri^}mo'
KPA/TEFT ' NaNoWriMo'
'''
def test_mode_1a(self):
r'''
"PHO*D": "{MODE:CAMEL}",
"TEFT": "test",
:spaces_after
TEFT 'test '
PHO*D 'test '
TEFT 'testtest'
TEFT 'testtestTest'
'''
def test_mode_1b(self):
r'''
"PHO*D": "{MODE:CAMEL}",
"TEFT": "test",
:spaces_before
TEFT ' test'
PHO*D ' test'
TEFT ' testtest'
TEFT ' testtestTest'
'''
def test_mode_2a(self):
r'''
"PHO*D": "{MODE:CAMEL}",
"TEFT": "test",
"TKPWHRAOU": "{&g}{&l}{&u}{&e}"
:spaces_after
PHO*D ''
TEFT 'test'
TKPWHRAOU 'testGlue'
'''
def test_mode_2b(self):
r'''
"PHO*D": "{MODE:CAMEL}",
"TEFT": "test",
"TKPWHRAOU": "{&g}{&l}{&u}{&e}"
:spaces_before
PHO*D ''
TEFT 'test'
TKPWHRAOU 'testGlue'
'''
def test_mode_3a(self):
r'''
"PHO*D": "{MODE:CAMEL}",
"TEFT": "test",
"TKPWHRAOU": "{&g}{&l}{&u}{&e}"
:spaces_after
PHO*D ''
TKPWHRAOU 'glue'
TEFT 'glueTest'
'''
def test_mode_3b(self):
r'''
"PHO*D": "{MODE:CAMEL}",
"TEFT": "test",
"TKPWHRAOU": "{&g}{&l}{&u}{&e}"
:spaces_before
PHO*D ''
TKPWHRAOU 'glue'
TEFT 'glueTest'
'''
def test_mode_4(self):
r'''
"PHO*D": "{:mode:Camel}",
"TEFT": "test",
"TKPWHRAOU": "{&g}{&l}{&u}{&e}"
PHO*D ''
TKPWHRAOU 'glue'
TEFT 'glueTest'
'''
def test_fingerspelling_1a(self):
r'''
'K': '{&c}',
'A': '{&a}',
'T': '{&t}',
:spaces_after
K/A/T 'cat '
'''
def test_fingerspelling_1b(self):
r'''
'K': '{&c}',
'A': '{&a}',
'T': '{&t}',
K/A/T ' cat'
'''
def test_fingerspelling_2a(self):
r'''
'K': '{-|}{&c}',
'A': '{-|}{&a}',
'T': '{-|}{&t}',
:spaces_after
K/A/T 'CAT '
'''
def test_fingerspelling_2b(self):
r'''
'K': '{-|}{&c}',
'A': '{-|}{&a}',
'T': '{-|}{&t}',
K/A/T ' CAT'
'''
def test_numbers_1a(self):
r'''
'': ''
:spaces_after
1-9 '19 '
-79 '1979 '
* '19 '
* ''
'''
def test_numbers_1b(self):
r'''
'': ''
1-9 ' 19'
-79 ' 1979'
* ' 19'
* ''
'''
def test_raw_1a(self):
r'''
'': ''
:spaces_after
RAU 'RAU '
TEGT 'RAU TEGT '
* 'RAU '
* ''
'''
def test_raw_1b(self):
r'''
'': ''
RAU ' RAU'
TEGT ' RAU TEGT'
* ' RAU'
* ''
'''
def test_bug287(self):
r'''
"*6": "a",
"R*S": "{*+}",
*6 ' a'
R*S ' a a'
'''
def test_bug470(self):
r'''
"0EU8": "80",
"R*S": "{*+}",
0EU8 ' 80'
R*S ' 8080'
'''
def test_bug851(self):
r'''
"KUPBTS": "countries",
"R-R": "{^}{#Return}{^}{-|}",
:spaces_after
KUPBTS "countries "
R-R "countries"
'''
def test_bug849_1(self):
r'''
"-P": ".",
"*P": "{*}",
"TKAOU": "due",
"TKAO*U": "dew",
TKAOU ' due'
-P ' due .'
*P ' dew'
'''
def test_bug849_2(self):
r'''
"KPA*": "{^}{-|}",
"*P": "{*}",
"TKAOU": "due",
"TKAO*U": "dew",
KPA* ''
TKAOU 'Due'
*P 'Dew'
'''
def test_carry_upper_spacing1(self):
r'''
"TEFT": "{<}test",
"-G": "{^ing}",
"S-P": "{^ ^}",
"S-G": "something",
TEFT/-G ' TESTING'
S-P ' TESTING '
S-G ' TESTING something'
'''
def test_carry_upper_spacing2(self):
r'''
"TEFT": "{<}test",
"W-G": "{^ing with}",
"S-G": "something",
TEFT/W-G ' TESTING with'
S-G ' TESTING with something'
'''
def test_carry_upper_spacing3(self):
r'''
"TEFT": "{<}test",
"-G": "{^ing}",
"R-R": "{^\n^}",
"S-G": "something",
TEFT/-G ' TESTING'
R-R ' TESTING\n'
S-G ' TESTING\nsomething'
'''
def test_carry_upper_spacing4(self):
r'''
"TEFT": "{<}test",
"W-G": "{^ing\twith}",
TEFT/W-G ' TESTING\twith'
W-G ' TESTING\twithing\twith'
'''
def test_carry_upper_spacing5(self):
r'''
"TEFT": "{<}test",
"-G": "{^ing}",
"TA*B": "{^\t^}",
"S-G": "something",
TEFT/-G ' TESTING'
TA*B ' TESTING\t'
S-G ' TESTING\tsomething'
'''
def test_carry_upper_spacing6(self):
r'''
"TEFT": "{<}test",
"W-G": "{^ing\nwith}",
TEFT/W-G ' TESTING\nwith'
W-G ' TESTING\nwithing\nwith'
'''
def test_bug961(self):
r'''
"PHAED": "made",
"KWREUS": "{^ies}",
:spaces_after
PHAED/KWREUS 'madies '
* 'made '
'''
def test_bug966_1(self):
r'''
"-G": "{^ing}",
"KPA*L": "{<}",
"TKPWAEUPL": "game",
KPA*L/TKPWAEUPLG ' GAMING'
'''
def test_bug966_2(self):
r'''
"-G": "{^ing}",
"KPA*L": "{<}",
"TKPWAEUPL": "game",
KPA*L/TKPWAEUPL/-G/* ' GAME'
'''
def test_bug980(self):
r'''
"PUFRPB": "punch",
"HRAOEUPB": "line",
"PUFRPB/HRAOEUPB": "punchline",
"-S": "{^s}",
"AFPS": "{*?}",
PUFRPB/HRAOEUPBS ' punchlines'
AFPS ' punch lines'
'''
def test_not_a_macro_1(self):
r'''
"TEFT": "=",
TEFT ' ='
'''
def test_not_a_macro_2(self):
r'''
"TEFT": "==",
TEFT ' =='
'''
def test_not_a_macro_3(self):
r'''
"TEFT": "=>",
TEFT ' =>'
'''
def test_not_a_macro_4(self):
r'''
"TEFT": "== 0",
TEFT ' == 0'
'''
def test_not_a_macro_5(self):
r'''
"TEFT": "=not a macro",
TEFT ' =not a macro'
'''
def test_not_a_macro_6(self):
r'''
"TEFT": "=not a macro:",
TEFT ' =not a macro:'
'''
def test_bad_macro_1(self):
r'''
"TEFT": "=not_a_known_macro",
TEFT raise KeyError
'''
def test_bad_macro_2(self):
# Builtin macros not taking parameters.
r'''
"1": "=undo:param",
"2": "=repeat_last_stroke:param",
"3": "=retrospective_toggle_asterisk:param",
"4": "=retrospective_delete_space:param",
"5": "=retrospective_insert_space:param",
1 raise AssertionError
2 raise AssertionError
3 raise AssertionError
4 raise AssertionError
5 raise AssertionError
'''
def test_valid_macro_1(self):
r'''
"1": "=undo",
"2": "=undo:",
1 ""
2 ""
'''
def test_valid_macro_2(self, monkeypatch):
    # The raw-string spec below is parsed by the test framework:
    # stroking TEFT triggers the '=macro: ...' dictionary entry and
    # must produce no text output ("").
    r'''
    "TEFT": "=macro: params with spaces ",
    TEFT ""
    '''
    # Stub macro implementation: verify that everything after the first
    # ':' in the dictionary entry is forwarded verbatim as cmdline,
    # including the leading and trailing spaces.
    def macro(translator, stroke, cmdline):
        assert cmdline == ' params with spaces '
    # Install a private plugin registry so the 'macro' lookup above
    # resolves to the stub; monkeypatch restores the real registry
    # after the test.
    registry = Registry()
    registry.register_plugin('macro', 'macro', macro)
    monkeypatch.setattr('plover.translation.registry', registry)
def test_valid_macro_3(self, monkeypatch):
    # Same setup as test_valid_macro_2, but the dictionary entry has no
    # ':'-separated parameters, so the stub must receive an empty cmdline.
    r'''
    "TEFT": "=macro",
    TEFT ""
    '''
    # Stub macro: a bare '=macro' entry forwards an empty cmdline.
    def macro(translator, stroke, cmdline):
        assert cmdline == ''
    # Swap in a private registry providing the stub; monkeypatch undoes
    # the substitution after the test.
    registry = Registry()
    registry.register_plugin('macro', 'macro', macro)
    monkeypatch.setattr('plover.translation.registry', registry)
def test_meta_attach_default(self):
r'''
'TEFT': 'test',
'AT': '{:attach:attach}',
TEFT/AT/TEFT ' testattachtest'
'''
def test_meta_attach_infix(self):
r'''
'TEFT': 'test',
'AT': '{:attach:^attach^}',
TEFT/AT/TEFT ' testattachtest'
'''
def test_meta_attach_prefix(self):
r'''
'TEFT': 'test',
'AT': '{:attach:attach^}',
TEFT/AT/TEFT ' test attachtest'
'''
def test_meta_attach_suffix(self):
r'''
'TEFT': 'test',
'AT': '{:attach:^attach}',
TEFT/AT/TEFT ' testattach test'
'''
def test_prefix_strokes(self):
r'''
"/S": "{prefix^}",
"S": "{^suffix}",
"O": "{O'^}{$}",
S/S/O/S/S " prefixsuffix O'prefixsuffix"
'''
def test_melani_implicit_hyphens(self, with_melani_system):
r'''
:system Melani
"15/SE/COhro": "XV secolo",
"16/SE/COhro": "XVI secolo",
15/SE/COhro/16/SE/COhro " XV secolo XVI secolo"
'''
def test_conditionals_1(self):
r'''
"*": "=undo",
"S-": "{=(?i)t/true/false}",
"TP-": "FALSE",
"T-": "TRUE",
S- ' false'
TP- ' false FALSE'
* ' false'
T- ' true TRUE'
* ' false'
S- ' false false'
TP- ' false false FALSE'
* ' false false'
T- ' true true TRUE'
'''
def test_conditionals_2(self):
r'''
"1": "{=(?i)([8aeiouxy]|11|dei|gn|ps|s[bcdfglmnpqrtv]|z)/agli/ai}",
"2": "oc{^}chi",
"3": "dei",
"4": "sti{^}vali",
1 ' ai'
2 ' agli occhi'
1 ' agli occhi ai'
3 ' agli occhi agli dei'
1 ' agli occhi agli dei ai'
4 ' agli occhi agli dei agli stivali'
'''
def test_conditionals_3(self):
r'''
"1": "{:if_next_matches:(?i)([8aeiouxy]|11|dei|gn|ps|s[bcdfglmnpqrtv]|z)/agli/ai}",
"2": "chi",
"3": "oc{^}chi",
"4": "dei",
"5": "sti{^}vali",
:spaces_after
2 'chi '
1 'chi ai '
3 'chi agli occhi '
1 'chi agli occhi ai '
4 'chi agli occhi agli dei '
1 'chi agli occhi agli dei ai '
5 'chi agli occhi agli dei agli stivali '
'''
def test_conditionals_4(self):
r'''
"1": "{=(?i)([8aeiouxy]|11|dei|gn|ps|s[bcdfglmnpqrtv]|z)/agli/ai}",
"2": "{=(?i)([8aeiouxy]|11|dei|gn|ps|s[bcdfglmnpqrtv]|z)/cogli/coi}",
"3": "ci",
1/2/1/3 ' ai cogli ai ci'
'''
def test_conditionals_5(self):
r'''
"1": "{=(?i)([8aeiouxy]|11|dei|gn|ps|s[bcdfglmnpqrtv]|z)/agli/ai}",
"2": "{=(?i)([8aeiouxy]|11|dei|gn|ps|s[bcdfglmnpqrtv]|z)/cogli/coi}",
"3": "ci",
:spaces_after
1/2/1/3 'ai cogli ai ci '
'''
def test_conditionals_6(self):
r'''
"*": "=undo",
"S-": r'{=(?i)tr\/ue/tr\/ue/fa\\lse}',
"TP-": r'FA\LSE',
"T-": 'TR/UE',
S- r' fa\lse'
TP- r' fa\lse FA\LSE'
* r' fa\lse'
T- r' tr/ue TR/UE'
* r' fa\lse'
S- r' fa\lse fa\lse'
TP- r' fa\lse fa\lse FA\LSE'
* r' fa\lse fa\lse'
T- r' tr/ue tr/ue TR/UE'
'''
def test_bug_1448_1(self, with_korean_system):
# Translator tries to represent a previous stroke
# from a different system using the current one.
#
# This test would throw a `ValueError` exception
# (invalid keys mask) in the translator when
# trying to represent the `ㅎㅁㄱㅈㄴ-ㄴㅅㅈㅁ`
# stroke with the `English Stenotype` system.
r'''
'TEFT': 'test'
'TEFT/-G': 'testing'
:system 'Korean Modern C'
ㅎㅁㄱㅈㄴ-ㄴㅅㅈㅁ ' ㅎㅁㄱㅈㄴ-ㄴㅅㅈㅁ'
:system 'English Stenotype'
TEFT ' ㅎㅁㄱㅈㄴ-ㄴㅅㅈㅁ test'
'''
def test_bug_1448_2(self, with_korean_system):
# Translator tries to represent a previous stroke
# from a different system using the current one.
#
# This test would trigger a translator lookup for
# `#STKP/-G` because `#STKP` (English Stenotype)
# and `12345` (Korean Modern C) have the same keys
# mask.
r'''
'#STKP/-G': 'game over'
:system 'Korean Modern C'
12345 ' 12345'
:system 'English Stenotype'
-G ' 12345 -G'
'''
| 39,421
|
Python
|
.py
| 1,483
| 17.732299
| 91
| 0.403574
|
openstenoproject/plover
| 2,318
| 281
| 179
|
GPL-2.0
|
9/5/2024, 5:12:22 PM (Europe/Amsterdam)
|
18,644
|
test_resource.py
|
openstenoproject_plover/test/test_resource.py
|
from pathlib import Path
import inspect
import pytest
from plover.misc import normalize_path
from plover.resource import (
resource_exists,
resource_filename,
resource_timestamp,
resource_update,
)
@pytest.mark.parametrize('resource, exists, filename', (
# Relative filename.
(Path(__file__).relative_to(Path.cwd()), True, None),
# Relative directory.
(Path(__file__).parent.relative_to(Path.cwd()), True, None),
# Absolute filename.
(__file__, True, None),
# Absolute directory.
(Path(__file__).parent, True, None),
# Missing relative path.
('test/pouet', False, None),
# Missing absolute path.
(Path.cwd() / 'test' / 'pouet', False, None),
# Asset filename.
('asset:plover:assets/user.json', True,
'plover/assets/user.json'),
# Asset directory.
('asset:plover:', True, 'plover'),
('asset:plover:assets', True, 'plover/assets'),
# Missing asset.
('asset:plover:assets/pouet.json', False,
'plover/assets/pouet.json'),
# Invalid asset: missing package and path.
('asset:', ValueError, ValueError),
# Invalid asset: missing path.
('asset:package', ValueError, ValueError),
# Invalid asset: absolute resource path.
('asset:plover:/assets/user.json', ValueError, ValueError),
))
def test_resource(resource, exists, filename):
    """Exercise resource_exists/filename/timestamp for one resource spec.

    ``exists`` is either a bool (expected existence) or an exception
    class (the spec itself is invalid and every query must raise it).
    ``filename`` is the expected filesystem path, or None when it is
    the spec string itself.
    """
    spec = str(resource)
    if inspect.isclass(exists):
        # Invalid spec: all three query functions must reject it alike.
        for query in (resource_exists, resource_filename, resource_timestamp):
            with pytest.raises(exists):
                query(spec)
        return
    assert resource_exists(spec) == exists
    expected_name = spec if filename is None else filename
    assert normalize_path(resource_filename(spec)) == normalize_path(expected_name)
    if not exists:
        with pytest.raises(FileNotFoundError):
            resource_timestamp(spec)
    else:
        # Timestamp must match the underlying file's mtime.
        assert resource_timestamp(spec) == Path(expected_name).stat().st_mtime
def test_resource_update(tmp_path):
    """Check resource_update()'s transactional write semantics.

    Covers: asset resources are read-only, an exception inside the
    context discards the temporary file, and a normal exit atomically
    replaces the target with the temporary file's contents.
    """
    # Can't update assets.
    resource = 'asset:plover:assets/pouet.json'
    resource_path = Path(resource_filename(resource))
    with pytest.raises(ValueError):
        with resource_update(resource):
            resource_path.write_bytes(b'contents')
    assert not resource_path.exists()
    # Don't update resource on exception (but still cleanup).
    resource = (tmp_path / 'resource').resolve()
    exception_str = 'Houston, we have a problem'
    with pytest.raises(Exception, match=exception_str):
        with resource_update(str(resource)) as tmpf:
            tmpf = Path(tmpf)
            tmpf.write_bytes(b'contents')
            raise Exception(exception_str)
    # Neither the target nor the temporary file may survive the failure.
    assert not resource.exists()
    assert not tmpf.exists()
    # Normal use.
    with resource_update(str(resource)) as tmpf:
        tmpf = Path(tmpf).resolve()
        # Temporary filename must be different.
        assert tmpf != resource
        # And must be empty.
        assert not tmpf.stat().st_size
        # Temporary file must be created in the same
        # directory as the target resource (so an
        # atomic rename can be used).
        assert tmpf.parent == resource.parent
        # Save something.
        tmpf.write_bytes(b'contents')
        st = tmpf.stat()
    # After the context exits, the target holds the written contents
    # (rename preserves the stat of the temporary file).
    assert resource.stat() == st
    assert resource.read_bytes() == b'contents'
| 3,481
|
Python
|
.py
| 94
| 30.712766
| 82
| 0.663117
|
openstenoproject/plover
| 2,318
| 281
| 179
|
GPL-2.0
|
9/5/2024, 5:12:22 PM (Europe/Amsterdam)
|
18,645
|
test_steno.py
|
openstenoproject_plover/test/test_steno.py
|
# Copyright (c) 2013 Hesky Fisher
# See LICENSE.txt for details.
"""Unit tests for steno.py."""
import inspect
import pytest
from plover.steno import normalize_steno, Stroke
from plover_build_utils.testing import parametrize
NORMALIZE_TESTS = (
lambda: ('S', ('S',)),
lambda: ('S-', ('S',)),
lambda: ('-S', ('-S',)),
lambda: ('ES', ('ES',)),
lambda: ('-ES', ('ES',)),
lambda: ('TW-EPBL', ('TWEPBL',)),
lambda: ('TWEPBL', ('TWEPBL',)),
lambda: ('RR', ('R-R',)),
lambda: ('19', ('1-9',)),
lambda: ('14', ('14',)),
lambda: ('146', ('14-6',)),
lambda: ('67', ('-67',)),
lambda: ('120-7', ('1207',)),
lambda: ('6', ('-6',)),
lambda: ('9', ('-9',)),
lambda: ('5', ('5',)),
lambda: ('0', ('0',)),
lambda: ('456', ('456',)),
lambda: ('46', ('4-6',)),
lambda: ('4*6', ('4*6',)),
lambda: ('456', ('456',)),
lambda: ('S46', ('14-6',)),
lambda: ('T-EFT/-G', ('TEFT', '-G')),
lambda: ('T-EFT/G', ('TEFT', '-G')),
lambda: ('/PRE', ('', 'PRE')),
lambda: ('S--T', ('S-T',)),
# Number key.
lambda: ('#', ('#',)),
lambda: ('#S', ('1',)),
lambda: ('#A', ('5',)),
lambda: ('#0', ('0',)),
lambda: ('#6', ('-6',)),
# RTF/CRE spec allow the number key letter anywhere…
lambda: ('#WS', ('#W-S',)),
lambda: ('W#S', ('#W-S',)),
lambda: ('W-S#', ('#W-S',)),
lambda: ('S#A', ('15',)),
lambda: ('2#', ('2',)),
# Implicit hyphens.
lambda: ('SA-', ('SA',)),
lambda: ('SA-R', ('SAR',)),
lambda: ('O', ('O',)),
lambda: ('O-', ('O',)),
lambda: ('S*-R', ('S*R',)),
# Invalid.
lambda: ('SRALD/invalid', (ValueError, ('SRALD', 'invalid'))),
lambda: ('SRALD//invalid', (ValueError, ('SRALD', '', 'invalid'))),
lambda: ('S-*R', (ValueError, ('S-*R',))),
lambda: ('-O-', (ValueError, ('-O-',))),
lambda: ('-O', (ValueError, ('-O',))),
lambda: ('#WS#', (ValueError, ('#WS#',))),
)
@parametrize(NORMALIZE_TESTS)
@pytest.mark.parametrize('mode', ('strict=False', 'strict=True'))
def test_normalize_steno(mode, steno, expected):
    """Check normalize_steno() against NORMALIZE_TESTS in both modes.

    ``mode`` is a literal keyword-argument string — 'strict=False' or
    'strict=True' (see the parametrize decorator). Parse it directly
    instead of eval()-ing constructed source: same result, no dynamic
    code execution.
    """
    key, _, value = mode.partition('=')
    kwargs = {key: value == 'True'}
    if inspect.isclass(expected[0]):
        if kwargs['strict']:
            # NOTE(review): normalize_steno() is called here WITHOUT
            # **kwargs — presumably invalid steno raises regardless of
            # the strict flag's default; confirm against
            # plover.steno.normalize_steno.
            with pytest.raises(expected[0]):
                normalize_steno(steno)
            return
        # Lenient mode tolerates the input; compare against the
        # fallback result instead.
        expected = expected[1]
    result = normalize_steno(steno, **kwargs)
    msg = 'normalize_steno(%r, %s)=%r != %r' % (steno, mode, result, expected)
    assert result == expected, msg
STROKE_TESTS = (
lambda: (['S-'], (['S-'], 'S')),
lambda: (['S-', 'T-'], (['S-', 'T-'], 'ST')),
lambda: (['T-', 'S-'], (['S-', 'T-'], 'ST')),
lambda: (['-P', '-P'], (['-P'], '-P')),
lambda: (['#', 'S-', '-T'], (['#', 'S-', '-T'], '1-9')),
lambda: (['1-', '-9'], (['#', 'S-', '-T'], '1-9')),
lambda: (['-P', 'X-'], ValueError),
)
@parametrize(STROKE_TESTS)
def test_stroke(keys, expected):
    """Build a Stroke from raw key names and verify its normalization.

    ``expected`` is either an exception class (construction must fail)
    or a ``(steno_keys, rtfcre)`` pair.
    """
    if inspect.isclass(expected):
        # An exception class means the key combination is invalid.
        with pytest.raises(expected):
            Stroke(keys)
        return
    expected_keys, expected_rtfcre = expected
    built = Stroke(keys)
    assert built.steno_keys == expected_keys
    assert built.rtfcre == expected_rtfcre
| 3,180
|
Python
|
.py
| 92
| 29.445652
| 78
| 0.478063
|
openstenoproject/plover
| 2,318
| 281
| 179
|
GPL-2.0
|
9/5/2024, 5:12:22 PM (Europe/Amsterdam)
|
18,646
|
test_formatting.py
|
openstenoproject_plover/test/test_formatting.py
|
# Copyright (c) 2013 Hesky Fisher
# See LICENSE.txt for details.
"""Unit tests for formatting.py."""
import inspect
import pytest
from plover import formatting
from plover.formatting import Case
from plover_build_utils.testing import CaptureOutput, parametrize
def action(**kwargs):
    """Build a formatting._Action, expanding combined keyword shortcuts.

    A key such as ``text_and_word='stuff'`` is shorthand for passing
    both ``text='stuff'`` and ``word='stuff'``.
    """
    attrs = dict(kwargs)
    # Expand every '<a>_and_<b>' key into its component attributes;
    # the expansion overwrites any identically-named plain key, matching
    # the original in-place rewrite.
    for combined in [key for key in attrs if '_and_' in key]:
        value = attrs.pop(combined)
        for part in combined.split('_and_'):
            attrs[part] = value
    return formatting._Action(**attrs)
class MockTranslation:
    """Attribute-bag double standing in for plover's Translation."""

    def __init__(self, rtfcre=tuple(), english=None, formatting=None):
        # Record each constructor argument as an attribute, in
        # declaration order, so __str__'s dict dump is deterministic.
        for name, value in (('rtfcre', rtfcre),
                            ('english', english),
                            ('formatting', formatting)):
            setattr(self, name, value)

    def __str__(self):
        # Dump every attribute for readable assertion diffs.
        return str(self.__dict__)
def translation(**kwargs):
    """Shortcut factory: forward all keyword arguments to MockTranslation."""
    return MockTranslation(**kwargs)
STARTING_STROKE_TESTS = (
lambda:
(True, True, [], [translation(rtfcre=('S'), english='hello')], None,
([action(prev_attach=True, text='Hello', trailing_space=' ', word='hello')],),
[('s', 'Hello')]),
lambda:
(False, False, [], [translation(rtfcre=('S'), english='hello')], None,
([action(text_and_word='hello', trailing_space=' ')],),
[('s', ' hello')]),
lambda:
(True, False, [], [translation(rtfcre=('S'), english='hello')], None,
([action(text='Hello', word='hello', trailing_space=' ')],),
[('s', ' Hello')]),
lambda:
(False, True, [], [translation(rtfcre=('S'), english='hello')], None,
([action(text_and_word='hello', prev_attach=True, trailing_space=' ')],),
[('s', 'hello')]),
)
@parametrize(STARTING_STROKE_TESTS)
def test_starting_stroke(capitalized, attached, undo, do, prev,
                         expected_formats, expected_outputs):
    """Drive a fresh Formatter with the given start flags and verify
    both the per-translation formatting and the emitted instructions."""
    captured = CaptureOutput()
    fmt = formatting.Formatter()
    fmt.set_output(captured)
    fmt.start_capitalized = capitalized
    fmt.start_attached = attached
    fmt.format(undo, do, prev)
    # Each processed translation must carry the expected actions.
    for index, processed in enumerate(do):
        assert processed.formatting == expected_formats[index]
    assert captured.instructions == expected_outputs
FORMATTER_TESTS = (
lambda:
([translation(formatting=[action(text_and_word='hello', trailing_space=' ')])],
[],
None,
(),
[('b', 6)]),
lambda:
([],
[translation(rtfcre=('S'), english='hello')],
[translation(rtfcre=('T'), english='a', formatting=[action(text_and_word='f', trailing_space=' ')])]
,
([action(text_and_word='hello', trailing_space=' ')],),
[('s', ' hello')]),
lambda:
([], [translation(rtfcre=('S'), english='hello')], None,
([action(text_and_word='hello', trailing_space=' ')],),
[('s', ' hello')]),
lambda:
([], [translation(rtfcre=('ST-T',))], None,
([action(text_and_word='ST-T', trailing_space=' ')],),
[('s', ' ST-T')]),
lambda:
([],
[translation(rtfcre=('ST-T',))],
[translation(formatting=[action(text_and_word='hi', trailing_space=' ')])],
([action(text_and_word='ST-T', trailing_space=' ')],),
[('s', ' ST-T')]),
lambda:
([translation(formatting=[action(text_and_word='test', trailing_space=' ')])],
[translation(english='rest')],
[translation(formatting=[action(next_case=Case.CAP_FIRST_WORD, trailing_space=' ')])],
([action(text='Rest', word='rest', trailing_space=' ')],),
[('b', 4), ('s', 'Rest')]),
lambda:
([translation(formatting=[action(text_and_word='dare'),
action(prev_attach=True, text='ing', word='daring', prev_replace='e')])],
[translation(english='rest')],
[translation(formatting=[action(next_case=Case.CAP_FIRST_WORD,
trailing_space=' ')])],
([action(text='Rest', word='rest', trailing_space=' ')],),
[('b', 6), ('s', 'Rest')]),
lambda:
([translation(formatting=[action(text_and_word='drive', trailing_space=' ')])],
[translation(english='driving')],
None,
([action(text_and_word='driving', trailing_space=' ')],),
[('b', 1), ('s', 'ing')]),
lambda:
([translation(formatting=[action(text_and_word='drive', trailing_space=' ')])],
[translation(english='{#c}driving')],
None,
([action(combo='c'), action(text_and_word='driving', trailing_space=' ')],),
[('b', 6), ('c', 'c'), ('s', ' driving')]),
lambda:
([translation(formatting=[action(text_and_word='drive', trailing_space=' ')])],
[translation(english='{PLOVER:c}driving')],
None,
([action(command='c'), action(text_and_word='driving', trailing_space=' ')],),
[('b', 6), ('e', 'c'), ('s', ' driving')]),
lambda:
([],
[translation(english='{PloveR:CMD}')],
None,
([action(command='CMD')],),
[('e', 'CMD')]),
lambda:
([],
[translation(english='{:coMManD:Cmd}')],
None,
([action(command='Cmd')],),
[('e', 'Cmd')]),
lambda:
([],
[translation(rtfcre=('1',))],
None,
([action(text_and_word='1', trailing_space=' ', glue=True)],),
[('s', ' 1')]),
lambda:
([],
[translation(rtfcre=('1',))],
[translation(formatting=[action(text_and_word='hi', trailing_space=' ')])],
([action(text_and_word='1', trailing_space=' ', glue=True)],),
[('s', ' 1')]),
lambda:
([],
[translation(rtfcre=('1',))],
[translation(formatting=[action(text_and_word='hi', trailing_space=' ', glue=True)])],
([action(prev_attach=True, text='1', trailing_space=' ', word='hi1', glue=True)],),
[('s', '1')]),
lambda:
([],
[translation(rtfcre=('1-9',))],
[translation(formatting=[action(text_and_word='hi', trailing_space=' ', glue=True)])],
([action(prev_attach=True, text='19', trailing_space=' ', word='hi19', glue=True)],),
[('s', '19')]),
lambda:
([],
[translation(rtfcre=('ST-PL',))],
[translation(formatting=[action(text_and_word='hi', trailing_space=' ')])],
([action(text_and_word='ST-PL', trailing_space=' ')],),
[('s', ' ST-PL')]),
lambda:
([],
[translation(rtfcre=('ST-PL',))],
None,
([action(text_and_word='ST-PL', trailing_space=' ')],),
[('s', ' ST-PL')]),
)
@parametrize(FORMATTER_TESTS)
def test_formatter(undo, do, prev, expected_formats, expected_outputs):
    """Replay undo/do translation lists through a Formatter and verify
    both the attached formatting and the captured output instructions."""
    captured = CaptureOutput()
    # Seed some blank text so undoing with no previous state does not
    # assert inside the output backend.
    captured.text = ' ' * 128
    fmt = formatting.Formatter()
    fmt.set_output(captured)
    fmt.format(undo, do, prev)
    for index, processed in enumerate(do):
        assert processed.formatting == expected_formats[index]
    assert captured.instructions == expected_outputs
def test_action():
    """Sanity checks for action equality and state copying."""
    # Differing flags make otherwise-equal actions unequal.
    plain = action(word='test')
    attached = action(word='test', next_attach=True)
    assert plain != attached
    # Identical contents compare equal.
    assert action(text='test') == action(text='test')
    # copy_state() drops the text but keeps the word.
    copied = action(text='test', word='test').copy_state()
    assert copied == action(word='test')
# Parameters for test_translation_to_actions. Each entry is a lambda
# returning (translation, last_action, expected_actions): translating
# `translation` with `last_action` as previous context must yield
# `expected_actions`.
TRANSLATION_TO_ACTIONS_TESTS = (
    lambda:
    ('test', action(),
     [action(text_and_word='test', trailing_space=' ')]),
    lambda:
    ('{^^}', action(),
     [action(prev_attach=True, text_and_word='', next_attach=True, orthography=False)]),
    lambda:
    ('1-9', action(),
     [action(text_and_word='1-9', trailing_space=' ')]),
    lambda:
    ('32', action(),
     [action(text_and_word='32', trailing_space=' ', glue=True)]),
    lambda:
    ('', action(text_and_word='test', next_attach=True),
     [action(prev_attach=True, word='test', next_attach=True)]),
    lambda:
    (' ', action(text_and_word='test', next_attach=True),
     [action(prev_attach=True, word='test', next_attach=True)]),
    lambda:
    ('{^} {.} hello {.} {#ALT_L(Grave)}{^ ^}', action(),
     [action(prev_attach=True, text_and_word='', next_attach=True, orthography=False),
      action(prev_attach=True, text_and_word='.', trailing_space=' ', next_case=Case.CAP_FIRST_WORD),
      action(text='Hello', word='hello', trailing_space=' '),
      action(prev_attach=True, text_and_word='.', trailing_space=' ', next_case=Case.CAP_FIRST_WORD),
      action(word='.', trailing_space=' ', combo='ALT_L(Grave)', next_case=Case.CAP_FIRST_WORD),
      action(prev_attach=True, text=' ', word='', next_attach=True)
     ]),
    lambda:
    ('{-|}{>}{&a}{>}{&b}', action(),
     [action(next_case=Case.CAP_FIRST_WORD),
      action(next_case=Case.LOWER_FIRST_CHAR),
      action(text_and_word='a', trailing_space=' ', glue=True),
      action(next_case=Case.LOWER_FIRST_CHAR, word='a', trailing_space=' ', glue=True),
      action(prev_attach=True, text='b', word='ab', trailing_space=' ', glue=True),
     ]),
    # NOTE(review): exact duplicate of the previous case — possibly
    # unintentional; confirm before removing.
    lambda:
    ('{-|}{>}{&a}{>}{&b}', action(),
     [action(next_case=Case.CAP_FIRST_WORD),
      action(next_case=Case.LOWER_FIRST_CHAR),
      action(text_and_word='a', trailing_space=' ', glue=True),
      action(next_case=Case.LOWER_FIRST_CHAR, word='a', trailing_space=' ', glue=True),
      action(prev_attach=True, text='b', word='ab', trailing_space=' ', glue=True),
     ]),
    lambda:
    ('{-|} equip {^s}', action(),
     [action(next_case=Case.CAP_FIRST_WORD),
      action(text='Equip', word='equip', trailing_space=' '),
      action(prev_attach=True, text='s', trailing_space=' ', word='equips'),
     ]),
    lambda:
    ('{-|} equip {^ed}', action(),
     [action(next_case=Case.CAP_FIRST_WORD),
      action(text='Equip', word='equip', trailing_space=' '),
      action(prev_attach=True, text='ped', trailing_space=' ', word='equipped'),
     ]),
    lambda:
    ('{>} Equip', action(),
     [action(next_case=Case.LOWER_FIRST_CHAR),
      action(text='equip', word='Equip', trailing_space=' ')
     ]),
    lambda:
    ('{>} equip', action(),
     [action(next_case=Case.LOWER_FIRST_CHAR),
      action(text_and_word='equip', trailing_space=' ')
     ]),
    lambda:
    ('{<} equip', action(),
     [action(next_case=Case.UPPER_FIRST_WORD),
      action(text='EQUIP', word='equip', trailing_space=' ', upper_carry=True)
     ]),
    lambda:
    ('{<} EQUIP', action(),
     [action(next_case=Case.UPPER_FIRST_WORD),
      action(text_and_word='EQUIP', trailing_space=' ', upper_carry=True)
     ]),
    lambda:
    ('{<} equip {^ed}', action(),
     [action(next_case=Case.UPPER_FIRST_WORD),
      action(text='EQUIP', word='equip', trailing_space=' ', upper_carry=True),
      action(prev_attach=True, text='PED', trailing_space=' ', word='equipped', upper_carry=True)
     ]),
    lambda:
    ('equip {*-|}', action(),
     [action(text_and_word='equip', trailing_space=' '),
      action(prev_attach=True, text='Equip', trailing_space=' ', word_and_prev_replace='equip'),
     ]),
    lambda:
    ('equip {^ed} {*-|}', action(),
     [action(text_and_word='equip', trailing_space=' '),
      action(prev_attach=True, text='ped', trailing_space=' ', word='equipped'),
      action(prev_attach=True, text='Equipped', trailing_space=' ', word_and_prev_replace='equipped'),
     ]),
    lambda:
    ('Equip {*>}', action(),
     [action(text_and_word='Equip', trailing_space=' '),
      action(prev_attach=True, text='equip', trailing_space=' ', word_and_prev_replace='Equip'),
     ]),
    lambda:
    ('Equip {^ed} {*>}', action(),
     [action(text_and_word='Equip', trailing_space=' '),
      action(prev_attach=True, text='ped', trailing_space=' ', word='Equipped'),
      action(prev_attach=True, text='equipped', trailing_space=' ', word_and_prev_replace='Equipped'),
     ]),
    lambda:
    ('equip {*<}', action(),
     [action(text_and_word='equip', trailing_space=' '),
      action(prev_attach=True, text='EQUIP', trailing_space=' ', word_and_prev_replace='equip'),
     ]),
    lambda:
    ('equip {^ed} {*<}', action(),
     [action(text_and_word='equip', trailing_space=' '),
      action(prev_attach=True, text='ped', trailing_space=' ', word='equipped'),
      action(prev_attach=True, text='EQUIPPED', trailing_space=' ', word_and_prev_replace='equipped'),
     ]),
    # {*($c)} retro-currency cases: non-numbers are left alone.
    lambda:
    ('notanumber {*($c)}', action(),
     [action(text_and_word='notanumber', trailing_space=' '),
      action(word='notanumber', trailing_space=' '),
     ]),
    lambda:
    ('0 {*($c)}', action(),
     [action(text_and_word='0', trailing_space=' '),
      action(prev_attach=True, text='$0', word='0', trailing_space=' ', prev_replace='0'),
     ]),
    lambda:
    ('0.00 {*($c)}', action(),
     [action(text_and_word='0.00', trailing_space=' '),
      action(prev_attach=True, text='$0.00', word='0.00', trailing_space=' ', prev_replace='0.00'),
     ]),
    lambda:
    ('1234 {*($c)}', action(),
     [action(text_and_word='1234', trailing_space=' '),
      action(prev_attach=True, text='$1,234', word='1,234', trailing_space=' ', prev_replace='1234'),
     ]),
    lambda:
    ('1234567 {*($c)}', action(),
     [action(text_and_word='1234567', trailing_space=' '),
      action(prev_attach=True, text='$1,234,567', word='1,234,567', trailing_space=' ', prev_replace='1234567'),
     ]),
    lambda:
    ('1234.5 {*($c)}', action(),
     [action(text_and_word='1234.5', trailing_space=' '),
      action(prev_attach=True, text='$1,234.50', word='1,234.50', trailing_space=' ', prev_replace='1234.5'),
     ]),
    lambda:
    ('1234.56 {*($c)}', action(),
     [action(text_and_word='1234.56', trailing_space=' '),
      action(prev_attach=True, text='$1,234.56', word='1,234.56', trailing_space=' ', prev_replace='1234.56'),
     ]),
    lambda:
    ('1234.567 {*($c)}', action(),
     [action(text_and_word='1234.567', trailing_space=' '),
      action(prev_attach=True, text='$1,234.57', word='1,234.57', trailing_space=' ', prev_replace='1234.567'),
     ]),
    lambda:
    ('equip {^} {^ed}', action(),
     [action(text_and_word='equip', trailing_space=' '),
      action(prev_attach=True, text='', word='equip', next_attach=True, orthography=False),
      action(prev_attach=True, text='ed', trailing_space=' ', word='equiped'),
     ]),
    lambda:
    ('{prefix^} test {^ing}', action(),
     [action(text_and_word='prefix', next_attach=True),
      action(prev_attach=True, text_and_word='test', trailing_space=' '),
      action(prev_attach=True, text='ing', trailing_space=' ', word='testing'),
     ]),
    lambda:
    ('{two prefix^} test {^ing}', action(),
     [action(text='two prefix', word='prefix', next_attach=True),
      action(prev_attach=True, text_and_word='test', trailing_space=' '),
      action(prev_attach=True, text='ing', trailing_space=' ', word='testing'),
     ]),
    # Carry-capitalize metas.
    lambda:
    ('{-|}{^|~|^}', action(),
     [action(next_case=Case.CAP_FIRST_WORD),
      action(prev_attach=True, text_and_word='|~|', next_attach=True),
     ]),
    lambda:
    ('{-|}{~|\'^}cause', action(),
     [action(next_case=Case.CAP_FIRST_WORD),
      action(text_and_word='\'', next_attach=True, next_case=Case.CAP_FIRST_WORD),
      action(prev_attach=True, text='Cause', trailing_space=' ', word='cause'),
     ]),
    lambda:
    ('{.}{~|\'^}cuz', action(),
     [action(prev_attach=True, text_and_word='.', trailing_space=' ', next_case=Case.CAP_FIRST_WORD),
      action(text_and_word='\'', next_attach=True, next_case=Case.CAP_FIRST_WORD),
      action(prev_attach=True, text='Cuz', trailing_space=' ', word='cuz'),
     ]),
    lambda:
    ('{.}{~|\'^}cause', action(),
     [action(prev_attach=True, text_and_word='.', trailing_space=' ', next_case=Case.CAP_FIRST_WORD),
      action(text_and_word='\'', next_attach=True, next_case=Case.CAP_FIRST_WORD),
      action(prev_attach=True, text='Cause', trailing_space=' ', word='cause'),
     ]),
    lambda:
    ('{.}{^~|\"}heyyo', action(),
     [action(prev_attach=True, text_and_word='.', trailing_space=' ', next_case=Case.CAP_FIRST_WORD),
      action(prev_attach=True, text_and_word='"', trailing_space=' ', next_case=Case.CAP_FIRST_WORD),
      action(text='Heyyo', trailing_space=' ', word='heyyo'),
     ]),
    lambda:
    ('{.}{^~|^}zshrc', action(),
     [action(prev_attach=True, text_and_word='.', trailing_space=' ', next_case=Case.CAP_FIRST_WORD),
      action(prev_attach=True, text_and_word='', next_attach=True, next_case=Case.CAP_FIRST_WORD),
      action(prev_attach=True, text='Zshrc', trailing_space=' ', word='zshrc')]),
    lambda:
    ('{.}', action(),
     [action(prev_attach=True, text_and_word='.', trailing_space=' ', next_case=Case.CAP_FIRST_WORD)]
     ),
    lambda:
    ('{,}', action(),
     [action(prev_attach=True, text_and_word=',', trailing_space=' ')]
     ),
    lambda:
    ('test{prefix^}', action(),
     [action(text_and_word='test', trailing_space=' '),
      action(text_and_word='prefix', next_attach=True),
     ]),
    lambda:
    ('{prefix^}{prefix^}', action(),
     [action(text_and_word='prefix', next_attach=True),
      action(prev_attach=True, text='prefix', word='prefixprefix', next_attach=True),
     ]),
    lambda:
    ('{prefix^}{^ing}', action(),
     [action(text_and_word='prefix', next_attach=True),
      action(prev_attach=True, text='ing', trailing_space=' ', word='prefixing'),
     ]),
    lambda:
    ('{prefix^}cancel{^ing}', action(),
     [action(text_and_word='prefix', next_attach=True),
      action(prev_attach=True, text='cancel', trailing_space=' ', word='cancel'),
      action(prev_attach=True, text='ing', trailing_space=' ', word='canceling'),
     ]),
)
@parametrize(TRANSLATION_TO_ACTIONS_TESTS)
def test_translation_to_actions(translation, last_action, expected):
    """Translating `translation` after `last_action` yields `expected`."""
    context = formatting._Context([], action())
    context.translated(last_action)
    actions = formatting._translation_to_actions(translation, context)
    assert actions == expected
# Parameters for test_raw_to_actions: (raw_stroke, last_action, expected).
# Number-only strokes come out glued, with dashes stripped.
RAW_TO_ACTIONS_TESTS = (
    lambda:
    ('2-6', action(),
     [action(glue=True, text_and_word='26', trailing_space=' ')]),
    lambda:
    ('2', action(),
     [action(glue=True, text_and_word='2', trailing_space=' ')]),
    lambda:
    ('-8', action(),
     [action(glue=True, text_and_word='8', trailing_space=' ')]),
    lambda:
    ('-68', action(),
     [action(glue=True, text_and_word='68', trailing_space=' ')]),
    # A non-numeric stroke is emitted verbatim, without glue.
    lambda:
    ('S-T', action(),
     [action(text_and_word='S-T', trailing_space=' ')]),
)
@parametrize(RAW_TO_ACTIONS_TESTS)
def test_raw_to_actions(stroke, last_action, expected):
    """An untranslated stroke applied after `last_action` yields `expected`."""
    context = formatting._Context([], action())
    context.translated(last_action)
    actions = formatting._raw_to_actions(stroke, context)
    assert actions == expected
# Parameters for test_atom_to_action: (atom, last_action, expected_action).
# Covers orthography-aware suffixes, punctuation, case metas, retro metas,
# commands, glue, attach, key combos and mode changes.
ATOM_TO_ACTION_TESTS = (
    lambda:
    ('{^ed}', action(text_and_word='test', trailing_space=' '),
     action(prev_attach=True, text='ed', trailing_space=' ', word='tested')),
    lambda:
    ('{^ed}', action(text_and_word='carry', trailing_space=' '),
     action(prev_attach=True, text='ied', trailing_space=' ', prev_replace='y', word='carried')),
    lambda:
    ('{^er}', action(text_and_word='test', trailing_space=' '),
     action(prev_attach=True, text='er', trailing_space=' ', word='tester')),
    lambda:
    ('{^er}', action(text_and_word='carry', trailing_space=' '),
     action(prev_attach=True, text='ier', trailing_space=' ', prev_replace='y', word='carrier')),
    lambda:
    ('{^ing}', action(text_and_word='test', trailing_space=' '),
     action(prev_attach=True, text='ing', trailing_space=' ', word='testing')),
    lambda:
    ('{^ing}', action(text_and_word='begin', trailing_space=' '),
     action(prev_attach=True, text='ning', trailing_space=' ', word='beginning')),
    lambda:
    ('{^ing}', action(text_and_word='parade', trailing_space=' '),
     action(prev_attach=True, text='ing', trailing_space=' ', prev_replace='e', word='parading')),
    lambda:
    ('{^s}', action(text_and_word='test', trailing_space=' '),
     action(prev_attach=True, text='s', trailing_space=' ', word='tests')),
    # Punctuation attaches to the previous word; sentence-ending
    # punctuation also capitalizes the next word.
    lambda:
    ('{,}', action(text_and_word='test', trailing_space=' '),
     action(prev_attach=True, text_and_word=',', trailing_space=' ')),
    lambda:
    ('{:}', action(text_and_word='test', trailing_space=' '),
     action(prev_attach=True, text_and_word=':', trailing_space=' ')),
    lambda:
    ('{;}', action(text_and_word='test', trailing_space=' '),
     action(prev_attach=True, text_and_word=';', trailing_space=' ')),
    lambda:
    ('{.}', action(prev_attach=True, word='test', trailing_space=' '),
     action(prev_attach=True, text_and_word='.', trailing_space=' ', next_case=Case.CAP_FIRST_WORD)),
    lambda:
    ('{?}', action(text_and_word='test', trailing_space=' '),
     action(prev_attach=True, text_and_word='?', trailing_space=' ', next_case=Case.CAP_FIRST_WORD)),
    lambda:
    ('{!}', action(text_and_word='test', trailing_space=' '),
     action(prev_attach=True, text_and_word='!', trailing_space=' ', next_case=Case.CAP_FIRST_WORD)),
    # Prospective case metas only set flags for the next word.
    lambda:
    ('{-|}', action(text_and_word='test', trailing_space=' '),
     action(next_case=Case.CAP_FIRST_WORD, word='test', trailing_space=' ')),
    lambda:
    ('{>}', action(text_and_word='test', trailing_space=' '),
     action(next_case=Case.LOWER_FIRST_CHAR, word='test', trailing_space=' ')),
    lambda:
    ('{<}', action(text_and_word='test', trailing_space=' '),
     action(next_case=Case.UPPER_FIRST_WORD, word='test', trailing_space=' ')),
    # Retroactive case metas rewrite the previous word.
    lambda:
    ('{*-|}', action(text_and_word='test', trailing_space=' '),
     action(prev_attach=True, text='Test', trailing_space=' ', word_and_prev_replace='test')),
    lambda:
    ('{*>}', action(text_and_word='test', trailing_space=' '),
     action(prev_attach=True, text_and_word_and_prev_replace='test',
            trailing_space=' ')),
    lambda:
    ('{*<}', action(text_and_word='test', trailing_space=' '),
     action(prev_attach=True, text='TEST', word_and_prev_replace='test', trailing_space=' ')),
    lambda:
    ('{PLOVER:test_command}', action(text_and_word='test', trailing_space=' '),
     action(word='test', command='test_command', trailing_space=' ')),
    lambda:
    ('{&glue_text}', action(text_and_word='test', trailing_space=' '),
     action(text_and_word='glue_text', trailing_space=' ', glue=True)),
    lambda:
    ('{&glue_text}', action(text_and_word='test', trailing_space=' ', glue=True),
     action(prev_attach=True, text='glue_text', trailing_space=' ', word='testglue_text', glue=True)),
    lambda:
    ('{&glue_text}', action(text_and_word='test', next_attach=True),
     action(prev_attach=True, text='glue_text', trailing_space=' ', word='glue_text', glue=True)),
    lambda:
    ('{^attach_text}', action(text_and_word='test', trailing_space=' '),
     action(prev_attach=True, text='attach_text', trailing_space=' ', word='testattach_text')),
    lambda:
    ('{^attach_text^}', action(text_and_word='test', trailing_space=' '),
     action(prev_attach=True, text='attach_text', word='testattach_text', next_attach=True)),
    lambda:
    ('{attach_text^}', action(text_and_word='test', trailing_space=' '),
     action(text_and_word='attach_text', next_attach=True)),
    lambda:
    ('{#ALT_L(A)}', action(text_and_word='test', trailing_space=' '),
     action(combo='ALT_L(A)', trailing_space=' ', word='test')),
    lambda:
    ('text', action(text_and_word='test', trailing_space=' '),
     action(text_and_word='text', trailing_space=' ')),
    lambda:
    ('text', action(text_and_word='test', trailing_space=' ', glue=True),
     action(text_and_word='text', trailing_space=' ')),
    lambda:
    ('text', action(text_and_word='test', next_attach=True),
     action(prev_attach=True, text_and_word='text', trailing_space=' ')),
    lambda:
    ('text', action(text_and_word='test', trailing_space=' ', next_case=Case.CAP_FIRST_WORD),
     action(text='Text', trailing_space=' ', word='text')),
    lambda:
    ('some text', action(text_and_word='test', trailing_space=' '),
     action(text='some text', trailing_space=' ', word='text')),
    # Output modes (TITLE/CAMEL/SNAKE/UPPER/LOWER) transform the text.
    lambda:
    ('some text',
     action(text_and_word='test', trailing_space=' ',
            case=Case.TITLE,
            space_char=''),
     action(text='SomeText', word='text',
            case=Case.TITLE,
            space_char='')),
    lambda:
    ('some text',
     action(text_and_word='test', trailing_space=' ',  # This is camel case
            case=Case.TITLE,
            space_char='', next_case=Case.LOWER_FIRST_CHAR),
     action(text='someText', word='text',
            case=Case.TITLE,
            space_char='')),
    lambda:
    ('some text', action(text_and_word='test', trailing_space=' ', space_char='_'),
     action(text='some_text', trailing_space='_', word='text', space_char='_')),
    lambda:
    ('some text', action(text_and_word='test', trailing_space=' ', case=Case.UPPER),
     action(text='SOME TEXT', trailing_space=' ', word='text', case=Case.UPPER)),
    lambda:
    ('sOme TexT', action(text_and_word='test', trailing_space=' ', case=Case.LOWER),
     action(text='some text', trailing_space=' ', word='TexT', case=Case.LOWER)),
    lambda:
    ('sOme TexT', action(text_and_word='test', trailing_space=' ', case=Case.TITLE),
     action(text='Some Text', trailing_space=' ', word='TexT', case=Case.TITLE)),
    lambda:
    ('{MODE:CAPS}', action(text_and_word='test', trailing_space=' '),
     action(word='test', trailing_space=' ', case=Case.UPPER)),
    lambda:
    ('{MODE:LOWER}', action(text_and_word='test', trailing_space=' '),
     action(word='test', trailing_space=' ', case=Case.LOWER)),
)
@parametrize(ATOM_TO_ACTION_TESTS)
def test_atom_to_action(atom, last_action, expected):
    """A single atom applied after `last_action` yields `expected`."""
    context = formatting._Context((), last_action)
    context.translated(last_action)
    result = formatting._atom_to_action(atom, context)
    assert result == expected
# Parameters for test_meta_mode: (mode_string, last_action, expected).
# `expected` is either the resulting action or an exception class.
CHANGE_MODE_TESTS = (
    # Invalid modes.
    lambda:
    ('', action(),
     ValueError),
    lambda:
    ('ABCD', action(),
     ValueError),
    # CAPS: Uppercase
    lambda:
    ('CAPS', action(),
     action(case=Case.UPPER)),
    # LOWER: Lowercase
    lambda:
    ('LOWER', action(),
     action(case=Case.LOWER)),
    # TITLE: Titlecase
    lambda:
    ('TITLE', action(),
     action(case=Case.TITLE)),
    # CAMEL: Titlecase without space
    lambda:
    ('CAMEL', action(),
     action(case=Case.TITLE, space_char='',
            next_case=Case.LOWER_FIRST_CHAR)),
    # SNAKE: Underscore space
    lambda:
    ('SNAKE', action(),
     action(space_char='_')),
    # RESET_SPACE: Default space
    lambda:
    ('RESET_SPACE', action(space_char='ABCD'),
     action()),
    # RESET_CASE: No case
    lambda:
    ('RESET_CASE', action(case=Case.UPPER),
     action()),
    # SET_SPACE:xy: Set space to xy
    lambda:
    ('SET_SPACE:', action(space_char='test'),
     action(space_char='')),
    lambda:
    ('SET_SPACE:-', action(space_char='test'),
     action(space_char='-')),
    lambda:
    ('SET_SPACE:123 45', action(space_char='test'),
     action(space_char='123 45')),
    # RESET: No case, default space
)
@parametrize(CHANGE_MODE_TESTS)
def test_meta_mode(meta, last_action, expected):
    """{MODE:...} metas update the output mode, or raise for bad modes."""
    atom = '{MODE:%s}' % meta
    context = formatting._Context((), action())
    context.translated(last_action)
    if not inspect.isclass(expected):
        assert formatting._atom_to_action(atom, context) == expected
    else:
        # An exception class means the mode string must be rejected.
        with pytest.raises(expected):
            formatting._atom_to_action(atom, context)
# Shared previous-action fixtures for the carry-capitalize tests below.
last_action_normal = action()
last_action_capitalized = action(next_case=Case.CAP_FIRST_WORD)
last_action_attached = action(next_attach=True)
# Parameters for test_meta_carry_capitalize: (meta_body, last_action,
# expected_action) — the meta is wrapped in '{...}' by the test.
META_CARRY_CAPITALIZE_TESTS = (
    # Test word handling and space handling, standard.
    lambda:
    ('~|*', last_action_normal,
     (action(word='*', text='*', trailing_space=' '))),
    # With attach flags:
    lambda:
    ('~|*^', last_action_normal,
     (action(word='*', text='*', next_attach=True))),
    lambda:
    ('^~|*', last_action_normal,
     (action(word='*', text='*', trailing_space=' ', prev_attach=True))),
    lambda:
    ('^~|*^', last_action_normal,
     (action(word='*', text='*', prev_attach=True, next_attach=True))),
    # Should 'do nothing'.
    lambda:
    ('~|', last_action_capitalized,
     (last_action_capitalized)),
    # Should lose 'next_attach' flag.
    lambda:
    ('~|', last_action_attached,
     (action(prev_attach=True))),
    # Verify capitalize carry.
    lambda:
    ('^~|^', last_action_capitalized,
     (action(next_case=Case.CAP_FIRST_WORD, text_and_word='', prev_attach=True, next_attach=True))),
    lambda:
    ('^~|aset^', last_action_capitalized,
     (action(next_case=Case.CAP_FIRST_WORD, prev_attach=True, next_attach=True, text='Aset', word='aset'))),
    lambda:
    ('~|aset', last_action_capitalized,
     (action(next_case=Case.CAP_FIRST_WORD, text='Aset', trailing_space=' ', word='aset'))),
    # Verify 'next_attach' flag overriding.
    lambda:
    ('~|aset', last_action_attached,
     (action(prev_attach=True, text_and_word='aset', trailing_space=' '))),
    lambda:
    ('~|aset^', last_action_attached,
     (action(prev_attach=True, text_and_word='aset', next_attach=True))),
)
@parametrize(META_CARRY_CAPITALIZE_TESTS)
def test_meta_carry_capitalize(meta, last_action, expected):
    """Carry-capitalize metas ({~|...}) propagate case/attach flags."""
    atom = '{%s}' % meta
    context = formatting._Context((), action())
    context.translated(last_action)
    assert formatting._atom_to_action(atom, context) == expected
def _apply_case_tests():
    """Parameters for test_apply_case: (text, case, appended, expected).

    `expected` is either the transformed text or an exception class.
    """
    test = ' some test '
    test2 = 'test Me'
    test3 = ' SOME TEST '
    return (
        # INVALID
        lambda: (test, '', False, ValueError),
        lambda: (test, 'TEST', False, ValueError),
        # NO-OP
        lambda: (test, None, False, test),
        lambda: (test, None, True, test),
        # TITLE
        lambda: (test, Case.TITLE, False, ' Some Test '),
        # TITLE will not affect appended output
        lambda: (test, Case.TITLE, True, ' some test '),
        lambda: (test2, Case.TITLE, True, 'test Me'),
        # LOWER
        lambda: (test, Case.LOWER, False, ' some test '),
        lambda: (test3, Case.LOWER, False, ' some test '),
        lambda: (test2, Case.LOWER, True, 'test me'),
        # UPPER
        lambda: (test.upper(), Case.UPPER, False, ' SOME TEST '),
        lambda: (test3, Case.UPPER, False, ' SOME TEST '),
        lambda: (test2, Case.UPPER, True, 'TEST ME'),
    )
@parametrize(_apply_case_tests())
def test_apply_case(input_text, case, appended, expected):
    """apply_mode_case() returns `expected`, or raises if it is a class."""
    if not inspect.isclass(expected):
        result = formatting.apply_mode_case(input_text, case, appended)
        assert result == expected
    else:
        with pytest.raises(expected):
            formatting.apply_mode_case(input_text, case, appended)
def _apply_space_char_tests():
test = ' some text '
test2 = "don't"
return (
lambda: (test, '_', '_some_text_'),
lambda: (test, '', 'sometext'),
lambda: (test2, '_', test2),
lambda: (test2, '', test2),
)
@parametrize(_apply_space_char_tests())
def test_apply_space_char(text, space_char, expected):
    """apply_mode_space_char() substitutes the mode's space character."""
    result = formatting.apply_mode_space_char(text, space_char)
    assert result == expected
@parametrize((
    lambda: ('', None),
    lambda: ('{abc}', 'abc'),
    lambda: ('abc', None),
))
def test_get_meta(atom, meta):
    """_get_meta() extracts the meta name from '{...}' atoms, else None."""
    extracted = formatting._get_meta(atom)
    assert extracted == meta
@parametrize((
    lambda: ('abc', '{&abc}'),
    lambda: ('1', '{&1}'),
))
def test_glue_translation(s, expected):
    """_glue_translation() wraps text in a glue meta."""
    glued = formatting._glue_translation(s)
    assert glued == expected
@parametrize((
    lambda: ('', ''),
    lambda: ('abc', 'abc'),
    lambda: (r'\{', '{'),
    lambda: (r'\}', '}'),
    lambda: (r'\{abc\}}{', '{abc}}{'),
))
def test_unescape_atom(atom, text):
    """_unescape_atom() strips backslash escapes from braces."""
    unescaped = formatting._unescape_atom(atom)
    assert unescaped == text
@parametrize((
    lambda: ('', ''),
    lambda: ('abc', 'Abc'),
    lambda: ('ABC', 'ABC'),
))
def test_capitalize_first_word(s, expected):
    """capitalize_first_word() upper-cases only the first letter."""
    capitalized = formatting.capitalize_first_word(s)
    assert capitalized == expected
# Parameters for test_rightmost_word: (text, expected_rightmost_word).
# Trailing whitespace yields the empty string.
RIGHTMOST_WORD_TESTS = (
    lambda: ('', ''),
    lambda: ('\n', ''),
    lambda: ('\t', ''),
    lambda: ('abc', 'abc'),
    lambda: ('a word', 'word'),
    lambda: ('word.', '.'),
    lambda: ('word ', ''),
    lambda: ('word\n', ''),
    lambda: ('word\t', ''),
    lambda: (' word', 'word'),
    lambda: ('\nword', 'word'),
    lambda: ('\tword', 'word'),
)
@parametrize(RIGHTMOST_WORD_TESTS)
def test_rightmost_word(s, expected):
    """rightmost_word() returns the trailing word, or '' after whitespace."""
    result = formatting.rightmost_word(s)
    assert result == expected
# Parameters for test_replace: (translations, expected_instructions).
REPLACE_TESTS = (
    # Check that 'prev_replace' does not unconditionally erase
    # the previous character if it does not match.
    lambda:
    ([
        translation(english='{MODE:SET_SPACE:}'),
        translation(english='foobar'),
        translation(english='{^}{#Return}{^}{-|}'),
    ], [('s', 'foobar'), ('c', 'Return')]),
    # Check 'prev_replace' correctly takes into account
    # the previous translation.
    lambda:
    ([
        translation(english='test '),
        translation(english='{^,}'),
    ], [('s', 'test '), ('b', 1), ('s', ', ')]),
    # While the previous translation must be taken into account,
    # any meta-command must not be fired again.
    lambda:
    ([
        translation(english='{#Return}'),
        translation(english='test'),
    ], [('c', 'Return'), ('s', 'test ')]),
)
@parametrize(REPLACE_TESTS)
def test_replace(translations, expected_instructions):
    """Feed translations one at a time and check the emitted instructions."""
    output = CaptureOutput()
    formatter = formatting.Formatter()
    formatter.set_output(output)
    formatter.set_space_placement('After Output')
    history = []
    for current in translations:
        formatter.format([], [current], history)
        history.append(current)
    assert output.instructions == expected_instructions
def test_undo_replace():
    """Undoing a translation that replaced previous output restores it."""
    output = CaptureOutput()
    formatter = formatting.Formatter()
    formatter.set_output(output)
    formatter.set_space_placement('After Output')
    history = [translation(english='test')]
    formatter.format([], history, None)
    comma = translation(english='{^,}')
    formatter.format([], [comma], history)
    # Undo the comma, which itself replaced part of 'test '.
    formatter.format([comma], [], history)
    expected = [
        ('s', 'test '), ('b', 1), ('s', ', '), ('b', 2), ('s', ' '),
    ]
    assert output.instructions == expected
# Parameters for test_output_optimization: (undo, do, expected_instructions).
OUTPUT_OPTIMISATION_TESTS = (
    # No change.
    lambda:
    ([
        translation(english='noop'),
    ], [
        translation(english='noop'),
    ], [('s', ' noop')]),
    # Append only.
    lambda:
    ([
        translation(english='test'),
    ], [
        translation(english='testing'),
    ], [('s', ' test'), ('s', 'ing')]),
    # Chained meta-commands.
    lambda:
    ([
        translation(english='{#a}'),
    ], [
        translation(english='{#a}{#b}'),
    ], [('c', 'a'), ('c', 'b')]),
)
@parametrize(OUTPUT_OPTIMISATION_TESTS)
def test_output_optimization(undo, do, expected_instructions):
    """Replacing `undo` with `do` must only emit the minimal output delta."""
    capture = CaptureOutput()
    fmt = formatting.Formatter()
    fmt.set_output(capture)
    fmt.format([], undo, None)
    fmt.format(undo, do, None)
    assert capture.instructions == expected_instructions
class TestRetroFormatter:
    """Tests for formatting.RetroFormatter's backward iteration helpers."""

    def setup_method(self):
        # Fresh formatter, shared translation history, and a
        # RetroFormatter viewing that same history.
        self.formatter = formatting.Formatter()
        self.translations = []
        self.retro_formatter = formatting.RetroFormatter(self.translations)

    def format(self, text):
        """Format `text` as one translation and append it to the history."""
        t = translation(english=text)
        self.formatter.format([], [t], self.translations)
        self.translations.append(t)
        return t

    # (translations, expected actions in reverse order).
    ITER_LAST_ACTIONS_TESTS = (
        lambda:
        (['Luca', 'mela'],
         [action(text_and_word='mela', trailing_space=' '),
          action(text_and_word='Luca', trailing_space=' ')]),
        lambda:
        (['{Luca^}', '{^mela}'],
         [action(prev_attach=True, text='mela', trailing_space=' ', word='Lucamela'),
          action(text_and_word='Luca', next_attach=True)]),
        lambda:
        (['Luca', '{^ ^}', 'mela'],
         [action(text_and_word='mela', trailing_space=' ', prev_attach=True),
          action(text=' ', word='', prev_attach=True, next_attach=True),
          action(text_and_word='Luca', trailing_space=' ')]),
        lambda:
        (['Luca', '{-|}', 'mela'],
         [action(text='Mela', trailing_space=' ', word='mela'),
          action(word='Luca', trailing_space=' ', next_case=Case.CAP_FIRST_WORD),
          action(text_and_word='Luca', trailing_space=' ')]),
    )

    @parametrize(ITER_LAST_ACTIONS_TESTS)
    def test_iter_last_actions(self, translation_list, action_list):
        """iter_last_actions() walks actions from newest to oldest."""
        for t in translation_list:
            self.format(t)
        assert list(self.retro_formatter.iter_last_actions()) == action_list

    # (spaces_after, translations, expected fragments in reverse order).
    ITER_LAST_FRAGMENTS_TESTS = (
        lambda:
        (False,
         ['Luca', 'mela'],
         ['mela', 'Luca ']),
        lambda:
        (False,
         ['Luca{^ ^}mela'],
         ['mela', 'Luca ']),
        lambda:
        (False,
         ['Luca, mela.'],
         ['mela.', 'Luca, ']),
        lambda:
        (False,
         ['Luca{-|}mela'],
         ['Mela', 'Luca ']),
        lambda:
        (True,
         ['Luca{-|}mela'],
         ['Mela', 'Luca ']),
    )

    @parametrize(ITER_LAST_FRAGMENTS_TESTS)
    def test_iter_last_fragments(self, spaces_after, translation_list, fragment_list):
        """iter_last_fragments() yields whitespace-delimited fragments."""
        if spaces_after:
            self.formatter.set_space_placement('After Output')
        for t in translation_list:
            self.format(t)
        assert list(self.retro_formatter.iter_last_fragments()) == fragment_list

    # (spaces_after, translations, expected words in reverse order);
    # unlike fragments, punctuation is split off as its own word.
    ITER_LAST_WORDS_TESTS = (
        lambda:
        (False,
         ['Luca', 'mela'],
         ['mela', 'Luca ']),
        lambda:
        (False,
         ['Luca{^ ^}mela'],
         ['mela', 'Luca ']),
        lambda:
        (False,
         ['Luca, mela.'],
         ['.', 'mela', ', ', 'Luca']),
        lambda:
        (False,
         ['Luca{-|}mela'],
         ['Mela', 'Luca ']),
        lambda:
        (True,
         ['Luca{-|}mela'],
         ['Mela', 'Luca ']),
    )

    @parametrize(ITER_LAST_WORDS_TESTS)
    def test_iter_last_words(self, spaces_after, translation_list, word_list):
        """iter_last_words() yields words from newest to oldest."""
        if spaces_after:
            self.formatter.set_space_placement('After Output')
        for t in translation_list:
            self.format(t)
        assert list(self.retro_formatter.iter_last_words()) == word_list

    # (spaces_after, translations, char_count, expected trailing text).
    LAST_TEXT_TESTS = (
        lambda:
        (False,
         ['Luca{-|}mela'],
         3,
         'ela'),
        lambda:
        (False,
         ['Luca{-|}mela'],
         5,
         ' Mela'),
        lambda:
        (False,
         ['Luca{-|}mela'],
         12,
         'Luca Mela'),
        lambda:
        (False,
         ['Luca{-|}mela'],
         20,
         'Luca Mela'),
        lambda:
        (True,
         ['Luca{-|}mela'],
         6,
         'a Mela'),
    )

    @parametrize(LAST_TEXT_TESTS)
    def test_last_text(self, spaces_after, translation_list, count, text):
        """last_text(count) returns up to `count` trailing characters."""
        if spaces_after:
            self.formatter.set_space_placement('After Output')
        for t in translation_list:
            self.format(t)
        assert self.retro_formatter.last_text(count) == text
| 38,867
|
Python
|
.py
| 993
| 32.930514
| 112
| 0.590915
|
openstenoproject/plover
| 2,318
| 281
| 179
|
GPL-2.0
|
9/5/2024, 5:12:22 PM (Europe/Amsterdam)
|
18,647
|
test_rtfcre_dict.py
|
openstenoproject_plover/test/test_rtfcre_dict.py
|
# Copyright (c) 2013 Hesky Fisher
# See LICENSE.txt for details.
import textwrap
import pytest
from plover import __version__ as plover_version
from plover.dictionary.rtfcre_dict import RtfDictionary, TranslationFormatter
from plover.dictionary.rtfcre_parse import BadRtfError
from plover_build_utils.testing import dictionary_test, parametrize
# (plover translation, expected RTF/CRE rendering).
@parametrize((
    lambda: ('', ''),
    # Affix handling.
    lambda: ('{^}', r'{\cxds}'),
    lambda: ('{^^}', r'{\cxds}'),
    lambda: ('{^in^}', r'{\cxds in\cxds}'),
    lambda: ('{pre^}', r'{pre\cxds}'),
    lambda: ('{pre^} ', r'{pre\cxds}'),
    # NOTE(review): duplicate of the previous case — the original may
    # have differed in trailing whitespace; verify against upstream.
    lambda: ('{pre^} ', r'{pre\cxds}'),
    lambda: ('{ pre ^} ', r'{ pre \cxds}'),
    lambda: ('{^post}', r'{\cxds post}'),
    lambda: (' {^post}', r'{\cxds post}'),
    # NOTE(review): duplicate of the previous case — see note above.
    lambda: (' {^post}', r'{\cxds post}'),
    lambda: ('{^ post } ', r'{\cxds post }'),
    # Escaping special characters.
    lambda: (r'\{', r'\\\{'),
    lambda: (r'\}', r'\\\}'),
    # Hard space.
    lambda: ('{^ ^}', r'\~'),
    # Non-breaking hyphen.
    lambda: ('{^-^}', r'\_'),
    # Handling newlines.
    lambda: ('test\nsomething', r'test\line something'),
    lambda: ('test\n\nsomething', r'test\par something'),
    lambda: ('test\\nsomething', r'test\\nsomething'),
    # Handling tabs.
    lambda: ('test\tsomething', r'test\tab something'),
    lambda: ('test\\tsomething', r'test\\tsomething'),
    # Force Cap.
    lambda: (r'Mr.{-|}', r'Mr.\cxfc '),
    # Force Lower Case.
    lambda: (r'{>}lol', r'\cxfl lol'),
    # Infix with force cap.
    lambda: ('{^\n^}{-|}', r'{\cxds \line \cxds}\cxfc '),
    lambda: ('{^\n\n^}{-|}', r'{\cxds \par \cxds}\cxfc '),
    # Plover custom formatting:
    # - meta: command
    lambda: ('{PLOVER:TOGGLE}', r'{\*\cxplovermeta PLOVER:TOGGLE}'),
    # - meta: key combo
    lambda: ('{#Return}', r'{\*\cxplovermeta #Return}'),
    # - meta: other
    lambda: ('{:retro_case:cap_first_word}', r'{\*\cxplovermeta :retro_case:cap_first_word}'),
    # - macro
    lambda: ('{*}', r'{\*\cxplovermacro retrospective_toggle_asterisk}'),
    lambda: ('{*!}', r'{\*\cxplovermacro retrospective_delete_space}'),
    lambda: ('{*?}', r'{\*\cxplovermacro retrospective_insert_space}'),
    lambda: ('{*+}', r'{\*\cxplovermacro repeat_last_stroke}'),
    lambda: ('=retrospective_delete_space', r'{\*\cxplovermacro retrospective_delete_space}'),
    lambda: ('=macro:with_arg', r'{\*\cxplovermacro macro:with_arg}'),
    lambda: ('=macro:', r'{\*\cxplovermacro macro:}'),
    lambda: ('=undo', r'\cxdstroke'),
    # - not macros
    lambda: ('==', '=='),
    lambda: ('=test()', '=test()'),
    lambda: ("=macro{<-ceci n'est pas une macro}", r"=macro{\*\cxplovermeta <-ceci n'est pas une macro}"),
    lambda: ('{*}something', r'{\*\cxplovermeta *}something'),
))
def test_format_translation(before, expected):
    """TranslationFormatter renders a Plover translation as RTF/CRE."""
    result = TranslationFormatter().format(before)
    assert result == expected
def rtf_load_test(*spec, xfail=False):
    """Build a pytest param for an RTF load test.

    With two arguments, *spec* is a (translation, expected) pair for a
    single-mapping conversion test. With one argument, it is a dedented
    blob whose last blank-line-separated section is the expected
    dictionary content, the rest being the RTF entries.
    """
    assert 1 <= len(spec) <= 2
    if len(spec) == 2:
        # Single-mapping conversion test.
        rtf_entries = r'{\*\cxs S}%s' % spec[0]
        dict_entries = '"S": %r' % spec[1]
    else:
        blob = textwrap.dedent(spec[0]).lstrip()
        if blob:
            rtf_entries, dict_entries = tuple(blob.rsplit('\n\n', 1))
        else:
            rtf_entries = dict_entries = ''
    marks = dict(marks=pytest.mark.xfail) if xfail else {}
    return pytest.param(rtf_entries, dict_entries, **marks)
RTF_LOAD_TESTS = (
# Empty dictionary.
lambda: rtf_load_test(
'''
'''),
# Only one translation.
lambda: rtf_load_test(
r'''
{\*\cxs SP}translation
'SP': 'translation',
'''),
# One translation on multiple lines.
lambda: rtf_load_test(
'''
{\\*\\cxs SP}\r\ntranslation
'SP': 'translation'
'''),
# Steno and translation on multiple lines.
lambda: rtf_load_test(
'''
{\\*\\cxs PHA*-EU/SKWRAO-UPB/SKWR-UL/A-UGT/S-EPT/O-BGT/TPHO*-EF/STK-EPL/SKWRA-PB/TP-EB\r\n
/PHA-R/A-EUP}May, June, July, August, September, October, November, December, January,\r\n
February, March, April
('PHA*-EU/SKWRAO-UPB/SKWR-UL/A-UGT/'
'S-EPT/O-BGT/TPHO*-EF/STK-EPL/'
'SKWRA-PB/TP-EB/PHA-R/A-EUP'): (
'May, June, July, August, September, '
'October, November, December, January, '
'February, March, April')
'''),
# Multiple translations no newlines.
lambda: rtf_load_test(
r'''
{\*\cxs SP}translation{\*\cxs S}translation2
'SP': 'translation',
'S': 'translation2',
'''),
# Multiple translations on separate lines.
lambda: rtf_load_test(
'''
{\\*\\cxs SP}translation\r\n{\\*\\cxs S}translation2
'SP': 'translation',
'S': 'translation2',
'''),
lambda: rtf_load_test(
'''
{\\*\\cxs SP}translation\n{\\*\\cxs S}translation2
'SP': 'translation',
'S': 'translation2',
'''),
# Group start split on 2 lines.
lambda: rtf_load_test(
r'''
{\*\cxs TEFT}{
\*\ignored I'm invisible}
'TEFT': '',
'''),
# Mapping to empty string at end.
lambda: rtf_load_test(
r'''
{\*\cxs TEFT}
'TEFT': '',
'''),
# Escaped \r\n handled.
lambda: rtf_load_test(
'''
{\\*\\cxs SP}trans\\\r\n
'SP': 'trans{^\\n\\n^}',
'''),
# Escaped \r\n handled in mid translation.
lambda: rtf_load_test(
'''
{\\*\\cxs SP}trans\\\r\nlation
'SP': 'trans\\n\\nlation',
'''),
# Whitespace is preserved in various situations.
lambda: rtf_load_test(
r'''
{\*\cxs S}t
'S': 't{^ ^}',
'''),
lambda: rtf_load_test(
r'''
{\*\cxs S} t
'S': '{^ ^}t',
'''),
lambda: rtf_load_test(
r'''
{\*\cxs S}t {\*\cxs T}t
'S': 't{^ ^}',
'T': 't{^ ^}',
'''),
lambda: rtf_load_test(
'''
{\\*\\cxs S}t \r\n{\\*\\cxs T}t
'S': 't{^ ^}',
'T': 't{^ ^}',
'''),
lambda: rtf_load_test(
'''
{\\*\\cxs S}t \r\n{\\*\\cxs T} t \r\n
'S': 't{^ ^}',
'T': ' t ',
'''),
# Translations are ignored if converter returns None
lambda: rtf_load_test(
r'''
{\*\cxs S}return_none
'S': 'return_none',
'''),
lambda: rtf_load_test(
r'''
{\*\cxs T}t t t
'T': 't t t{^ ^}',
'''),
# Conflicts result on only the last one kept.
lambda: rtf_load_test(
r'''
{\*\cxs T}t
{\*\cxs T}g
'T': 'g',
'''),
lambda: rtf_load_test(
r'''
{\*\cxs T}t
{\*\cxs T}return_none
'T': 'return_none',
'''),
# Translation conversion tests.
# Void...
lambda: rtf_load_test('', ''),
# Escaped characters.
lambda: rtf_load_test(r'\-', '-'),
lambda: rtf_load_test(r'\\ ', '\\ '),
lambda: rtf_load_test(r'\\', '\\'),
lambda: rtf_load_test(r'\{', '{'),
lambda: rtf_load_test(r'\}', '}'),
# Hard space.
lambda: rtf_load_test(r'\~', '{^ ^}'),
# Non-breaking hyphen.
lambda: rtf_load_test(r'\_', '{^-^}'),
# Escaped newline.
lambda: rtf_load_test('\\\r\n', '{^\n\n^}'),
# Newline / tabs control words.
lambda: rtf_load_test(r'test:\line something', 'test:\nsomething'),
lambda: rtf_load_test(r'\line', '\n'),
lambda: rtf_load_test(r'\tab', '\t'),
lambda: rtf_load_test(r'{\line at group start}', '\nat group start'),
lambda: rtf_load_test('test text', 'test text'),
lambda: rtf_load_test('test text', 'test text'),
# Delete Spaces.
lambda: rtf_load_test(r'\cxds', '{^}'),
lambda: rtf_load_test(r'{\cxds}', '{^}'),
lambda: rtf_load_test(r'pre\cxds ', '{pre^}'),
lambda: rtf_load_test(r'{pre\cxds}', '{pre^}'),
lambda: rtf_load_test(r'pre\cxds ', '{pre^} '),
lambda: rtf_load_test(r'pre\cxds', '{pre^}'),
lambda: rtf_load_test(r'{pre\cxds }', '{pre^} '),
lambda: rtf_load_test(r'\cxds post', '{^post}'),
lambda: rtf_load_test(r'{\cxds post}', '{^post}'),
lambda: rtf_load_test(r'\cxds in\cxds', '{^in^}'),
lambda: rtf_load_test(r'\cxds in\cxds ', '{^in^}'),
lambda: rtf_load_test(r'{\cxds in\cxds}', '{^in^}'),
# Force Cap.
lambda: rtf_load_test(r'\cxfc', '{-|}'),
# Force Lower Case.
lambda: rtf_load_test(r'\cxfl', '{>}'),
lambda: rtf_load_test(r'pre\cxfl', 'pre{>}'),
# New paragraph.
lambda: rtf_load_test(r'\par', '{^\n\n^}'),
lambda: rtf_load_test(r'{\par test}', '{^\n\n^}test'),
# Stenovations extensions...
lambda: rtf_load_test(r'{\*\cxsvatdictflags N}', '{-|}'),
lambda: rtf_load_test(r'{\*\cxsvatdictflags LN1}', '{-|}'),
# Styles.
# Continuation styles are indented.
lambda: rtf_load_test(r'\par\s4', '{^\n\n ^}'),
# caseCATalyst declares new styles without a preceding \par,
# so we treat it as an implicit par.
lambda: rtf_load_test(r'\s1', '{^\n\n^}'),
# But if the \par is present we don't treat \s as an implicit par.
lambda: rtf_load_test(r'\par\s1', '{^\n\n^}'),
# caseCATalyst punctuation.
lambda: rtf_load_test(r'.', '{.}'),
lambda: rtf_load_test(r'. ', '{.} '),
lambda: rtf_load_test(r' . ', ' . '),
# Automatic Text.
lambda: rtf_load_test(r'{\cxa Q.}.', 'Q..'),
# Don't mess with period that is part of a word.
lambda: rtf_load_test(r'Mr.', 'Mr.'),
lambda: rtf_load_test(r'.attribute', '.attribute'),
# Stitching.
lambda: rtf_load_test(r'{\cxstit contents}', 'contents'),
# Fingerspelling.
lambda: rtf_load_test(r'{\cxfing c}', '{&c}'),
lambda: rtf_load_test(r'\cxfing Z.', '{&Z.}'),
# Punctuation.
lambda: rtf_load_test(r'{\cxp.}', '{.}'),
lambda: rtf_load_test(r'{\cxp .}', '{.}'),
lambda: rtf_load_test(r'{\cxp . }', '{.}'),
lambda: rtf_load_test(r'{\cxp . }', '{.}'),
lambda: rtf_load_test(r'{\cxp !}', '{!}'),
lambda: rtf_load_test(r'{\cxp ?}', '{?}'),
lambda: rtf_load_test(r'{\cxp ,}', '{,}'),
lambda: rtf_load_test(r'{\cxp ;}', '{;}'),
lambda: rtf_load_test(r'{\cxp :}', '{:}'),
lambda: rtf_load_test('{\\cxp \'}', '{^\'}'),
lambda: rtf_load_test('{\\cxp -}', '{^-^}'),
lambda: rtf_load_test('{\\cxp /}', '{^/^}'),
# Why not '{^...^}'?
lambda: rtf_load_test('{\\cxp... }', '{^... ^}'),
# Why not '{^")^}'?
lambda: rtf_load_test('{\\cxp ") }', '{^") ^}'),
# Unsupported, non-ignored group.
lambda: rtf_load_test(r'{\nonexistent }', ''),
lambda: rtf_load_test(r'{\nonexistent contents}', 'contents'),
lambda: rtf_load_test(r'{\nonexistent cont\_ents}', 'cont{^-^}ents'),
lambda: rtf_load_test(r'{\nonexistent {\cxp .}}', '{.}'),
# Unsupported, ignored group.
lambda: rtf_load_test(r'{\*\nonexistent }', ''),
lambda: rtf_load_test(r'{\*\nonexistent contents}', ''),
lambda: rtf_load_test(r'{\*\nonexistent {\cxp .}}', ''),
lambda: rtf_load_test(r'be\cxds{\*\cxsvatdictentrydate\yr2006\mo5\dy10}', '{be^}'),
# Unresolved conflicts.
lambda: rtf_load_test(r'{\cxconf [{\cxc abc}]}', '[abc]'),
lambda: rtf_load_test(r'{\cxconf [{\cxc abc}|{\cxc def}]}', '[abc|def]'),
lambda: rtf_load_test(r'{\cxconf [{\cxc abc}|{\cxc def}|{\cxc ghi}]}', '[abc|def|ghi]'),
lambda: rtf_load_test(r'{\cxconf [{\cxc abc}|{\cxc {\cxp... }}]}', '[abc|{^... ^}]'),
lambda: rtf_load_test(r"{\cxconf [{\cxc their}|{\cxc there}|{\cxc they're}]}", "[their|there|they're]"),
lambda: rtf_load_test(r'{\cxconf [{\cxc \par\s4}|{\cxc paragraph}]}', '[\n\n |paragraph]'),
# Resolved conflicts.
lambda: rtf_load_test(r"{\cxconf {\cxc their}{\*\deleted {\cxc there}{\cxc they're}}}", 'their'),
lambda: rtf_load_test(r"{\cxconf {\*\deleted {\cxc their}}{\cxc there}{\*\deleted {\cxc they're}}}", 'there'),
lambda: rtf_load_test(r'{\cxconf {\cxc \par\s4}{\*\deleted {\cxc paragraph}}}', '{^\n\n ^}'),
lambda: rtf_load_test(r'{\cxconf {\*\deleted {\cxc \par\s4}}{\cxc paragraph}}', 'paragraph'),
# Plover custom formatting:
# - meta: command
lambda: rtf_load_test(r'{\*\cxplovermeta plover:focus}', '{plover:focus}'),
# - meta: key combo
lambda: rtf_load_test(r'{\*\cxplovermeta #Alt_L(Tab Tab)}', '{#Alt_L(Tab Tab)}'),
# - meta: other
lambda: rtf_load_test(r'{\*\cxplovermeta :retro_currency:$c}', '{:retro_currency:$c}'),
# - macro
lambda: rtf_load_test(r'{\*\cxplovermacro retrospective_delete_space}', '=retrospective_delete_space'),
lambda: rtf_load_test(r'\cxdstroke', '=undo'),
# Unrecoverable content.
# Bad header.
lambda: (br' { \rtf1 test }', BadRtfError),
lambda: (br'\rtf1 test }', BadRtfError),
lambda: (br'{\rtf500 ... }', BadRtfError),
# Recoverable content.
# Starting new mapping in the middle of previous one.
lambda: rtf_load_test(
r'''
{\*\cxs PWRA*-BGT}{\cxconf [{\cxds arg{\cxc\cxds{\*\cxsvatdictentrydate\yr2016\mo8\da12}
{\*\cxs #25-UBG}{\cxconf [{\cxc the talk}|{\cxc\cxsvatds , 1946,}]}{\*\cxsvatdictentrydate
\yr2016\mo7\da14}
{\*\cxs #250-UDZ}{\cxconf [{\cxc $25,000}|{\cxc the attitudes}]}{\*\cxsvatdictentrydate
\yr2016\mo6\da29}
'PWRA*BGT': '[{^arg}{^}',
'25UBG': '[ the talk|, 1946,]',
'250UDZ': '[$25,000|the attitudes]',
'''),
# Unescaped group end in the middle of a mapping.
lambda: rtf_load_test(
r'''
{\*\cxs SPWHRA*-RB}\cxds \\\cxds{\*\cxsvatdictentrydate\yr2016\mo5\da20}
{\*\cxs PWRA*-BGTD}\cxds}]}{\*\cxsvatdictentrydate\yr2016\mo8\da12}
{\*\cxs SPA*-EUS}\cxds ^\cxds{\*\cxsvatdictentrydate\yr2016\mo8\da22}
'SPWHRA*RB': '{^}\\{^}',
'PWRA*BGTD' : '{^}]',
'SPA*EUS' : '{^^^}',
'''),
# Fingerspelling without content.
lambda: rtf_load_test(r'\cxfing', ''),
lambda: (
br'{\rtf1\ansi{\*\cxs TEFT}\cxfing',
'''
'TEFT': ''
'''),
# RTF/CRE spec allow the number key letter anywhere...
lambda: rtf_load_test(
r'''
{\*\cxs 2#}2
'2': '2',
'''),
)
def rtf_save_test(dict_entries, rtf_entries):
    """Wrap *rtf_entries* with the RTF/CRE header and footer Plover emits on save."""
    header = (
        br'{\rtf1\ansi{\*\cxrev100}\cxdict'
        br'{\*\cxsystem Plover %s}'
        br'{\stylesheet{\s0 Normal;}}'
    ) % plover_version.encode()
    framed = (header,) + rtf_entries + (b'}', b'')
    return dict_entries, b'\r\n'.join(framed)
RTF_SAVE_TESTS = (
lambda: rtf_save_test(
'''
'TEFT': 'test',
'TEFT/-G': 'testing',
''',
(br'{\*\cxs TEFT}test',
br'{\*\cxs TEFT/-G}testing')
),
lambda: rtf_save_test(
'''
'S/T': '{pre^}',
''',
(br'{\*\cxs S/T}{pre\cxds}',)
),
lambda: rtf_save_test(
r'''
"PWR-S": "\\{",
''',
(br'{\*\cxs PWR-S}\\\{',)
),
lambda: rtf_save_test(
r'''
"TEFT": "test\nsomething",
''',
(br'{\*\cxs TEFT}test\line something',)
),
lambda: rtf_save_test(
r'''
"TEFT": "test\\nsomething",
''',
(br'{\*\cxs TEFT}test\\nsomething',)
),
lambda: rtf_save_test(
'''
"PHROLG": "{PLOVER:TOGGLE}",
''',
(br'{\*\cxs PHROLG}{\*\cxplovermeta PLOVER:TOGGLE}',)
),
)
@dictionary_test
class TestRtfDictionary:
    """RTF/CRE dictionary tests, driven by the shared dictionary_test harness."""
    # Implementation under test and its registration details.
    DICT_CLASS = RtfDictionary
    DICT_EXTENSION = 'rtf'
    DICT_REGISTERED = True
    # Data-driven load/save cases defined above in this module.
    DICT_LOAD_TESTS = RTF_LOAD_TESTS
    DICT_SAVE_TESTS = RTF_SAVE_TESTS
    DICT_SAMPLE = ''
    @staticmethod
    def make_dict(contents):
        """Wrap *contents* in a minimal RTF document with a stylesheet,
        or pass it through unchanged if it is already raw bytes."""
        if isinstance(contents, bytes):
            return contents
        # Style table mirroring the ones produced by CAT software.
        rtf_styles = {
            0: 'Normal',
            1: 'Question',
            2: 'Answer',
            3: 'Colloquy',
            4: 'Continuation Q',
            5: 'Continuation A',
            6: 'Continuation Col',
            7: 'Paren',
            8: 'Centered',
        }
        rtf = (
            '\r\n'.join(
                [r'{\rtf1\ansi\cxdict{\*\cxrev100}{\*\cxsystem Fake Software}'] +
                [r'{\stylesheet'] +
                [r'{\s%d %s;}' % (k, v) for k, v in rtf_styles.items()] +
                ['}', contents, '}', '']
            ))
        # RTF dictionaries use the cp1252 code page.
        return rtf.encode('cp1252')
| 16,537
|
Python
|
.py
| 447
| 30.145414
| 114
| 0.52549
|
openstenoproject/plover
| 2,318
| 281
| 179
|
GPL-2.0
|
9/5/2024, 5:12:22 PM (Europe/Amsterdam)
|
18,648
|
test_log.py
|
openstenoproject_plover/test/test_log.py
|
# Copyright (c) 2013 Hesky Fisher
# See LICENSE.txt for details.
import os
from logging import Handler
from collections import defaultdict
import pytest
from plover.steno import Stroke
from plover import log
class FakeHandler(Handler):
    """In-memory stand-in for plover.log.FileHandler used by these tests."""
    # Class-level store mapping log filename -> list of emitted messages;
    # shared across all instances (cleared by the fake_file_log fixture).
    outputs = defaultdict(list)
    def __init__(self, filename, format=log.STROKE_LOG_FORMAT):
        super().__init__()
        self.baseFilename = filename
    def emit(self, record):
        # Record the message under this handler's filename instead of writing to disk.
        FakeHandler.outputs[self.baseFilename].append(record.getMessage())
@pytest.fixture(autouse=True)
def fake_file_log(monkeypatch):
    """Swap in FakeHandler for every test, and reset logger state afterwards."""
    monkeypatch.setattr('plover.log.FileHandler', FakeHandler)
    yield
    FakeHandler.outputs.clear()
    # Reset logger state.
    log.set_stroke_filename(None)
    log.enable_stroke_logging(False)
    log.enable_translation_logging(False)
def stroke_filename(path):
    """Return *path* as an absolute, normalized filesystem path."""
    absolute = os.path.abspath(path)
    return absolute
def test_set_filename():
    """Switching the stroke log filename redirects output; None disables it."""
    sf1 = stroke_filename('/fn1')
    log.set_stroke_filename('/fn1')
    log.enable_stroke_logging(True)
    log.stroke(Stroke(('S-',)))
    sf2 = stroke_filename('/fn2')
    log.set_stroke_filename('/fn2')
    log.stroke(Stroke(('-T',)))
    # With no filename set, strokes are dropped.
    log.set_stroke_filename(None)
    log.stroke(Stroke(('P-',)))
    assert FakeHandler.outputs == {
        sf1: ["Stroke(S : ['S-'])"],
        sf2: ["Stroke(-T : ['-T'])"],
    }
def test_stroke():
    """Logged strokes are formatted with their steno notation and key list."""
    sf = stroke_filename('/fn')
    log.set_stroke_filename(sf)
    log.enable_stroke_logging(True)
    log.stroke(Stroke(('S-', '-T', 'T-')))
    log.stroke(Stroke(('#', 'S-', '-T')))
    assert FakeHandler.outputs == {
        sf: ["Stroke(ST-T : ['S-', 'T-', '-T'])",
             "Stroke(1-9 : ['#', 'S-', '-T'])"],
    }
def test_log_translation():
    """Translation logging records the first list's entries with a '*' prefix."""
    sf = stroke_filename('/fn')
    log.set_stroke_filename(sf)
    log.enable_translation_logging(True)
    log.translation(['a', 'b'], ['c', 'd'], None)
    assert FakeHandler.outputs == {sf: ['*a', '*b', 'c', 'd']}
def test_enable_stroke_logging():
    """Strokes are only recorded while stroke logging is enabled."""
    sf = stroke_filename('/fn')
    log.set_stroke_filename(sf)
    log.stroke(Stroke(('S-',)))
    log.enable_stroke_logging(True)
    log.stroke(Stroke(('T-',)))
    log.enable_stroke_logging(False)
    log.stroke(Stroke(('K-',)))
    # Only the stroke made while logging was enabled shows up.
    assert FakeHandler.outputs == {sf: ["Stroke(T : ['T-'])"]}
def test_enable_translation_logging():
    """Translations are only recorded while translation logging is enabled."""
    sf = stroke_filename('/fn')
    log.set_stroke_filename(sf)
    log.translation(['a'], ['b'], None)
    log.enable_translation_logging(True)
    log.translation(['c'], ['d'], None)
    log.enable_translation_logging(False)
    log.translation(['e'], ['f'], None)
    # Only the translation made while logging was enabled shows up.
    assert FakeHandler.outputs == {sf: ['*c', 'd']}
| 2,611
|
Python
|
.py
| 74
| 30.5
| 74
| 0.639396
|
openstenoproject/plover
| 2,318
| 281
| 179
|
GPL-2.0
|
9/5/2024, 5:12:22 PM (Europe/Amsterdam)
|
18,649
|
test_misc.py
|
openstenoproject_plover/test/test_misc.py
|
# -*- coding: utf-8 -*-
# Copyright (c) 2016 Open Steno Project
# See LICENSE.txt for details.
"""Tests for misc.py."""
import inspect
import os
import pytest
import plover.misc as misc
import plover.oslayer.config as conf
from plover.resource import ASSET_SCHEME
from plover_build_utils.testing import parametrize
def test_popcount_8():
    """popcount_8 matches the number of set bits for every byte value."""
    for n in range(256):
        assert misc.popcount_8(n) == format(n, 'b').count('1')
    # Out-of-range inputs are rejected via assertions.
    with pytest.raises(AssertionError):
        misc.popcount_8(256)
    with pytest.raises(AssertionError):
        misc.popcount_8(-1)
# Platform-specific absolute path used by the dictionary path tests below.
if conf.PLATFORM == 'win':
    ABS_PATH = os.path.normcase(r'c:\foo\bar')
else:
    ABS_PATH = '/foo/bar'
@parametrize((
    # Asset, no change.
    lambda:
    (ASSET_SCHEME + 'foo:bar',
     ASSET_SCHEME + 'foo:bar'),
    # Absolute path, no change.
    lambda:
    (ABS_PATH,
     ABS_PATH),
    # Relative path, resolve relative to configuration directory.
    lambda:
    (os.path.normcase(os.path.normpath('foo/bar')),
     os.path.normcase(os.path.join(os.path.realpath(conf.CONFIG_DIR),
                                   'foo', 'bar'))),
    # Path below the user home directory.
    lambda:
    (os.path.normcase(os.path.normpath('~/foo/bar')),
     os.path.normcase(os.path.expanduser(os.path.normpath('~/foo/bar')))),
))
def test_dictionary_path(short_path, full_path):
    """shorten_path/expand_path convert between short and full dictionary paths."""
    for input, function, expected in (
        # Unchanged.
        (short_path, 'shorten', short_path),
        (full_path, 'expand', full_path),
        # Shorten.
        (full_path, 'shorten', short_path),
        # Expand.
        (short_path, 'expand', full_path),
    ):
        function = '%s_path' % function
        result = getattr(misc, function)(input)
        assert result == expected, function
@parametrize((
    # Boolean.
    lambda: (False, False),
    lambda: (True, True),
    # True string values.
    lambda: ('1', True),
    lambda: ('yes', True),
    lambda: ('true', True),
    lambda: ('on', True),
    # False string values.
    lambda: ('0', False),
    lambda: ('no', False),
    lambda: ('false', False),
    lambda: ('off', False),
    # Invalid string values.
    lambda: ('yep', ValueError),
    lambda: ('nope', ValueError),
    # Other types.
    lambda: (0, False),
    lambda: (1, True),
    lambda: (42, True),
    lambda: (4.2, True),
    lambda: (0.0, False),
    lambda: (None, False),
))
def test_boolean(input, output):
    """misc.boolean coerces *input* to a bool, or raises the expected error class."""
    if inspect.isclass(output):
        with pytest.raises(output):
            misc.boolean(input)
    else:
        assert misc.boolean(input) == output
def test_to_surrogate_pairs():
    """to_surrogate_pair splits astral code points into UTF-16 surrogate pairs."""
    # Split unicode characters above 0xFFFF.
    assert misc.to_surrogate_pair(chr(0x1F4A6)) == [0xD83D, 0xDCA6]
    # Do not split characters below 0xFFFF.
    assert misc.to_surrogate_pair(chr(0x20)) == [0x20]
    # Do not split already split characters.
    assert misc.to_surrogate_pair(chr(0xD83D) + chr(0xDCA6)) == [0xD83D, 0xDCA6]
| 2,942
|
Python
|
.py
| 92
| 26.641304
| 80
| 0.631078
|
openstenoproject/plover
| 2,318
| 281
| 179
|
GPL-2.0
|
9/5/2024, 5:12:22 PM (Europe/Amsterdam)
|
18,650
|
test_default_dict.py
|
openstenoproject_plover/test/test_default_dict.py
|
# Copyright (c) 2013 Hesky Fisher
# See LICENSE.txt for details.
import json
from collections import defaultdict
import pytest
from plover_build_utils.testing import steno_to_stroke
DICT_NAMES = ['main.json',
'commands.json',
'user.json']
DICT_PATH = 'plover/assets/'
def test_no_duplicates_categorized_files():
    """No steno outline may be defined in more than one bundled dictionary."""
    entries = defaultdict(list)
    for name in DICT_NAMES:
        with open(DICT_PATH + name, encoding='utf-8') as fp:
            # Keep duplicate keys: parse as a raw list of (key, value) pairs.
            pairs = json.load(fp, object_pairs_hook=lambda x: x)
        for outline, translation in pairs:
            entries[outline].append((translation, name))
    duplicated = {k: v for k, v in entries.items() if len(v) > 1}
    # Build a readable failure message listing every clash.
    parts = []
    for outline, sources in duplicated.items():
        parts.append('key: %s\n' % outline)
        for source in sources:
            parts.append('%r in %s\n' % source)
    assert not duplicated, '\n' + ''.join(parts)
@pytest.mark.parametrize('dictionary', DICT_NAMES)
def test_entries_are_valid(dictionary):
    """Every outline in the bundled dictionary must parse into valid strokes."""
    with open(DICT_PATH + dictionary, encoding='utf-8') as fp:
        outlines = json.load(fp)
    # Collect each unique stroke and let steno_to_stroke raise on invalid ones.
    strokes = {part for outline in outlines for part in outline.split('/')}
    for steno in strokes:
        steno_to_stroke(steno)
| 1,280
|
Python
|
.py
| 35
| 29.714286
| 66
| 0.624595
|
openstenoproject/plover
| 2,318
| 281
| 179
|
GPL-2.0
|
9/5/2024, 5:12:22 PM (Europe/Amsterdam)
|
18,651
|
test_loading_manager.py
|
openstenoproject_plover/test/test_loading_manager.py
|
# Copyright (c) 2013 Hesky Fisher
# See LICENSE.txt for details.
"""Tests for loading_manager.py."""
from collections import defaultdict
import os
import tempfile
import pytest
from plover.exception import DictionaryLoaderException
import plover.dictionary.loading_manager as loading_manager
class FakeDictionaryContents:
    """Dictionary payload stub whose equality ignores the timestamp."""
    def __init__(self, contents, timestamp):
        self.contents = contents
        self.timestamp = timestamp
    def __eq__(self, other):
        # Compare against either another stub or a raw contents value.
        if isinstance(other, FakeDictionaryContents):
            other = other.contents
        return self.contents == other
class FakeDictionaryInfo:
    """Test fixture pairing a dictionary name/contents with a real temp file."""
    def __init__(self, name, contents):
        self.name = name
        self.contents = contents
        # The temp file's path doubles as the dictionary's filename.
        self.tf = tempfile.NamedTemporaryFile()
    def __repr__(self):
        return f'FakeDictionaryInfo({self.name!r}, {self.contents!r})'
class MockLoader:
    """Callable serving canned dictionary contents, counting loads per path."""
    def __init__(self, files):
        self.files = files
        self.load_counts = defaultdict(int)
    def __call__(self, filename):
        self.load_counts[filename] += 1
        info = self.files[filename]
        if isinstance(info.contents, Exception):
            # Simulate a failed load.
            raise info.contents
        mtime = os.path.getmtime(filename)
        return FakeDictionaryContents(info.contents, mtime)
def test_loading(monkeypatch):
    """End-to-end exercise of DictionaryLoadingManager: loading order, caching,
    error reporting, timestamp-based reloads, and trimming of outdated entries."""
    dictionaries = {}
    for i in range(8):
        c = chr(ord('a') + i)
        contents = c * 5
        if i >= 4:
            # Dictionaries e-h simulate load failures.
            contents = Exception(contents)
        d = FakeDictionaryInfo(c, contents)
        assert d.tf.name not in dictionaries
        dictionaries[d.tf.name] = d
        assert c not in dictionaries
        dictionaries[c] = d
    def df(name):
        # Map a short name to its backing temp-file path.
        return dictionaries[name].tf.name
    loader = MockLoader(dictionaries)
    monkeypatch.setattr('plover.dictionary.loading_manager.load_dictionary', loader)
    manager = loading_manager.DictionaryLoadingManager()
    manager.start_loading(df('a')).get()
    manager.start_loading(df('b')).get()
    results = manager.load([df('c'), df('b')])
    # Returns the right values in the right order.
    assert results == ['ccccc', 'bbbbb']
    # Dropped superfluous files.
    assert sorted([df('b'), df('c')]) == sorted(manager.dictionaries.keys())
    # Check dict like interface.
    assert len(manager) == 2
    assert df('a') not in manager
    with pytest.raises(KeyError):
        manager[df('a')]
    assert df('b') in manager
    assert df('c') in manager
    assert results == [manager[df('c')], manager[df('b')]]
    # Return a DictionaryLoaderException for load errors.
    results = manager.load([df('c'), df('e'), df('b'), df('f')])
    assert len(results) == 4
    assert results[0] == 'ccccc'
    assert results[2] == 'bbbbb'
    assert isinstance(results[1], DictionaryLoaderException)
    assert results[1].path == df('e')
    assert isinstance(results[1].exception, Exception)
    assert isinstance(results[3], DictionaryLoaderException)
    assert results[3].path == df('f')
    assert isinstance(results[3].exception, Exception)
    # Only loaded the files once.
    assert all(x == 1 for x in loader.load_counts.values())
    # No reload if file timestamp is unchanged, or the dictionary
    # timestamp is more recent. (use case: dictionary edited with
    # Plover and saved back)
    file_timestamp = results[0].timestamp
    results[0].timestamp = file_timestamp + 1
    dictionaries['c'].contents = 'CCCCC'
    results = manager.load([df('c')])
    assert results == ['ccccc']
    assert loader.load_counts[df('c')] == 1
    # Check outdated dictionaries are reloaded.
    results[0].timestamp = file_timestamp - 1
    results = manager.load([df('c')])
    assert results == ['CCCCC']
    assert loader.load_counts[df('c')] == 2
    # Check trimming of outdated dictionaries.
    results[0].timestamp = file_timestamp - 1
    manager.unload_outdated()
    assert len(manager) == 0
    assert df('c') not in manager
| 3,951
|
Python
|
.py
| 99
| 33.989899
| 84
| 0.668145
|
openstenoproject/plover
| 2,318
| 281
| 179
|
GPL-2.0
|
9/5/2024, 5:12:22 PM (Europe/Amsterdam)
|
18,652
|
test_orthography.py
|
openstenoproject_plover/test/test_orthography.py
|
# Copyright (c) 2013 Hesky Fisher
# See LICENSE.txt for details.
from plover.orthography import add_suffix
from plover_build_utils.testing import parametrize
ADD_SUFFIX_TESTS = (
lambda: ('artistic', 'ly', 'artistically'),
lambda: ('cosmetic', 'ly', 'cosmetically'),
lambda: ('establish', 's', 'establishes'),
lambda: ('speech', 's', 'speeches'),
lambda: ('approach', 's', 'approaches'),
lambda: ('beach', 's', 'beaches'),
lambda: ('arch', 's', 'arches'),
lambda: ('larch', 's', 'larches'),
lambda: ('march', 's', 'marches'),
lambda: ('search', 's', 'searches'),
lambda: ('starch', 's', 'starches'),
lambda: ('stomach', 's', 'stomachs'),
lambda: ('monarch', 's', 'monarchs'),
lambda: ('patriarch', 's', 'patriarchs'),
lambda: ('oligarch', 's', 'oligarchs'),
lambda: ('cherry', 's', 'cherries'),
lambda: ('day', 's', 'days'),
lambda: ('penny', 's', 'pennies'),
lambda: ('pharmacy', 'ist', 'pharmacist'),
lambda: ('melody', 'ist', 'melodist'),
lambda: ('pacify', 'ist', 'pacifist'),
lambda: ('geology', 'ist', 'geologist'),
lambda: ('metallurgy', 'ist', 'metallurgist'),
lambda: ('anarchy', 'ist', 'anarchist'),
lambda: ('monopoly', 'ist', 'monopolist'),
lambda: ('alchemy', 'ist', 'alchemist'),
lambda: ('botany', 'ist', 'botanist'),
lambda: ('therapy', 'ist', 'therapist'),
lambda: ('theory', 'ist', 'theorist'),
lambda: ('psychiatry', 'ist', 'psychiatrist'),
lambda: ('lobby', 'ist', 'lobbyist'),
lambda: ('hobby', 'ist', 'hobbyist'),
lambda: ('copy', 'ist', 'copyist'),
lambda: ('beauty', 'ful', 'beautiful'),
lambda: ('weary', 'ness', 'weariness'),
lambda: ('weary', 'some', 'wearisome'),
lambda: ('lonely', 'ness', 'loneliness'),
lambda: ('narrate', 'ing', 'narrating'),
lambda: ('narrate', 'or', 'narrator'),
lambda: ('generalize', 'ability', 'generalizability'),
lambda: ('reproduce', 'able', 'reproducible'),
lambda: ('grade', 'ations', 'gradations'),
lambda: ('urine', 'ary', 'urinary'),
lambda: ('achieve', 'able', 'achievable'),
lambda: ('polarize', 'ation', 'polarization'),
lambda: ('done', 'or', 'donor'),
lambda: ('analyze', 'ed', 'analyzed'),
lambda: ('narrate', 'ing', 'narrating'),
lambda: ('believe', 'able', 'believable'),
lambda: ('animate', 'ors', 'animators'),
lambda: ('discontinue', 'ation', 'discontinuation'),
lambda: ('innovate', 'ive', 'innovative'),
lambda: ('future', 'ists', 'futurists'),
lambda: ('illustrate', 'or', 'illustrator'),
lambda: ('emerge', 'ent', 'emergent'),
lambda: ('equip', 'ed', 'equipped'),
lambda: ('defer', 'ed', 'deferred'),
lambda: ('defer', 'er', 'deferrer'),
lambda: ('defer', 'ing', 'deferring'),
lambda: ('pigment', 'ed', 'pigmented'),
lambda: ('refer', 'ed', 'referred'),
lambda: ('fix', 'ed', 'fixed'),
lambda: ('alter', 'ed', 'altered'),
lambda: ('interpret', 'ing', 'interpreting'),
lambda: ('wonder', 'ing', 'wondering'),
lambda: ('target', 'ing', 'targeting'),
lambda: ('limit', 'er', 'limiter'),
lambda: ('maneuver', 'ing', 'maneuvering'),
lambda: ('monitor', 'ing', 'monitoring'),
lambda: ('color', 'ing', 'coloring'),
lambda: ('inhibit', 'ing', 'inhibiting'),
lambda: ('master', 'ed', 'mastered'),
lambda: ('target', 'ing', 'targeting'),
lambda: ('fix', 'ed', 'fixed'),
lambda: ('scrap', 'y', 'scrappy'),
lambda: ('trip', 's', 'trips'),
lambda: ('equip', 's', 'equips'),
lambda: ('bat', 'en', 'batten'),
lambda: ('smite', 'en', 'smitten'),
lambda: ('got', 'en', 'gotten'),
lambda: ('bite', 'en', 'bitten'),
lambda: ('write', 'en', 'written'),
lambda: ('flax', 'en', 'flaxen'),
lambda: ('wax', 'en', 'waxen'),
lambda: ('fast', 'est', 'fastest'),
lambda: ('white', 'er', 'whiter'),
lambda: ('crap', 'y', 'crappy'),
lambda: ('lad', 'er', 'ladder'),
lambda: ('translucent', 'cy', 'translucency'),
lambda: ('bankrupt', 'cy', 'bankruptcy'),
lambda: ('inadequate', 'cy', 'inadequacy'),
lambda: ('secret', 'cy', 'secrecy'),
lambda: ('impolite', 'cy', 'impolicy'),
lambda: ('idiot', 'cy', 'idiocy'),
lambda: ('free', 'ed', 'freed'),
lambda: ('free', 'er', 'freer'),
lambda: ('regulate', 'ry', 'regulatory'),
lambda: ('humble', 'ly', 'humbly'),
lambda: ('questionable', 'ly', 'questionably'),
lambda: ('triple', 'ly', 'triply'),
lambda: ('statute', 'ry', 'statutory'),
lambda: ('statute', 'ary', 'statutory'),
lambda: ('confirm', 'tory', 'confirmatory'),
lambda: ('supervise', 'ary', 'supervisory'),
lambda: ('Minessota', 'en', 'Minessotan'),
lambda: ('ceremony', 'ial', 'ceremonial'),
lambda: ('editor', 'ial', 'editorial'),
lambda: ('editor', 'ially', 'editorially'),
lambda: ('spaghetti', 'ification', 'spaghettification'),
lambda: ('fantastic', 'ical', 'fantastical'),
lambda: ('epistomology', 'ical', 'epistomological'),
lambda: ('oratory', 'ical', 'oratorical'),
lambda: ('radical', 'ist', 'radicalist'),
lambda: ('complementary', 'ity', 'complementarity'),
lambda: ('disproportional', 'ity', 'disproportionality'),
lambda: ('perform', 'tive', 'performative'),
lambda: ('restore', 'tive', 'restorative'),
lambda: ('token', 'ize', 'tokenize'),
lambda: ('token', 'ise', 'tokenise'),
lambda: ('conditional', 'ize', 'conditionalize'),
lambda: ('conditional', 'isation', 'conditionalisation'),
lambda: ('spectacular', 'ization', 'spectacularization'),
lambda: ('spectacular', 'ism', 'spectacularism'),
lambda: ('category', 'ize', 'categorize'),
lambda: ('category', 'ise', 'categorise'),
lambda: ('custom', 'izable', 'customizable'),
lambda: ('custom', 'isable', 'customisable'),
lambda: ('fantasy', 'ize', 'fantasize'),
lambda: ('criminal', 'ology', 'criminology'),
lambda: ('criminal', 'ologist', 'criminologist'),
lambda: ('dermatology', 'ist', 'dermatologist'),
lambda: ('similar', 'ish', 'similarish'),
)
@parametrize(ADD_SUFFIX_TESTS)
def test_add_suffix(word, suffix, expected):
    """Attaching *suffix* to *word* must produce the expected orthography."""
    combined = add_suffix(word, suffix)
    assert combined == expected
| 6,225
|
Python
|
.py
| 141
| 39.340426
| 61
| 0.577233
|
openstenoproject/plover
| 2,318
| 281
| 179
|
GPL-2.0
|
9/5/2024, 5:12:22 PM (Europe/Amsterdam)
|
18,653
|
test_passport.py
|
openstenoproject_plover/test/test_passport.py
|
# Copyright (c) 2011 Hesky Fisher
# See LICENSE.txt for details.
"""Unit tests for passport.py."""
import threading
from plover.machine.passport import Passport
from plover_build_utils.testing import parametrize
class MockSerial:
    """In-memory stand-in for serial.Serial feeding canned byte packets.

    Users supply ``data`` (a list of byte chunks) and ``event`` (a
    threading.Event set once the last chunk has been consumed) — in
    these tests via a subclass's class attributes.
    """
    def __init__(self, **params):
        # Accept (and ignore) the usual serial port parameters.
        pass
    def isOpen(self):
        return True
    def inWaiting(self):
        return len(self.data)
    def read(self, size=1):
        # Callers must request at least everything that is pending.
        assert size >= self.inWaiting()
        pending = self.data
        if not pending:
            return ''
        chunk = pending.pop(0)
        if not pending:
            # Signal that all canned input has been consumed.
            self.event.set()
        return chunk
    def close(self):
        pass
# Each case: (tuple of serial payloads, list of expected decoded key sets).
PASSPORT_TESTS = (
    # Test all keys.
    lambda: ((b'!f#f+f*fAfCfBfEfDfGfFfHfKfLfOfNfQfPfSfRfUfTfWfYfXfZf^f~f',),
             [set(Passport.get_keys()),]),
    # Anything below 8 is not registered.
    lambda: ((b'S9T8A7',), [['S', 'T'],]),
    # Sequence of strokes, one payload per stroke.
    lambda: ((b'SfTf', b'Zf', b'QfLf'), [['S', 'T'], ['Z',], ['Q', 'L']]),
)
@parametrize(PASSPORT_TESTS)
def test_passport(monkeypatch, inputs, expected):
    """Feed framed Passport packets through the machine and check decoded strokes."""
    # Default option values.
    params = {k: v[0] for k, v in Passport.get_option_info().items()}
    class mock(MockSerial):
        event = threading.Event()
        # Wrap each payload in the Passport packet framing.
        data = [b'<123/' + s + b'/something>' for s in inputs]
    monkeypatch.setattr('plover.machine.base.serial.Serial', mock)
    actual = []
    m = Passport(params)
    m.add_stroke_callback(actual.append)
    m.start_capture()
    # Wait until all canned packets have been consumed.
    mock.event.wait()
    m.stop_capture()
    assert len(actual) == len(expected)
    for actual_keys, expected_keys in zip(actual, expected):
        assert sorted(actual_keys) == sorted(expected_keys)
| 1,640
|
Python
|
.py
| 48
| 28.270833
| 76
| 0.629911
|
openstenoproject/plover
| 2,318
| 281
| 179
|
GPL-2.0
|
9/5/2024, 5:12:22 PM (Europe/Amsterdam)
|
18,654
|
test_steno_dictionary.py
|
openstenoproject_plover/test/test_steno_dictionary.py
|
# Copyright (c) 2013 Hesky Fisher
# See LICENSE.txt for details.
"""Unit tests for steno_dictionary.py."""
import pytest
from plover.steno_dictionary import StenoDictionary, StenoDictionaryCollection
from plover_build_utils.testing import dictionary_test
def test_dictionary_collection():
    """Lookup precedence, filtering, and mapping-style access on a collection."""
    d1 = StenoDictionary()
    d1[('S',)] = 'a'
    d1[('T',)] = 'b'
    d1.path = 'd1'
    d2 = StenoDictionary()
    d2[('S',)] = 'c'
    d2[('W',)] = 'd'
    d2.path = 'd2'
    # d2 comes first, so it takes precedence on conflicting keys.
    dc = StenoDictionaryCollection([d2, d1])
    assert dc.lookup(('S',)) == 'c'
    assert dc.lookup(('W',)) == 'd'
    assert dc.lookup(('T',)) == 'b'
    assert dc.lookup_from_all(('S',)) == [('c', d2), ('a', d1)]
    assert dc.lookup_from_all(('W',)) == [('d', d2)]
    assert dc.lookup_from_all(('T',)) == [('b', d1)]
    # Filters hide matching entries from filtered lookups only.
    f = lambda k, v: v == 'c'
    dc.add_filter(f)
    assert dc.lookup(('S',)) == 'a'
    assert dc.raw_lookup(('S',)) == 'c'
    assert dc.lookup_from_all(('S',)) == [('a', d1)]
    g = lambda k, v: v == 'a'
    dc.add_filter(g)
    assert dc.lookup(('S',)) is None
    assert dc.lookup_from_all(('S',)) == []
    # raw_lookup variants ignore filters.
    assert dc.raw_lookup(('S',)) == 'c'
    assert dc.lookup(('W',)) == 'd'
    assert dc.lookup(('T',)) == 'b'
    assert dc.raw_lookup(('W',)) == 'd'
    assert dc.raw_lookup(('T',)) == 'b'
    assert dc.raw_lookup_from_all(('S',)) == [('c', d2), ('a', d1)]
    assert dc.raw_lookup_from_all(('W',)) == [('d', d2)]
    assert dc.raw_lookup_from_all(('T',)) == [('b', d1)]
    assert dc.reverse_lookup('c') == {('S',)}
    # Removing the filters restores normal lookups.
    dc.remove_filter(f)
    dc.remove_filter(g)
    assert dc.lookup(('S',)) == 'c'
    assert dc.lookup(('W',)) == 'd'
    assert dc.lookup(('T',)) == 'b'
    assert dc.lookup_from_all(('S',)) == [('c', d2), ('a', d1)]
    assert dc.lookup_from_all(('W',)) == [('d', d2)]
    assert dc.lookup_from_all(('T',)) == [('b', d1)]
    assert dc.reverse_lookup('c') == {('S',)}
    # set() writes to the first dictionary unless a path is given.
    dc.set(('S',), 'e')
    assert dc.lookup(('S',)) == 'e'
    assert d2[('S',)] == 'e'
    dc.set(('S',), 'f', path='d1')
    assert dc.lookup(('S',)) == 'e'
    assert d1[('S',)] == 'f'
    assert d2[('S',)] == 'e'
    # Iterating on a StenoDictionaryCollection is
    # the same as iterating on its dictionaries' paths.
    assert list(dc) == ['d2', 'd1']
    # Test get and [].
    assert dc.get('d1') == d1
    assert dc['d1'] == d1
    assert dc.get('invalid') is None
    with pytest.raises(KeyError):
        dc['invalid']
def test_dictionary_collection_writeable():
    """Writes go to the first dictionary that is not read-only."""
    writable = StenoDictionary()
    writable[('S',)] = 'a'
    writable[('T',)] = 'b'
    locked = StenoDictionary()
    locked[('S',)] = 'c'
    locked[('W',)] = 'd'
    locked.readonly = True
    collection = StenoDictionaryCollection([locked, writable])
    assert collection.first_writable() == writable
    collection.set(('S',), 'A')
    # The read-only dictionary is left untouched.
    assert writable[('S',)] == 'A'
    assert locked[('S',)] == 'c'
def test_dictionary_collection_longest_key():
    """longest_key reflects entries across dictionaries and their enabled flags."""
    single = ('S',)
    double = ('S', 'T')
    triple = ('S', 'T', 'R')
    collection = StenoDictionaryCollection()
    assert collection.longest_key == 0
    first = StenoDictionary()
    first.path = 'd1'
    first[single] = 'a'
    collection.set_dicts([first])
    assert collection.longest_key == 1
    first[double] = 'a'
    assert collection.longest_key == 2
    second = StenoDictionary()
    second.path = 'd2'
    second[triple] = 'c'
    collection.set_dicts([second, first])
    assert collection.longest_key == 3
    # Deleting the 2-stroke entry does not affect the 3-stroke maximum.
    del first[double]
    assert collection.longest_key == 3
    # Disabled dictionaries are excluded from the maximum.
    second.enabled = False
    assert collection.longest_key == 1
    first.enabled = False
    assert collection.longest_key == 0
    second.enabled = True
    assert collection.longest_key == 3
    first.enabled = True
    assert collection.longest_key == 3
    collection.set_dicts([first])
    assert collection.longest_key == 1
    collection.set_dicts([])
    assert collection.longest_key == 0
def test_casereverse_lookup():
    """Case-insensitive reverse lookup returns every casing from every dictionary."""
    collection = StenoDictionaryCollection()
    lower = StenoDictionary()
    lower[('PWAOUFL',)] = 'beautiful'
    lower[('WAOUFL',)] = 'beAuTIFul'
    upper = StenoDictionary()
    upper[('PW-FL',)] = 'BEAUTIFUL'
    other = StenoDictionary()
    other[('WAOUFL',)] = 'not beautiful'
    collection.set_dicts([lower, upper, other])
    # 'not beautiful' is a different word and must not be included.
    assert collection.casereverse_lookup('beautiful') == {'beautiful', 'BEAUTIFUL', 'beAuTIFul'}
def test_reverse_lookup():
    """reverse_lookup merges matches from all dictionaries, minus shadowed keys."""
    collection = StenoDictionaryCollection()
    main = StenoDictionary()
    main[('PWAOUFL',)] = 'beautiful'
    main[('WAOUFL',)] = 'beautiful'
    extra = StenoDictionary()
    extra[('PW-FL',)] = 'beautiful'
    shadow = StenoDictionary()
    shadow[('WAOUFL',)] = 'not beautiful'
    # A single dictionary reports all of its matching strokes.
    collection.set_dicts([main])
    assert collection.reverse_lookup('beautiful') == {('PWAOUFL',), ('WAOUFL',)}
    # Identical entries in two dictionaries are not duplicated.
    extra_copy = StenoDictionary()
    extra_copy.update(extra)
    collection.set_dicts([extra_copy, extra])
    assert collection.reverse_lookup('beautiful') == {('PW-FL',)}
    # Matches are collected past the first dictionary that has any.
    collection.set_dicts([extra, main])
    assert collection.reverse_lookup('beautiful') == {('PW-FL',), ('PWAOUFL',), ('WAOUFL',)}
    # ('WAOUFL',) maps to another word in the higher-precedence dictionary,
    # so it is excluded from the result.
    collection.set_dicts([shadow, extra, main])
    assert collection.reverse_lookup('beautiful') == {('PW-FL',), ('PWAOUFL',)}
def test_dictionary_enabled():
    """Disabled dictionaries must be excluded from every lookup variant."""
    dc = StenoDictionaryCollection()
    d1 = StenoDictionary()
    d1.path = 'd1'
    d1[('TEFT',)] = 'test1'
    d1[('TEFGT',)] = 'Testing'
    d2 = StenoDictionary()
    d2[('TEFT',)] = 'test2'
    d2[('TEFT', '-G')] = 'Testing'
    d2.path = 'd2'
    dc.set_dicts([d2, d1])
    # d2 takes precedence while both dictionaries are enabled.
    assert dc.lookup(('TEFT',)) == 'test2'
    assert dc.raw_lookup(('TEFT',)) == 'test2'
    assert dc.casereverse_lookup('testing') == {'Testing'}
    assert dc.reverse_lookup('Testing') == {('TEFT', '-G'), ('TEFGT',)}
    d2.enabled = False
    assert dc.lookup(('TEFT',)) == 'test1'
    assert dc.raw_lookup(('TEFT',)) == 'test1'
    assert dc.casereverse_lookup('testing') == {'Testing'}
    assert dc.reverse_lookup('Testing') == {('TEFGT',)}
    d1.enabled = False
    # Fixed: the original probed ('TEST',) here — a key present in neither
    # dictionary — so the assertion passed vacuously. Probe ('TEFT',), the
    # key that is actually defined, matching the raw_lookup check below.
    assert dc.lookup(('TEFT',)) is None
    assert dc.raw_lookup(('TEFT',)) is None
    assert dc.casereverse_lookup('testing') == set()
    assert dc.reverse_lookup('Testing') == set()
@dictionary_test
class TestStenoDictionary:
    """Run the shared dictionary test-suite against the base StenoDictionary."""
    class DICT_CLASS(StenoDictionary):
        # Minimal concrete subclass: loading from a file is a no-op.
        def _load(self, filename):
            pass
    DICT_EXTENSION = 'dict'
    DICT_SAMPLE = b''
@dictionary_test
class TestReadOnlyStenoDictionary:
    """Run the shared dictionary test-suite against a read-only dictionary."""
    class DICT_CLASS(StenoDictionary):
        # The read-only flag is set at class level; the suite is expected to
        # check that mutating operations are rejected.
        readonly = True
        def _load(self, filename):
            pass
    DICT_EXTENSION = 'dict'
    DICT_SAMPLE = b''
| 6,352
|
Python
|
.py
| 184
| 29.402174
| 88
| 0.571639
|
openstenoproject/plover
| 2,318
| 281
| 179
|
GPL-2.0
|
9/5/2024, 5:12:22 PM (Europe/Amsterdam)
|
18,655
|
test_engine.py
|
openstenoproject_plover/test/test_engine.py
|
from functools import partial
import os
import tempfile
import pytest
from plover import system
from plover.config import Config, DictionaryConfig
from plover.engine import ErroredDictionary, StenoEngine
from plover.machine.base import (
STATE_INITIALIZING,
STATE_RUNNING,
STATE_STOPPED,
StenotypeBase,
)
from plover.machine.keymap import Keymap
from plover.misc import normalize_path
from plover.oslayer.controller import Controller
from plover.output import Output
from plover.registry import Registry
from plover.steno_dictionary import StenoDictionaryCollection
from plover_build_utils.testing import make_dict
from .py37compat import mock
class FakeMachine(StenotypeBase):
    """In-process machine stub; exposes the live instance via a class attribute."""
    # Set to the active machine while capture is running, None otherwise.
    instance = None
    def __init__(self, options):
        super().__init__()
        self.options = options
        self.is_suppressed = False
    @classmethod
    def get_keys(cls):
        return system.KEYS
    def start_capture(self):
        # Only one capture may be active at a time.
        assert FakeMachine.instance is None
        FakeMachine.instance = self
        self._initializing()
        self._ready()
    def stop_capture(self):
        FakeMachine.instance = None
        self._stopped()
    def set_suppression(self, enabled):
        # Recorded so tests can assert on the engine's suppression requests.
        self.is_suppressed = enabled
class FakeKeyboardEmulation(Output):
    """Output sink that drops everything, keeping tests free of real key events."""
    def send_backspaces(self, b):
        pass
    def send_string(self, s):
        pass
    def send_key_combination(self, c):
        pass
    def set_key_press_delay(self, delay_ms):
        pass
class FakeEngine(StenoEngine):
    """Engine variant that records every hook invocation in ``self.events``."""
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.events = []
        def hook_callback(hook, *args, **kwargs):
            self.events.append((hook, args, kwargs))
        # Subscribe the recorder to every available hook.
        for hook in self.HOOKS:
            self.hook_connect(hook, partial(hook_callback, hook))
    def _in_engine_thread(self):
        # Pretend every caller is already on the engine thread so hooks
        # run synchronously in the test process.
        return True
    def quit(self, code=0):
        self._same_thread_hook(self._quit, code)
    def start(self):
        StenoEngine.start(self)
@pytest.fixture
def engine(monkeypatch):
    """Yield a FakeEngine wired to a temporary config file and a fake machine."""
    FakeMachine.instance = None
    registry = Registry()
    registry.update()
    registry.register_plugin('machine', 'Fake', FakeMachine)
    # Both the config and the engine resolve plugins through this registry.
    monkeypatch.setattr('plover.config.registry', registry)
    monkeypatch.setattr('plover.engine.registry', registry)
    ctrl = mock.MagicMock(spec=Controller)
    kbd = FakeKeyboardEmulation()
    # delete=False so the file can be reopened by name after being closed —
    # presumably for Windows compatibility; cleanup is done manually below.
    cfg_file = tempfile.NamedTemporaryFile(prefix='plover',
                                           suffix='config',
                                           delete=False)
    try:
        cfg_file.close()
        cfg = Config(cfg_file.name)
        cfg['dictionaries'] = []
        cfg['machine_type'] = 'Fake'
        cfg['system_keymap'] = [(k, k) for k in system.KEYS]
        cfg.save()
        yield FakeEngine(cfg, ctrl, kbd)
    finally:
        os.unlink(cfg_file.name)
def test_engine_lifecycle(engine):
    """Exercise a full start/configure/reconnect/stop cycle, checking hooks."""
    # Config load.
    assert engine.load_config()
    assert engine.events == []
    # Startup.
    engine.start()
    assert engine.events == [
        ('machine_state_changed', ('Fake', 'initializing'), {}),
        ('machine_state_changed', ('Fake', 'connected'), {}),
        ('config_changed', (engine.config,), {}),
    ]
    assert FakeMachine.instance is not None
    assert not FakeMachine.instance.is_suppressed
    assert len(engine._controller.mock_calls) == 1
    engine._controller.start.assert_called_once()
    engine._controller.reset_mock()
    # Output enabled.
    engine.events.clear()
    engine.output = True
    assert engine.events == [
        ('output_changed', (True,), {}),
    ]
    assert FakeMachine.instance.is_suppressed
    # Machine reconnection.
    engine.events.clear()
    engine.reset_machine()
    assert engine.events == [
        ('machine_state_changed', ('Fake', STATE_STOPPED), {}),
        ('machine_state_changed', ('Fake', STATE_INITIALIZING), {}),
        ('machine_state_changed', ('Fake', STATE_RUNNING), {}),
    ]
    assert FakeMachine.instance is not None
    assert FakeMachine.instance.is_suppressed
    # No machine reset on keymap change.
    engine.events.clear()
    new_keymap = Keymap(system.KEYS, system.KEYS)
    new_keymap.set_mappings(zip(system.KEYS, reversed(system.KEYS)))
    config_update = { 'system_keymap': new_keymap }
    assert FakeMachine.instance.keymap != new_keymap
    engine.config = config_update
    assert engine.events == [
        ('config_changed', (config_update,), {}),
    ]
    assert FakeMachine.instance.keymap == new_keymap
    # Output disabled
    engine.events.clear()
    engine.output = False
    assert engine.events == [
        ('output_changed', (False,), {}),
    ]
    assert not FakeMachine.instance.is_suppressed
    # Stopped.
    engine.events.clear()
    engine.quit(42)
    # join() must report the exit code that was passed to quit().
    assert engine.join() == 42
    assert engine.events == [
        ('machine_state_changed', ('Fake', STATE_STOPPED), {}),
        ('quit', (), {}),
    ]
    assert FakeMachine.instance is None
    assert len(engine._controller.mock_calls) == 1
    engine._controller.stop.assert_called_once()
def test_loading_dictionaries(tmp_path, engine):
    """Dictionary (re)loads fire dictionaries_loaded hooks with expected states."""
    def check_loaded_events(actual_events, expected_events):
        # Each event must be a 'dictionaries_loaded' notification whose
        # collection matches the expected (path, enabled, errored) triples.
        assert len(actual_events) == len(expected_events)
        for n, event in enumerate(actual_events):
            event_type, event_args, event_kwargs = event
            msg = 'event %u: %r' % (n, event)
            assert event_type == 'dictionaries_loaded', msg
            assert event_kwargs == {}, msg
            assert len(event_args) == 1, msg
            assert isinstance(event_args[0], StenoDictionaryCollection), msg
            assert [
                (d.path, d.enabled, isinstance(d, ErroredDictionary))
                for d in event_args[0].dicts
            ] == expected_events[n], msg
    # Two valid and two invalid (empty file) JSON dictionaries.
    with \
            make_dict(tmp_path, b'{}', 'json', 'valid1') as valid_dict_1, \
            make_dict(tmp_path, b'{}', 'json', 'valid2') as valid_dict_2, \
            make_dict(tmp_path, b'', 'json', 'invalid1') as invalid_dict_1, \
            make_dict(tmp_path, b'', 'json', 'invalid2') as invalid_dict_2:
        valid_dict_1 = normalize_path(str(valid_dict_1))
        valid_dict_2 = normalize_path(str(valid_dict_2))
        invalid_dict_1 = normalize_path(str(invalid_dict_1))
        invalid_dict_2 = normalize_path(str(invalid_dict_2))
        engine.start()
        for new_dictionaries, *expected_events in (
            # Load one valid dictionary.
            [[
                # path, enabled
                (valid_dict_1, True),
            ], [
                # path, enabled, errored
                (valid_dict_1, True, False),
            ]],
            # Load another invalid dictionary.
            [[
                (valid_dict_1, True),
                (invalid_dict_1, True),
            ], [
                (valid_dict_1, True, False),
                (invalid_dict_1, True, True),
            ]],
            # Disable first dictionary.
            [[
                (valid_dict_1, False),
                (invalid_dict_1, True),
            ], [
                (valid_dict_1, False, False),
                (invalid_dict_1, True, True),
            ]],
            # Replace invalid dictionary with another invalid one.
            [[
                (valid_dict_1, False),
                (invalid_dict_2, True),
            ], [
                (valid_dict_1, False, False),
            ], [
                (valid_dict_1, False, False),
                (invalid_dict_2, True, True),
            ]]
        ):
            engine.events.clear()
            config_update = {
                'dictionaries': [DictionaryConfig(*d)
                                 for d in new_dictionaries]
            }
            engine.config = dict(config_update)
            # First event is the config change, then the load notifications.
            assert engine.events[0] == ('config_changed', (config_update,), {})
            check_loaded_events(engine.events[1:], expected_events)
        # Simulate an outdated dictionary.
        engine.events.clear()
        engine.dictionaries[valid_dict_1].timestamp -= 1
        engine.config = {}
        check_loaded_events(engine.events, [[
            (invalid_dict_2, True, True),
        ], [
            (valid_dict_1, False, False),
            (invalid_dict_2, True, True),
        ]])
def test_engine_running_state(engine):
    """Translator state is kept while output toggles; disabled state is reset."""
    # Running state must be different
    # from initial (disabled state).
    initial_state = engine.translator_state
    assert engine.load_config()
    engine.set_output(True)
    running_state = engine.translator_state
    assert running_state != initial_state
    # Disabled state is reset every time
    # output is disabled.
    engine.set_output(False)
    disabled_state = engine.translator_state
    assert disabled_state != running_state
    assert disabled_state != initial_state
    # Running state is kept throughout.
    engine.set_output(True)
    assert engine.translator_state == running_state
def test_undo_and_clear_empty_translator_state(engine):
    """Clearing with undo on a freshly created engine must not raise."""
    engine.clear_translator_state(undo=True)
| 9,074
|
Python
|
.py
| 245
| 28.689796
| 79
| 0.609318
|
openstenoproject/plover
| 2,318
| 281
| 179
|
GPL-2.0
|
9/5/2024, 5:12:22 PM (Europe/Amsterdam)
|
18,656
|
test_json_dict.py
|
openstenoproject_plover/test/test_json_dict.py
|
# -*- coding: utf-8 -*-
# Copyright (c) 2013 Hesky Fisher
# See LICENSE.txt for details.
"""Unit tests for json.py."""
from plover.dictionary.json_dict import JsonDictionary
from plover_build_utils.testing import dictionary_test
def json_load_test(contents, expected):
    """Build a (contents, expected) load-test pair.

    String contents are encoded to UTF-8 bytes; bytes pass through unchanged.
    """
    data = contents.encode('utf-8') if isinstance(contents, str) else contents
    return data, expected
# Load fixtures: each thunk yields (raw contents, expected entries or error) —
# entries are deferred via lambdas, presumably so the harness builds them
# lazily per test (see dictionary_test).
JSON_LOAD_TESTS = (
    lambda: json_load_test('{"S": "a"}', '"S": "a"'),
    # Default encoding is utf-8.
    lambda: json_load_test('{"S": "café"}', '"S": "café"'),
    # But if that fails, the implementation
    # must automatically retry with latin-1.
    lambda: json_load_test('{"S": "café"}'.encode('latin-1'), '"S": "café"'),
    # Invalid JSON.
    lambda: json_load_test('{"foo", "bar",}', ValueError),
    # Invalid JSON.
    lambda: json_load_test('foo', ValueError),
    # Cannot convert to dict.
    lambda: json_load_test('"foo"', ValueError),
    # Ditto.
    lambda: json_load_test('4.2', TypeError),
)
def json_save_test(entries, expected):
    """Build a (entries, expected-bytes) save-test pair; output is UTF-8 encoded."""
    expected_bytes = expected.encode('utf-8')
    return entries, expected_bytes
JSON_SAVE_TESTS = (
# Simple test.
lambda: json_save_test(
'''
'S': 'a',
''',
'{\n"S": "a"\n}\n'
),
# Check strokes format: '/' separated.
lambda: json_save_test(
'''
'SAPL/-PL': 'sample',
''',
'{\n"SAPL/-PL": "sample"\n}\n'
),
# Contents should be saved as UTF-8, no escaping.
lambda: json_save_test(
'''
'S': 'café',
''',
'{\n"S": "café"\n}\n'
),
# Check escaping of special characters.
lambda: json_save_test(
r'''
'S': '{^"\n\t"^}',
''',
'{\n"S": "' + r'{^\"\n\t\"^}' + '"\n}\n'
),
# Keys are sorted on save.
lambda: json_save_test(
'''
'T': 'bravo',
'S': 'alpha',
'R': 'charlie',
''',
'{\n"S": "alpha",\n"T": "bravo",\n"R": "charlie"\n}\n'
),
)
@dictionary_test
class TestJsonDictionary:
    """Shared dictionary test-suite parameters for the JSON implementation."""
    DICT_CLASS = JsonDictionary
    DICT_EXTENSION = 'json'
    # Presumably asserts the format is registered with the plugin registry —
    # see dictionary_test for the exact semantics.
    DICT_REGISTERED = True
    DICT_LOAD_TESTS = JSON_LOAD_TESTS
    DICT_SAVE_TESTS = JSON_SAVE_TESTS
    DICT_SAMPLE = b'{}'
| 2,213
|
Python
|
.py
| 75
| 23.813333
| 77
| 0.55655
|
openstenoproject/plover
| 2,318
| 281
| 179
|
GPL-2.0
|
9/5/2024, 5:12:22 PM (Europe/Amsterdam)
|
18,657
|
test_stentura.py
|
openstenoproject_plover/test/test_stentura.py
|
# Copyright (c) 2011 Hesky Fisher
# See LICENSE.txt for details.
"""Unit tests for stentura.py."""
import struct
import threading
import pytest
from plover.machine import stentura
def make_response(seq, action, error=0, p1=0, p2=0, data=None, length=None):
    """Build a stentura response packet.

    The 14-byte header carries seq/length/action/error/p1/p2 plus a CRC over
    bytes 1-11; an optional data section is appended with its own CRC.
    ``length`` overrides the computed base length (used to craft bad packets);
    any data section still adds ``2 + len(data)`` to it, matching the callers
    that build deliberately inconsistent packets.
    """
    # Compare against None explicitly so an explicit length of 0 is honored
    # (the original `if not length:` would silently replace it with 14).
    if length is None:
        length = 14
    if data:
        length += 2 + len(data)
    response = bytearray(14 + ((2 + len(data)) if data else 0))
    struct.pack_into('<2B5H', response, 0, 1, seq, length, action, error, p1, p2)
    struct.pack_into('<H', response, 12, stentura._crc(response, 1, 11))
    if data:
        response[14:14+len(data)] = data
        struct.pack_into('<H', response, 14 + len(data), stentura._crc(data))
    return response
def make_read_response(seq, data=()):
    """Build a _READC response packet; p1 carries the payload length.

    The default is an immutable empty tuple (instead of the original mutable
    ``[]`` default) — behavior is identical since the argument is only read.
    """
    return make_response(seq, stentura._READC, p1=len(data), data=data)
def make_readc_packets(data):
    """Split *data* into _READC request/response pairs, ending with an empty read."""
    requests, responses = [], []
    seq = stentura._SequenceCounter()
    buf = bytearray(256)
    block, byte = 0, 0
    while data:
        s = seq()
        # At most 512 bytes are served per read request.
        chunk = data[0:512]
        data = data[512:]
        q = stentura._make_read(buf, s, block, byte)
        requests.append(bytes(q))
        r = make_read_response(s, chunk)
        responses.append(bytes(r))
        byte += len(chunk)
        # Positions roll over to the next 512-byte block.
        if byte >= 512:
            block += 1
            byte -= 512
    # A final empty read signals that no more data is available.
    s = seq()
    q = stentura._make_read(buf, s, block, byte)
    requests.append(bytes(q))
    r = make_read_response(s)
    responses.append(bytes(r))
    return requests, responses
def parse_request(request):
    """Decode a request packet into a dict of named header fields.

    Packets longer than the 18-byte header also carry a data payload and a
    trailing 16-bit CRC; otherwise 'data' and 'data_crc' are None.
    """
    fields = list(struct.unpack_from('<2B8H', request))
    if fields[2] > 18:
        payload = request[18:-2]
        payload_crc = struct.unpack('<H', request[-2:])
        fields += [payload, payload_crc]
    else:
        fields += [None, None]
    names = ['SOH', 'seq', 'length', 'action', 'p1', 'p2',
             'p3', 'p4', 'p5', 'crc', 'data', 'data_crc']
    return dict(zip(names, fields))
class MockPacketPort:
    """Serial-port stand-in replaying canned responses.

    Each write() advances to the next canned response; if a list of expected
    requests was given, the written packet is checked against it.
    """

    def __init__(self, responses, requests=None):
        self._responses = responses
        self._requests = requests
        self.writes = 0
        self._current_response_offset = None

    def write(self, data):
        self.writes += 1
        if self._requests:
            # Validate the outgoing packet against the expected one.
            if self._requests[self.writes - 1] != bytes(data):
                raise Exception("Wrong packet.")
        self._current_response_offset = 0
        return len(data)

    def read(self, count):
        # Serve the current response in chunks of the requested size.
        offset = self._current_response_offset
        chunk = self._responses[self.writes - 1][offset:offset + count]
        self._current_response_offset = offset + count
        return chunk
def test_crc():
    """_crc matches the well-known CRC-16 check value for '123456789'."""
    data = bytearray(b'123456789')
    assert stentura._crc(data) == 0xBB3D
def test_write_buffer():
    """_write_to_buffer grows the buffer as needed and overwrites in place."""
    buf = bytearray()
    data = [1, 2, 3]
    stentura._write_to_buffer(buf, 0, data)
    assert list(buf) == data
    # Overwriting a prefix leaves the remaining bytes untouched.
    stentura._write_to_buffer(buf, 0, [5, 6])
    assert list(buf) == [5, 6, 3]
def test_parse_stroke():
    """Decode one 4-byte stroke packet into steno key names."""
    # SAT
    a = 0b11001000
    b = 0b11000100
    c = 0b11000000
    d = 0b11001000
    assert sorted(stentura._parse_stroke(a, b, c, d)) == sorted(['S-', 'A-', '-T'])

# Byte layout of a stroke packet (the two high bits of each byte are framing):
# 11^#STKP 11WHRAO* 11EUFRPB 11LGTSDZ
# PRAOERBGS
def test_parse_strokes():
    """Decode a buffer containing two consecutive stroke packets."""
    data = bytearray()
    # SAT
    a = 0b11001000
    b = 0b11000100
    c = 0b11000000
    d = 0b11001000
    data.extend([a, b, c, d])
    # PRAOERBGS
    a = 0b11000001
    b = 0b11001110
    c = 0b11100101
    d = 0b11010100
    data.extend([a, b, c, d])
    for result, expected in zip(
        stentura._parse_strokes(bytes(data)),
        [['S-', 'A-', '-T'],
         ['P-', 'R-', 'A-', 'O-', '-E', '-R', '-B', '-G', '-S']],
    ):
        assert sorted(result) == sorted(expected)
def test_make_request():
    """_make_request packs header, params, CRC, and an optional data+CRC tail."""
    buf = bytearray(range(256))
    seq = 2
    action = stentura._OPEN
    p1, p2, p3, p4, p5 = 1, 2, 3, 4, 5
    p = stentura._make_request(buf, action, seq, p1, p2, p3, p4, p5)
    # Fields after the SOH byte, as little-endian 16-bit values.
    for_crc = [seq, 18, 0, action, 0, p1, 0, p2, 0, p3, 0, p4, 0, p5, 0]
    crc = stentura._crc(for_crc)
    expected = bytearray([1] + for_crc + [crc & 0xFF, crc >> 8])
    assert p == expected
    # Now with data.
    data = b'Testing Testing 123'
    p = stentura._make_request(buf, action, seq, p1, p2, p3, p4, p5, data)
    # Total length = 18-byte header + payload + 2-byte payload CRC.
    length = 18 + len(data) + 2
    for_crc = [seq, length & 0xFF, length >> 8, action, 0,
               p1, 0, p2, 0, p3, 0, p4, 0, p5, 0]
    crc = stentura._crc(for_crc)
    data_crc = stentura._crc(data)
    expected = bytearray([1] + for_crc + [crc & 0xFF, crc >> 8])
    expected.extend(data)
    expected.extend([data_crc & 0xFF, data_crc >> 8])
    assert p == expected
def test_make_open():
    """_make_open embeds the drive letter and sends the filename as data."""
    buf = bytearray(range(32))  # Start with junk in the buffer.
    seq = 79
    drive = b'A'
    filename = b'REALTIME.000'
    p = stentura._make_open(buf, seq, drive, filename)
    for_crc = [seq, 20 + len(filename), 0, stentura._OPEN, 0, ord(drive),
               0, 0, 0, 0, 0, 0, 0, 0, 0]
    crc = stentura._crc(for_crc)
    data_crc = stentura._crc(filename)
    expected = bytearray([1] + for_crc + [crc & 0xFF, crc >> 8])
    expected.extend(filename)
    expected.extend([data_crc & 0xFF, data_crc >> 8])
    assert p == expected
def test_make_read():
    """_make_read encodes length, block, and byte offset as request parameters."""
    buf = bytearray(range(32))  # Start with junk in the buffer.
    seq = 32
    block = 1
    byte = 8
    length = 20
    p = stentura._make_read(buf, seq, block, byte, length)
    for_crc = [seq, 18, 0, stentura._READC, 0, 1, 0, 0, 0, length, 0,
               block, 0, byte, 0]
    crc = stentura._crc(for_crc)
    expected = bytearray([1] + for_crc + [crc & 0xFF, crc >> 8])
    assert p == expected
def test_make_reset():
    """_make_reset builds a parameterless RESET request."""
    buf = bytearray(range(32))  # Start with junk in the buffer.
    seq = 67
    p = stentura._make_reset(buf, seq)
    for_crc = [seq, 18, 0, stentura._RESET, 0] + ([0] * 10)
    crc = stentura._crc(for_crc)
    expected = bytearray([1] + for_crc + [crc & 0xFF, crc >> 8])
    assert p == expected
# (description, packet, expected validity) triples for _validate_response.
VALIDATE_RESPONSE_TESTS = (
    ("valid, no data",
     make_response(5, 9, 1, 2, 3), True),
    ("valid, data",
     make_response(5, 9, 1, 2, 3, data=b"hello"), True),
    ("short",
     make_response(5, 9, 1, 2, 3)[:12], False),
    ("length long",
     make_response(5, 9, 1, 2, 3, length=15), False),
    ("length short",
     make_response(5, 9, 1, 2, 3, data=b'foo', length=15), False),
    ("bad data",
     make_response(5, 9, 1, 2, 3) + b'1', False),
    ("bad crc",
     make_response(5, 9, 1, 2, 3)[:-1] + b'1', False),
    ("bad data crc",
     make_response(5, 9, 1, 2, 3, data=b'foo')[:-1] + b'1', False)
)
@pytest.mark.parametrize(
    'packet, valid',
    [t[1:] for t in VALIDATE_RESPONSE_TESTS],
    ids=[t[0] for t in VALIDATE_RESPONSE_TESTS],
)
def test_validate_response(packet, valid):
    """Well-formed packets are accepted; truncated/corrupt ones are rejected."""
    assert stentura._validate_response(packet) == valid
def test_read_data_simple():
    """_read_data copies exactly *count* bytes into *buf* at the given offset."""
    class MockPort:
        def read(self, count):
            if count != 5:
                raise Exception("Incorrect number read.")
            return b"12345"
    port = MockPort()
    buf = bytearray([0] * 20)
    count = stentura._read_data(port, threading.Event(), buf, 0, 5)
    assert count == 5
    assert buf == b'12345' + (b'\x00' * 15)
    # Test the offset parameter.
    count = stentura._read_data(port, threading.Event(), buf, 4, 5)
    assert buf == b'123412345' + (b'\x00' * 11)
def test_read_data_stop_set():
    """A pre-set stop event aborts the read with _StopException."""
    class MockPort:
        def read(self, count):
            return b"0000"
    buf = bytearray()
    event = threading.Event()
    event.set()
    with pytest.raises(stentura._StopException):
        stentura._read_data(MockPort(), event, buf, 0, 4)
def test_read_data_timeout():
    """A short read from the port must raise _TimeoutException."""
    class MockPort:
        def read(self, count):
            # When a serial timeout occurs, read() returns fewer bytes than
            # requested. Fixed: return bytes (not str) like a real serial
            # port — and like every other port mock in this file.
            return b"123"
    port = MockPort()
    buf = bytearray()
    with pytest.raises(stentura._TimeoutException):
        stentura._read_data(port, threading.Event(), buf, 0, 4)
def test_read_packet_simple():
    """_read_packet reassembles header+payload packets from chunked reads."""
    class MockPort:
        def __init__(self, packet):
            self._packet = packet
        def read(self, count):
            # Serve the packet in whatever chunk sizes the caller asks for.
            requested_bytes = self._packet[0:count]
            self._packet = self._packet[count:]
            return requested_bytes
    buf = bytearray(256)
    for packet in [make_response(1, 2, 3, 4, 5),
                   make_response(1, 2, 3, 4, 5, b"hello")]:
        port = MockPort(packet)
        response = stentura._read_packet(port, threading.Event(), buf)
        assert response == packet
def test_read_packet_fail():
    """_read_packet raises on stop events, timeouts, and over-long packets."""
    class MockPort:
        def __init__(self, data_section_length=0, set1=False, set2=False,
                     give_too_much_data=False, give_timeout=False):
            # set1/set2: trip the stop event on the first/second read call.
            self._set1 = set1
            self._set2 = set2
            self._read1 = False
            self._read2 = False
            self.event = threading.Event()
            self._give_timeout = give_timeout
            # 4-byte header (total length at offset 2) plus the data section.
            self._data = ([1, 0, data_section_length + 4, 0] +
                          [0] * data_section_length)
            if give_too_much_data:
                self._data.append(0)
            self._data = bytearray(self._data)
        def read(self, count):
            if not self._read1:
                self._read1 = True
                if self._set1:
                    self.event.set()
            elif not self._read2:
                self._read2 = True
                if self._set2:
                    self.event.set()
            else:
                raise Exception("Already read data.")
            if self._give_timeout and len(self._data) == count:
                # If read() returns fewer bytes than requested,
                # it indicates a timeout.
                count -= 1
            requested_bytes = self._data[0:count]
            self._data = self._data[count:]
            return requested_bytes
    buf = bytearray()
    with pytest.raises(stentura._StopException):
        port = MockPort(set1=True)
        stentura._read_packet(port, port.event, buf)
    with pytest.raises(stentura._StopException):
        port = MockPort(data_section_length=30, set2=True)
        stentura._read_packet(port, port.event, buf)
    with pytest.raises(stentura._TimeoutException):
        port = MockPort(give_timeout=True)
        stentura._read_packet(port, port.event, buf)
    with pytest.raises(stentura._TimeoutException):
        port = MockPort(data_section_length=30, give_timeout=True)
        stentura._read_packet(port, port.event, buf)
    with pytest.raises(stentura._ProtocolViolationException):
        port = MockPort(give_too_much_data=True)
        stentura._read_packet(port, port.event, buf)
def test_write_to_port():
    """_write_to_port keeps writing until the port has accepted all bytes."""
    class MockPort:
        def __init__(self, chunk):
            # The port accepts at most *chunk* bytes per write() call.
            self._chunk = chunk
            self.data = b''
        def write(self, data):
            data = data[:self._chunk]
            self.data += data
            return len(data)
    data = bytearray(range(20))
    # All in one shot.
    port = MockPort(20)
    stentura._write_to_port(port, data)
    assert data == port.data
    # In parts.
    port = MockPort(5)
    stentura._write_to_port(port, data)
    assert data == port.data
def test_send_receive():
    """_send_receive retries on timeouts/wrong sequence and rejects bad replies."""
    event = threading.Event()
    buf, seq, action = bytearray(256), 5, stentura._OPEN
    request = stentura._make_request(bytearray(256), stentura._OPEN, seq)
    correct_response = make_response(seq, action)
    wrong_seq = make_response(seq - 1, action)
    wrong_action = make_response(seq, action + 1)
    bad_response = make_response(seq, action, data=b"foo", length=15)
    # Correct response first time.
    responses = [correct_response]
    port = MockPacketPort(responses)
    response = stentura._send_receive(port, event, request, buf)
    assert response == correct_response
    # Timeout once then correct response.
    responses = [b'', correct_response]
    port = MockPacketPort(responses)
    response = stentura._send_receive(port, event, request, buf)
    assert response == correct_response
    # Wrong sequence number then correct response.
    responses = [wrong_seq, correct_response]
    port = MockPacketPort(responses)
    response = stentura._send_receive(port, event, request, buf)
    assert response == correct_response
    # No correct responses. Also make sure max_tries is honored.
    max_tries = 6
    responses = [b''] * max_tries
    port = MockPacketPort(responses)
    with pytest.raises(stentura._ConnectionLostException):
        stentura._send_receive(port, event, request, buf, max_tries)
    assert max_tries == port.writes
    # Wrong action.
    responses = [wrong_action]
    port = MockPacketPort(responses)
    with pytest.raises(stentura._ProtocolViolationException):
        stentura._send_receive(port, event, request, buf)
    # Bad packet.
    responses = [bad_response]
    port = MockPacketPort(responses)
    with pytest.raises(stentura._ProtocolViolationException):
        stentura._send_receive(port, event, request, buf)
    # Stopped.
    # Fixed: use b'' (bytes) for the canned response, consistent with every
    # other case above; the original str '' did not match the port's byte
    # protocol.
    responses = [b'']
    event.set()
    port = MockPacketPort(responses)
    with pytest.raises(stentura._StopException):
        stentura._send_receive(port, event, request, buf)
def test_sequence_counter():
    """Sequence counters wrap modulo 256, starting from 0 or a given seed."""
    counter = stentura._SequenceCounter()
    values = [counter() for _ in range(512)]
    assert values == list(range(256)) * 2
    # Seeded counter starts at the seed and keeps wrapping around.
    counter = stentura._SequenceCounter(67)
    values = [counter() for _ in range(512)]
    assert values == list(range(67, 256)) + list(range(256)) + list(range(67))
def test_read():
    """_read drains pending strokes and returns the updated block/byte position."""
    request_buf = bytearray(256)
    response_buf = bytearray(256)
    stroke_buf = bytearray(256)
    event = threading.Event()
    # One multi-block payload and two small single-read payloads.
    tests = ([0b11000001] * (3 * 512 + 28), [0b11010101] * 4,
             [0b11000010] * 8)
    for data in tests:
        data = bytearray(data)
        requests, responses = make_readc_packets(data)
        port = MockPacketPort(responses, requests)
        seq = stentura._SequenceCounter()
        block, byte = 0, 0
        block, byte, response = stentura._read(port, event, seq, request_buf,
                                               response_buf, stroke_buf, block, byte)
        assert data == bytes(response)
        # Final position must account for 512-byte block roll-over.
        assert block == len(data) // 512
        assert byte == len(data) % 512
def test_loop():
    """Drive _loop end-to-end against a scripted port: open, poll, parse strokes."""
    class Event:
        # A scripted action: at write number *count*, append *data* to the
        # machine's backing file, optionally setting the stop flag.
        def __init__(self, count, data, stop=False):
            self.count = count
            self.data = data
            self.stop = stop
        def __repr__(self):
            return '<{}, {}, {}>'.format(self.count, self.data, self.stop)
    class MockPort:
        def __init__(self, events=[]):
            self._file = b''
            self._out = b''
            self._is_open = False
            self.event = threading.Event()
            self.count = 0
            # Scripted events, applied in ascending write-count order.
            self.events = [Event(*x) for x in
                           sorted(events, key=lambda x: x[0])]
        def write(self, request):
            # Process the packet and put together a response.
            p = parse_request(request)
            if p['action'] == stentura._OPEN:
                self._out = make_response(p['seq'], p['action'])
                self._is_open = True
            elif p['action'] == stentura._READC:
                if not self._is_open:
                    raise Exception("no open")
                length, block, byte = p['p3'], p['p4'], p['p5']
                seq = p['seq']
                action = stentura._READC
                # Serve the requested slice of the backing file.
                start = block * 512 + byte
                end = start + length
                data = self._file[start:end]
                self._out = make_response(seq, action, p1=len(data),
                                          data=data)
            # Apply any scripted events that are due at this write count.
            while self.events and self.events[0].count <= self.count:
                event = self.events.pop(0)
                self.append(event.data)
                if event.stop:
                    self.event.set()
            self.count += 1
            return len(request)
        def read(self, count):
            requested_bytes = self._out[0:count]
            self._out = self._out[count:]
            return requested_bytes
        def append(self, data):
            self._file += data
            return self
        def flushInput(self):
            pass
        def flushOutput(self):
            pass
    data1 = bytearray([0b11001010] * 4 * 9)
    data1_trans = [['S-', 'K-', 'R-', 'O-', '-F', '-P', '-T', '-D']] * 9
    data2 = bytearray([0b11001011] * 4 * 30)
    tests = [
        # No inputs but nothing crashes either.
        (MockPort([(30, b'', True)]), []),
        # A few strokes.
        (MockPort([(23, data1), (43, b'', True)]), data1_trans),
        # Ignore data that's there before we started.
        (MockPort([(46, b'', True)]).append(data2), []),
        # Ignore data that was there and also parse some strokes.
        (MockPort([(25, data1), (36, b'', True)]).append(data2), data1_trans)
    ]
    for test in tests:
        read_data = []
        def callback(data):
            read_data.append(data)
        port = test[0]
        expected = test[1]
        ready_called = [False]
        def ready():
            ready_called[0] = True
        try:
            ready_called[0] = False
            # The scripted stop flag ends the loop via _StopException.
            stentura._loop(port, port.event, callback, ready, timeout=0)
        except stentura._StopException:
            pass
        assert read_data == expected
        assert ready_called[0]
# TODO: add a test on the machine itself with mocks
| 17,617
|
Python
|
.py
| 459
| 30.228758
| 90
| 0.580658
|
openstenoproject/plover
| 2,318
| 281
| 179
|
GPL-2.0
|
9/5/2024, 5:12:22 PM (Europe/Amsterdam)
|
18,658
|
test_key_combo.py
|
openstenoproject_plover/test/test_key_combo.py
|
import inspect
import itertools
import pytest
from plover.key_combo import parse_key_combo
def generate_combo_tests(test_id, *params, key_name_to_key_code=None):
    """Yield test directives: an id, an optional key mapping, then parse cases.

    Each *params* entry pairs one or more combo strings with an expected
    result; the cross-product of both is emitted as ('parse', ...) cases.
    """
    yield ('id', test_id)
    if key_name_to_key_code is not None:
        yield ('key_name_to_key_code', key_name_to_key_code)
    for spec in params:
        normalized = [
            part if isinstance(part, (tuple, list)) else (part,)
            for part in spec
        ]
        if len(normalized) < 2:
            # No expected result given: default to the empty string.
            normalized.append(('',))
        for combo_string, parse_result in itertools.product(*normalized):
            yield (
                ('parse', combo_string, parse_result),
            )
# Test directives (consumed by parametrize() below):
# - id TEST_IDENTIFIER
# - key_name_to_key_code KEY_NAME_TO_KEY_CODE_FUNCTION
# - parse COMBO_STRING KEY_EVENTS
# KEY_EVENTS is either an expected '+key'/'-key' event string or an
# exception class the parser is expected to raise.
KEY_COMBO_TESTS = tuple(itertools.chain(
    (
        ('key_name_to_key_code', lambda k: k),
    ),
    # No-op.
    generate_combo_tests(
        'no-op',
        (('', ' '), ''),
    ),
    # Syntax error:
    generate_combo_tests(
        'syntax_error',
        ((
            # - invalid character
            'Return,',
            'Return&',
            'Ret. urn <',
            'exclam ! foo',
            'shift[a]',
            # - unbalanced `)`
            ') arg',
            'arg )',
            'arg())',
            'arg(x) )',
            # - unbalanced `(`
            'test(',
            '( grr',
            'foo ( bar',
            'foo (bar ( ',
            'foo ((',
            # - [-+]key() is not valid
            '+foo ()',
            '+foo()',
        ), SyntaxError),
    ),
    # Pressing an already pressed key.
    generate_combo_tests(
        'already_pressed',
        ((
            'foo(foo)',
            'Foo(foO)',
            'foo(fOo(arg))',
            'foo(bar(Foo))',
            'foo(bar(foo(x)))',
        ), ValueError),
    ),
    # Stacking.
    generate_combo_tests(
        'stacking',
        # 1 is not a valid identifier, but still a valid key name.
        ('1', '+1 -1'),
        (('Shift_l', 'SHIFT_L'), '+shift_l -shift_l'),
        # Case does not matter.
        (('a', ' A '), '+a -a'),
        (('a(b c)', 'a ( b c )'), '+a +b -b +c -c -a'),
        (('a(bc)', ' a( Bc )'), '+a +bc -bc -a'),
        (('a(bc(d)e f(g) h())i j'),
         '+a +bc +d -d -bc +e -e +f +g -g -f +h -h -a +i -i +j -j'),
        (('foo () bar ( foo a b c (d))', 'fOo () Bar ( FOO a B c (D))'),
         '+foo -foo +bar +foo -foo +a -a +b -b +c +d -d -c -bar'),
    ),
    # Invalid key name.
    generate_combo_tests(
        'invalid_key',
        ('1 (c) 2 bad 3 (a b c)', ValueError),
        key_name_to_key_code={c: c for c in '123abc'}.get,
    ),
    # Same key code, multiple key names.
    generate_combo_tests(
        'aliasing',
        ('1 exclam', '+10 -10 +10 -10'),
        (('1 ( exclam )', 'exclam(1)'), ValueError),
        key_name_to_key_code={'1': '10', 'exclam': '10'}.get,
    ),
))
def parametrize(tests):
    """Turn a directive stream into a pytest.mark.parametrize decorator.

    'id' and 'key_name_to_key_code' directives set the context for every
    subsequent test case; each other directive becomes one parametrized case.
    """
    key_name_to_key_code = None
    test_id = None
    args = []
    ids = []
    for directive in tests:
        kind = directive[0]
        if kind == 'key_name_to_key_code':
            key_name_to_key_code = directive[1]
        elif kind == 'id':
            test_id = directive[1]
        else:
            # A plain test case: both context directives must precede it.
            assert key_name_to_key_code is not None
            assert test_id is not None
            args.append((key_name_to_key_code, directive))
            ids.append(test_id)
    return pytest.mark.parametrize(
        ('key_name_to_key_code', 'instructions'),
        args, ids=ids
    )
@parametrize(KEY_COMBO_TESTS)
def test_key_combo(key_name_to_key_code, instructions):
    """Run a scripted list of parse directives against parse_key_combo."""
    def repr_expected(result):
        # Expected events come as a whitespace-separated '+key'/'-key' string.
        assert isinstance(result, str)
        return [s.strip() for s in result.split()]
    def repr_key_events(events):
        assert isinstance(events, list)
        return ['%s%s' % ('+' if pressed else '-', key)
                for key, pressed in events]
    for action, *args in instructions:
        if action == 'parse':
            combo_string, key_events = args
            # An exception class means parsing is expected to fail with it.
            if inspect.isclass(key_events):
                with pytest.raises(key_events):
                    parse_key_combo(combo_string, key_name_to_key_code=key_name_to_key_code)
            else:
                assert repr_key_events(parse_key_combo(combo_string, key_name_to_key_code=key_name_to_key_code)) == repr_expected(key_events)
        else:
            raise ValueError(args[0])
| 4,424
|
Python
|
.py
| 136
| 23.683824
| 141
| 0.494966
|
openstenoproject/plover
| 2,318
| 281
| 179
|
GPL-2.0
|
9/5/2024, 5:12:22 PM (Europe/Amsterdam)
|
18,659
|
test_config.py
|
openstenoproject_plover/test/test_config.py
|
# Copyright (c) 2013 Hesky Fisher
# See LICENSE.txt for details.
"""Unit tests for config.py."""
from ast import literal_eval
from contextlib import ExitStack
from pathlib import Path
from site import USER_BASE
from string import Template
import inspect
import json
import os
import subprocess
import sys
import textwrap
import appdirs
import pytest
from plover import config
from plover.config import DictionaryConfig
from plover.oslayer.config import PLATFORM
from plover.machine.keyboard import Keyboard
from plover.machine.keymap import Keymap
from plover.misc import expand_path
from plover.registry import Registry
from plover.system import english_stenotype
def dedent_strip(s):
    """Dedent a triple-quoted snippet and trim its surrounding whitespace."""
    cleaned = textwrap.dedent(s)
    return cleaned.strip()
def dict_replace(d, update):
    """Return a copy of *d* with *update*'s entries applied; *d* is untouched."""
    merged = dict(d)
    merged.update(update)
    return merged
class FakeMachine:
    """Minimal machine stand-in exposing Keyboard keys/actions and fake options."""
    KEYMAP_MACHINE_TYPE = 'Keyboard'
    # NOTE(review): get_keys/get_actions take no self and lack @staticmethod;
    # they only work when called on the class itself — presumably how the
    # config code invokes them. Confirm before refactoring.
    def get_keys():
        return Keyboard.get_keys()
    def get_actions():
        return Keyboard.get_actions()
    @staticmethod
    def get_option_info():
        # Maps option name -> (default value, converter from string).
        bool_converter = lambda s: s == 'True'
        return {
            'stroption1': (None, str),
            'intoption1': (3, int),
            'stroption2': ('abc', str),
            'floatoption1': (1, float),
            'booloption1': (True, bool_converter),
            'booloption2': (False, bool_converter)
        }
# Plain name -> default mapping, derived once at import time.
FakeMachine.DEFAULT_OPTIONS = {k: v[0] for k, v in FakeMachine.get_option_info().items()}
class FakeSystem:
    """Minimal system plugin stand-in.

    Mostly borrows the English stenotype layout; provides a keymap only
    for the 'Faky faky' machine and French-named default dictionaries so
    they are distinguishable from the English defaults in tests.
    """
    KEYS = english_stenotype.KEYS
    IMPLICIT_HYPHEN_KEYS = ()
    SUFFIX_KEYS = ()
    NUMBER_KEY = english_stenotype.NUMBER_KEY
    NUMBERS = english_stenotype.NUMBERS
    UNDO_STROKE_STENO = english_stenotype.UNDO_STROKE_STENO
    ORTHOGRAPHY_RULES = []
    ORTHOGRAPHY_RULES_ALIASES = {}
    ORTHOGRAPHY_WORDLIST = None
    KEYMAPS = {
        'Faky faky': english_stenotype.KEYMAPS['Keyboard'],
    }
    DEFAULT_DICTIONARIES = ('utilisateur.json', 'principal.json')
def test_config_dict():
    """DictionaryConfig: path expansion, enabled flag, and (de)serialization."""
    short_path = os.path.normcase(os.path.normpath('~/foo/bar'))
    full_path = os.path.normcase(os.path.expanduser(os.path.normpath('~/foo/bar')))
    # Path should be expanded.
    assert DictionaryConfig(short_path).path == full_path
    assert DictionaryConfig(full_path).path == full_path
    # Short path is available through `short_path`.
    assert DictionaryConfig(full_path).short_path == short_path
    assert DictionaryConfig(short_path).short_path == short_path
    # Enabled default to True.
    assert DictionaryConfig('foo').enabled
    assert not DictionaryConfig('foo', False).enabled
    # When converting to a dict (for dumping to JSON),
    # a dictionary with the shortened path is used.
    assert DictionaryConfig(full_path).to_dict() == \
        {'path': short_path, 'enabled': True}
    assert DictionaryConfig(short_path, False).to_dict() == \
        {'path': short_path, 'enabled': False}
    # Test from_dict creation helper.
    assert DictionaryConfig.from_dict({'path': short_path}) == \
        DictionaryConfig(short_path)
    assert DictionaryConfig.from_dict({'path': full_path, 'enabled': False}) == \
        DictionaryConfig(short_path, False)
# Platform-dependent absolute path used by the dictionary-path tests.
if PLATFORM == 'win':
    ABS_PATH = os.path.normcase(r'c:/foo/bar')
else:
    ABS_PATH = '/foo/bar'

# Keymap matching the stock English stenotype bindings for the Keyboard machine.
DEFAULT_KEYMAP = Keymap(Keyboard.get_keys(), english_stenotype.KEYS + Keyboard.get_actions())
DEFAULT_KEYMAP.set_mappings(english_stenotype.KEYMAPS['Keyboard'])

# Expected option values for a pristine (empty) configuration file.
DEFAULTS = {
    'space_placement': 'Before Output',
    'start_attached': False,
    'start_capitalized': False,
    'undo_levels': config.DEFAULT_UNDO_LEVELS,
    'log_file_name': expand_path('strokes.log'),
    'enable_stroke_logging': False,
    'enable_translation_logging': False,
    'start_minimized': False,
    'show_stroke_display': False,
    'show_suggestions_display': False,
    'translation_frame_opacity': 100,
    'classic_dictionaries_display_order': False,
    'enabled_extensions': set(),
    'auto_start': False,
    'machine_type': 'Keyboard',
    'machine_specific_options': { 'arpeggiate': False, 'first_up_chord_send': False },
    'system_name': config.DEFAULT_SYSTEM_NAME,
    'system_keymap': DEFAULT_KEYMAP,
    'dictionaries': [DictionaryConfig(p) for p in english_stenotype.DEFAULT_DICTIONARIES]
}
CONFIG_TESTS = (
('defaults',
'''
''',
DEFAULTS,
{}, {},
'''
''',
),
('simple_options',
'''
[Output Configuration]
space_placement = After Output
start_attached = true
start_capitalized = yes
undo_levels = 42
''',
dict_replace(DEFAULTS, {
'space_placement': 'After Output',
'start_attached': True,
'start_capitalized': True,
'undo_levels': 42,
}),
{
'space_placement': 'Before Output',
'start_attached': False,
'start_capitalized': False,
'undo_levels': 200,
},
None,
'''
[Output Configuration]
space_placement = Before Output
start_attached = False
start_capitalized = False
undo_levels = 200
''',
),
('machine_options',
'''
[Machine Configuration]
auto_start = True
machine_type = keyboard
[Keyboard]
arpeggiate = True
''',
dict_replace(DEFAULTS, {
'auto_start': True,
'machine_specific_options': dict_replace(
DEFAULTS['machine_specific_options'], {
'arpeggiate': True,
}),
}),
{
'machine_type': 'faKY FAky',
'machine_specific_options': {
'stroption1': 42,
'floatoption1': '4.2',
'booloption1': False,
},
'system_name': 'FAUX SYSTÈME',
'system_keymap': str(DEFAULT_KEYMAP),
},
{
'machine_type': 'Faky faky',
'machine_specific_options': dict_replace(
FakeMachine.DEFAULT_OPTIONS, {
'stroption1': '42',
'floatoption1': 4.2,
'booloption1': False,
}),
'system_name': 'Faux système',
'system_keymap': DEFAULT_KEYMAP,
'dictionaries': [DictionaryConfig('utilisateur.json'),
DictionaryConfig('principal.json')],
},
'''
[Machine Configuration]
auto_start = True
machine_type = Faky faky
[Keyboard]
arpeggiate = True
[Faky faky]
booloption1 = False
booloption2 = False
floatoption1 = 4.2
intoption1 = 3
stroption1 = 42
stroption2 = abc
[System]
name = Faux système
[System: Faux système]
keymap[faky faky] = %s
''' % DEFAULT_KEYMAP,
),
('machine_bool_option',
'''
''',
DEFAULTS,
{
'machine_specific_options': {
'arpeggiate': True,
},
},
{
'machine_specific_options': {
'arpeggiate': True,
'first_up_chord_send': False,
}
},
'''
[Keyboard]
arpeggiate = True
first_up_chord_send = False
'''
),
('legacy_dictionaries_1',
'''
[Dictionary Configuration]
dictionary_file = main.json
''',
dict_replace(DEFAULTS, {
'dictionaries': [DictionaryConfig('main.json')],
}),
{
'dictionaries': ['user.json', 'main.json'],
},
{
'dictionaries': [DictionaryConfig('user.json'),
DictionaryConfig('main.json')],
},
'''
[System: English Stenotype]
dictionaries = [{"enabled": true, "path": "user.json"}, {"enabled": true, "path": "main.json"}]
'''
),
('legacy_dictionaries_2',
'''
[Dictionary Configuration]
dictionary_file1 = main.json
dictionary_file3 = user.json
''',
dict_replace(DEFAULTS, {
'dictionaries': [DictionaryConfig('user.json'),
DictionaryConfig('main.json')],
}),
{
'dictionaries': ['user.json', 'commands.json', 'main.json'],
},
{
'dictionaries': [DictionaryConfig('user.json'),
DictionaryConfig('commands.json'),
DictionaryConfig('main.json')],
},
'''
[System: English Stenotype]
dictionaries = [{"enabled": true, "path": "user.json"}, {"enabled": true, "path": "commands.json"}, {"enabled": true, "path": "main.json"}]
'''
),
('dictionaries',
'''
[System: English Stenotype]
dictionaries = %s
''' % json.dumps([os.path.join(ABS_PATH, 'user.json'),
"english/main.json"]),
dict_replace(DEFAULTS, {
'dictionaries': [DictionaryConfig(os.path.join(ABS_PATH, 'user.json')),
DictionaryConfig('english/main.json')],
}),
{
'dictionaries': [DictionaryConfig(os.path.join(ABS_PATH, 'user.json')),
DictionaryConfig('english/main.json')],
},
{
'dictionaries': [DictionaryConfig(os.path.join(ABS_PATH, 'user.json')),
DictionaryConfig('english/main.json')],
},
'''
[System: English Stenotype]
dictionaries = %s
''' % json.dumps([{"enabled": True, "path": os.path.join(ABS_PATH, 'user.json')},
{"enabled": True, "path": os.path.join('english', 'main.json')}],
sort_keys=True)
),
('invalid_config',
'''
[Startup]
Start Minimized = True
[Machine Configuratio
machine_type = Faky faky
''',
config.InvalidConfigurationError,
{},
{},
'''
'''
),
('invalid_keymap_1',
'''
[System: English Stenotype]
keymap[keyboard] = [["_-"]]
''',
DEFAULTS,
{},
{},
None,
),
('invalid_keymap_2',
'''
[System: English Stenotype]
keymap[keyboard] = 42
''',
DEFAULTS,
{},
{},
None,
),
('invalid_options_1',
'''
[System]
name = foobar
''',
DEFAULTS,
{},
{},
None,
),
('invalid_options_2',
'''
[Machine Configuration]
machine_type = Faky faky
[Faky faky]
booloption2 = 50
intoption1 = 3.14
''',
dict_replace(DEFAULTS, {
'machine_type': 'Faky faky',
'machine_specific_options': FakeMachine.DEFAULT_OPTIONS,
}),
{},
{},
None,
),
('invalid_options_3',
'''
[Output Configuration]
undo_levels = foobar
''',
DEFAULTS,
{},
{},
None,
),
('invalid_update_1',
'''
[Translation Frame]
opacity = 75
''',
dict_replace(DEFAULTS, {
'translation_frame_opacity': 75,
}),
{
'start_minimized': False,
'show_stroke_display': True,
'translation_frame_opacity': 101,
},
config.InvalidConfigOption,
None,
),
('invalid_update_2',
'''
''',
DEFAULTS,
{
'machine_type': 'FakeMachine',
'machine_specific_options': dict_replace(
FakeMachine.DEFAULT_OPTIONS, {
'stroption1': '42',
}),
},
config.InvalidConfigOption,
None,
),
('setitem_valid',
'''
''',
DEFAULTS,
('auto_start', True),
None,
'''
[Machine Configuration]
auto_start = True
'''
),
('setitem_invalid',
'''
''',
DEFAULTS,
('undo_levels', -42),
config.InvalidConfigOption,
'''
'''
),
)
@pytest.mark.parametrize(('original_contents', 'original_config',
                          'config_update', 'validated_config_update',
                          'resulting_contents'),
                         [t[1:] for t in CONFIG_TESTS],
                         ids=[t[0] for t in CONFIG_TESTS])
def test_config(original_contents, original_config,
                config_update, validated_config_update,
                resulting_contents, monkeypatch, tmpdir):
    """Round-trip one CONFIG_TESTS scenario: load, update, save.

    `original_config` and `validated_config_update` may be exception
    classes instead of dicts, in which case the corresponding step is
    expected to raise.  `resulting_contents` of None means the saved
    file should match the original contents.
    """
    registry = Registry()
    registry.register_plugin('machine', 'Keyboard', Keyboard)
    registry.register_plugin('machine', 'Faky faky', FakeMachine)
    registry.register_plugin('system', 'English Stenotype', english_stenotype)
    registry.register_plugin('system', 'Faux système', FakeSystem)
    monkeypatch.setattr('plover.config.registry', registry)
    config_file = tmpdir / 'config.cfg'
    # Check initial contents.
    config_file.write_text(original_contents, encoding='utf-8')
    cfg = config.Config(config_file.strpath)
    if inspect.isclass(original_config):
        # Loading is expected to fail; fall back to defaults.
        with pytest.raises(original_config):
            cfg.load()
        original_config = dict(DEFAULTS)
        cfg.clear()
    else:
        cfg.load()
    cfg_dict = cfg.as_dict()
    for name, value in original_config.items():
        assert cfg[name] == value
        assert cfg_dict[name] == value
    # Check updated contents.
    with ExitStack() as stack:
        if inspect.isclass(validated_config_update):
            # The update is expected to raise; nothing to validate after.
            stack.enter_context(pytest.raises(validated_config_update))
            validated_config_update = None
        elif validated_config_update is None:
            # None means the update should be accepted as-is.
            validated_config_update = config_update
        if isinstance(config_update, dict):
            cfg.update(**config_update)
        else:
            # A (key, value) pair exercises __setitem__.
            key, value = config_update
            cfg[key] = value
    if validated_config_update is not None:
        if isinstance(validated_config_update, dict):
            cfg_dict.update(validated_config_update)
        else:
            key, value = validated_config_update
            cfg_dict[key] = value
        assert cfg.as_dict() == cfg_dict
    config_file.write_text('', encoding='utf-8')
    cfg.save()
    if resulting_contents is None:
        resulting_contents = original_contents
    assert config_file.read_text(encoding='utf-8').strip() == dedent_strip(resulting_contents)
# (id, section, option, expected fallback value) cases for integer options
# read from a config file whose section exists but is empty.
CONFIG_MISSING_INTS_TESTS = (
    ('int_option',
     config.OUTPUT_CONFIG_SECTION,
     'undo_levels',
     config.DEFAULT_UNDO_LEVELS,
     ),
    ('opacity_option',
     'Translation Frame',
     'translation_frame_opacity',
     100,
     ),
)
@pytest.mark.parametrize(('which_section', 'which_option', 'fixed_value'),
                         [t[1:] for t in CONFIG_MISSING_INTS_TESTS],
                         ids=[t[0] for t in CONFIG_MISSING_INTS_TESTS])
def test_config_missing_ints(which_section, which_option, fixed_value,
                             monkeypatch, tmpdir, caplog):
    """A section present but empty must fall back to the option's default, quietly."""
    registry = Registry()
    registry.register_plugin('machine', 'Keyboard', Keyboard)
    registry.register_plugin('system', 'English Stenotype', english_stenotype)
    monkeypatch.setattr('plover.config.registry', registry)
    config_file = tmpdir / 'config.cfg'
    # Make config with the appropriate empty section
    contents = f'''
[{which_section}]
'''
    config_file.write_text(contents, encoding='utf-8')
    cfg = config.Config(config_file.strpath)
    cfg.load()
    # Try to access an option under that section
    # (should trigger validation)
    assert cfg[which_option] == fixed_value
    # Ensure that missing options are handled
    assert 'InvalidConfigOption: None' not in caplog.text
    # ... and that there aren't any unhandled errors
    for record in caplog.records:
        assert record.levelname != 'ERROR'
# (tree, expected_config_dir) pairs: `tree` lists files to create before
# probing CONFIG_DIR; both use Template placeholders resolved at test time.
CONFIG_DIR_TESTS = (
    # Default to `user_config_dir`.
    ('''
    $user_config_dir/foo.cfg
    $user_data_dir/bar.cfg
    $cwd/config.cfg
    ''', '$user_config_dir'),
    # Use cwd if config file is present, override other directories.
    ('''
    $user_config_dir/plover.cfg
    $user_data_dir/plover.cfg
    $cwd/plover.cfg
    ''', '$cwd'),
    # plover.cfg must be a file.
    ('''
    $user_data_dir/plover.cfg
    $cwd/plover.cfg/config
    ''', '$user_data_dir'),
)
# The remaining cases only make sense when the two directories differ
# (they coincide on some platforms).
if appdirs.user_data_dir() != appdirs.user_config_dir():
    CONFIG_DIR_TESTS += (
        # `user_config_dir` take precedence over `user_data_dir`.
        ('''
        $user_config_dir/plover.cfg
        $user_data_dir/plover.cfg
        $cwd/config.cfg
        ''', '$user_config_dir'),
        # But `user_data_dir` is still used when applicable.
        ('''
        $user_config_dir/plover.cfg/conf
        $user_data_dir/plover.cfg
        $cwd/config.cfg
        ''', '$user_data_dir'),
    )
@pytest.mark.parametrize(('tree', 'expected_config_dir'), CONFIG_DIR_TESTS)
def test_config_dir(tree, expected_config_dir, tmpdir):
    """CONFIG_DIR resolution, checked in a subprocess with a fake $HOME/cwd."""
    # Create fake home/cwd directories.
    home = tmpdir / 'home'
    home.mkdir()
    cwd = tmpdir / 'cwd'
    cwd.mkdir()
    directories = {
        'tmpdir': str(tmpdir),
        'home': str(home),
        'cwd': str(cwd),
    }
    # Setup environment.
    env = dict(os.environ)
    if PLATFORM == 'win':
        env['USERPROFILE'] = str(home)
    else:
        env['HOME'] = str(home)
    env['PYTHONUSERBASE'] = USER_BASE
    # Ensure XDG_xxx environment variables don't screw up our isolation.
    for k in list(env):
        if k.startswith('XDG_'):
            del env[k]
    # Helpers.
    def pyeval(script):
        # Run `script` in a fresh interpreter (fake env/cwd) and
        # literal_eval whatever it prints.
        return literal_eval(subprocess.check_output(
            (sys.executable, '-c', script),
            cwd=str(cwd), env=env).decode())
    def path_expand(path):
        # Substitute $placeholders and normalize path separators.
        return Template(path.replace('/', os.sep)).substitute(**directories)
    # Find out user_config_dir/user_data_dir locations.
    directories.update(pyeval(dedent_strip(
        '''
        import appdirs, os
        print(repr({
            'user_config_dir': appdirs.user_config_dir('plover'),
            'user_data_dir': appdirs.user_data_dir('plover'),
        }))
        ''')))
    # Create initial tree.
    for filename_template in dedent_strip(tree).replace('/', os.sep).split('\n'):
        filename = Path(path_expand(filename_template))
        filename.parent.mkdir(parents=True, exist_ok=True)
        filename.write_text('pouet')
    # Check plover.oslayer.config.CONFIG_DIR is correctly set.
    config_dir = pyeval(dedent_strip(
        '''
        __import__('sys').path.insert(0, %r)
        from plover.oslayer.config import CONFIG_DIR
        print(repr(CONFIG_DIR))
        ''' % str(Path(config.__file__).parent.parent)))
    expected_config_dir = path_expand(expected_config_dir)
    assert config_dir == expected_config_dir
| 18,429
|
Python
|
.py
| 593
| 23.82968
| 144
| 0.593754
|
openstenoproject/plover
| 2,318
| 281
| 179
|
GPL-2.0
|
9/5/2024, 5:12:22 PM (Europe/Amsterdam)
|
18,660
|
py37compat.py
|
openstenoproject_plover/test/py37compat.py
|
import sys

# ``unittest.mock`` is complete enough from Python 3.8 onwards; older
# interpreters fall back to the standalone ``mock`` backport.
if sys.version_info < (3, 8):
    import mock
else:
    from unittest import mock
| 99
|
Python
|
.py
| 5
| 17
| 34
| 0.688172
|
openstenoproject/plover
| 2,318
| 281
| 179
|
GPL-2.0
|
9/5/2024, 5:12:22 PM (Europe/Amsterdam)
|
18,661
|
test_keyboard.py
|
openstenoproject_plover/test/test_keyboard.py
|
import pytest
from plover import system
from plover.machine.keyboard import Keyboard
from plover.machine.keymap import Keymap
from plover.oslayer.keyboardcontrol import KeyboardCapture
from .py37compat import mock
def send_input(capture, key_events):
    """Replay a whitespace-separated event script against *capture*.

    Tokens prefixed with '+' press a key, '-' releases it; a bare
    token is a press immediately followed by a release.
    """
    for token in key_events.split():
        if token[0] == '+':
            capture.key_down(token[1:])
        elif token[0] == '-':
            capture.key_up(token[1:])
        else:
            capture.key_down(token)
            capture.key_up(token)
@pytest.fixture
def capture():
    # Replace the real KeyboardCapture with a spec'd mock so tests can
    # inspect start()/suppress()/cancel() calls and inject key events.
    capture = mock.MagicMock(spec=KeyboardCapture)
    with mock.patch('plover.machine.keyboard.KeyboardCapture', new=lambda: capture):
        yield capture
@pytest.fixture(params=[{'arpeggiate': False, 'first_up_chord_send': False}])
def machine(request, capture):
    # Keyboard machine wired to the default English stenotype keymap;
    # machine options come from the fixture parametrization (overridable
    # via the `arpeggiate`/`first_up_chord_send` decorators below).
    machine = Keyboard(request.param)
    keymap = Keymap(Keyboard.KEYS_LAYOUT.split(),
                    system.KEYS + Keyboard.ACTIONS)
    keymap.set_mappings(system.KEYMAPS['Keyboard'])
    machine.set_keymap(keymap)
    return machine
# Decorators to apply on test functions to override the machine
# configuration. Note: at the moment it's not possible to apply both at
# the same time.  (Fix: this note used to be a stray module-level string
# expression — a no-op that is neither a docstring nor a comment.)
arpeggiate = pytest.mark.parametrize(
    'machine', [{'arpeggiate': True, 'first_up_chord_send': False}], indirect=True)
first_up_chord_send = pytest.mark.parametrize(
    'machine', [{'arpeggiate': False, 'first_up_chord_send': True}], indirect=True)
@pytest.fixture
def strokes(machine):
    # Collect every stroke emitted by the machine under test.
    strokes = []
    machine.add_stroke_callback(strokes.append)
    return strokes
def test_lifecycle(capture, machine, strokes):
    """Full start/suppress/stroke/stop cycle against the capture mock."""
    # Start machine.
    machine.start_capture()
    assert capture.mock_calls == [
        mock.call.start(),
        mock.call.suppress(()),
    ]
    capture.reset_mock()
    machine.set_suppression(True)
    # Every bound key except 'space' is expected to be suppressed.
    suppressed_keys = dict(machine.keymap.get_bindings())
    del suppressed_keys['space']
    assert strokes == []
    assert capture.mock_calls == [
        mock.call.suppress(suppressed_keys.keys()),
    ]
    # Trigger some strokes.
    capture.reset_mock()
    send_input(capture, '+a +h -a -h space w')
    assert strokes == [
        {'S-', '*'},
        {'T-'},
    ]
    assert capture.mock_calls == []
    # Stop machine.
    del strokes[:]
    machine.stop_capture()
    assert strokes == []
    assert capture.mock_calls == [
        mock.call.suppress(()),
        mock.call.cancel(),
    ]
def test_unfinished_stroke_1(capture, machine, strokes):
    # 'q' is still held down: no stroke should be emitted yet.
    machine.start_capture()
    send_input(capture, '+a +q -a')
    assert strokes == []
def test_unfinished_stroke_2(capture, machine, strokes):
    # 'r' and the re-pressed 'a' are still held: no stroke emitted.
    machine.start_capture()
    send_input(capture, '+a +r -a +a -r')
    assert strokes == []
@arpeggiate
def test_arpeggiate_1(capture, machine, strokes):
    # In arpeggiate mode, tapping space sends the keys accumulated so far
    # ('a' + 'h' -> {'S-', '*'}); the trailing 'w' alone yields nothing.
    machine.start_capture()
    send_input(capture, 'a h space w')
    assert strokes == [{'S-', '*'}]
@arpeggiate
def test_arpeggiate_2(capture, machine, strokes):
    # Same as test_arpeggiate_1, with space pressed while 'h' is still held.
    machine.start_capture()
    send_input(capture, 'a +h +space -space -h w')
    assert strokes == [{'S-', '*'}]
@first_up_chord_send
def test_first_up_chord_send(capture, machine, strokes):
    """First key release sends the chord; re-press/release repeats it."""
    machine.start_capture()
    # First release of 'l' sends the full chord.
    send_input(capture, '+a +w +l -l +l')
    assert strokes == [{'S-', 'T-', '-G'}]
    # Releasing the re-pressed 'l' sends the chord again.
    send_input(capture, '-l')
    assert strokes == [{'S-', 'T-', '-G'}, {'S-', 'T-', '-G'}]
    # Releasing the remaining keys does not add a third stroke.
    send_input(capture, '-a -w')
    assert strokes == [{'S-', 'T-', '-G'}, {'S-', 'T-', '-G'}]
| 3,486
|
Python
|
.py
| 97
| 30.979381
| 125
| 0.655203
|
openstenoproject/plover
| 2,318
| 281
| 179
|
GPL-2.0
|
9/5/2024, 5:12:22 PM (Europe/Amsterdam)
|
18,662
|
test_translation.py
|
openstenoproject_plover/test/test_translation.py
|
# Copyright (c) 2013 Hesky Fisher
# See LICENSE.txt for details.
"""Unit tests for translation.py."""
from collections import namedtuple
import ast
import copy
import operator
from plover.oslayer.config import PLATFORM
from plover.steno import Stroke, normalize_steno
import pytest
from plover.steno_dictionary import StenoDictionary, StenoDictionaryCollection
from plover.translation import Translation, Translator, _State
from plover.translation import escape_translation, unescape_translation
from plover_build_utils.testing import parametrize, steno_to_stroke as stroke
# Translation emitted when undoing with an empty undo buffer; the
# modifier key differs per platform.
if PLATFORM == 'mac':
    BACK_STRING = '{#Alt_L(BackSpace)}{^}'
else:
    BACK_STRING = '{#Control_L(BackSpace)}{^}'
def test_no_translation():
    # An untranslated stroke sequence keeps `english` as None.
    t = Translation([stroke('S'), stroke('T')], None)
    assert t.strokes == [stroke('S'), stroke('T')]
    assert t.rtfcre == ('S', 'T')
    assert t.english is None
def test_translation():
    # Basic attribute access on a translated stroke sequence.
    t = Translation([stroke('S'), stroke('T')], 'translation')
    assert t.strokes == [stroke('S'), stroke('T')]
    assert t.rtfcre == ('S', 'T')
    assert t.english == 'translation'
class TestTranslatorStateSize:
    """Check how the translator restricts its state size.

    Uses a FakeState that records restrict_size() calls instead of
    actually trimming anything.
    """

    class FakeState(_State):
        def __init__(self):
            _State.__init__(self)
            # All sizes passed to restrict_size(), in call order.
            self.restrict_calls = []
        def restrict_size(self, n):
            self.restrict_calls.append(n)

    def _check_size_call(self, size):
        # The most recent restriction must match `size`.
        assert self.s.restrict_calls[-1] == size

    def _check_no_size_call(self):
        assert self.s.restrict_calls == []

    def clear(self):
        self.s.restrict_calls = []

    def setup_method(self):
        self.t = Translator()
        self.s = type(self).FakeState()
        self.t._state = self.s
        self.d = StenoDictionary()
        self.dc = StenoDictionaryCollection([self.d])
        self.t.set_dictionary(self.dc)

    @pytest.mark.parametrize('key', (
        ('S',),
        ('S', 'PT', '-Z', 'TOP'),
    ))
    def test_dictionary_update_grows_size(self, key):
        self.d[key] = 'key'
        self.t.translate(stroke('T-'))
        self._check_size_call(len(key))

    def test_dictionary_update_no_grow(self):
        self.t.set_min_undo_length(4)
        self._check_size_call(4)
        self.clear()
        # A 2-stroke entry does not grow past the min undo length of 4.
        self.d[('S', 'T')] = 'nothing'
        self.t.translate(stroke('T-'))
        self._check_size_call(4)

    def test_dictionary_update_shrink(self):
        self.d[('S', 'T', 'P', '-Z', '-D')] = '1'
        self.t.translate(stroke('T-'))
        self._check_size_call(5)
        self.clear()
        # Adding a shorter entry keeps the longest-entry size.
        self.d[('A', 'P')] = '2'
        self.t.translate(stroke('T-'))
        self._check_size_call(5)
        self.clear()
        # Removing the long entry shrinks to the remaining longest.
        del self.d[('S', 'T', 'P', '-Z', '-D')]
        self.t.translate(stroke('T-'))
        self._check_size_call(2)

    def test_dictionary_update_no_shrink(self):
        # Min undo length keeps the size from shrinking below 7.
        self.t.set_min_undo_length(7)
        self.d[('S', 'T', 'P', '-Z', '-D')] = '1'
        del self.d[('S', 'T', 'P', '-Z', '-D')]
        self._check_size_call(7)

    def test_translation_calls_restrict(self):
        self.t.translate(stroke('S'))
        self._check_size_call(0)
def test_listeners():
    """Listener registration: adding is idempotent, removal stops delivery."""
    output1 = []
    def listener1(undo, do, prev):
        output1.append((undo, do, prev))
    output2 = []
    def listener2(undo, do, prev):
        output2.append((undo, do, prev))
    t = Translator()
    s = stroke('S')
    tr = Translation([s], None)
    expected_output = [([], [tr], [tr])]
    t.translate(s)
    t.add_listener(listener1)
    t.translate(s)
    assert output1 == expected_output
    del output1[:]
    t.add_listener(listener2)
    t.translate(s)
    assert output1 == expected_output
    assert output2 == expected_output
    del output1[:]
    del output2[:]
    # Re-adding an already-registered listener must not duplicate output.
    t.add_listener(listener2)
    t.translate(s)
    assert output1 == expected_output
    assert output2 == expected_output
    del output1[:]
    del output2[:]
    t.remove_listener(listener1)
    t.translate(s)
    assert output1 == []
    assert output2 == expected_output
    del output1[:]
    del output2[:]
    t.remove_listener(listener2)
    t.translate(s)
    assert output1 == []
    assert output2 == []
def test_changing_state():
    """set_state()/clear_state() control what history the listener sees."""
    output = []
    def listener(undo, do, prev):
        prev = list(prev) if prev else None
        output.append((undo, do, prev))
    d = StenoDictionary()
    d[('S', 'P')] = 'hi'
    dc = StenoDictionaryCollection([d])
    t = Translator()
    t.set_dictionary(dc)
    t.translate(stroke('T'))
    t.translate(stroke('S'))
    # Snapshot the state after T/S so it can be restored below.
    s = copy.deepcopy(t.get_state())
    t.add_listener(listener)
    expected = [([Translation([stroke('S')], None)],
                 [Translation([stroke('S'), stroke('P')], 'hi')],
                 [Translation([stroke('T')], None)])]
    t.translate(stroke('P'))
    assert output == expected
    del output[:]
    # Restoring the snapshot replays the same outcome.
    t.set_state(s)
    t.translate(stroke('P'))
    assert output == expected
    del output[:]
    # A cleared state has no history: P stands alone, prev is None.
    t.clear_state()
    t.translate(stroke('P'))
    assert output == [([], [Translation([stroke('P')], None)], None)]
    del output[:]
    t.set_state(s)
    t.translate(stroke('P'))
    assert output == [([],
                       [Translation([stroke('P')], None)],
                       [Translation([stroke('S'), stroke('P')], 'hi')])]
def test_translator():
    """End-to-end translate/undo behavior through a listener.

    It's not clear that this test is needed anymore. There are separate
    tests for _translate_stroke and test_translate_calls_translate_stroke
    makes sure that translate calls it properly. But since I already wrote
    this test I'm going to keep it.
    """
    class Output:
        # Accumulates listener output as a space-joined string.
        def __init__(self):
            self._output = []

        def write(self, undo, do, prev):
            # Undone entries are popped; new entries append their english
            # (or the raw steno when untranslated).
            for t in undo:
                self._output.pop()
            for t in do:
                if t.english:
                    self._output.append(t.english)
                else:
                    self._output.append('/'.join(t.rtfcre))

        def get(self):
            return ' '.join(self._output)

        def clear(self):
            del self._output[:]

    d = StenoDictionary()
    out = Output()
    t = Translator()
    dc = StenoDictionaryCollection([d])
    t.set_dictionary(dc)
    t.add_listener(out.write)
    t.translate(stroke('S'))
    assert out.get() == 'S'
    t.translate(stroke('T'))
    assert out.get() == 'S T'
    t.translate(stroke('*'))
    assert out.get() == 'S'
    t.translate(stroke('*'))
    # Undo buffer ran out
    assert out.get() == 'S ' + BACK_STRING
    t.set_min_undo_length(3)
    out.clear()
    t.translate(stroke('S'))
    assert out.get() == 'S'
    t.translate(stroke('T'))
    assert out.get() == 'S T'
    t.translate(stroke('*'))
    assert out.get() == 'S'
    t.translate(stroke('*'))
    assert out.get() == ''
    out.clear()
    # With translations defined, multi-stroke entries take precedence.
    d[('S',)] = 't1'
    d[('T',)] = 't2'
    d[('S', 'T')] = 't3'
    t.translate(stroke('S'))
    assert out.get() == 't1'
    t.translate(stroke('T'))
    assert out.get() == 't3'
    t.translate(stroke('T'))
    assert out.get() == 't3 t2'
    t.translate(stroke('S'))
    assert out.get() == 't3 t2 t1'
    t.translate(stroke('*'))
    assert out.get() == 't3 t2'
    t.translate(stroke('*'))
    assert out.get() == 't3'
    t.translate(stroke('*'))
    assert out.get() == 't1'
    t.translate(stroke('*'))
    assert out.get() == ''
    t.translate(stroke('S'))
    assert out.get() == 't1'
    t.translate(stroke('T'))
    assert out.get() == 't3'
    t.translate(stroke('T'))
    assert out.get() == 't3 t2'
    # Longer entries added later still match retroactively.
    d[('S', 'T', 'T')] = 't4'
    d[('S', 'T', 'T', 'S')] = 't5'
    t.translate(stroke('S'))
    assert out.get() == 't5'
    t.translate(stroke('*'))
    assert out.get() == 't3 t2'
    t.translate(stroke('*'))
    assert out.get() == 't3'
    t.translate(stroke('T'))
    assert out.get() == 't4'
    t.translate(stroke('S'))
    assert out.get() == 't5'
    t.translate(stroke('S'))
    assert out.get() == 't5 t1'
    t.translate(stroke('*'))
    assert out.get() == 't5'
    t.translate(stroke('*'))
    assert out.get() == 't4'
    t.translate(stroke('*'))
    assert out.get() == 't3'
    t.translate(stroke('*'))
    assert out.get() == 't1'
    t.translate(stroke('*'))
    assert out.get() == ''
    d.clear()
    s = stroke('S')
    t.translate(s)
    t.translate(s)
    t.translate(s)
    t.translate(s)
    s = stroke('*')
    t.translate(s)
    t.translate(s)
    t.translate(s)
    t.translate(s)
    # Not enough undo to clear output.
    assert out.get() == 'S ' + BACK_STRING
    out.clear()
    t.remove_listener(out.write)
    t.translate(stroke('S'))
    assert out.get() == ''
class TestState:
    """Unit tests for _State: prev() lookup and restrict_size() trimming."""

    def setup_method(self):
        # a: 1 stroke, b: 2 strokes, c: 3 strokes.
        self.a = Translation([stroke('S')], None)
        self.b = Translation([stroke('T'), stroke('-D')], None)
        self.c = Translation([stroke('-Z'), stroke('P'), stroke('T*')], None)

    def test_prev_list0(self):
        s = _State()
        assert s.prev() is None

    def test_prev_list1(self):
        s = _State()
        s.translations = [self.a]
        assert s.prev() == [self.a]

    def test_prev_list2(self):
        s = _State()
        s.translations = [self.a, self.b]
        assert s.prev() == [self.a, self.b]

    def test_prev_tail1(self):
        # Translations take precedence over the tail.
        s = _State()
        s.translations = [self.a]
        s.tail = self.b
        assert s.prev() == [self.a]

    def test_prev_tail0(self):
        # With no translations, the tail is used.
        s = _State()
        s.tail = self.b
        assert s.prev() == [self.b]

    def test_restrict_size_zero_on_empty(self):
        s = _State()
        s.restrict_size(0)
        assert s.translations == []
        assert s.tail is None

    def test_restrict_size_zero_on_one_stroke(self):
        # At least one translation is always kept.
        s = _State()
        s.translations = [self.a]
        s.restrict_size(0)
        assert s.translations == [self.a]
        assert s.tail is None

    def test_restrict_size_to_exactly_one_stroke(self):
        s = _State()
        s.translations = [self.a]
        s.restrict_size(1)
        assert s.translations == [self.a]
        assert s.tail is None

    def test_restrict_size_to_one_on_two_strokes(self):
        # A single multi-stroke translation is not split.
        s = _State()
        s.translations = [self.b]
        s.restrict_size(1)
        assert s.translations == [self.b]
        assert s.tail is None

    def test_restrict_size_to_one_on_two_translations(self):
        s = _State()
        s.translations = [self.b, self.a]
        s.restrict_size(1)
        assert s.translations == [self.a]
        assert s.tail == self.b

    def test_restrict_size_to_one_on_two_translations_too_big(self):
        # The last translation is kept even if it exceeds the limit.
        s = _State()
        s.translations = [self.a, self.b]
        s.restrict_size(1)
        assert s.translations == [self.b]
        assert s.tail == self.a

    def test_restrict_size_lose_translations(self):
        s = _State()
        s.translations = [self.a, self.b, self.c]
        s.restrict_size(2)
        assert s.translations == [self.c]
        assert s.tail == self.b

    def test_restrict_size_multiple_translations(self):
        s = _State()
        s.translations = [self.a, self.b, self.c]
        s.restrict_size(5)
        assert s.translations == [self.b, self.c]
        assert s.tail == self.a
class TestTranslateStroke:
DICT_COLLECTION_CLASS = StenoDictionaryCollection
class CaptureOutput:
Output = namedtuple('Output', 'undo do prev')
def __init__(self):
self.output = []
def __call__(self, undo, new, prev):
prev = list(prev) if prev else None
self.output = self.Output(undo, new, prev)
def t(self, strokes):
"""A quick way to make a translation."""
strokes = [stroke(x) for x in strokes.split('/')]
key = tuple(s.rtfcre for s in strokes)
translation = self.dc.lookup(key)
return Translation(strokes, translation)
def lt(self, translations):
"""A quick way to make a list of translations."""
return [self.t(x) for x in translations.split()]
def define(self, key, value):
key = normalize_steno(key)
self.d[key] = value
def translate(self, steno):
self.tlor.translate(stroke(steno))
def _check_translations(self, expected):
# Hide from traceback on assertions (reduce output size for failed tests).
__tracebackhide__ = operator.methodcaller('errisinstance', AssertionError)
msg = '''
translations:
results: %s
expected: %s
''' % (self.s.translations, expected)
assert self.s.translations == expected, msg
def _check_output(self, undo, do, prev):
# Hide from traceback on assertions (reduce output size for failed tests).
__tracebackhide__ = operator.methodcaller('errisinstance', AssertionError)
msg = '''
output:
results: -%s
+%s
[%s]
expected: -%s
+%s
[%s]
''' % (self.o.output + (undo, do, prev))
assert self.o.output == (undo, do, prev), msg
def setup_method(self):
self.d = StenoDictionary()
self.dc = self.DICT_COLLECTION_CLASS([self.d])
self.s = _State()
self.o = self.CaptureOutput()
self.tlor = Translator()
self.tlor.set_dictionary(self.dc)
self.tlor.add_listener(self.o)
self.tlor.set_state(self.s)
def test_first_stroke(self):
self.translate('-B')
self._check_translations(self.lt('-B'))
self._check_output([], self.lt('-B'), None)
def test_second_stroke(self):
self.define('S/P', 'spiders')
self.s.translations = self.lt('S')
self.translate('-T')
self._check_translations(self.lt('S -T'))
self._check_output([], self.lt('-T'), self.lt('S'))
def test_second_stroke_tail(self):
self.s.tail = self.t('T/A/EU/L')
self.translate('-E')
self._check_translations(self.lt('E'))
self._check_output([], self.lt('E'), self.lt('T/A/EU/L'))
def test_with_translation_1(self):
self.define('S', 'is')
self.define('-T', 'that')
self.s.translations = self.lt('S')
self.tlor.set_min_undo_length(2)
self.translate('-T')
self._check_translations(self.lt('S -T'))
self._check_output([], self.lt('-T'), self.lt('S'))
assert self.o.output.do[0].english == 'that'
def test_with_translation_2(self):
self.define('S', 'is')
self.define('-T', 'that')
self.s.translations = self.lt('S')
self.tlor.set_min_undo_length(1)
self.translate('-T')
self._check_translations(self.lt('-T'))
self._check_output([], self.lt('-T'), self.lt('S'))
assert self.o.output.do[0].english == 'that'
def test_finish_two_translation(self):
self.define('S/T', 'hello')
self.s.translations = self.lt('S')
self.translate('T')
self._check_translations(self.lt('S/T'))
self._check_output(self.lt('S'), self.lt('S/T'), None)
assert self.o.output.do[0].english == 'hello'
assert self.o.output.do[0].replaced == self.lt('S')
def test_finish_three_translation(self):
self.define('S/T/-B', 'bye')
self.s.translations = self.lt('S T')
self.translate('-B')
self._check_translations(self.lt('S/T/-B'))
self._check_output(self.lt('S T'), self.lt('S/T/-B'), None)
assert self.o.output.do[0].english == 'bye'
assert self.o.output.do[0].replaced == self.lt('S T')
def test_replace_translation(self):
self.define('S/T/-B', 'longer')
self.s.translations = self.lt('S/T')
self.translate('-B')
self._check_translations(self.lt('S/T/-B'))
self._check_output(self.lt('S/T'), self.lt('S/T/-B'), None)
assert self.o.output.do[0].english == 'longer'
assert self.o.output.do[0].replaced == self.lt('S/T')
def test_undo(self):
self.s.translations = self.lt('POP')
self.translate('*')
self._check_translations([])
self._check_output(self.lt('POP'), [], None)
def test_empty_undo(self):
self.translate('*')
self._check_translations([])
self._check_output([], [Translation([Stroke('*')], BACK_STRING)], None)
def test_undo_translation(self):
self.define('P/P', 'pop')
self.translate('P')
self.translate('P')
self.translate('*')
self._check_translations(self.lt('P'))
self._check_output(self.lt('P/P'), self.lt('P'), None)
def test_undo_longer_translation(self):
self.define('P/P/-D', 'popped')
self.translate('P')
self.translate('P')
self.translate('-D')
self.translate('*')
self._check_translations(self.lt('P P'))
self._check_output(self.lt('P/P/-D'), self.lt('P P'), None)
def test_undo_tail(self):
self.s.tail = self.t('T/A/EU/L')
self.translate('*')
self._check_translations([])
self._check_output([], [Translation([Stroke('*')], BACK_STRING)], [self.s.tail])
def test_suffix_folding(self):
self.define('K-L', 'look')
self.define('-G', '{^ing}')
lt = self.lt('K-LG')
lt[0].english = 'look {^ing}'
self.translate('K-LG')
self._check_translations(lt)
def test_suffix_folding_multi_stroke(self):
self.define('E/HR', 'he will')
self.define('-S', '{^s}')
self.translate('E')
self.translate('HR-S')
output = ' '.join(t.english for t in self.s.translations)
assert output == 'he will {^s}'
def test_suffix_folding_doesnt_interfere(self):
self.define('E/HR', 'he will')
self.define('-S', '{^s}')
self.define('E', 'he')
self.define('HR-S', 'also')
self.translate('E')
self.translate('HR-S')
output = ' '.join(t.english for t in self.s.translations)
assert output == 'he also'
def test_suffix_folding_no_suffix(self):
self.define('K-L', 'look')
lt = self.lt('K-LG')
assert lt[0].english is None
self.translate('K-LG')
self._check_translations(lt)
def test_suffix_folding_no_main(self):
self.define('-G', '{^ing}')
lt = self.lt('K-LG')
assert lt[0].english is None
self.translate('K-LG')
self._check_translations(lt)
def test_retrospective_insert_space(self):
self.define('T/E/S/T', 'a longer key')
self.define('PER', 'perfect')
self.define('SWAEUGS', 'situation')
self.define('PER/SWAEUGS', 'persuasion')
self.define('SP*', '{*?}')
self.translate('PER')
self.translate('SWAEUGS')
self.translate('SP*')
lt = self.lt('PER')
undo = self.lt('PER/SWAEUGS')
undo[0].replaced = lt
do = self.lt('SP*')
do[0].english = 'perfect situation'
do[0].is_retrospective_command = True
do[0].replaced = undo
self._check_translations(do)
self._check_output(undo, do, None)
def test_retrospective_insert_space_undefined(self):
# Should work when beginning or ending strokes aren't defined
self.define('T/E/S/T', 'a longer key')
self.define('STWR/STWR', 'test')
self.define('SP*', '{*?}')
self.translate('STWR')
self.translate('STWR')
self.translate('SP*')
lt = self.lt('STWR')
undo = self.lt('STWR/STWR')
undo[0].replaced = lt
do = self.lt('SP*')
do[0].english = 'STWR STWR'
do[0].is_retrospective_command = True
do[0].replaced = undo
self._check_translations(do)
self._check_output(undo, do, None)
def test_retrospective_delete_space(self):
self.define('T/E/S/T', 'a longer key')
self.define('K', 'kick')
self.define('PW', 'back')
self.define('SP*', '{*!}')
self.translate('K')
self.translate('PW')
self.translate('SP*')
undo = self.lt('K PW')
do = self.lt('SP*')
do[0].english = 'kick{^~|^}back'
do[0].is_retrospective_command = True
do[0].replaced = undo
self._check_translations(do)
self._check_output(undo, do, None)
def test_retrospective_delete_space_with_number(self):
self.define('T/E/S/T', 'a longer key')
self.define('U', 'user')
self.define('SP*', '{*!}')
self.translate('U')
self.translate('1-')
self.translate('SP*')
undo = self.lt('U 1-')
do = self.lt('SP*')
do[0].english = 'user{^~|^}{&1}'
do[0].is_retrospective_command = True
do[0].replaced = undo
self._check_translations(do)
self._check_output(undo, do, None)
def test_retrospective_delete_space_with_period(self):
self.define('T/E/S/T', 'a longer key')
self.define('P-P', '{.}')
self.define('SH*', 'zshrc')
self.define('SP*', '{*!}')
self.translate('P-P')
self.translate('SH*')
self.translate('SP*')
undo = self.lt('P-P SH*')
do = self.lt('SP*')
do[0].english = '{.}{^~|^}zshrc'
do[0].is_retrospective_command = True
do[0].replaced = undo
self._check_translations(do)
self._check_output(undo, do, None)
def test_retrospective_toggle_asterisk(self):
self.define('T/E/S/T', 'a longer key')
self.define('S', 'see')
self.define('S*', 'sea')
self.define('A*', '{*}')
self.translate('S')
self.translate('A*')
self._check_translations(self.lt('S*'))
self._check_output(self.lt('S'), self.lt('S*'), None)
def test_retrospective_toggle_empty(self):
self.define('A*', '{*}')
self.translate('A*')
self._check_translations(self.lt(''))
assert self.o.output == []
def test_retrospective_toggle_asterisk_replaced1(self):
self.define('P-P', '{.}')
self.define('SKEL', 'cancel')
self.define('SKEL/TO-PB', 'skeleton')
self.define('SKEL/TO*PB', 'not skeleton!')
self.define('A*', '{*}')
self.translate('P-P')
self.translate('SKEL')
self.translate('TO-PB')
self.translate('A*')
self._check_translations(self.lt('SKEL/TO*PB'))
self._check_output(self.lt('SKEL/TO-PB'),
self.lt('SKEL/TO*PB'),
self.lt('P-P'))
def test_retrospective_toggle_asterisk_replaced2(self):
self.define('P-P', '{.}')
self.define('SKEL', 'cancel')
self.define('SKEL/TO-PB', 'skeleton')
self.define('TO*PB', '{^ton}')
self.define('A*', '{*}')
self.translate('P-P')
self.translate('SKEL')
self.translate('TO-PB')
self.translate('A*')
self._check_translations(self.lt('SKEL TO*PB'))
self._check_output(self.lt('SKEL/TO-PB'),
self.lt('SKEL TO*PB'),
self.lt('P-P'))
def test_repeat_last_stroke1(self):
self.define('T/E/S/T', 'a longer key')
self.define('TH', 'this')
self.define('R*', '{*+}')
self.translate('TH')
self.translate('R*')
undo = []
do = self.lt('TH')
state = self.lt('TH TH')
self._check_translations(state)
self._check_output(undo, do, do)
def test_repeat_last_stroke2(self):
self.define('T/E/S/T', 'a longer key')
self.define('THA', 'that')
self.define('R*', '{*+}')
self.translate('THA')
self.translate('R*')
undo = []
do = self.lt('THA')
state = self.lt('THA THA')
self._check_translations(state)
self._check_output(undo, do, do)
def test_untranslate_translation(self):
self.tlor.set_min_undo_length(2)
self.define('TH', 'this')
self.define('THA', 'that')
self.translate('TH')
self.translate('THA')
self.tlor.untranslate_translation(self.t('THA'))
self.tlor.untranslate_translation(self.t('TH'))
self.tlor.flush()
self._check_output(self.lt('TH THA'), [], None)
ESCAPE_UNESCAPE_TRANSLATION_TESTS = (
# No change.
lambda: ('foobar', 'foobar'),
lambda: (r'\\', r'\\'),
lambda: ('\\\\\\', '\\\\\\'), # -> \\\
# Basic support: \n, \r, \t.
lambda: ('\n', r'\n'),
lambda: ('\r', r'\r'),
lambda: ('\t', r'\t'),
# Allow a literal \n, \r, or \t by doubling the \.
lambda: (r'\n', r'\\n'),
lambda: (r'\r', r'\\r'),
lambda: (r'\t', r'\\t'),
lambda: (r'\\n', r'\\\n'),
lambda: (r'\\r', r'\\\r'),
lambda: (r'\\t', r'\\\t'),
# A little more complex.
lambda: ('\tfoo\nbar\r', r'\tfoo\nbar\r'),
lambda: ('\\tfoo\\nbar\\r', r'\\tfoo\\nbar\\r'),
)
@parametrize(ESCAPE_UNESCAPE_TRANSLATION_TESTS)
def test_escape_unescape_translation(raw, escaped):
assert unescape_translation(escaped) == raw
assert escape_translation(raw) == escaped
class TestNoUnnecessaryLookups(TestTranslateStroke):
# Custom dictionary collection class for tracking lookups.
class DictTracy(StenoDictionaryCollection):
def __init__(self, dicts):
super().__init__(dicts)
self.lookup_history = []
def lookup(self, key):
self.lookup_history.append(key)
return super().lookup(key)
DICT_COLLECTION_CLASS = DictTracy
def _prepare_state(self, definitions, translations):
if definitions:
for steno, english in ast.literal_eval('{' + definitions + '}').items():
self.define(steno, english)
translations = self.lt(translations)
for t in translations:
for s in t.strokes:
self.translate(s.rtfcre)
state = translations[len(translations)-self.dc.longest_key:]
self._check_translations(state)
self.dc.lookup_history.clear()
def _check_lookup_history(self, expected):
# Hide from traceback on assertions (reduce output size for failed tests).
__tracebackhide__ = operator.methodcaller('errisinstance', AssertionError)
result = ['/'.join(key) for key in self.dc.lookup_history]
expected = expected.split()
msg = '''
lookup history:
results: %s
expected: %s
''' % (result, expected)
assert result == expected, msg
def test_zero_lookups(self):
# No lookups at all if longest key is zero.
self.translate('TEFT')
self._check_lookup_history('')
self._check_translations(self.lt('TEFT'))
def test_no_prefix_lookup_over_the_longest_key_limit(self):
self._prepare_state(
'''
"HROPBG/EFT/KAOE": "longest key",
"HRETS": "let's",
"TKO": "do",
"SPH": "some",
"TEFT": "test",
"-G": "{^ing}",
''',
'HRETS TKO SPH TEFT')
self.translate('-G')
self._check_lookup_history(
# Macros.
'''
/-G
-G
'''
# Others.
'''
SPH/TEFT/-G
/TEFT/-G TEFT/-G
'''
)
def test_no_duplicate_lookups_for_longest_no_suffix_match(self):
self._prepare_state(
'''
"TEFT": "test",
"-G": "{^ing}",
''',
'TEFT')
self.translate('TEFGT')
self._check_lookup_history(
# Macros.
'''
TEFGT
'''
# No suffix.
'''
'''
# With suffix.
'''
-G TEFT
'''
)
def test_lookup_suffixes_once(self):
self._prepare_state(
'''
"HROPBG/EFT/KAOE": "longest key",
"HRETS": "let's",
"TEFT": "test",
"SPH": "some",
"SUFBGS": "suffix",
"-G": "{^ing}",
"-S": "{^s}",
"-D": "{^ed}",
"-Z": "{^s}",
''',
'HRETS TEFT SPH')
self.translate('SUFBGSZ')
self._check_lookup_history(
# Macros.
'''
/SUFBGSZ
SUFBGSZ
'''
# Without suffix.
'''
TEFT/SPH/SUFBGSZ
/SPH/SUFBGSZ
SPH/SUFBGSZ
'''
# Suffix lookups.
'''
-Z -S -G
TEFT/SPH/SUFBGS TEFT/SPH/SUFBGZ TEFT/SPH/SUFBSZ
/SPH/SUFBGS /SPH/SUFBGZ /SPH/SUFBSZ
SPH/SUFBGS SPH/SUFBGZ SPH/SUFBSZ
/SUFBGS /SUFBGZ /SUFBSZ
SUFBGS
''')
| 28,803
|
Python
|
.py
| 808
| 27.32302
| 88
| 0.551814
|
openstenoproject/plover
| 2,318
| 281
| 179
|
GPL-2.0
|
9/5/2024, 5:12:22 PM (Europe/Amsterdam)
|
18,663
|
test_command.py
|
openstenoproject_plover/test/test_command.py
|
""" Unit tests for built-in engine command plugins. """
import ast
import pytest
from plover.command.set_config import set_config
from plover.config import Config, DictionaryConfig
from plover_build_utils.testing import parametrize
from .test_config import DEFAULTS, DEFAULT_KEYMAP
class FakeEngine:
""" The plugin sets the engine.config property, which unpacks a dict as
keywords into config.update. """
def __init__(self):
self._cfg = Config()
@property
def config(self):
return self._cfg.as_dict()
@config.setter
def config(self, options):
self._cfg.update(**options)
SET_CONFIG_TESTS = (
lambda: ('"space_placement":"After Output"', "After Output"),
lambda: ('"start_attached":True', True),
lambda: ('"undo_levels":10', 10),
lambda: ('"log_file_name":"c:/whatever/morestrokes.log"', "c:/whatever/morestrokes.log"),
lambda: ('"enabled_extensions":[]', set()),
lambda: ('"machine_type":"Keyboard"', "Keyboard"),
lambda: ('"machine_specific_options":{"arpeggiate":True}', {"arpeggiate": True, "first_up_chord_send": False}),
lambda: ('"system_keymap":'+str(DEFAULT_KEYMAP), DEFAULT_KEYMAP),
lambda: ('"dictionaries":("user.json","main.json")', list(map(DictionaryConfig, ("user.json", "main.json")))),
)
@parametrize(SET_CONFIG_TESTS)
def test_set_config(cmdline, expected):
""" Unit tests for set_config command. Test at least one of each valid data type. """
# Config values are sometimes converted and stored as different types than the input,
# so the expected output config values may not always be strictly equal to the input.
engine = FakeEngine()
engine.config = DEFAULTS
set_config(engine, cmdline)
key = ast.literal_eval("{"+cmdline+"}").popitem()[0]
assert engine.config[key] == expected
def test_set_config_multiple_options():
""" Multiple options can be set with one command if separated by commas.
Try setting every test option from scratch with a single command. """
engine = FakeEngine()
engine.config = DEFAULTS
individual_tests = [t() for t in SET_CONFIG_TESTS]
compound_cmdline = ",".join([t[0] for t in individual_tests])
set_config(engine, compound_cmdline)
for cmdline, expected in individual_tests:
key = ast.literal_eval("{"+cmdline+"}").popitem()[0]
assert engine.config[key] == expected
SET_CONFIG_INVALID_TESTS = (
# No delimiter in command.
lambda: '"undo_levels"10',
# Wrong value type.
lambda: '"undo_levels":"does a string work?"',
# Unsupported value.
lambda: '"machine_type":"Telepathy"',
# Bad format for dict literal.
lambda: '"machine_specific_options":{"closing_brace":False',
)
@parametrize(SET_CONFIG_INVALID_TESTS, arity=1)
def test_set_config_invalid(invalid_cmdline):
""" Unit tests for set_config exception handling. """
engine = FakeEngine()
with pytest.raises(ValueError):
set_config(engine, invalid_cmdline)
| 3,119
|
Python
|
.py
| 66
| 42.712121
| 120
| 0.663047
|
openstenoproject/plover
| 2,318
| 281
| 179
|
GPL-2.0
|
9/5/2024, 5:12:22 PM (Europe/Amsterdam)
|
18,664
|
test_keymap.py
|
openstenoproject_plover/test/test_keymap.py
|
import pytest
from plover.machine.keymap import Keymap
def new_keymap():
return Keymap(('k%u' % n for n in range(8)),
('a%u' % n for n in range(4)))
BINDINGS_LIST = (
('k0', 'a0'),
('k1', 'a3'),
('k4', 'a1'),
('k5', 'a2'),
('k7', 'a1'),
)
BINDINGS_DICT = dict(BINDINGS_LIST)
MAPPINGS_LIST = (
('a0', ('k0',)),
('a1', ('k4', 'k7')),
('a2', ('k5',)),
('a3', ('k1',)),
)
MAPPINGS_DICT = dict(MAPPINGS_LIST)
MAPPINGS_FULL = dict(MAPPINGS_DICT)
MAPPINGS_FULL.update({'no-op': ()})
def test_keymap_init():
k = new_keymap()
assert k.get_bindings() == {}
assert k.get_mappings() == {}
def test_keymap_set_bindings():
# Set bindings from a dictionary.
k = new_keymap()
k.set_bindings(BINDINGS_DICT)
assert k.get_bindings() == BINDINGS_DICT
assert k.get_mappings() == MAPPINGS_FULL
# Set bindings from a list of tuples.
k = new_keymap()
k.set_bindings(BINDINGS_LIST)
assert k.get_bindings() == BINDINGS_DICT
assert k.get_mappings() == MAPPINGS_FULL
def test_keymap_set_mappings():
# Set mappings from a dictionary.
k = new_keymap()
k.set_mappings(MAPPINGS_DICT)
assert k.get_bindings() == BINDINGS_DICT
assert k.get_mappings() == MAPPINGS_FULL
# Set mappings from a list of tuples.
k = new_keymap()
k.set_mappings(MAPPINGS_LIST)
assert k.get_bindings() == BINDINGS_DICT
assert k.get_mappings() == MAPPINGS_FULL
def test_keymap_setitem():
bindings = dict(BINDINGS_DICT)
mappings = dict(MAPPINGS_FULL)
k = new_keymap()
k.set_mappings(mappings)
# Bind to one key.
k['a3'] = 'k6'
del bindings['k1']
bindings['k6'] = 'a3'
mappings['a3'] = ('k6',)
assert k.get_bindings() == bindings
assert k.get_mappings() == mappings
# Bind to multiple keys.
k['a3'] = ('k6', 'k1')
bindings['k1'] = 'a3'
bindings['k6'] = 'a3'
mappings['a3'] = ('k1', 'k6',)
assert k.get_bindings() == bindings
assert k.get_mappings() == mappings
# If a key is already mapped (k0), don't override it.
k['a3'] = ('k0', 'k2')
del bindings['k1']
del bindings['k6']
bindings['k2'] = 'a3'
mappings['a3'] = ('k2',)
assert k.get_bindings() == bindings
assert k.get_mappings() == mappings
# Assert on invalid action.
with pytest.raises(AssertionError):
k['a9'] = 'k0'
| 2,392
|
Python
|
.py
| 78
| 26.076923
| 57
| 0.601128
|
openstenoproject/plover
| 2,318
| 281
| 179
|
GPL-2.0
|
9/5/2024, 5:12:22 PM (Europe/Amsterdam)
|
18,665
|
test_steno_validator.py
|
openstenoproject_plover/test/gui_qt/test_steno_validator.py
|
from PyQt5.QtGui import QValidator
import pytest
from plover.gui_qt.steno_validator import StenoValidator
@pytest.mark.parametrize(('text', 'state'), (
# Acceptable.
('ST', QValidator.Acceptable),
('TEFT', QValidator.Acceptable),
('TEFT/-G', QValidator.Acceptable),
('/ST', QValidator.Acceptable),
('-F', QValidator.Acceptable),
# Intermediate.
('-', QValidator.Intermediate),
('/', QValidator.Intermediate),
('/-', QValidator.Intermediate),
('ST/', QValidator.Intermediate),
('ST/-', QValidator.Intermediate),
('ST//', QValidator.Intermediate),
# Invalid.
('WK', QValidator.Invalid),
('PLOVER', QValidator.Invalid),
))
def test_steno_validator_validate(text, state):
validator = StenoValidator()
assert validator.validate(text, len(text)) == (state, text, len(text))
| 844
|
Python
|
.py
| 24
| 31
| 74
| 0.680147
|
openstenoproject/plover
| 2,318
| 281
| 179
|
GPL-2.0
|
9/5/2024, 5:12:22 PM (Europe/Amsterdam)
|
18,666
|
test_dictionaries_widget.py
|
openstenoproject_plover/test/gui_qt/test_dictionaries_widget.py
|
from collections import namedtuple
from pathlib import Path
from textwrap import dedent
from types import SimpleNamespace
import operator
from PyQt5.QtCore import QModelIndex, QPersistentModelIndex, Qt
import pytest
from plover.config import DictionaryConfig
from plover.engine import ErroredDictionary
from plover.gui_qt.dictionaries_widget import DictionariesModel, DictionariesWidget
from plover.steno_dictionary import StenoDictionary, StenoDictionaryCollection
from plover.misc import expand_path
from plover_build_utils.testing import parametrize
from ..py37compat import mock
INVALID_EXCEPTION = Exception('loading error')
ICON_TO_CHAR = {
'error': '!',
'favorite': '★',
'loading': '🗘',
'normal': '�',
'readonly': '🛇',
}
ICON_FROM_CHAR = {c: i for i, c in ICON_TO_CHAR.items()}
ENABLED_TO_CHAR = {
False: '�',
True: '☑',
}
ENABLED_FROM_CHAR = {c: e for e, c in ENABLED_TO_CHAR.items()}
CHECKED_TO_BOOL = {
Qt.Checked: True,
Qt.Unchecked: False,
}
MODEL_ROLES = sorted([Qt.AccessibleTextRole, Qt.CheckStateRole,
Qt.DecorationRole, Qt.DisplayRole, Qt.ToolTipRole])
def parse_state(state_str):
state_str = dedent(state_str).strip()
if not state_str:
return
for line in state_str.split('\n'):
enabled, icon, path = line.split()
yield ENABLED_FROM_CHAR[enabled], ICON_FROM_CHAR[icon], path
def config_dictionaries_from_state(state_str):
return [
DictionaryConfig(path, enabled)
for enabled, icon, path in parse_state(state_str)
]
def steno_dictionaries_from_state(state_str, existing_dictionaries=None):
new_dictionaries = []
for enabled, icon, path in parse_state(state_str):
if icon == 'loading':
continue
path = expand_path(path)
if existing_dictionaries is None:
steno_dict = None
else:
steno_dict = existing_dictionaries.get(path)
if steno_dict is None:
if icon == 'error' or path.endswith('.bad'):
steno_dict = ErroredDictionary(path, INVALID_EXCEPTION)
else:
steno_dict = StenoDictionary()
steno_dict.path = path
steno_dict.readonly = (
icon == 'readonly' or
path.endswith('.ro') or
path.startswith('asset:')
)
steno_dict.enabled = enabled
new_dictionaries.append(steno_dict)
return new_dictionaries
class ModelTest(namedtuple('ModelTest', '''
config dictionaries engine
model signals connections
initial_state
''')):
def configure(self, **kwargs):
self.connections['config_changed'](kwargs)
def configure_dictionaries(self, state):
self.configure(dictionaries=config_dictionaries_from_state(state))
def load_dictionaries(self, state):
self.dictionaries.set_dicts(steno_dictionaries_from_state(state, self.dictionaries))
self.connections['dictionaries_loaded'](self.dictionaries)
loaded = [row
for row, (enabled, icon, path)
in enumerate(parse_state(state))
if icon != 'loading']
def check(self, expected,
config_change=None, data_change=None,
layout_change=False, undo_change=None):
__tracebackhide__ = operator.methodcaller('errisinstance', AssertionError)
expected = dedent(expected).strip()
if expected:
expected_config = expected
expected_state = expected.split('\n')
else:
expected_config = ''
expected_state = []
actual_state = []
for row in range(self.model.rowCount()):
index = self.model.index(row)
enabled = CHECKED_TO_BOOL[index.data(Qt.CheckStateRole)]
icon = index.data(Qt.DecorationRole)
path = index.data(Qt.DisplayRole)
actual_state.append('%s %s %s' % (
ENABLED_TO_CHAR.get(enabled, '?'),
ICON_TO_CHAR.get(icon, '?'),
path))
assert actual_state == expected_state
assert not self.engine.mock_calls, 'unexpected engine call'
if config_change == 'reload':
assert self.config.mock_calls == [mock.call({})]
self.config.reset_mock()
elif config_change == 'update':
config_update = {
'dictionaries': config_dictionaries_from_state(expected_config),
}
assert self.config.mock_calls == [mock.call(config_update)]
self.config.reset_mock()
else:
assert not self.config.mock_calls, 'unexpected config call'
signal_calls = self.signals.mock_calls[:]
if undo_change is not None:
call = signal_calls.pop(0)
assert call == mock.call.has_undo_changed(undo_change)
if data_change is not None:
for row in data_change:
index = self.model.index(row)
call = signal_calls.pop(0)
call.args[2].sort()
assert call == mock.call.dataChanged(index, index, MODEL_ROLES)
if layout_change:
assert signal_calls[0:2] == [mock.call.layoutAboutToBeChanged([], self.model.NoLayoutChangeHint),
mock.call.layoutChanged([], self.model.NoLayoutChangeHint)]
del signal_calls[0:2]
assert not signal_calls
self.signals.reset_mock()
def reset_mocks(self):
self.config.reset_mock()
self.engine.reset_mock()
self.signals.reset_mock()
@pytest.fixture
def model_test(monkeypatch, request):
state = request.function.__doc__
# Patch configuration directory.
current_dir = Path('.').resolve()
monkeypatch.setattr('plover.misc.CONFIG_DIR', str(current_dir))
monkeypatch.setattr('plover.gui_qt.dictionaries_widget.CONFIG_DIR', str(current_dir))
# Disable i18n support.
monkeypatch.setattr('plover.gui_qt.dictionaries_widget._', lambda s: s)
# Fake config.
config = mock.PropertyMock()
config.return_value = {}
# Dictionaries.
dictionaries = StenoDictionaryCollection()
# Fake engine.
engine = mock.MagicMock(spec='''
__enter__ __exit__
config signal_connect
'''.split())
engine.__enter__.return_value = engine
type(engine).config = config
signals = mock.MagicMock()
config.return_value = {
'dictionaries': config_dictionaries_from_state(state) if state else [],
'classic_dictionaries_display_order': False,
}
# Setup model.
model = DictionariesModel(engine, {name: name for name in ICON_TO_CHAR}, max_undo=5)
for slot in '''
dataChanged
layoutAboutToBeChanged
layoutChanged
has_undo_changed
'''.split():
getattr(model, slot).connect(getattr(signals, slot))
connections = dict(call.args for call in engine.signal_connect.mock_calls)
assert connections.keys() == {'config_changed', 'dictionaries_loaded'}
config.reset_mock()
engine.reset_mock()
# Test helper.
test = ModelTest(config, dictionaries, engine, model, signals, connections, state)
if state and any(icon != 'loading' for enabled, icon, path in parse_state(state)):
test.load_dictionaries(state)
test.reset_mocks()
return test
def test_model_accessible_text_1(model_test):
'''
☑ 🗘 read-only.ro
☑ 🗘 user.json
☑ 🗘 invalid.bad
☑ 🗘 commands.json
� 🗘 asset:plover:assets/main.json
'''
for n, expected in enumerate((
'read-only.ro, loading',
'user.json, loading',
'invalid.bad, loading',
'commands.json, loading',
'asset:plover:assets/main.json, disabled, loading',
)):
assert model_test.model.index(n).data(Qt.AccessibleTextRole) == expected
def test_model_accessible_text_2(model_test):
'''
☑ 🛇 read-only.ro
☑ ★ user.json
☑ � commands.json
� 🛇 asset:plover:assets/main.json
'''
for n, expected in enumerate((
'read-only.ro, read-only',
'user.json, favorite',
'commands.json',
'asset:plover:assets/main.json, disabled, read-only',
)):
assert model_test.model.index(n).data(Qt.AccessibleTextRole) == expected
def test_model_accessible_text_3(model_test):
'''
☑ ! invalid.bad
'''
expected = 'invalid.bad, errored: %s.' % INVALID_EXCEPTION
assert model_test.model.index(0).data(Qt.AccessibleTextRole) == expected
def test_model_accessible_text_4(model_test):
'''
� ! invalid.bad
'''
expected = 'invalid.bad, disabled, errored: %s.' % INVALID_EXCEPTION
assert model_test.model.index(0).data(Qt.AccessibleTextRole) == expected
def test_model_add_existing(model_test):
'''
☑ ★ user.json
☑ � commands.json
� � main.json
'''
model_test.model.add([expand_path('main.json')])
model_test.check(model_test.initial_state, config_change='reload')
def test_model_add_new_1(model_test):
'''
☑ ★ user.json
� � commands.json
☑ 🗘 main.json
'''
model_test.model.add([expand_path('read-only.ro')])
model_test.check(
'''
☑ 🗘 read-only.ro
☑ ★ user.json
� � commands.json
☑ 🗘 main.json
''',
config_change='update',
layout_change=True,
undo_change=True,
)
def test_model_add_new_2(model_test):
'''
☑ ★ user.json
� � commands.json
☑ 🗘 main.json
'''
model_test.model.add(['duplicated.json',
'unique.json',
'duplicated.json'])
model_test.check(
'''
☑ 🗘 duplicated.json
☑ 🗘 unique.json
☑ ★ user.json
� � commands.json
☑ 🗘 main.json
''',
config_change='update',
layout_change=True,
undo_change=True,
)
def test_model_add_nothing(model_test):
'''
☑ ★ user.json
☑ � commands.json
☑ � main.json
'''
model_test.model.add([])
model_test.check(model_test.initial_state)
def test_model_config_update(model_test):
'''
� � user.json
☑ ★ commands.json
☑ 🛇 read-only.ro
☑ ! invalid.bad
� 🛇 asset:plover:assets/main.json
'''
state = '''
☑ ★ user.json
� � commands.json
☑ 🗘 main.json
'''
model_test.configure_dictionaries(state)
model_test.check(state, layout_change=True)
state = '''
☑ ★ user.json
� � commands.json
☑ � main.json
'''
model_test.load_dictionaries(state)
model_test.check(state, data_change=[2])
def test_model_insert_1(model_test):
'''
� � user.json
☑ ★ commands.json
☑ 🛇 read-only.ro
☑ ! invalid.bad
� 🛇 asset:plover:assets/main.json
'''
model_test.model.insert(model_test.model.index(2),
['main.json',
'commands.json',
'read-only.ro'])
model_test.check(
'''
� � user.json
☑ 🗘 main.json
☑ ★ commands.json
☑ 🛇 read-only.ro
☑ ! invalid.bad
� 🛇 asset:plover:assets/main.json
''',
config_change='update',
layout_change=True,
undo_change=True,
)
def test_model_insert_2(model_test):
'''
� � user.json
☑ 🗘 main.json
☑ ★ commands.json
☑ 🛇 read-only.ro
☑ ! invalid.bad
� 🛇 asset:plover:assets/main.json
'''
model_test.model.insert(QModelIndex(),
['commands.json',
'user.json',
'commands.json'])
model_test.check(
'''
☑ 🗘 main.json
☑ 🛇 read-only.ro
☑ ! invalid.bad
� 🛇 asset:plover:assets/main.json
☑ ★ commands.json
� � user.json
''',
config_change='update',
layout_change=True,
undo_change=True,
)
def test_model_insert_3(model_test):
'''
☑ ★ user.json
☑ � commands.json
☑ � main.json
'''
model_test.model.insert(QModelIndex(), [])
model_test.check(model_test.initial_state)
def test_model_display_order(model_test):
'''
☑ 🛇 read-only.ro
☑ ★ user.json
☑ � commands.json
� 🛇 asset:plover:assets/main.json
'''
state = model_test.initial_state
# Flip display order.
model_test.configure(classic_dictionaries_display_order=True)
model_test.check('\n'.join(reversed(state.split('\n'))), layout_change=True)
# Reset display order to default.
model_test.configure(classic_dictionaries_display_order=False)
model_test.check(state, layout_change=True)
def test_model_favorite(model_test):
'''
☑ 🛇 read-only.ro
☑ ★ user.json
☑ ! invalid.bad
☑ � commands.json
� 🛇 asset:plover:assets/main.json
'''
# New favorite.
model_test.model.setData(model_test.model.index(1), Qt.Unchecked, Qt.CheckStateRole)
model_test.check(
'''
☑ 🛇 read-only.ro
� � user.json
☑ ! invalid.bad
☑ ★ commands.json
� 🛇 asset:plover:assets/main.json
''',
config_change='update',
data_change=[1, 3],
undo_change=True,
)
# No favorite.
model_test.model.setData(model_test.model.index(3), Qt.Unchecked, Qt.CheckStateRole)
model_test.check(
'''
☑ 🛇 read-only.ro
� � user.json
☑ ! invalid.bad
� � commands.json
� 🛇 asset:plover:assets/main.json
''',
config_change='update',
data_change=[3],
)
def test_model_initial_setup(model_test):
'''
☑ 🗘 read-only.ro
☑ 🗘 user.json
☑ 🗘 invalid.bad
☑ 🗘 commands.json
� 🗘 asset:plover:assets/main.json
'''
state = model_test.initial_state
# Initial state.
model_test.check(state)
# First config notification: no-op.
model_test.configure(**model_test.config.return_value)
model_test.check(state)
# After loading dictionaries.
state = '''
☑ 🛇 read-only.ro
☑ ★ user.json
☑ ! invalid.bad
☑ � commands.json
� 🛇 asset:plover:assets/main.json
'''
model_test.load_dictionaries(state)
model_test.check(state, data_change=range(5))
def test_model_iter_loaded(model_test):
'''
☑ 🗘 magnum.json
☑ ★ user.json
� � commands.json
☑ 🗘 main.json
'''
model_test.check(model_test.initial_state)
index_list = [model_test.model.index(n) for n in range(4)]
index_list.append(QModelIndex())
assert list(model_test.model.iter_loaded(index_list)) == model_test.dictionaries.dicts
assert list(model_test.model.iter_loaded(reversed(index_list))) == model_test.dictionaries.dicts
model_test.configure(classic_dictionaries_display_order=False)
assert list(model_test.model.iter_loaded(index_list)) == model_test.dictionaries.dicts
assert list(model_test.model.iter_loaded(reversed(index_list))) == model_test.dictionaries.dicts
def test_model_move_dictionaries(model_test):
'''
☑ 🛇 read-only.ro
� � commands.json
☑ ★ user.json
☑ � main.json
'''
model_test.check(model_test.initial_state)
model_test.model.move(QModelIndex(), [model_test.model.index(0),
model_test.model.index(2)])
model_test.check(
'''
� � commands.json
☑ ★ main.json
☑ 🛇 read-only.ro
☑ � user.json
''',
config_change='update',
layout_change=True,
undo_change=True,
)
def test_model_move_down(model_test):
'''
� � commands.json
☑ ★ main.json
☑ 🛇 read-only.ro
☑ � user.json
'''
model_test.model.move_down([model_test.model.index(n) for n in [1]])
model_test.check(
'''
� � commands.json
☑ 🛇 read-only.ro
☑ ★ main.json
☑ � user.json
''',
config_change='update',
layout_change=True,
undo_change=True,
)
model_test.model.move_down([model_test.model.index(n) for n in [0, 2]])
model_test.check(
'''
☑ 🛇 read-only.ro
� � commands.json
☑ ★ user.json
☑ � main.json
''',
config_change='update',
layout_change=True,
)
model_test.model.move_down([model_test.model.index(n) for n in [1]])
model_test.check(
'''
☑ 🛇 read-only.ro
☑ ★ user.json
� � commands.json
☑ � main.json
''',
config_change='update',
layout_change=True,
)
def test_model_move_down_nothing(model_test):
'''
☑ ★ user.json
☑ � commands.json
☑ � main.json
'''
model_test.model.move_down([QModelIndex()])
model_test.check(model_test.initial_state)
def test_model_move_up(model_test):
'''
☑ 🛇 read-only.ro
☑ � user.json
� � commands.json
☑ ★ main.json
'''
model_test.model.move_up([model_test.model.index(n) for n in [2, 3]])
model_test.check(
'''
☑ 🛇 read-only.ro
� � commands.json
☑ ★ main.json
☑ � user.json
''',
config_change='update',
layout_change=True,
undo_change=True,
)
model_test.model.move_up([model_test.model.index(n) for n in [1, 2]])
model_test.check(
'''
� � commands.json
☑ ★ main.json
☑ 🛇 read-only.ro
☑ � user.json
''',
config_change='update',
layout_change=True,
)
def test_model_move_up_nothing(model_test):
'''
☑ ★ user.json
☑ � commands.json
☑ � main.json
'''
model_test.model.move_up([QModelIndex()])
model_test.check(model_test.initial_state)
def test_model_persistent_index(model_test):
'''
☑ 🛇 read-only.ro
☑ ★ user.json
☑ � commands.json
� 🛇 asset:plover:assets/main.json
'''
persistent_index = QPersistentModelIndex(model_test.model.index(1))
assert persistent_index.row() == 1
assert persistent_index.data(Qt.CheckStateRole) == Qt.Checked
assert persistent_index.data(Qt.DecorationRole) == 'favorite'
assert persistent_index.data(Qt.DisplayRole) == 'user.json'
model_test.configure(classic_dictionaries_display_order=True)
assert persistent_index.row() == 2
assert persistent_index.data(Qt.CheckStateRole) == Qt.Checked
assert persistent_index.data(Qt.DecorationRole) == 'favorite'
assert persistent_index.data(Qt.DisplayRole) == 'user.json'
model_test.model.setData(persistent_index, Qt.Unchecked, Qt.CheckStateRole)
assert persistent_index.row() == 2
assert persistent_index.data(Qt.CheckStateRole) == Qt.Unchecked
assert persistent_index.data(Qt.DecorationRole) == 'normal'
assert persistent_index.data(Qt.DisplayRole) == 'user.json'
def test_model_qtmodeltester(model_test, qtmodeltester):
'''
☑ 🛇 read-only.ro
☑ ★ user.json
☑ � commands.json
� 🛇 asset:plover:assets/main.json
'''
qtmodeltester.check(model_test.model)
def test_model_remove(model_test):
'''
� � commands.json
☑ 🛇 read-only.ro
☑ ★ main.json
☑ � user.json
'''
model_test.model.remove([model_test.model.index(n) for n in [0, 3]])
model_test.check(
'''
☑ 🛇 read-only.ro
☑ ★ main.json
''',
config_change='update',
layout_change=True,
undo_change=True,
)
model_test.model.remove([model_test.model.index(n) for n in [0, 1]])
model_test.check('', config_change='update', layout_change=True)
def test_model_remove_nothing_1(model_test):
'''
� � commands.json
☑ 🛇 read-only.ro
☑ ★ main.json
☑ � user.json
'''
model_test.model.remove([])
model_test.check(model_test.initial_state)
def test_model_remove_nothing_2(model_test):
'''
� � commands.json
☑ 🛇 read-only.ro
☑ ★ main.json
☑ � user.json
'''
model_test.model.remove([QModelIndex()])
model_test.check(model_test.initial_state)
def test_model_set_checked(model_test):
on_state = '☑ 🗘 user.json'
off_state = '� 🗘 user.json'
model_test.model.add(['user.json'])
model_test.check(on_state, config_change='update',
layout_change=True, undo_change=True)
first_index = model_test.model.index(0)
for index, value, ret, state in (
# Invalid index.
(QModelIndex(), Qt.Unchecked, False, on_state),
# Invalid values.
(first_index, 'pouet', False, on_state),
(first_index, Qt.PartiallyChecked, False, on_state),
# Already checked.
(first_index, Qt.Checked, False, on_state),
# Uncheck.
(first_index, Qt.Unchecked, True, off_state),
# Recheck.
(first_index, Qt.Checked, True, on_state),
):
assert model_test.model.setData(index, value, Qt.CheckStateRole) == ret
model_test.check(state, config_change='update' if ret else None,
data_change=[index.row()] if ret else None)
def test_model_unrelated_config_change(model_test):
    # Docstring is fixture data (initial state); keep verbatim.
    '''
    ☑ ★ user.json
    ☑ � commands.json
    ☑ � main.json
    '''
    # A config change unrelated to dictionaries must not touch the model.
    model_test.configure(start_minimized=True)
    model_test.check(model_test.initial_state)
def test_model_undo_1(model_test):
    # Docstring is fixture data (initial state); keep verbatim.
    '''
    � 🗘 1.json
    � 🗘 2.json
    � 🗘 3.json
    � 🗘 4.json
    � 🗘 5.json
    � 🗘 6.json
    '''
    # Check max undo size.
    # Check rows one by one (6 changes), then undo: only 5 undos are
    # possible, so the undo stack is capped at 5 entries.
    state = dedent(model_test.initial_state).strip()
    state_stack = []
    for n in range(6):
        state_stack.append(state)
        # Build the expected state string with row `n` checked.
        state = state.split('\n')
        state[n] = '☑' + state[n][1:]
        state = '\n'.join(state)
        model_test.model.setData(model_test.model.index(n), Qt.Checked, Qt.CheckStateRole)
        model_test.check(state, config_change='update', data_change=[n],
                         undo_change=(True if n == 0 else None))
    for n in range(5):
        model_test.model.undo()
        # The last undo exhausts the stack (undo_change=False).
        model_test.check(state_stack.pop(),
                         config_change='update',
                         layout_change=True,
                         undo_change=(False if n == 4 else None))
def test_model_undo_2(model_test):
    # Docstring is fixture data (initial state); keep verbatim.
    '''
    ☑ ★ user.json
    ☑ � commands.json
    ☑ � main.json
    '''
    # Changing display order has no impact on the undo stack.
    model_test.configure(classic_dictionaries_display_order=True)
    model_test.check(
        '''
        ☑ � main.json
        ☑ � commands.json
        ☑ ★ user.json
        ''',
        layout_change=True,
    )
class WidgetTest(namedtuple('WidgetTest', '''
    registry
    bot widget
    file_dialog
    create_dictionary
    model_test
''')):
    '''Bundle of the widget under test and its fakes.

    Unknown attributes are delegated to the wrapped `model_test` fixture.
    '''

    def _apply_selection(self, rows, flag):
        # Apply the given selection flag to each row of the widget's view.
        selection_model = self.widget.view.selectionModel()
        for row in rows:
            selection_model.select(self.model.index(row), flag)

    def select(self, selection):
        '''Add the given rows to the view's selection.'''
        selection_model = self.widget.view.selectionModel()
        self._apply_selection(selection, selection_model.Select)

    def unselect(self, selection):
        '''Remove the given rows from the view's selection.'''
        selection_model = self.widget.view.selectionModel()
        self._apply_selection(selection, selection_model.Deselect)

    def __getattr__(self, name):
        return getattr(self.model_test, name)
@pytest.fixture
def widget_test(model_test, monkeypatch, qtbot):
    # Build a DictionariesWidget wired to fakes: a fake plugin registry,
    # a fake file dialog, a fake `create_dictionary`, and the model from
    # the `model_test` fixture (patched in via DictionariesModel).
    # Fake registry.
    def list_plugins(plugin_type):
        assert plugin_type == 'dictionary'
        # Three dictionary plugins: two writable, one read-only.
        for name, readonly in (
            ('bad', False),
            ('json', False),
            ('ro', True),
        ):
            obj = SimpleNamespace(readonly=readonly)
            yield SimpleNamespace(name=name, obj=obj)
    registry = mock.MagicMock(spec=['list_plugins'])
    registry.list_plugins.side_effect = list_plugins
    monkeypatch.setattr('plover.gui_qt.dictionaries_widget.registry', registry)
    # Fake file dialog.
    file_dialog = mock.MagicMock(spec='''
        getOpenFileNames
        getSaveFileName
    '''.split())
    monkeypatch.setattr('plover.gui_qt.dictionaries_widget.QFileDialog', file_dialog)
    # Fake `create_dictionary`.
    def create_dictionary(filename, threaded_save=True):
        pass
    steno_dict = mock.create_autospec(StenoDictionary)
    create_dictionary = mock.create_autospec(create_dictionary, return_value=steno_dict)
    monkeypatch.setattr('plover.gui_qt.dictionaries_widget.create_dictionary', create_dictionary)
    # Patch `DictionariesModel` constructor to use our own instance.
    monkeypatch.setattr('plover.gui_qt.dictionaries_widget.DictionariesModel',
                        lambda engine, icons: model_test.model)
    widget = DictionariesWidget()
    widget.setup(model_test.engine)
    qtbot.addWidget(widget)
    test = WidgetTest(registry, qtbot, widget, file_dialog, create_dictionary, model_test)
    return test
@parametrize((
    # No selection.
    lambda: ((), '''
        AddDictionaries
        AddTranslation
    '''),
    # No loaded dictionary selected.
    lambda: ([1, 4], '''
        AddDictionaries
        AddTranslation
        MoveDictionariesDown
        MoveDictionariesUp
        RemoveDictionaries
    '''),
    # At least one loaded dictionary selected.
    lambda: ([0, 2], '''
        AddDictionaries
        AddTranslation
        EditDictionaries
        MoveDictionariesDown
        MoveDictionariesUp
        RemoveDictionaries
        SaveDictionaries
    '''),
    lambda: ([1, 3], '''
        AddDictionaries
        AddTranslation
        EditDictionaries
        MoveDictionariesDown
        MoveDictionariesUp
        RemoveDictionaries
        SaveDictionaries
    '''),
))
def test_widget_selection(widget_test, selection, enabled_actions):
    # Docstring is fixture data (initial state); keep verbatim.
    '''
    ☑ ★ favorite.json
    ☑ 🗘 loading.json
    ☑ � normal.json
    ☑ 🛇 read-only.ro
    ☑ ! invalid.bad
    '''
    # Apply the selection, then check exactly the expected actions are
    # enabled. NOTE: `enabled_actions` is a multiline string, so the
    # membership check below is a substring test, not a list lookup.
    widget_test.select(selection)
    for action_name in '''
        AddDictionaries
        AddTranslation
        EditDictionaries
        MoveDictionariesDown
        MoveDictionariesUp
        RemoveDictionaries
        SaveDictionaries
        Undo
    '''.split():
        action = getattr(widget_test.widget, 'action_' + action_name)
        enabled = action.isEnabled()
        msg = '%s is %s' % (action_name, 'enabled' if enabled else 'disabled')
        assert enabled == (action_name in enabled_actions), msg
FILE_PICKER_SAVE_FILTER = 'Dictionaries (*.bad *.json);; BAD dictionaries (*.bad);; JSON dictionaries (*.json)'
def test_widget_save_copy_1(widget_test):
    # Docstring is fixture data (initial state); keep verbatim.
    '''
    ☑ ★ favorite.json
    ☑ 🗘 loading.json
    ☑ � normal.json
    ☑ 🛇 read-only.ro
    ☑ ! invalid.bad
    '''
    # Setup.
    # One target name per save dialog invocation; the empty string
    # simulates the user cancelling the dialog for `normal.json`.
    copy_names = (
        expand_path('favorite_copy.json'),
        '',
        expand_path('read-only_copy.json'),
    )
    widget_test.file_dialog.getSaveFileName.side_effect = [
        [name]
        for name in copy_names
    ]
    steno_dict_copies = (
        mock.create_autospec(StenoDictionary),
        mock.create_autospec(StenoDictionary),
    )
    widget_test.create_dictionary.side_effect = steno_dict_copies
    # Execution.
    widget_test.select(range(5))
    widget_test.widget.action_CopyDictionaries.trigger()
    # Check: only the loaded dictionaries get a save dialog (loading and
    # invalid entries are skipped), and a cancelled dialog creates nothing.
    assert widget_test.file_dialog.mock_calls == [
        mock.call.getSaveFileName(
            parent=widget_test.widget,
            caption='Save a copy of %s as...' % name,
            directory=expand_path('%s - Copy.json' % Path(name).stem),
            filter=FILE_PICKER_SAVE_FILTER,
        )
        for name in ['favorite.json', 'normal.json', 'read-only.ro']
    ]
    assert widget_test.create_dictionary.mock_calls == [
        mock.call(name, threaded_save=False)
        for name in copy_names if name
    ]
    assert steno_dict_copies[0].mock_calls == [
        mock.call.update(widget_test.dictionaries.dicts[0]),
        mock.call.save(),
    ]
    assert steno_dict_copies[1].mock_calls == [
        mock.call.update(widget_test.dictionaries.dicts[2]),
        mock.call.save(),
    ]
def test_widget_save_merge_1(widget_test):
    # Docstring is fixture data (initial state); keep verbatim.
    '''
    ☑ ★ favorite.json
    ☑ 🗘 loading.json
    ☑ � normal.json
    ☑ 🛇 read-only.ro
    ☑ ! invalid.bad
    '''
    # Setup.
    merge_name = 'favorite + normal + read-only'
    widget_test.file_dialog.getSaveFileName.return_value = [expand_path('merge.json')]
    # Execution.
    widget_test.select(range(5))
    widget_test.widget.action_MergeDictionaries.trigger()
    # Check: one dialog for the merged name, one dictionary created.
    assert widget_test.file_dialog.mock_calls == [mock.call.getSaveFileName(
        parent=widget_test.widget,
        caption='Merge %s as...' % merge_name,
        directory=expand_path(merge_name + '.json'),
        filter=FILE_PICKER_SAVE_FILTER,
    )]
    assert widget_test.create_dictionary.mock_calls == [mock.call(expand_path('merge.json'), threaded_save=False)]
    # NOTE(review): updates happen in reverse order (dicts[2] first) —
    # presumably so earlier (higher priority) dictionaries win on
    # conflicting entries; confirm.
    steno_dict = widget_test.create_dictionary.return_value
    assert steno_dict.mock_calls == [
        mock.call.update(widget_test.dictionaries.dicts[2]),
        mock.call.update(widget_test.dictionaries.dicts[1]),
        mock.call.update(widget_test.dictionaries.dicts[0]),
        mock.call.save(),
    ]
def test_widget_save_merge_2(widget_test):
    # Docstring is fixture data (initial state); keep verbatim.
    '''
    ☑ ★ favorite.json
    ☑ 🗘 loading.json
    ☑ � normal.json
    ☑ 🛇 read-only.ro
    ☑ ! invalid.bad
    '''
    # Setup: the user cancels the save dialog (empty name returned).
    merge_name = 'favorite + normal'
    widget_test.file_dialog.getSaveFileName.return_value = ['']
    # Execution.
    widget_test.select([0, 2])
    widget_test.widget.action_MergeDictionaries.trigger()
    # Check: the dialog is shown, but nothing is created on cancel.
    assert widget_test.file_dialog.mock_calls == [mock.call.getSaveFileName(
        parent=widget_test.widget,
        caption='Merge %s as...' % merge_name,
        directory=expand_path(merge_name + '.json'),
        filter=FILE_PICKER_SAVE_FILTER,
    )]
    assert widget_test.create_dictionary.mock_calls == []
| 30,814
|
Python
|
.py
| 908
| 26.55837
| 114
| 0.604169
|
openstenoproject/plover
| 2,318
| 281
| 179
|
GPL-2.0
|
9/5/2024, 5:12:22 PM (Europe/Amsterdam)
|
18,667
|
key_combo.py
|
openstenoproject_plover/plover/key_combo.py
|
# -*- coding: utf-8 -*-
import re
# Mapping of "standard" keynames (derived from X11 keysym names) to Unicode.
KEYNAME_TO_CHAR = {
# Generated using:
#
# from Xlib import XK
# from plover.oslayer.linux.keyboardcontrol_x11 import keysym_to_string
# for kn, ks in sorted({
# name[3:].lower(): getattr(XK, name)
# for name in sorted(dir(XK))
# if name.startswith('XK_')
# }.items()):
# us = keysym_to_string(ks)
# if us == kn or not us:
# continue
# print ' %-20r: %8r, # %s' % (kn, us, us)
'aacute' : '\xe1', # á
'acircumflex' : '\xe2', # â
'acute' : '\xb4', # ´
'adiaeresis' : '\xe4', # ä
'ae' : '\xe6', # æ
'agrave' : '\xe0', # à
'ampersand' : '&', # &
'apostrophe' : "'", # '
'aring' : '\xe5', # å
'asciicircum' : '^', # ^
'asciitilde' : '~', # ~
'asterisk' : '*', # *
'at' : '@', # @
'atilde' : '\xe3', # ã
'backslash' : '\\', # \
'bar' : '|', # |
'braceleft' : '{', # {
'braceright' : '}', # }
'bracketleft' : '[', # [
'bracketright' : ']', # ]
'brokenbar' : '\xa6', # ¦
'ccedilla' : '\xe7', # ç
'cedilla' : '\xb8', # ¸
'cent' : '\xa2', # ¢
'clear' : '\x0b', #
'colon' : ':', # :
'comma' : ',', # ,
'copyright' : '\xa9', # ©
'currency' : '\xa4', # ¤
'degree' : '\xb0', # °
'diaeresis' : '\xa8', # ¨
'division' : '\xf7', # ÷
'dollar' : '$', # $
'eacute' : '\xe9', # é
'ecircumflex' : '\xea', # ê
'ediaeresis' : '\xeb', # ë
'egrave' : '\xe8', # è
'equal' : '=', # =
'eth' : '\xf0', # ð
'exclam' : '!', # !
'exclamdown' : '\xa1', # ¡
'grave' : '`', # `
'greater' : '>', # >
'guillemotleft' : '\xab', # «
'guillemotright' : '\xbb', # »
'hyphen' : '\xad', #
'iacute' : '\xed', # í
'icircumflex' : '\xee', # î
'idiaeresis' : '\xef', # ï
'igrave' : '\xec', # ì
'less' : '<', # <
'macron' : '\xaf', # ¯
'masculine' : '\xba', # º
'minus' : '-', # -
'mu' : '\xb5', # µ
'multiply' : '\xd7', # ×
'nobreakspace' : '\xa0', #
'notsign' : '\xac', # ¬
'ntilde' : '\xf1', # ñ
'numbersign' : '#', # #
'oacute' : '\xf3', # ó
'ocircumflex' : '\xf4', # ô
'odiaeresis' : '\xf6', # ö
'ograve' : '\xf2', # ò
'onehalf' : '\xbd', # ½
'onequarter' : '\xbc', # ¼
'onesuperior' : '\xb9', # ¹
'ooblique' : '\xd8', # Ø
'ordfeminine' : '\xaa', # ª
'oslash' : '\xf8', # ø
'otilde' : '\xf5', # õ
'paragraph' : '\xb6', # ¶
'parenleft' : '(', # (
'parenright' : ')', # )
'percent' : '%', # %
'period' : '.', # .
'periodcentered' : '\xb7', # ·
'plus' : '+', # +
'plusminus' : '\xb1', # ±
'question' : '?', # ?
'questiondown' : '\xbf', # ¿
'quotedbl' : '"', # "
'quoteleft' : '`', # `
'quoteright' : "'", # '
'registered' : '\xae', # ®
'return' : '\r', #
'section' : '\xa7', # §
'semicolon' : ';', # ;
'slash' : '/', # /
'space' : ' ', #
'ssharp' : '\xdf', # ß
'sterling' : '\xa3', # £
'tab' : '\t', #
'thorn' : '\xfe', # þ
'threequarters' : '\xbe', # ¾
'threesuperior' : '\xb3', # ³
'twosuperior' : '\xb2', # ²
'uacute' : '\xfa', # ú
'ucircumflex' : '\xfb', # û
'udiaeresis' : '\xfc', # ü
'ugrave' : '\xf9', # ù
'underscore' : '_', # _
'yacute' : '\xfd', # ý
'ydiaeresis' : '\xff', # ÿ
'yen' : '\xa5', # ¥
}
# Digits and lowercase letters are their own key names.
for _char in '0123456789abcdefghijklmnopqrstuvwxyz':
    KEYNAME_TO_CHAR[_char] = _char

# Reverse mapping: Unicode character back to key name. When several key
# names map to the same character, the last one defined wins.
CHAR_TO_KEYNAME = {
    character: keyname
    for keyname, character in KEYNAME_TO_CHAR.items()
}
# Tokenizer: whitespace runs, key names (optionally followed by an opening
# parenthesis, i.e. a modifier group start), or any single character.
_SPLIT_RX = re.compile(r'(\s+|(?:\w+(?:\s*\()?)|.)')


def parse_key_combo(combo_string, key_name_to_key_code=None):
    '''Parse a key combo string into a list of key events.

    Returns a list of (key_code, pressed) tuples; `name(...)` groups hold
    the key down around their contents. `key_name_to_key_code` maps a
    lowercase key name to a key code (identity by default); it may return
    None to signal an unknown key.

    Raises ValueError for unknown/already pressed keys, and SyntaxError
    for unbalanced parentheses or invalid characters.
    '''
    if key_name_to_key_code is None:
        key_name_to_key_code = lambda key_name: key_name
    events = []
    pressed_stack = []
    token = None
    pos = 0

    def _fail(exc_class, details):
        # Highlight the offending token inside the original string.
        marked = (combo_string[:pos] + '[' + token + ']'
                  + combo_string[pos + len(token):])
        raise exc_class('%s in "%s"' % (details, marked))

    for token in _SPLIT_RX.split(combo_string):
        if not token:
            continue
        if token.isspace():
            # Nothing to do, but `pos` must still advance below.
            pass
        elif re.match(r'\w', token):
            if token.endswith('('):
                # Start of a modifier group: press, release on ')'.
                key_name = token[:-1].rstrip().lower()
                release = False
            else:
                key_name = token.lower()
                release = True
            key_code = key_name_to_key_code(key_name)
            if key_code is None:
                _fail(ValueError, 'unknown key')
            elif key_code in pressed_stack:
                _fail(ValueError, 'key "%s" already pressed' % key_name)
            events.append((key_code, True))
            if release:
                events.append((key_code, False))
            else:
                pressed_stack.append(key_code)
        elif token == ')':
            if not pressed_stack:
                _fail(SyntaxError, 'unbalanced ")"')
            events.append((pressed_stack.pop(), False))
        else:
            _fail(SyntaxError, 'invalid character "%s"' % token)
        pos += len(token)
    if pressed_stack:
        _fail(SyntaxError, 'unbalanced "("')
    return events
def add_modifiers_aliases(dictionary):
    '''Add aliases for common modifiers to a dictionary of key name to key code.

    - add `mod` as an alias for `mod_l` for `alt`, `control`, `shift` and `super`
    - add `command` and `windows` aliases for `super`
    - add `option` alias for `alt`
    '''
    alias_table = (
        ('control', ()),
        ('shift', ()),
        ('super', ('command', 'windows')),
        ('alt', ('option',)),
    )
    for name, extra_aliases in alias_table:
        code = dictionary[name + '_l']
        dictionary[name] = code
        for alias in extra_aliases:
            dictionary[alias] = code
| 7,359
|
Python
|
.py
| 197
| 30.406091
| 81
| 0.38368
|
openstenoproject/plover
| 2,318
| 281
| 179
|
GPL-2.0
|
9/5/2024, 5:12:22 PM (Europe/Amsterdam)
|
18,668
|
steno.py
|
openstenoproject_plover/plover/steno.py
|
# Copyright (c) 2010-2011 Joshua Harlan Lifton.
# See LICENSE.txt for details.
"""Generic stenography data models.
This module contains the following class:
Stroke -- A data model class that encapsulates a sequence of steno keys.
"""
from plover_stroke import BaseStroke
class Stroke(BaseStroke):
    """A standardized data model for stenotype machine strokes.

    This class standardizes the representation of a stenotype chord. A stenotype
    chord can be any sequence of stenotype keys that can be simultaneously
    pressed. Nearly all stenotype machines offer the same set of keys that can
    be combined into a chord, though some variation exists due to duplicate
    keys. This class accounts for such duplication, imposes the standard
    stenographic ordering on the keys, and combines the keys into a single
    string (called RTFCRE for historical reasons).
    """

    # Both are populated by `setup()` once the key layout is known.
    PREFIX_STROKE = None
    UNDO_STROKE = None

    @classmethod
    def setup(cls, keys, implicit_hyphen_keys, number_key,
              numbers, feral_number_key, undo_stroke):
        # Configure the stroke system (key set, ordering, number handling).
        if number_key is None:
            # No number key implies no number mappings either.
            assert not numbers
            numbers = None
        super().setup(keys, implicit_hyphen_keys, number_key, numbers, feral_number_key)
        # NOTE(review): a fresh subclass capturing the current `_helper` is
        # created on each setup — presumably so strokes built before a
        # system switch keep their original layout; confirm.
        cls._class = type(cls.__name__, (cls,), {'_helper': cls._helper})
        cls._class._class = cls._class
        cls._class.PREFIX_STROKE = cls.PREFIX_STROKE = cls.from_integer(0)
        cls._class.UNDO_STROKE = cls.UNDO_STROKE = cls.from_steno(undo_stroke)

    @classmethod
    def from_steno(cls, steno):
        # Build a stroke from its RTFCRE string representation.
        return int.__new__(cls._class, cls._helper.stroke_from_steno(steno))

    @classmethod
    def from_keys(cls, keys):
        # Build a stroke from an iterable of key names.
        return int.__new__(cls._class, cls._helper.stroke_from_keys(keys))

    @classmethod
    def from_integer(cls, integer):
        # Build a stroke from its integer (bitmask) representation.
        return int.__new__(cls._class, cls._helper.stroke_from_int(integer))

    @classmethod
    def normalize_stroke(cls, steno, strict=True):
        # Canonicalize a single stroke's steno; with strict=False, return
        # the input unchanged instead of raising on invalid steno.
        try:
            return cls._helper.normalize_stroke(steno)
        except ValueError:
            if strict:
                raise
            return steno

    @classmethod
    def normalize_steno(cls, steno, strict=True):
        # Canonicalize a multi-stroke steno string into a tuple; with
        # strict=False, fall back to a plain '/'-split on error.
        try:
            return cls._helper.normalize_steno(steno)
        except ValueError:
            if strict:
                raise
            return tuple(steno.split('/'))

    @classmethod
    def steno_to_sort_key(cls, steno, strict=True):
        # Sort key for a steno string; with strict=False, invalid steno
        # sorts first using a NUL-prefixed UTF-8 encoding.
        try:
            return cls._helper.steno_to_sort_key(steno)
        except ValueError:
            if strict:
                raise
            return b'\x00\x00' + steno.encode('utf-8')

    def __new__(cls, value):
        # Accept any supported representation (steno string, keys, int...).
        return int.__new__(cls._class, cls._helper.stroke_from_any(value))

    @property
    def steno_keys(self):
        # Key names composing this stroke, as a list.
        return list(self.keys())

    @property
    def rtfcre(self):
        # RTFCRE string representation of this stroke.
        return self._helper.stroke_to_steno(self)

    @property
    def is_correction(self):
        # True if this stroke is the configured undo stroke.
        return int(self) == int(self.UNDO_STROKE)

    def __str__(self):
        prefix = '*' if self.is_correction else ''
        return f'{prefix}Stroke({self.rtfcre} : {self.steno_keys})'

    __repr__ = __str__
# Convenience module-level aliases for the Stroke classmethods.
normalize_stroke = Stroke.normalize_stroke
normalize_steno = Stroke.normalize_steno
steno_to_sort_key = Stroke.steno_to_sort_key
def sort_steno_strokes(strokes_list):
    '''Return suggestions, sorted by fewest strokes, then fewest keys.'''
    def _cost(strokes):
        # Primary: number of strokes; secondary: total key count.
        return len(strokes), sum(len(stroke) for stroke in strokes)
    return sorted(strokes_list, key=_cost)
| 3,491
|
Python
|
.py
| 84
| 34.214286
| 88
| 0.659763
|
openstenoproject/plover
| 2,318
| 281
| 179
|
GPL-2.0
|
9/5/2024, 5:12:22 PM (Europe/Amsterdam)
|
18,669
|
suggestions.py
|
openstenoproject_plover/plover/suggestions.py
|
import collections
from plover.steno import sort_steno_strokes
# A suggested dictionary entry: translation text + candidate strokes.
Suggestion = collections.namedtuple('Suggestion', 'text steno_list')


class Suggestions:
    '''Reverse-lookup helper: find strokes producing a given translation.'''

    def __init__(self, dictionary):
        self.dictionary = dictionary

    def find(self, translation):
        '''Return a list of Suggestion for *translation*.

        Each candidate spelling (as-is, space-stripped, lowercased, and
        case-insensitive matches from the dictionary) is tried against
        every dictionary-language template below.
        '''
        mods = [
            '%s', # Same
            '{^%s}', # Prefix
            '{^}%s',
            '{^%s^}', # Infix
            '{^}%s{^}',
            '{%s^}', # Suffix
            '%s{^}',
            '{&%s}', # Fingerspell
            '{#%s}', # Command
        ]
        candidates = {translation}
        # Only strip spaces, so patterns with \n or \t are correctly handled.
        stripped = translation.strip(' ')
        if stripped and stripped != translation:
            candidates.add(stripped)
        lowered = translation.lower()
        if lowered != translation:
            candidates.add(lowered)
        similar_words = self.dictionary.casereverse_lookup(translation.lower())
        if similar_words:
            candidates |= set(similar_words)
        suggestions = []
        for candidate in candidates:
            for modded_translation in (mod % candidate for mod in mods):
                strokes_list = self.dictionary.reverse_lookup(modded_translation)
                if not strokes_list:
                    continue
                suggestions.append(
                    Suggestion(modded_translation, sort_steno_strokes(strokes_list)))
        return suggestions
| 1,675
|
Python
|
.py
| 39
| 31.974359
| 81
| 0.592113
|
openstenoproject/plover
| 2,318
| 281
| 179
|
GPL-2.0
|
9/5/2024, 5:12:22 PM (Europe/Amsterdam)
|
18,670
|
config.py
|
openstenoproject_plover/plover/config.py
|
# Copyright (c) 2010-2011 Joshua Harlan Lifton.
# See LICENSE.txt for details.
"""Configuration management."""
from collections import ChainMap, namedtuple, OrderedDict
import configparser
import json
import re
from plover.exception import InvalidConfigurationError
from plover.machine.keymap import Keymap
from plover.registry import registry
from plover.resource import resource_update
from plover.misc import boolean, expand_path, shorten_path
from plover import log
# General configuration sections, options and defaults.
MACHINE_CONFIG_SECTION = 'Machine Configuration'
LEGACY_DICTIONARY_CONFIG_SECTION = 'Dictionary Configuration'
LOGGING_CONFIG_SECTION = 'Logging Configuration'
OUTPUT_CONFIG_SECTION = 'Output Configuration'
DEFAULT_UNDO_LEVELS = 100
MINIMUM_UNDO_LEVELS = 1
DEFAULT_TIME_BETWEEN_KEY_PRESSES = 0
MINIMUM_TIME_BETWEEN_KEY_PRESSES = 0
DEFAULT_SYSTEM_NAME = 'English Stenotype'
SYSTEM_CONFIG_SECTION = 'System: %s'
SYSTEM_KEYMAP_OPTION = 'keymap[%s]'
class DictionaryConfig(namedtuple('DictionaryConfig', 'path enabled')):
    '''Immutable dictionary entry: absolute path + enabled flag.'''

    def __new__(cls, path, enabled=True):
        # Paths are always stored in expanded (absolute) form.
        return super().__new__(cls, expand_path(path), enabled)

    @property
    def short_path(self):
        # Shortened (e.g. user-relative) form for display/serialization.
        return shorten_path(self.path)

    def to_dict(self):
        # Note: do not use _asdict because of
        # https://bugs.python.org/issue24931
        return {
            'path': self.short_path,
            'enabled': self.enabled,
        }

    def replace(self, **kwargs):
        '''Return a copy with the given fields replaced.'''
        return self._replace(**kwargs)

    @staticmethod
    def from_dict(d):
        '''Build an instance from a `to_dict()`-style mapping.'''
        return DictionaryConfig(**d)

    def __repr__(self):
        return 'DictionaryConfig(%r, %r)' % (self.short_path, self.enabled)
ConfigOption = namedtuple('ConfigOption', '''
name default
getter setter
validate full_key
''')
class InvalidConfigOption(ValueError):
    '''Raised when a raw option value fails validation.

    Carries both the offending raw value and a usable fallback value.
    '''

    def __init__(self, raw_value, fixed_value, message=None):
        super().__init__(raw_value)
        self.raw_value = raw_value
        self.fixed_value = fixed_value
        self.message = message

    def __str__(self):
        if self.message:
            return self.message
        return repr(self.raw_value)
def raw_option(name, default, section, option, validate):
    '''Build a ConfigOption stored verbatim in the INI file.'''
    ini_option = option or name

    def _get(config, key):
        return config._config[section][ini_option]

    def _put(config, key, value):
        config._set(section, ini_option, value)

    def _default(config, key):
        return default

    return ConfigOption(name, _default, _get, _put, validate, None)
def json_option(name, default, section, option, validate):
    '''Build a ConfigOption serialized as JSON in the INI file.

    Note: `default` is already a callable(config, key) here, unlike the
    plain value taken by the other factories.
    '''
    ini_option = option or name

    def _get(config, key):
        raw = config._config[section][ini_option]
        try:
            return json.loads(raw)
        except json.JSONDecodeError as e:
            raise InvalidConfigOption(raw, default) from e

    def _put(config, key, value):
        if isinstance(value, set):
            # JSON does not support sets.
            value = list(sorted(value))
        config._set(section, ini_option,
                    json.dumps(value, sort_keys=True, ensure_ascii=False))

    return ConfigOption(name, default, _get, _put, validate, None)
def int_option(name, default, minimum, maximum, section, option=None):
    '''Build an integer ConfigOption validated against [minimum, maximum].

    A None bound means unbounded on that side.
    '''
    ini_option = option or name

    def _get(config, key):
        return config._config[section][ini_option]

    def _put(config, key, value):
        config._set(section, ini_option, str(value))

    def _validate(config, key, value):
        try:
            value = int(value)
        except ValueError as e:
            raise InvalidConfigOption(value, default) from e
        too_small = minimum is not None and value < minimum
        too_big = maximum is not None and value > maximum
        if too_small or too_big:
            message = '%s not in [%s, %s]' % (value, minimum or '-∞', maximum or '∞')
            raise InvalidConfigOption(value, default, message)
        return value

    return ConfigOption(name, lambda c, k: default, _get, _put, _validate, None)
def boolean_option(name, default, section, option=None):
    '''Build a boolean ConfigOption (parsed with `boolean`).'''
    ini_option = option or name

    def _get(config, key):
        return config._config[section][ini_option]

    def _put(config, key, value):
        config._set(section, ini_option, str(value))

    def _validate(config, key, value):
        try:
            return boolean(value)
        except ValueError as e:
            raise InvalidConfigOption(value, default) from e

    return ConfigOption(name, lambda c, k: default, _get, _put, _validate, None)
def choice_option(name, choices, section, option=None):
    '''Build a ConfigOption restricted to `choices` (first is the default).'''
    default = choices[0]

    def _validate(config, key, value):
        if value in choices:
            return value
        raise InvalidConfigOption(value, default)

    return raw_option(name, default, section, option, _validate)
def plugin_option(name, plugin_type, default, section, option=None):
    '''Build a ConfigOption naming a registered plugin of `plugin_type`.

    Validation canonicalizes the value to the plugin's registered name.
    '''
    def _validate(config, key, value):
        try:
            return registry.get_plugin(plugin_type, value).name
        except KeyError as e:
            raise InvalidConfigOption(value, default) from e

    return raw_option(name, default, section, option, _validate)
def opacity_option(name, section, option=None):
    '''Build a 0-100 integer ConfigOption (default 100) for window opacity.'''
    return int_option(name, 100, 0, 100, section, option)
def path_option(name, default, section, option=None):
    '''Build a filesystem-path ConfigOption.

    Stored shortened in the INI file, returned expanded to the caller.
    '''
    ini_option = option or name

    def _get(config, key):
        return expand_path(config._config[section][ini_option])

    def _put(config, key, value):
        config._set(section, ini_option, shorten_path(value))

    def _validate(config, key, value):
        if not isinstance(value, str):
            raise InvalidConfigOption(value, default)
        return value

    return ConfigOption(name, lambda c, k: default, _get, _put, _validate, None)
def enabled_extensions_option():
    '''Build the JSON-backed option listing enabled extension plugins.'''
    def _validate(config, key, value):
        if not isinstance(value, (list, set, tuple)):
            raise InvalidConfigOption(value, ())
        return set(value)

    return json_option('enabled_extensions', lambda c, k: set(),
                       'Plugins', 'enabled_extensions', _validate)
def machine_specific_options():
    '''Option for per-machine settings, keyed by (name, machine_type).'''
    def full_key(config, key):
        # Scope the option by machine type.
        if isinstance(key, tuple):
            assert len(key) == 2
            return key
        return (key, config['machine_type'])
    def default(config, key):
        # Defaults come from the machine plugin's declared option info:
        # {option name: (default, converter)}.
        machine_class = registry.get_plugin('machine', key[1]).obj
        return {
            name: params[0]
            for name, params in machine_class.get_option_info().items()
        }
    def getter(config, key):
        # Per-machine options live in an INI section named after the machine.
        return config._config[key[1]]
    def setter(config, key, value):
        config._config[key[1]] = value
    def validate(config, key, raw_options):
        # Convert each raw option with the plugin-declared converter,
        # falling back to the declared default on missing/invalid values;
        # if anything was invalid, raise with the partially-fixed mapping
        # as the usable fallback value.
        if not isinstance(raw_options, (dict, configparser.SectionProxy)):
            raise InvalidConfigOption(raw_options, default(config, key))
        machine_options = OrderedDict()
        invalid_options = OrderedDict()
        machine_class = registry.get_plugin('machine', key[1]).obj
        for name, params in sorted(machine_class.get_option_info().items()):
            fallback, convert = params
            try:
                raw_value = raw_options[name]
            except KeyError:
                value = fallback
            else:
                try:
                    value = convert(raw_value)
                except ValueError:
                    invalid_options[name] = raw_value
                    value = fallback
            machine_options[name] = value
        if invalid_options:
            raise InvalidConfigOption(invalid_options, machine_options)
        return machine_options
    return ConfigOption('machine_specific_options', default, getter, setter, validate, full_key)
def system_keymap_option():
    '''Option for the keymap, keyed by (name, system_name, machine_type).'''
    def full_key(config, key):
        # Scope the option by both system and machine.
        if isinstance(key, tuple):
            assert len(key) == 3
            return key
        return (key, config['system_name'], config['machine_type'])
    def location(config, key):
        # INI (section, option) where the keymap is stored.
        return SYSTEM_CONFIG_SECTION % key[1], SYSTEM_KEYMAP_OPTION % key[2]
    def build_keymap(config, key, mappings=None):
        # Build a Keymap for the (system, machine) pair; with no explicit
        # mappings, use the system's default for this machine, falling
        # back to the machine's KEYMAP_MACHINE_TYPE when available.
        system = registry.get_plugin('system', key[1]).obj
        machine_class = registry.get_plugin('machine', key[2]).obj
        keymap = Keymap(machine_class.get_keys(), system.KEYS + machine_class.get_actions())
        if mappings is None:
            mappings = system.KEYMAPS.get(key[2])
            if mappings is None:
                if machine_class.KEYMAP_MACHINE_TYPE is not None:
                    # Try fallback.
                    return build_keymap(config, (key[0], key[1], machine_class.KEYMAP_MACHINE_TYPE))
                # No fallback...
                mappings = {}
        keymap.set_mappings(mappings)
        return keymap
    def default(config, key):
        return build_keymap(config, key)
    def getter(config, key):
        section, option = location(config, key)
        return config._config[section][option]
    def setter(config, key, keymap):
        section, option = location(config, key)
        config._set(section, option, str(keymap))
    def validate(config, key, value):
        try:
            return build_keymap(config, key, value)
        except (TypeError, ValueError) as e:
            raise InvalidConfigOption(value, default(config, key)) from e
    return ConfigOption('system_keymap', default, getter, setter, validate, full_key)
def dictionaries_option():
    '''Option for the dictionaries list, keyed by (name, system_name).'''
    def full_key(config, key):
        # Scope the option by system.
        if isinstance(key, tuple):
            assert len(key) == 2
            return key
        return (key, config['system_name'])
    def location(config, key):
        # INI (section, option) where the list is stored.
        return (
            SYSTEM_CONFIG_SECTION % key[1],
            'dictionaries',
        )
    def default(config, key):
        # Default to the system plugin's dictionaries.
        system = registry.get_plugin('system', key[1]).obj
        return [DictionaryConfig(path) for path in system.DEFAULT_DICTIONARIES]
    def legacy_getter(config):
        # Support the pre-JSON format: `dictionary_fileN` options in the
        # legacy section, higher-numbered entries first.
        options = config._config[LEGACY_DICTIONARY_CONFIG_SECTION].items()
        return [
            {'path': value}
            for name, value in reversed(sorted(options))
            if re.match(r'dictionary_file\d*$', name) is not None
        ]
    def getter(config, key):
        section, option = location(config, key)
        value = config._config.get(section, option, fallback=None)
        if value is None:
            # Unset in the new format: fall back to the legacy section.
            return legacy_getter(config)
        return json.loads(value)
    def setter(config, key, dictionaries):
        section, option = location(config, key)
        config._set(section, option, json.dumps([
            d.to_dict() for d in dictionaries
        ], sort_keys=True))
        # Writing the new format obsoletes the legacy section.
        config._config.remove_section(LEGACY_DICTIONARY_CONFIG_SECTION)
    def validate(config, key, value):
        # Accept DictionaryConfig instances, plain path strings, or dicts.
        dictionaries = []
        for d in value:
            if isinstance(d, DictionaryConfig):
                pass
            elif isinstance(d, str):
                d = DictionaryConfig(d)
            else:
                d = DictionaryConfig.from_dict(d)
            dictionaries.append(d)
        return dictionaries
    return ConfigOption('dictionaries', default, getter, setter, validate, full_key)
class Config:
    '''Plover's configuration store.

    Wraps a `configparser.RawConfigParser`, exposing options as a mapping
    with typed, validated values and a per-key cache. Keys are plain
    option names, or tuples for options scoped by machine/system (see
    each option's `full_key`).
    '''

    def __init__(self, path=None):
        self._config = None  # underlying RawConfigParser, set by clear()
        self._cache = {}     # full key -> validated value
        # A convenient place for other code to store a file name.
        self.path = path
        self.clear()

    def load(self):
        '''(Re)load the configuration from `self.path`.

        Raises InvalidConfigurationError on parse errors.
        '''
        self.clear()
        with open(self.path, encoding='utf-8') as fp:
            try:
                self._config.read_file(fp)
            except configparser.Error as e:
                raise InvalidConfigurationError(str(e))

    def clear(self):
        '''Reset to an empty configuration, dropping the value cache.'''
        self._config = configparser.RawConfigParser()
        self._cache.clear()

    def save(self):
        '''Atomically write the configuration back to `self.path`.'''
        with resource_update(self.path) as temp_path:
            with open(temp_path, mode='w', encoding='utf-8') as fp:
                self._config.write(fp)

    def _set(self, section, option, value):
        # Set a raw option, creating its section on demand.
        if not self._config.has_section(section):
            self._config.add_section(section)
        self._config.set(section, option, value)

    # Note: order matters, e.g. machine_type comes before
    # machine_specific_options and system_keymap because
    # the latter depend on the former.
    _OPTIONS = OrderedDict((opt.name, opt) for opt in [
        # Output.
        choice_option('space_placement', ('Before Output', 'After Output'), OUTPUT_CONFIG_SECTION),
        boolean_option('start_attached', False, OUTPUT_CONFIG_SECTION),
        boolean_option('start_capitalized', False, OUTPUT_CONFIG_SECTION),
        int_option('undo_levels', DEFAULT_UNDO_LEVELS, MINIMUM_UNDO_LEVELS, None, OUTPUT_CONFIG_SECTION),
        int_option('time_between_key_presses', DEFAULT_TIME_BETWEEN_KEY_PRESSES, MINIMUM_TIME_BETWEEN_KEY_PRESSES, None, OUTPUT_CONFIG_SECTION),
        # Logging.
        path_option('log_file_name', expand_path('strokes.log'), LOGGING_CONFIG_SECTION, 'log_file'),
        boolean_option('enable_stroke_logging', False, LOGGING_CONFIG_SECTION),
        boolean_option('enable_translation_logging', False, LOGGING_CONFIG_SECTION),
        # GUI.
        boolean_option('start_minimized', False, 'Startup', 'Start Minimized'),
        boolean_option('show_stroke_display', False, 'Stroke Display', 'show'),
        boolean_option('show_suggestions_display', False, 'Suggestions Display', 'show'),
        opacity_option('translation_frame_opacity', 'Translation Frame', 'opacity'),
        boolean_option('classic_dictionaries_display_order', False, 'GUI'),
        # Plugins.
        enabled_extensions_option(),
        # Machine.
        boolean_option('auto_start', False, MACHINE_CONFIG_SECTION),
        plugin_option('machine_type', 'machine', 'Keyboard', MACHINE_CONFIG_SECTION),
        machine_specific_options(),
        # System.
        plugin_option('system_name', 'system', DEFAULT_SYSTEM_NAME, 'System', 'name'),
        system_keymap_option(),
        dictionaries_option(),
    ])

    def _lookup(self, key):
        # Resolve `key` to its full (scoped) form and its ConfigOption.
        name = key[0] if isinstance(key, tuple) else key
        opt = self._OPTIONS[name]
        if opt.full_key is not None:
            key = opt.full_key(self, key)
        return key, opt

    def __getitem__(self, key):
        '''Return the validated value for `key`, caching the result.

        Unset options yield their default; invalid raw values are logged
        and replaced by the validator's fixed value.
        '''
        key, opt = self._lookup(key)
        if key in self._cache:
            return self._cache[key]
        try:
            value = opt.validate(self, key, opt.getter(self, key))
        except (configparser.NoOptionError, KeyError):
            value = opt.default(self, key)
        except InvalidConfigOption as e:
            log.error('invalid value for %r option', opt.name, exc_info=True)
            value = e.fixed_value
        self._cache[key] = value
        return value

    def __setitem__(self, key, value):
        '''Validate and set the value for `key`, updating the cache.'''
        key, opt = self._lookup(key)
        # Fix: pass the Config instance to the validator (it was passed
        # the raw parser before), for consistency with __getitem__ and
        # update() — validators expect the Config as first argument.
        value = opt.validate(self, key, value)
        opt.setter(self, key, value)
        self._cache[key] = value

    def as_dict(self):
        '''Return a {name: value} snapshot of every option.'''
        return {opt.name: self[opt.name] for opt in self._OPTIONS.values()}

    def update(self, **kwargs):
        '''Validate and apply several options at once.

        All values are validated against a scratch overlay first, so a
        validation failure leaves the configuration untouched.
        '''
        new_settings = []
        new_config = ChainMap({}, self)
        for opt in self._OPTIONS.values():
            if opt.name in kwargs:
                key = opt.name
                if opt.full_key is not None:
                    key = opt.full_key(new_config, key)
                value = opt.validate(new_config, key, kwargs[opt.name])
                new_settings.append((opt, key, value))
                new_config[opt.name] = value
        for opt, key, value in new_settings:
            opt.setter(self, key, value)
            self._cache[key] = value
| 15,574
|
Python
|
.py
| 357
| 34.848739
| 144
| 0.634068
|
openstenoproject/plover
| 2,318
| 281
| 179
|
GPL-2.0
|
9/5/2024, 5:12:22 PM (Europe/Amsterdam)
|
18,671
|
exception.py
|
openstenoproject_plover/plover/exception.py
|
# Copyright (c) 2011 Joshua Harlan Lifton.
# See LICENSE.txt for details.
"""Custom exceptions used by Plover.
The exceptions in this module are typically caught in the main GUI
loop and displayed to the user as an alert dialog.
"""
class InvalidConfigurationError(Exception):
    """Raised when there is something wrong in the configuration."""
class DictionaryLoaderException(Exception):
    """Dictionary file could not be loaded."""

    def __init__(self, path, exception):
        super().__init__(path, exception)
        # Path of the dictionary that failed to load.
        self.path = path
        # The original underlying exception.
        self.exception = exception

    def __str__(self):
        return 'loading dictionary `%s` failed: %s' % (self.path, self.exception)
| 697
|
Python
|
.py
| 17
| 36.529412
| 81
| 0.71471
|
openstenoproject/plover
| 2,318
| 281
| 179
|
GPL-2.0
|
9/5/2024, 5:12:22 PM (Europe/Amsterdam)
|
18,672
|
formatting.py
|
openstenoproject_plover/plover/formatting.py
|
# Copyright (c) 2010-2011 Joshua Harlan Lifton.
# See LICENSE.txt for details.
"""This module converts translations to printable text.
This module defines and implements plover's custom dictionary language.
"""
from enum import Enum
from os.path import commonprefix
from collections import namedtuple
import re
import string
from plover.registry import registry
Case = Enum('case', ((c, c.lower()) for c in '''
CAP_FIRST_WORD
LOWER
LOWER_FIRST_CHAR
TITLE
UPPER
UPPER_FIRST_WORD
'''.split()))
SPACE = ' '
META_ATTACH_FLAG = '^'
META_CARRY_CAPITALIZATION = '~|'
META_GLUE_FLAG = '&'
META_ESCAPE = '\\'
RE_META_ESCAPE = '\\\\'
META_START = '{'
META_END = '}'
META_ESC_START = META_ESCAPE + META_START
META_ESC_END = META_ESCAPE + META_END
def _build_metas_parser(supported_metas):
match_result_from_lastindex = [None]
regex_parts = []
for pattern, name, param in supported_metas:
num_prev_groups = len(match_result_from_lastindex)
if isinstance(pattern, tuple):
pattern = pattern[0].format(*[
'(?:' + re.escape(s) + ')'
for s in pattern[1:]
])
num_groups = re.compile(pattern).groups
if isinstance(name, int):
assert num_groups > 0
assert 0 <= name < num_groups
name += num_prev_groups
if isinstance(param, int):
assert num_groups > 0
assert 0 <= param < num_groups
param += num_prev_groups
if num_groups == 0:
num_groups = 1
else:
pattern = '?:' + pattern
groups = [n + num_prev_groups for n in range(num_groups)]
match_result_from_lastindex.extend(((name, param),) * len(groups))
regex_parts.append('(' + pattern + ')$')
regex = re.compile('|'.join(regex_parts), re.DOTALL | re.IGNORECASE)
def parse(meta):
m = regex.match(meta)
if m is None:
return None, None
meta_name, meta_param = match_result_from_lastindex[m.lastindex]
if isinstance(meta_name, int):
meta_name = m.group(meta_name)
if isinstance(meta_param, int):
meta_param = m.group(meta_param)
return [meta_name, meta_param]
return parse
# Note: declaration order matters!
# Alternatives are tried in order and the first full match wins
# (see _build_metas_parser), so more specific patterns come first.
_parse_meta = _build_metas_parser((
    # Generic {:macro:cmdline} syntax.
    (r':([^:]+):?(.*)', 0, 1),
    # Command.
    (r'PLOVER:(.*)', 'command', 0),
    # Key combination.
    (r'#(.*)', 'key_combo', 0),
    # Punctuation.
    (r'([,:;])', 'comma', 0),
    (r'([.!?])', 'stop' , 0),
    # Case.
    (r'-\|' , 'case' , Case.CAP_FIRST_WORD.value ),
    (r'>' , 'case' , Case.LOWER_FIRST_CHAR.value),
    (r'<' , 'case' , Case.UPPER_FIRST_WORD.value),
    (r'\*-\|', 'retro_case', Case.CAP_FIRST_WORD.value ),
    (r'\*>' , 'retro_case', Case.LOWER_FIRST_CHAR.value),
    (r'\*<' , 'retro_case', Case.UPPER_FIRST_WORD.value),
    # Explicit word end.
    (r'(\$)', 'word_end', 0),
    # Conditional.
    (r'=(.*)', 'if_next_matches', 0),
    # Mode.
    (r'MODE:(.*)', 'mode', 0),
    # Currency.
    (r'\*\((.*)\)', 'retro_currency', 0),
    # Glue.
    ((r'{0}(.*)', META_GLUE_FLAG), 'glue' , 0),
    # Carry capitalization.
    ((r'({0}?{1}.*{0}?)', META_ATTACH_FLAG, META_CARRY_CAPITALIZATION), 'carry_capitalize', 0),
    # Attach.
    ((r'({0}.*{0}?)', META_ATTACH_FLAG), 'attach', 0),
    ((r'(.*{0})', META_ATTACH_FLAG), 'attach', 0),
))
ATOM_RE = re.compile(r"""(?:%s%s|%s%s|[^%s%s])+ # One or more of anything
# other than unescaped { or }
#
| # or
#
%s(?:%s%s|%s%s|[^%s%s])*%s # Anything of the form {X}
# where X doesn't contain
# unescaped { or }
""" % (RE_META_ESCAPE, META_START, RE_META_ESCAPE,
META_END, META_START, META_END,
META_START,
RE_META_ESCAPE, META_START, RE_META_ESCAPE,
META_END, META_START, META_END,
META_END),
re.VERBOSE)
# A more human-readable version of the above RE is:
#
# re.compile(r"""(?:\\{|\\}|[^{}])+ # One or more of anything other than
# # unescaped { or }
# #
# | # or
# #
# {(?:\\{|\\}|[^{}])*} # Anything of the form {X} where X
# # doesn't contain unescaped { or }
# """, re.VERBOSE)
WORD_RX = re.compile(r'(?:\d+(?:[.,]\d+)+|[\'\w]+[-\w\']*|[^\w\s]+)\s*', re.UNICODE)
class RetroFormatter:
    """Helper for iterating over the result of previous translations.
    Support iterating over previous actions, text, fragments of text, or words:
    text : "Something something, blah! Blah: 45.8... (blah: foo42) "
    fragments: "__________-----------______------________-------_________"
    words : "__________---------__----__----__----____-____--_____----"
    """
    # A fragment: optional leading whitespace, one run of non-whitespace,
    # optional trailing whitespace; or a whitespace-only/empty string.
    FRAGMENT_RX = re.compile(r'\s*[^\s]+\s*|^\s*$')
    def __init__(self, previous_translations):
        # Sequence of past translations; each has a `formatting` list
        # of actions (see _Action).
        self.previous_translations = previous_translations
    def iter_last_actions(self):
        """Iterate over past actions (last first)."""
        for translation in reversed(self.previous_translations):
            yield from reversed(translation.formatting)
    def iter_last_fragments(self):
        """Iterate over last text fragments (last first).
        A text fragment is a series of non-whitespace characters
        followed by zero or more trailing whitespace characters.
        """
        # Number of characters still to be discarded because a later
        # action replaced them (prev_replace).
        replace = 0
        next_action = None
        current_fragment = ''
        for action in self.iter_last_actions():
            part = '' if action.text is None else action.text
            # Re-add the separator the (chronologically) next action
            # would have inserted before its own text.
            if next_action is not None and \
               next_action.text is not None and \
               not next_action.prev_attach:
                part += next_action.space_char
            if replace:
                # Ignore replaced content.
                if len(part) > replace:
                    part = part[:-replace]
                    replace = 0
                else:
                    replace -= len(part)
                    part = ''
            if part:
                # Find out new complete fragments.
                fragments = self.FRAGMENT_RX.findall(part + current_fragment)
                # All fragments but the first are complete; the first may
                # still be extended by earlier actions.
                yield from reversed(fragments[1:])
                current_fragment = fragments[0]
            replace += len(action.prev_replace)
            next_action = action
        # Don't forget to process the current (first) fragment.
        if not current_fragment.isspace():
            yield current_fragment.lstrip()
    def last_fragments(self, count=1):
        """Return the last <count> text fragments."""
        fragment_list = []
        for fragment in self.iter_last_fragments():
            fragment_list.insert(0, fragment)
            if len(fragment_list) == count:
                break
        return fragment_list
    def iter_last_words(self, strip=False, rx=WORD_RX):
        """Iterate over last words (last first).
        If <strip> is False, then trailing whitespace is included
        as part of each word (useful for calculating position).
        For <strip> to be properly supported when a custom regexp is
        passed as <rx>, then it must include trailing whitespace as
        part of each word.
        """
        assert not rx.groups
        for fragment in self.iter_last_fragments():
            # Split each fragment into words.
            for word in reversed(rx.findall(fragment)):
                yield word.rstrip() if strip else word
    def last_words(self, count=1, strip=False, rx=WORD_RX):
        """Return the last <count> words."""
        word_list = []
        for w in self.iter_last_words(strip=strip, rx=rx):
            word_list.insert(0, w)
            if len(word_list) == count:
                break
        return word_list
    def last_text(self, size):
        """Return the last <size> characters."""
        text = ''
        if not size:
            return text
        for fragment in self.iter_last_fragments():
            text = fragment + text
            if len(text) >= size:
                break
        return text[-size:]
class _Context(RetroFormatter):
    """Context for formatting translations to actions.
    Keep tracks of previous actions as well as newly translated actions,
    offer helpers for creating new actions and convenient access to past
    actions/text/words.
    """
    def __init__(self, previous_translations, last_action):
        super().__init__(previous_translations)
        assert last_action is not None
        # Most recent action (updated as new actions are translated).
        self.last_action = last_action
        # Actions produced during the current format() call.
        self.translated_actions = []
    def new_action(self):
        """Create a new action, only copying global state."""
        return self.last_action.new_state()
    def copy_last_action(self):
        """Create a new action, cloning the last action state."""
        return self.last_action.copy_state()
    def translated(self, action):
        """Mark an action as translated."""
        assert action is not None
        self.translated_actions.append(action)
        self.last_action = action
    def iter_last_actions(self):
        """Custom iterator with support for newly translated actions."""
        # Newly translated actions come first (they are the most recent).
        yield from reversed(self.translated_actions)
        yield from super().iter_last_actions()
class Formatter:
    """Convert translations into output.
    The main entry point for this class is format, which takes in translations
    to format. Output is sent via an output class passed in through set_output.
    Other than setting the output, the formatter class is stateless.
    The output class can define the following functions, which will be called
    if available:
    send_backspaces -- Takes a number and deletes back that many characters.
    send_string -- Takes a string and prints it verbatim.
    send_key_combination -- Takes a string the dictionary format for specifying
    key combinations and issues them.
    send_engine_command -- Takes a string which names the special command to
    execute.
    """
    output_type = namedtuple(
        'output', ['send_backspaces', 'send_string', 'send_key_combination',
                   'send_engine_command'])
    def __init__(self):
        self.set_output(None)
        self.spaces_after = False
        self.last_output_spaces_after = False
        self.start_capitalized = False
        self.start_attached = False
        self.space_char = ' '
        self._listeners = set()
    def add_listener(self, callback):
        """Add a listener for translation outputs.
        Arguments:
        callback -- A function that takes: a list of translations to undo, a
        list of new translations to render, and a translation that is the
        context for the new translations.
        """
        self._listeners.add(callback)
    def remove_listener(self, callback):
        """Remove a listener added by add_listener."""
        self._listeners.remove(callback)
    def set_output(self, output):
        """Set the output class."""
        # Missing methods on the output object are replaced by no-ops,
        # so each send_* can be called unconditionally.
        noop = lambda x: None
        output_type = self.output_type
        fields = output_type._fields
        self._output = output_type(*[getattr(output, f, noop) for f in fields])
    def set_space_placement(self, s):
        # Set whether spaces will be inserted
        # before the output or after the output
        self.spaces_after = bool(s == 'After Output')
    def last_action(self, previous_translations):
        # Return the last action of the last translation, or a synthetic
        # initial action reflecting the start_* settings.
        if previous_translations and previous_translations[-1].formatting:
            return previous_translations[-1].formatting[-1]
        return _Action(next_attach=self.start_attached or self.spaces_after,
                       next_case=Case.CAP_FIRST_WORD if self.start_capitalized else None,
                       space_char=self.space_char)
    def format(self, undo, do, prev):
        """Format the given translations.
        Arguments:
        undo -- A sequence of translations that should be undone. The
        formatting parameter of the translations will be used to undo the
        actions that were taken, if possible.
        do -- The new actions to format. The formatting attribute will be
        filled in with the result.
        prev -- The last translation before the new actions in do. This
        translation's formatting attribute provides the context for the new
        rendered translations. If there is no context then this may be None.
        """
        assert undo or do
        if do:
            last_action = self.last_action(prev)
            ctx = _Context(prev or (), last_action)
            for t in do:
                if t.english:
                    t.formatting = _translation_to_actions(t.english, ctx)
                else:
                    t.formatting = _raw_to_actions(t.rtfcre[0], ctx)
            new = ctx.translated_actions
        else:
            new = []
        old = [a for t in undo for a in t.formatting]
        # Take into account previous look-ahead actions.
        if prev:
            # First non-empty text of the new actions: this is what the
            # look-ahead actions' conditions must be re-evaluated against.
            text = ''
            for a in new:
                if a.text:
                    text = a.text
                    break
            tail = []
            for a in RetroFormatter(prev).iter_last_actions():
                if isinstance(a, _LookAheadAction):
                    old_a, new_a = a.action, a.update(text)
                    # Does the look-ahead action need updating?
                    if new_a == old_a:
                        # No, we're done.
                        break
                    # Re-render: undo the old selection (plus any empty
                    # actions in between) and emit the new one.
                    old[0:0] = [old_a] + tail
                    new[0:0] = [new_a] + tail
                    text = a.text
                    tail = []
                    # Updating this action can impact another
                    # previous look-ahead action, keep going.
                elif a.text is not None:
                    if a.text:
                        # Stop when encountering a non-empty text action.
                        break
                    tail.insert(0, a)
        # Figure out what really changed.
        min_length = min(len(old), len(new))
        for i in range(min_length):
            if old[i] != new[i]:
                break
        else:
            i = min_length
        if i > 0:
            # Identical prefix: no need to undo/redo those actions.
            optimized_away = old[:i]
            old = old[i:]
            new = new[i:]
        else:
            optimized_away = []
        # Notify listeners.
        for callback in self._listeners:
            callback(old, new)
        # Render output.
        if optimized_away:
            last_action = optimized_away[-1]
        elif prev and prev[-1].formatting:
            last_action = prev[-1].formatting[-1]
        else:
            last_action = None
        OutputHelper(self._output, self.last_output_spaces_after,
                     self.spaces_after).render(last_action, old, new)
        self.last_output_spaces_after = self.spaces_after
class TextFormatter:
    """Format a series of action into text."""
    def __init__(self, spaces_after):
        # True when the space placement mode is 'After Output'.
        self.spaces_after = spaces_after
        # Initial replaced text.
        self.replaced_text = ''
        # New appended text.
        self.appended_text = ''
        # Space pending after the last rendered action (spaces-after mode).
        self.trailing_space = ''
    def _render_action(self, action):
        # Fold one action's instructions into the replaced/appended state.
        if self.spaces_after and self.trailing_space:
            # Temporarily strip the pending trailing space.
            assert self.appended_text.endswith(self.trailing_space)
            self.appended_text = self.appended_text[:-len(self.trailing_space)]
        if action.prev_replace:
            replaced = len(action.prev_replace)
            appended = len(self.appended_text)
            if replaced > appended:
                # The replacement reaches back past the newly appended
                # text into the pre-existing output.
                assert action.prev_replace.endswith(self.appended_text)
                replaced -= appended
                if replaced > len(self.replaced_text):
                    assert action.prev_replace.endswith(self.replaced_text)
                    self.replaced_text = action.prev_replace[:replaced]
                else:
                    assert self.replaced_text.endswith(action.prev_replace)
                    self.replaced_text = self.replaced_text[:-replaced]
                    self.replaced_text += action.prev_replace[:replaced]
                self.appended_text = ''
            else:
                assert self.appended_text.endswith(action.prev_replace)
                self.appended_text = self.appended_text[:-replaced]
        if not action.prev_attach:
            self.appended_text += action.space_char
        self.appended_text += action.text
        if self.spaces_after and not action.next_attach:
            self.appended_text += action.space_char
            self.trailing_space = action.space_char
        else:
            self.trailing_space = ''
    def render(self, action_list, last_action):
        """Render a series of action.
        Note: the function is a generator that yields non-text
        actions (commands, combos, ...) for special processing.
        """
        if self.spaces_after and last_action is not None:
            # Resume from the previous render's pending trailing space.
            self.trailing_space = last_action.trailing_space
            self.appended_text = last_action.trailing_space
        for action in action_list:
            if action.text is None:
                yield action
            else:
                self._render_action(action)
    def reset(self, trailing_space):
        """Reset current state (rendered text)."""
        self.replaced_text = ''
        self.appended_text = trailing_space
class OutputHelper:
    """A helper class for minimizing the amount of change on output.
    This class figures out the current state, compares it to the new output and
    optimizes away extra backspaces and typing.
    """
    def __init__(self, output, before_spaces_after, after_spaces_after):
        self.output = output
        # Text state before the change (old space placement mode).
        self.before = TextFormatter(before_spaces_after)
        # Text state after the change (new space placement mode).
        self.after = TextFormatter(after_spaces_after)
    def flush(self):
        # FIXME:
        # - what about things like emoji zwj sequences?
        # - normalize strings to better handle combining characters?
        #
        # >>> u"C\u0327"
        # 'Ç'
        # >>> len(u"C\u0327")
        # 2
        # >>> len(unicodedata.normalize('NFC', u"C\u0327"))
        # 1
        # Align both sides on a common replaced prefix (the longer record
        # must end with the shorter one).
        if len(self.before.replaced_text) > len(self.after.replaced_text):
            assert self.before.replaced_text.endswith(self.after.replaced_text)
            replaced_text = self.before.replaced_text
        else:
            assert self.after.replaced_text.endswith(self.before.replaced_text)
            replaced_text = self.after.replaced_text
        before = replaced_text[:len(replaced_text)-len(self.before.replaced_text)] + self.before.appended_text
        after = replaced_text[:len(replaced_text)-len(self.after.replaced_text)] + self.after.appended_text
        # Only erase/type the differing suffix.
        common_length = len(commonprefix([before, after]))
        erased = len(before) - common_length
        if erased:
            self.output.send_backspaces(erased)
        appended = after[common_length:]
        if appended:
            self.output.send_string(appended)
        self.before.reset(self.after.trailing_space)
        self.after.reset(self.after.trailing_space)
    def render(self, last_action, undo, do):
        # Render undone actions, ignoring non-text actions.
        for action in self.before.render(undo, last_action):
            pass
        # Render new actions.
        for action in self.after.render(do, last_action):
            # Non-text action: flush pending text first, then execute it.
            self.flush()
            if action.combo:
                self.output.send_key_combination(action.combo)
            elif action.command:
                self.output.send_engine_command(action.command)
        self.flush()
class _Action:
    """A hybrid class that stores instructions and resulting state.
    A single translation may be formatted into one or more actions. The
    instructions are used to render the current action and the state is used as
    context to render future translations.
    """
    def __init__(self,
                 # Previous.
                 prev_attach=False, prev_replace='',
                 # Current.
                 glue=False, word=None, orthography=True, space_char=' ',
                 upper_carry=False, case=None, text=None, trailing_space='',
                 word_is_finished=None, combo=None, command=None,
                 # Next.
                 next_attach=False, next_case=None
                ):
        """Initialize a new action.
        Arguments:
        prev_attach -- True if there should be no space between this and the
        previous action.
        prev_replace -- Text that should be deleted for this action.
        glue -- True if there be no space between this and the next action if
        the next action also has glue set to True.
        word -- The current root word (sans prefix, and un-cased). This is
        context for future actions whose behavior depends on it such as
        suffixes.
        upper_carry -- True if we are uppercasing the current word.
        word_is_finished -- True if word is finished.
        orthography -- True if orthography rules should be applies when adding
        a suffix to this action.
        space_char -- this character will replace spaces after all other
        formatting has been applied
        case -- an integer to determine which case to output after formatting
        text -- The text that should be rendered for this action.
        trailing_space -- This the space that would be added when rendering
        up to this action with space placement set to
        'after output'.
        combo -- The key combo, in plover's key combo language, that should be
        executed for this action.
        command -- The command that should be executed for this action.
        next_attach -- True if there should be no space between this and the next
        action.
        next_case -- Case to apply to next action: capitalize/lower/upper...
        """
        # State variables
        self.prev_attach = prev_attach
        self.glue = glue
        self.word = word
        self.upper_carry = upper_carry
        self.orthography = orthography
        self.next_attach = next_attach
        self.next_case = next_case
        if word_is_finished is None:
            # By default, an attached action leaves the word open.
            self.word_is_finished = not self.next_attach
        else:
            self.word_is_finished = word_is_finished
        # Persistent state variables
        self.space_char = space_char
        self.case = case
        self.trailing_space = trailing_space
        # Instruction variables
        self.prev_replace = prev_replace
        self.text = text
        self.combo = combo
        self.command = command
    def copy_state(self):
        """Clone this action but only clone the state variables."""
        return _Action(
            # Previous.
            prev_attach=self.next_attach,
            # Current.
            case=self.case, glue=self.glue, orthography=self.orthography,
            space_char=self.space_char, upper_carry=self.upper_carry,
            word=self.word, trailing_space=self.trailing_space,
            word_is_finished=self.word_is_finished,
            # Next.
            next_attach=self.next_attach, next_case=self.next_case,
        )
    def new_state(self):
        # Fresh action keeping only the persistent/global state.
        return _Action(
            # Previous.
            prev_attach=self.next_attach,
            # Current.
            space_char=self.space_char, case=self.case,
            trailing_space=self.trailing_space,
            # Next.
        )
    def __eq__(self, other):
        return self.__dict__ == other.__dict__
    def __ne__(self, other):
        return not self == other
    def __str__(self):
        # Only show fields that differ from the default action.
        kwargs = [
            '%s=%r' % (k, v)
            for k, v in self.__dict__.items()
            if v != self.DEFAULT.__dict__[k]
        ]
        return 'Action(%s)' % ', '.join(sorted(kwargs))
    def __repr__(self):
        return str(self)
# Canonical default action; used by __str__ to elide default fields.
_Action.DEFAULT = _Action()
class _LookAheadAction(_Action):
    """An action whose effect depends on the text that follows it.

    Wraps two candidate actions; `update` selects which one is in
    effect based on whether the upcoming text matches `pattern`.
    Used to implement the `{=pattern}` conditional meta.
    """
    def __init__(self, pattern, action1, action2):
        # Note: deliberately does not call _Action.__init__; attribute
        # access is proxied to the currently selected action instead.
        self.pattern = pattern
        self.action1 = action1
        self.action2 = action2
        self.action = None
        self.update('')
    def update(self, text):
        """Select action1 if *text* matches pattern, action2 otherwise."""
        if re.match(self.pattern, text) is None:
            self.action = self.action2
        else:
            self.action = self.action1
        return self.action
    def __getattr__(self, name):
        # Delegate unknown attribute lookups to the selected action.
        return getattr(self.action, name)
    def __str__(self):
        return 'LookAheadAction(%s)' % str(self.__dict__)
def _translation_to_actions(translation, ctx):
    """Create actions for a translation.
    Arguments:
    translation -- A string with the translation to render.
    ctx -- The context in which this translation is formatted.
    Returns: A list of actions.
    """
    # Reduce the translation to atoms. An atom is an irreducible string that is
    # either entirely a single meta command or entirely text containing no meta
    # commands.
    if translation.isdigit():
        # If a translation is only digits then glue it to neighboring digits.
        atoms = [_glue_translation(translation)]
    else:
        atoms = filter(None, (
            x.strip(' ') for x in ATOM_RE.findall(translation))
        )
    action_list = []
    for atom in atoms:
        action = _atom_to_action(atom, ctx)
        action_list.append(action)
        ctx.translated(action)
    if not action_list:
        # Empty translation: still produce one (no-op) action so the
        # translation has formatting to undo later.
        action = ctx.copy_last_action()
        action_list = [action]
        ctx.translated(action)
    return action_list
def _raw_to_actions(stroke, ctx):
    """Turn a raw stroke into actions.
    Arguments:
    stroke -- A string representation of the stroke.
    ctx -- The context in which the new actions are created.
    Returns: A list of actions.
    """
    # If a raw stroke is composed of digits then remove the dash (if
    # present) and glue it to any neighboring digits. Otherwise, just
    # output the raw stroke as is.
    no_dash = stroke.replace('-', '', 1)
    if no_dash.isdigit():
        return _translation_to_actions(no_dash, ctx)
    action = _Action(text=stroke, word=stroke,
                     case=ctx.last_action.case,
                     prev_attach=ctx.last_action.next_attach,
                     space_char=ctx.last_action.space_char,
                     trailing_space=ctx.last_action.space_char)
    ctx.translated(action)
    return [action]
def _meta_to_action(meta, ctx):
    """Dispatch a meta command string to its registered 'meta' plugin.

    Falls back to a fresh empty action when the meta does not match
    any known declaration.
    """
    meta_name, meta_arg = _parse_meta(meta)
    if meta_name is not None:
        meta_fn = registry.get_plugin('meta', meta_name).obj
        action = meta_fn(ctx, meta_arg)
    else:
        action = ctx.new_action()
    return action
def _atom_to_action(atom, ctx):
    """Convert an atom into an action.
    Arguments:
    atom -- A string holding an atom. An atom is an irreducible string that is
    either entirely a single meta command or entirely text containing no meta
    commands.
    ctx -- The context in which the new action takes place.
    Returns: An action for the atom.
    """
    meta = _get_meta(atom)
    if meta is not None:
        # Meta command: let the registered handler build the action.
        meta = _unescape_atom(meta)
        action = _meta_to_action(meta, ctx)
    else:
        # Plain text atom.
        action = ctx.new_action()
        action.text = _unescape_atom(atom)
    _finalize_action(action, ctx)
    return action
def _finalize_action(action, ctx):
    '''Finalize action's text.

    Applies word tracking, pending case transform and output mode to
    the action's text, in place.
    '''
    if isinstance(action, _LookAheadAction):
        # Finalize both candidate actions of a conditional.
        _finalize_action(action.action1, ctx)
        _finalize_action(action.action2, ctx)
        return
    text = action.text
    if text is None:
        return
    # Update word.
    if action.word is None:
        last_word = None
        if action.glue and ctx.last_action.glue:
            # Glued actions extend the previous word.
            last_word = ctx.last_action.word
        action.word = rightmost_word((last_word or '') + text)
    # Apply case.
    case = ctx.last_action.next_case
    if case is None and action.prev_attach and ctx.last_action.upper_carry:
        # Keep uppercasing an attached continuation of an upper-cased word.
        case = Case.UPPER_FIRST_WORD
    text = apply_case(text, case)
    if case == Case.UPPER_FIRST_WORD:
        action.upper_carry = not has_word_boundary(text)
    # Apply mode.
    action.text = apply_mode(text, action.case, action.space_char,
                             action.prev_attach, ctx.last_action)
    # Update trailing space.
    action.trailing_space = '' if action.next_attach else action.space_char
def apply_case(text, case):
    """Return *text* transformed according to *case* (a Case member or None).

    Raises ValueError for any case other than CAP_FIRST_WORD,
    LOWER_FIRST_CHAR or UPPER_FIRST_WORD.
    """
    if case is None:
        return text
    transforms = {
        Case.CAP_FIRST_WORD: capitalize_first_word,
        Case.LOWER_FIRST_CHAR: lower_first_character,
        Case.UPPER_FIRST_WORD: upper_first_word,
    }
    transform = transforms.get(case)
    if transform is None:
        raise ValueError('%r is not a valid case' % case)
    return transform(text)
def apply_mode(text, case, space_char, begin, last_action):
    # Should title case be applied to the beginning of the next string?
    # NOTE(review): `last_action.case` holds the output *mode* case
    # (LOWER/UPPER/TITLE), yet it is compared against the per-word cases
    # CAP_FIRST_WORD/UPPER_FIRST_WORD — confirm this is intentional.
    lower_title_case = (begin and not
                        last_action.case in (
                            Case.CAP_FIRST_WORD,
                            Case.UPPER_FIRST_WORD,
                        ))
    # Apply case, then replace space character.
    text = apply_mode_case(text, case, lower_title_case)
    text = apply_mode_space_char(text, space_char)
    # Title case is sensitive to lower flag.
    if (last_action.next_case == Case.LOWER_FIRST_CHAR
            and text and case == Case.TITLE):
        text = lower_first_character(text)
    return text
def apply_mode_case(text, case, appended):
    """Apply an output-mode case (LOWER/UPPER/TITLE) to *text*.

    When *appended* is true, title mode leaves the text untouched
    (it is a continuation of already-rendered output).
    """
    if case is None:
        return text
    if case == Case.LOWER:
        return text.lower()
    if case == Case.UPPER:
        return text.upper()
    if case == Case.TITLE:
        # Do nothing to appended output
        return text if appended else capitalize_all_words(text)
    raise ValueError('%r is not a valid case' % case)
def apply_mode_space_char(text, space_char):
    """Replace every space in *text* by *space_char* (no-op for a plain space)."""
    return text if space_char == SPACE else text.replace(SPACE, space_char)
def _get_meta(atom):
    """Return the meta command, if any, without surrounding meta markups."""
    if atom is None:
        return None
    if not (atom.startswith(META_START) and atom.endswith(META_END)):
        return None
    return atom[len(META_START):-len(META_END)]
def _glue_translation(s):
    """Wrap *s* in meta markup flagging it as a glue stroke ({&...})."""
    return ''.join((META_START, META_GLUE_FLAG, s, META_END))
def _unescape_atom(atom):
    """Replace escaped meta markups with unescaped meta markups."""
    return (atom
            .replace(META_ESC_START, META_START)
            .replace(META_ESC_END, META_END))
def capitalize_first_word(s):
    """Upper-case the first character of *s*, leaving the rest untouched.
    - 'foo bar' -> 'Foo bar'
    - 'STUFF' -> 'STUFF'
    """
    if not s:
        return s
    return s[0].upper() + s[1:]
def capitalize_all_words(s):
    """Capitalize each space-separated word of *s*.
    - 'foo bar' -> 'Foo Bar'
    - "O'Something STUFF" -> "O'something Stuff"
    """
    # string.capwords lower-cases each word before capitalizing it.
    return string.capwords(s, SPACE)
def lower_first_character(s):
    """Lower-case the first character of *s*, leaving the rest untouched."""
    if not s:
        return s
    return s[0].lower() + s[1:]
def upper_all_words(s):
    """Return *s* fully upper-cased."""
    return s.upper()
def upper_first_word(s):
    """Upper-case the first word of *s* (per WORD_RX), leave the rest as-is."""
    match = WORD_RX.match(s)
    if not match:
        return s
    end = match.end()
    # Trailing whitespace in the match is unaffected by upper().
    return s[:end].upper() + s[end:]
def rightmost_word(s):
    """Return the rightmost word in *s*, or '' if *s* ends in whitespace
    or contains no word."""
    words = WORD_RX.findall(s)
    if not words or words[-1][-1].isspace():
        return ''
    return words[-1]
def has_word_boundary(s):
    """Return True if s contains a word boundary
    (e.g.: more than 1 word, or white space).
    """
    if not s:
        return False
    edge_whitespace = s[0].isspace() or s[-1].isspace()
    return edge_whitespace or len(WORD_RX.findall(s)) > 1
| 32,725
|
Python
|
.py
| 789
| 31.693283
| 110
| 0.584283
|
openstenoproject/plover
| 2,318
| 281
| 179
|
GPL-2.0
|
9/5/2024, 5:12:22 PM (Europe/Amsterdam)
|
18,673
|
i18n.py
|
openstenoproject_plover/plover/i18n.py
|
import os
import gettext
from plover.oslayer.config import CONFIG_DIR, PLATFORM
from plover.oslayer.i18n import get_system_language
from plover.resource import ASSET_SCHEME, resource_filename
def get_language():
    """Return the language code to use for translations.

    Priority: the LANGUAGE environment variable, then the system
    language, then English as a last resort.
    """
    env_lang = os.environ.get('LANGUAGE')
    if env_lang is not None:
        return env_lang
    system_lang = get_system_language()
    return system_lang if system_lang is not None else 'en'
def get_locale_dir(package, resource_dir):
    """Return the locale directory holding *package*'s translations.

    User-provided catalogs in the configuration directory take
    precedence; otherwise fall back to the package's bundled assets.
    """
    locale_dir = os.path.join(CONFIG_DIR, 'messages')
    if gettext.find(package, locale_dir):
        return locale_dir
    return resource_filename(f'{ASSET_SCHEME}{package}:{resource_dir}')
class Translator:
    """Callable gettext wrapper bound to a package's message catalog.

    Instances are used as the `_` function: calling the instance
    translates a message using the catalog for the current `lang`.
    """
    def __init__(self, package, resource_dir='messages', lang=None):
        self.package = package
        self.resource_dir = resource_dir
        if lang is None:
            lang = get_language()
        self.lang = lang
    @property
    def lang(self):
        # Currently selected language code.
        return self._lang
    @lang.setter
    def lang(self, lang):
        # Changing the language reloads the gettext catalog
        # (falling back to untranslated messages if none is found).
        self._lang = lang
        localedir = get_locale_dir(self.package, self.resource_dir)
        self._translation = gettext.translation(self.package, localedir=localedir,
                                                languages=[lang], fallback=True)
        self.gettext = self._translation.gettext
        self.ngettext = self._translation.ngettext
    def __call__(self, message):
        return self.gettext(message)
    def _(self, message):
        # No-op marker: flags *message* for extraction by gettext
        # tooling without translating it at runtime.
        return message
| 1,575
|
Python
|
.py
| 43
| 29.511628
| 82
| 0.662722
|
openstenoproject/plover
| 2,318
| 281
| 179
|
GPL-2.0
|
9/5/2024, 5:12:22 PM (Europe/Amsterdam)
|
18,674
|
__init__.py
|
openstenoproject_plover/plover/__init__.py
|
# Copyright (c) 2010-2011 Joshua Harlan Lifton.
# See LICENSE.txt for details.
"""Plover: Open Source Stenography Software"""
# Only set up real translations when imported normally as the
# `plover` package.
if __name__ == 'plover':
    from plover.i18n import Translator
    _ = Translator(__package__)
else:
    # exec from `setup.py`, package data
    # may not be available, and we don't
    # want to translate anyway.
    _ = lambda s: s
__version__ = '4.0.0rc2'
__copyright__ = '(C) Open Steno Project'
__url__ = 'http://www.openstenoproject.org/'
__download_url__ = 'http://www.openstenoproject.org/plover'
__credits__ = _("""\
Founded by stenographer Mirabai Knight.
Developers:
Joshua Lifton
Hesky Fisher
Ted Morin
Benoit Pierre
and many more on GitHub:
<https://github.com/openstenoproject/plover>""")
__license__ = 'GNU General Public License v2 or later (GPLv2+)'
# i18n: Short description for Plover, currently not used in the interface.
__description__ = _('Open Source Stenography Software')
__long_description__ = _("""\
Plover is a free open source program intended to bring realtime
stenographic technology not just to stenographers, but also to
hackers, hobbyists, accessibility mavens, and all-around speed demons.""")
| 1,164
|
Python
|
.py
| 31
| 35.580645
| 74
| 0.727595
|
openstenoproject/plover
| 2,318
| 281
| 179
|
GPL-2.0
|
9/5/2024, 5:12:22 PM (Europe/Amsterdam)
|
18,675
|
engine.py
|
openstenoproject_plover/plover/engine.py
|
from collections import namedtuple, OrderedDict
from functools import wraps
from queue import Queue
import os
import shutil
import threading
from plover import log, system
from plover.dictionary.loading_manager import DictionaryLoadingManager
from plover.exception import DictionaryLoaderException
from plover.formatting import Formatter
from plover.misc import shorten_path
from plover.registry import registry
from plover.resource import ASSET_SCHEME, resource_filename
from plover.steno import Stroke
from plover.steno_dictionary import StenoDictionary, StenoDictionaryCollection
from plover.suggestions import Suggestions
from plover.translation import Translator
# Initial formatter state applied before the first stroke: whether to
# attach, whether to capitalize, and which space character to use.
StartingStrokeState = namedtuple('StartingStrokeState',
                                 'attach capitalize space_char',
                                 defaults=(False, False, ' '))
# Snapshot of the machine configuration: machine type name, its
# options, and the keymap. Used to detect configuration changes.
MachineParams = namedtuple('MachineParams', 'type options keymap')
class ErroredDictionary(StenoDictionary):
    """Stand-in for a dictionary whose file failed to load.

    Keeps the offending path and the load exception around so the UI
    can report the failure; the dictionary itself is disabled and
    read-only.
    """
    def __init__(self, path, exception):
        super().__init__()
        self.enabled = False
        self.readonly = True
        self.path = path
        self.exception = exception
    def __eq__(self, other):
        return (
            isinstance(other, ErroredDictionary)
            and self.path == other.path
            and self.exception == other.exception
        )
def copy_default_dictionaries(dictionaries_files):
    '''Recreate default dictionaries.
    Each default dictionary is recreated if it's
    in use by the current config and missing.

    Arguments:
    dictionaries_files -- iterable of dictionary file paths from the
    current configuration.
    '''
    for dictionary in dictionaries_files:
        # Ignore assets.
        if dictionary.startswith(ASSET_SCHEME):
            continue
        # Nothing to do if dictionary file already exists.
        if os.path.exists(dictionary):
            continue
        # Check it's actually a default dictionary.
        basename = os.path.basename(dictionary)
        if basename not in system.DEFAULT_DICTIONARIES:
            continue
        default_dictionary = os.path.join(system.DICTIONARIES_ROOT, basename)
        log.info('recreating %s from %s', dictionary, default_dictionary)
        shutil.copyfile(resource_filename(default_dictionary), dictionary)
def with_lock(func):
    """Decorator: run *func* while holding the instance lock.

    Relies on the instance being usable as a context manager
    (``with self:``), as StenoEngine is.
    """
    # To keep __doc__/__name__ attributes of the initial function.
    @wraps(func)
    def wrapper(self, *args, **kwargs):
        with self:
            return func(self, *args, **kwargs)
    return wrapper
class StenoEngine:
    """Plover's core engine.

    Glues together the steno machine, the translator/formatter pipeline,
    the dictionaries and the running extensions.  All mutating work is
    funneled through an internal queue so it executes on the engine
    thread (see `_same_thread_hook` and `run`), and shared state is
    protected by a reentrant lock (used by the `with_lock`-decorated
    accessors and the context-manager protocol).
    """

    # Names of the events that callbacks can subscribe to with `hook_connect`.
    HOOKS = '''
    stroked
    translated
    machine_state_changed
    output_changed
    config_changed
    dictionaries_loaded
    send_string
    send_backspaces
    send_key_combination
    add_translation
    focus
    configure
    lookup
    suggestions
    quit
    '''.split()

    def __init__(self, config, controller, keyboard_emulation):
        self._config = config
        self._controller = controller
        # True when output (typing) is enabled.
        self._is_running = False
        # Work queue consumed by the engine thread in `run`.
        self._queue = Queue()
        self._lock = threading.RLock()
        self._machine = None
        self._machine_state = None
        self._machine_params = MachineParams(None, None, None)
        # Pipeline: translator -> formatter -> keyboard emulation.
        self._formatter = Formatter()
        self._formatter.set_output(Formatter.output_type(
            self._send_backspaces,
            self._send_string,
            self._send_key_combination,
            self._send_engine_command,
        ))
        self._formatter.add_listener(self._on_translated)
        self._translator = Translator()
        self._translator.add_listener(log.translation)
        self._translator.add_listener(self._formatter.format)
        self._dictionaries = self._translator.get_dictionary()
        self._dictionaries_manager = DictionaryLoadingManager()
        # Keep the "running" translator state around so toggling output
        # off and back on does not lose the translation history.
        self._running_state = self._translator.get_state()
        self._translator.clear_state()
        self._keyboard_emulation = keyboard_emulation
        self._hooks = { hook: [] for hook in self.HOOKS }
        self._running_extensions = {}

    def __enter__(self):
        # Support `with engine:` to hold the engine lock.
        self._lock.__enter__()
        return self

    def __exit__(self, exc_type, exc_value, traceback):
        self._lock.__exit__(exc_type, exc_value, traceback)

    def _in_engine_thread(self):
        # Subclasses must report whether the caller is the engine thread.
        raise NotImplementedError()

    def _same_thread_hook(self, func, *args, **kwargs):
        # Run `func` directly when already on the engine thread,
        # otherwise queue it for the engine thread to execute.
        if self._in_engine_thread():
            func(*args, **kwargs)
        else:
            self._queue.put((func, args, kwargs))

    def run(self):
        # Engine thread main loop: execute queued calls under the lock
        # until one of them (i.e. `_quit`) returns a truthy value.
        while True:
            func, args, kwargs = self._queue.get()
            try:
                with self._lock:
                    if func(*args, **kwargs):
                        break
            except Exception:
                log.error('engine %s failed', func.__name__[1:], exc_info=True)

    def _on_control_message(self, msg):
        # Messages from the controller: ('command', command_string).
        if msg[0] == 'command':
            self._same_thread_hook(self._execute_engine_command,
                                   *msg[1:], force=True)
        else:
            log.error('ignoring invalid control message: %r', msg)

    def _stop(self):
        # Tear down controller, extensions and machine capture.
        self._controller.stop()
        self._stop_extensions(self._running_extensions.keys())
        if self._machine is not None:
            self._machine.stop_capture()
            self._machine = None

    def _start(self):
        self._set_output(self._config['auto_start'])
        # `full=True` applies the complete configuration on startup.
        self._update(full=True)
        self._controller.start(self._on_control_message)

    def _set_dictionaries(self, dictionaries):
        # Replace the active dictionary list, firing `dictionaries_loaded`
        # only when the list actually changed (by identity).
        def dictionaries_changed(l1, l2):
            if len(l1) != len(l2):
                return True
            for d1, d2 in zip(l1, l2):
                if d1 is not d2:
                    return True
            return False
        if not dictionaries_changed(dictionaries, self._dictionaries.dicts):
            # No change.
            return
        self._dictionaries.set_dicts(dictionaries)
        self._trigger_hook('dictionaries_loaded', StenoDictionaryCollection(dictionaries))

    def _update(self, config_update=None, full=False, reset_machine=False):
        # Apply a (partial or full) configuration update: logging, output,
        # system, machine, extensions and dictionaries.
        original_config = self._config.as_dict()
        # Update configuration.
        if config_update is not None:
            self._config.update(**config_update)
            config = self._config.as_dict()
        else:
            config = original_config
        # Create configuration update.
        if full:
            config_update = config
        else:
            config_update = {
                option: value
                for option, value in config.items()
                if value != original_config[option]
            }
        # Save config if anything changed.
        if config_update:
            self._config.save()
        # Update logging.
        log.set_stroke_filename(config['log_file_name'])
        log.enable_stroke_logging(config['enable_stroke_logging'])
        log.enable_translation_logging(config['enable_translation_logging'])
        # Update output.
        self._formatter.set_space_placement(config['space_placement'])
        self._formatter.start_attached = config['start_attached']
        self._formatter.start_capitalized = config['start_capitalized']
        self._translator.set_min_undo_length(config['undo_levels'])
        self._keyboard_emulation.set_key_press_delay(config['time_between_key_presses'])
        # Update system.
        system_name = config['system_name']
        if system.NAME != system_name:
            log.info('loading system: %s', system_name)
            system.setup(system_name)
        # Update machine.
        update_keymap = False
        start_machine = False
        machine_params = MachineParams(config['machine_type'],
                                       config['machine_specific_options'],
                                       config['system_keymap'])
        # Do not reset if only the keymap changed.
        if self._machine_params is None or \
           self._machine_params.type != machine_params.type or \
           self._machine_params.options != machine_params.options:
            reset_machine = True
        if reset_machine:
            if self._machine is not None:
                self._machine.stop_capture()
                self._machine = None
            machine_class = registry.get_plugin('machine', machine_params.type).obj
            log.info('setting machine: %s', machine_params.type)
            self._machine = machine_class(machine_params.options)
            self._machine.set_suppression(self._is_running)
            self._machine.add_state_callback(self._machine_state_callback)
            self._machine.add_stroke_callback(self._machine_stroke_callback)
            self._machine_params = machine_params
            update_keymap = True
            start_machine = True
        elif self._machine is not None:
            update_keymap = 'system_keymap' in config_update
        if update_keymap:
            machine_keymap = config['system_keymap']
            if machine_keymap is not None:
                self._machine.set_keymap(machine_keymap)
        if start_machine:
            self._machine.start_capture()
        # Update running extensions.
        enabled_extensions = config['enabled_extensions']
        running_extensions = set(self._running_extensions)
        self._stop_extensions(running_extensions - enabled_extensions)
        self._start_extensions(enabled_extensions - running_extensions)
        # Trigger `config_changed` hook.
        if config_update:
            self._trigger_hook('config_changed', config_update)
        # Update dictionaries.
        config_dictionaries = OrderedDict(
            (d.path, d)
            for d in config['dictionaries']
        )
        copy_default_dictionaries(config_dictionaries.keys())
        # Start by unloading outdated dictionaries.
        self._dictionaries_manager.unload_outdated()
        self._set_dictionaries([
            d for d in self._dictionaries.dicts
            if d.path in config_dictionaries and \
               d.path in self._dictionaries_manager
        ])
        # And then (re)load all dictionaries.
        dictionaries = []
        for result in self._dictionaries_manager.load(config_dictionaries.keys()):
            if isinstance(result, DictionaryLoaderException):
                d = ErroredDictionary(result.path, result.exception)
                # Only show an error if it's new.
                if d != self._dictionaries.get(result.path):
                    log.error('loading dictionary `%s` failed: %s',
                              shorten_path(result.path), str(result.exception))
            else:
                d = result
            d.enabled = config_dictionaries[d.path].enabled
            dictionaries.append(d)
        self._set_dictionaries(dictionaries)

    def _start_extensions(self, extension_list):
        for extension_name in extension_list:
            log.info('starting `%s` extension', extension_name)
            try:
                extension = registry.get_plugin('extension', extension_name).obj(self)
            except KeyError:
                # Plugin not installed, skip.
                continue
            try:
                extension.start()
            except Exception:
                log.error('initializing extension `%s` failed', extension_name, exc_info=True)
            else:
                self._running_extensions[extension_name] = extension

    def _stop_extensions(self, extension_list):
        # `list(...)`: callers may pass a view of `_running_extensions`
        # which is mutated by the pop below.
        for extension_name in list(extension_list):
            log.info('stopping `%s` extension', extension_name)
            extension = self._running_extensions.pop(extension_name)
            extension.stop()
            del extension

    def _quit(self, code):
        # Returning True breaks the `run` loop; `code` is later returned
        # by `join`.
        self._stop()
        self.code = code
        self._trigger_hook('quit')
        return True

    def _toggle_output(self):
        self._set_output(not self._is_running)

    def _set_output(self, enabled):
        # Enable/disable output, swapping the translator state so the
        # translation history survives while output is disabled.
        if enabled == self._is_running:
            return
        self._is_running = enabled
        if enabled:
            self._translator.set_state(self._running_state)
        else:
            self._translator.clear_state()
        if self._machine is not None:
            self._machine.set_suppression(enabled)
        self._trigger_hook('output_changed', enabled)

    def _machine_state_callback(self, machine_state):
        self._same_thread_hook(self._on_machine_state_changed, machine_state)

    def _machine_stroke_callback(self, steno_keys):
        self._same_thread_hook(self._on_stroked, steno_keys)

    @with_lock
    def _on_machine_state_changed(self, machine_state):
        assert machine_state is not None
        self._machine_state = machine_state
        self._trigger_hook('machine_state_changed', self._machine_params.type, machine_state)

    def _consume_engine_command(self, command, force=False):
        # Returns True when the last stroke should be suppressed.
        # The first commands can be used whether plover has output enabled or not.
        command_name, *command_args = command.split(':', 1)
        command_name = command_name.lower()
        if command_name == 'resume':
            self._set_output(True)
            return True
        elif command_name == 'toggle':
            self._toggle_output()
            return True
        elif command_name == 'quit':
            self.quit()
            return True
        if not force and not self._is_running:
            return False
        # These commands can only be run when plover has output enabled.
        if command_name == 'suspend':
            self._set_output(False)
        elif command_name == 'configure':
            self._trigger_hook('configure')
        elif command_name == 'focus':
            self._trigger_hook('focus')
        elif command_name == 'add_translation':
            self._trigger_hook('add_translation')
        elif command_name == 'lookup':
            self._trigger_hook('lookup')
        elif command_name == 'suggestions':
            self._trigger_hook('suggestions')
        else:
            # Fall back to a registered `command` plugin.
            command_fn = registry.get_plugin('command', command_name).obj
            command_fn(self, command_args[0] if command_args else '')
        return False

    def _execute_engine_command(self, command, force=False):
        self._consume_engine_command(command, force=force)
        return False

    def _on_stroked(self, steno_keys):
        stroke = Stroke(steno_keys)
        log.stroke(stroke)
        self._translator.translate(stroke)
        self._trigger_hook('stroked', stroke)

    def _on_translated(self, old, new):
        if not self._is_running:
            return
        self._trigger_hook('translated', old, new)

    def _send_backspaces(self, b):
        if not self._is_running:
            return
        self._keyboard_emulation.send_backspaces(b)
        self._trigger_hook('send_backspaces', b)

    def _send_string(self, s):
        if not self._is_running:
            return
        self._keyboard_emulation.send_string(s)
        self._trigger_hook('send_string', s)

    def _send_key_combination(self, c):
        if not self._is_running:
            return
        self._keyboard_emulation.send_key_combination(c)
        self._trigger_hook('send_key_combination', c)

    def _send_engine_command(self, command):
        # Suppress the triggering stroke when output was disabled and the
        # command asked for suppression (e.g. `resume`).
        suppress = not self._is_running
        suppress &= self._consume_engine_command(command)
        if suppress:
            self._machine.suppress_last_stroke(self._keyboard_emulation.send_backspaces)

    def toggle_output(self):
        self._same_thread_hook(self._toggle_output)

    def set_output(self, enabled):
        self._same_thread_hook(self._set_output, enabled)

    @property
    @with_lock
    def machine_state(self):
        return self._machine_state

    @property
    @with_lock
    def output(self):
        return self._is_running

    @output.setter
    def output(self, enabled):
        self._same_thread_hook(self._set_output, enabled)

    @property
    @with_lock
    def config(self):
        return self._config.as_dict()

    @config.setter
    def config(self, update):
        self._same_thread_hook(self._update, config_update=update)

    @with_lock
    def __getitem__(self, setting):
        return self._config[setting]

    def __setitem__(self, setting, value):
        # Route through the `config` setter so the update is applied on
        # the engine thread.
        self.config = {setting: value}

    def reset_machine(self):
        self._same_thread_hook(self._update, reset_machine=True)

    def load_config(self):
        # Returns False (after resetting to defaults) on load failure.
        try:
            self._config.load()
        except Exception:
            log.error('loading configuration failed, resetting to default', exc_info=True)
            self._config.clear()
            return False
        return True

    def start(self):
        self._same_thread_hook(self._start)

    def quit(self, code=0):
        # We need to go through the queue, even when already called
        # from the engine thread so _quit's return code does break
        # the thread out of its main loop.
        self._queue.put((self._quit, (code,), {}))

    def restart(self):
        self.quit(-1)

    def join(self):
        # Only meaningful after `quit` has run; `self.code` is set by `_quit`.
        return self.code

    @with_lock
    def lookup(self, translation):
        return self._dictionaries.lookup(translation)

    @with_lock
    def raw_lookup(self, translation):
        return self._dictionaries.raw_lookup(translation)

    @with_lock
    def lookup_from_all(self, translation):
        return self._dictionaries.lookup_from_all(translation)

    @with_lock
    def raw_lookup_from_all(self, translation):
        return self._dictionaries.raw_lookup_from_all(translation)

    @with_lock
    def reverse_lookup(self, translation):
        matches = self._dictionaries.reverse_lookup(translation)
        return [] if matches is None else matches

    @with_lock
    def casereverse_lookup(self, translation):
        matches = self._dictionaries.casereverse_lookup(translation)
        return set() if matches is None else matches

    @with_lock
    def add_dictionary_filter(self, dictionary_filter):
        self._dictionaries.add_filter(dictionary_filter)

    @with_lock
    def remove_dictionary_filter(self, dictionary_filter):
        self._dictionaries.remove_filter(dictionary_filter)

    @with_lock
    def get_suggestions(self, translation):
        return Suggestions(self._dictionaries).find(translation)

    @property
    @with_lock
    def translator_state(self):
        return self._translator.get_state()

    @translator_state.setter
    @with_lock
    def translator_state(self, state):
        self._translator.set_state(state)

    @with_lock
    def clear_translator_state(self, undo=False):
        # With `undo=True`, the pending translations are undone (their
        # output is reverted) before the state is cleared.
        if undo:
            state = self._translator.get_state()
            if state.translations:
                self._formatter.format(state.translations, (), None)
        self._translator.clear_state()

    @property
    @with_lock
    def starting_stroke_state(self):
        return StartingStrokeState(self._formatter.start_attached,
                                   self._formatter.start_capitalized,
                                   self._formatter.space_char)

    @starting_stroke_state.setter
    @with_lock
    def starting_stroke_state(self, state):
        self._formatter.start_attached = state.attach
        self._formatter.start_capitalized = state.capitalize
        self._formatter.space_char = state.space_char

    @with_lock
    def add_translation(self, strokes, translation, dictionary_path=None):
        # Defaults to the first writable dictionary, and saves it.
        if dictionary_path is None:
            dictionary_path = self._dictionaries.first_writable().path
        self._dictionaries.set(strokes, translation, path=dictionary_path)
        self._dictionaries.save(path_list=(dictionary_path,))

    @property
    @with_lock
    def dictionaries(self):
        return self._dictionaries

    # Hooks.

    def _trigger_hook(self, hook, *args, **kwargs):
        # A failing callback is logged but never breaks the other callbacks.
        for callback in self._hooks[hook]:
            try:
                callback(*args, **kwargs)
            except Exception:
                log.error('hook %r callback %r failed',
                          hook, callback,
                          exc_info=True)

    @with_lock
    def hook_connect(self, hook, callback):
        self._hooks[hook].append(callback)

    @with_lock
    def hook_disconnect(self, hook, callback):
        self._hooks[hook].remove(callback)
| 20,368
|
Python
|
.py
| 496
| 31.441532
| 94
| 0.624779
|
openstenoproject/plover
| 2,318
| 281
| 179
|
GPL-2.0
|
9/5/2024, 5:12:22 PM (Europe/Amsterdam)
|
18,676
|
log.py
|
openstenoproject_plover/plover/log.py
|
# Copyright (c) 2013 Hesky Fisher
# See LICENSE.txt for details.
"""A module to handle logging."""
import os
import sys
import logging
import traceback
from logging.handlers import RotatingFileHandler
from logging import DEBUG, INFO, WARNING, ERROR
from plover.oslayer.config import CONFIG_DIR, PLATFORM
# Format for the main log: timestamp, thread, level, message.
LOG_FORMAT = '%(asctime)s [%(threadName)s] %(levelname)s: %(message)s'
# Main log file location (inside the user's configuration directory).
LOG_FILENAME = os.path.realpath(os.path.join(CONFIG_DIR, 'plover.log'))
# Rotation parameters: rotate at ~10MB, keep 9 backups.
LOG_MAX_BYTES = 10000000
LOG_COUNT = 9
# Format for the stroke/translation log: timestamp and message only.
STROKE_LOG_FORMAT = '%(asctime)s %(message)s'
class NoExceptionTracebackFormatter(logging.Formatter):
    """Formatter that renders exceptions without their traceback."""

    def format(self, record):
        # `logging` caches the result of formatException in `exc_text`
        # (see http://bugs.python.org/issue1295); clear that cache so our
        # formatException is consulted, and restore it afterwards.
        cached = record.exc_text
        record.exc_text = None
        try:
            formatted = super().format(record)
        finally:
            record.exc_text = cached
        return formatted

    def formatException(self, exc_info):
        # Keep only the final "Type: message" line(s), no traceback.
        etype, evalue, _ = exc_info
        return ''.join(traceback.format_exception_only(etype, evalue))
class FileHandler(RotatingFileHandler):
    """Rotating file handler pre-configured for Plover's log files."""

    def __init__(self, filename=LOG_FILENAME, format=LOG_FORMAT):
        # Rotate at LOG_MAX_BYTES, keeping LOG_COUNT backups.
        super().__init__(filename,
                         encoding='utf-8',
                         maxBytes=LOG_MAX_BYTES,
                         backupCount=LOG_COUNT)
        self.setFormatter(logging.Formatter(format))
class PrintHandler(logging.StreamHandler):
    """Stream handler writing formatted messages to stderr."""

    def __init__(self, format=LOG_FORMAT):
        super().__init__(sys.stderr)
        formatter = logging.Formatter(format)
        self.setFormatter(formatter)
class Logger:
    """Plover's logger: console, file, platform and stroke logging.

    Wraps a standard `logging` logger (attribute access is delegated via
    `__getattr__`) and manages a separate, optional stroke/translation
    log file.
    """

    def __init__(self):
        # Console handler: warnings and up by default.
        self._print_handler = PrintHandler()
        self._print_handler.setLevel(WARNING)
        # Main log file handler (created on demand by `setup_logfile`).
        self._file_handler = None
        # Optional platform-specific GUI notification handler.
        self._platform_handler = None
        self._logger = logging.getLogger('plover')
        self._logger.addHandler(self._print_handler)
        self._logger.setLevel(INFO)
        # Stroke/translation logging state.
        self._stroke_filename = None
        self._stroke_logger = logging.getLogger('plover-strokes')
        self._stroke_logger.setLevel(INFO)
        self._stroke_handler = None
        self._log_strokes = False
        self._log_translations = False

    def has_platform_handler(self):
        return self._platform_handler is not None

    def setup_platform_handler(self):
        # Best effort: failures to import or initialize the platform GUI
        # log are logged and otherwise ignored.
        if self.has_platform_handler():
            return
        NotificationHandler = None
        try:
            from plover.oslayer.log import NotificationHandler
        except Exception:
            self.info('could not import platform gui log', exc_info=True)
            return
        try:
            handler = NotificationHandler()
            self.addHandler(handler)
        except Exception:
            self.info('could not initialize platform gui log', exc_info=True)
            return
        self._platform_handler = handler

    def set_level(self, level):
        # Keep console, file handler and logger levels in sync.
        self._print_handler.setLevel(level)
        if self._file_handler is not None:
            self._file_handler.setLevel(level)
        self.setLevel(level)

    def setup_logfile(self):
        assert self._file_handler is None
        self._file_handler = FileHandler()
        self._file_handler.setLevel(self.level)
        self._logger.addHandler(self._file_handler)

    def _setup_stroke_logging(self):
        # Reconcile the stroke log handler with the current settings:
        # (re)open it when logging is wanted and the filename changed,
        # close it when logging is disabled or no filename is set.
        is_logging = self._stroke_handler is not None
        must_log = ((self._log_strokes or self._log_translations)
                    and self._stroke_filename is not None)
        if must_log:
            if is_logging:
                start_logging = stop_logging = self._stroke_filename != self._stroke_handler.baseFilename
            else:
                stop_logging = False
                start_logging = True
        else:
            stop_logging = is_logging
            start_logging = False
        if stop_logging:
            self._stroke_logger.removeHandler(self._stroke_handler)
            self._stroke_handler.close()
            self._stroke_handler = None
        if start_logging:
            self._stroke_handler = FileHandler(filename=self._stroke_filename,
                                               format=STROKE_LOG_FORMAT)
            self._stroke_logger.addHandler(self._stroke_handler)

    def set_stroke_filename(self, filename=None):
        if filename is not None:
            filename = os.path.realpath(filename)
        if self._stroke_filename == filename:
            return
        # The stroke log must not clobber the main log file.
        assert filename != LOG_FILENAME
        self.info('set_stroke_filename(%s)', filename)
        self._stroke_filename = filename
        self._setup_stroke_logging()

    def enable_stroke_logging(self, enable):
        if self._log_strokes == enable:
            return
        self.info('enable_stroke_logging(%s)', enable)
        self._log_strokes = enable
        self._setup_stroke_logging()

    def enable_translation_logging(self, enable):
        if self._log_translations == enable:
            return
        self.info('enable_translation_logging(%s)', enable)
        self._log_translations = enable
        self._setup_stroke_logging()

    def log_stroke(self, stroke):
        if not self._log_strokes or self._stroke_handler is None:
            return
        self._stroke_logger.info('%s', stroke)

    def log_translation(self, undo, do, prev):
        if not self._log_translations or self._stroke_handler is None:
            return
        # TODO: Figure out what to actually log here.
        for u in undo:
            self._stroke_logger.info('*%s', u)
        for d in do:
            self._stroke_logger.info(d)

    # Delegate calls to _logger.
    def __getattr__(self, name):
        return getattr(self._logger, name)
# Set up default logger.
__logger = Logger()

# The following functions direct all input to __logger.
# (module-level convenience API: `log.info(...)`, `log.error(...)`, etc.)
debug = __logger.debug
info = __logger.info
warning = __logger.warning
error = __logger.error
set_level = __logger.set_level
add_handler = __logger.addHandler
remove_handler = __logger.removeHandler
has_platform_handler = __logger.has_platform_handler
setup_platform_handler = __logger.setup_platform_handler

# Strokes/translation logging.
set_stroke_filename = __logger.set_stroke_filename
stroke = __logger.log_stroke
translation = __logger.log_translation
enable_stroke_logging = __logger.enable_stroke_logging
enable_translation_logging = __logger.enable_translation_logging

# Logfile support.
setup_logfile = __logger.setup_logfile
| 6,574
|
Python
|
.py
| 162
| 32.290123
| 105
| 0.646856
|
openstenoproject/plover
| 2,318
| 281
| 179
|
GPL-2.0
|
9/5/2024, 5:12:22 PM (Europe/Amsterdam)
|
18,677
|
resource.py
|
openstenoproject_plover/plover/resource.py
|
from contextlib import contextmanager
from importlib.util import find_spec
from tempfile import NamedTemporaryFile
import os
import shutil
# Prefix marking resources that live inside an installed package.
ASSET_SCHEME = 'asset:'

def _asset_filename(resource_name):
    """Resolve an 'asset:package:relative/path' name to a filename."""
    spec = resource_name[len(ASSET_SCHEME):]
    package, sep, relative = spec.partition(':')
    if not sep or os.path.isabs(relative):
        raise ValueError(f'invalid asset: {resource_name}')
    package_dir = os.path.dirname(find_spec(package).origin)
    return os.path.join(package_dir, relative)
def resource_exists(resource_name):
    """Return True if the resource (asset or plain path) exists on disk."""
    filename = (_asset_filename(resource_name)
                if resource_name.startswith(ASSET_SCHEME)
                else resource_name)
    return os.path.exists(filename)
def resource_filename(resource_name):
    """Map a resource name to a concrete filename on disk."""
    if not resource_name.startswith(ASSET_SCHEME):
        return resource_name
    return _asset_filename(resource_name)
def resource_timestamp(resource_name):
    """Return the last-modification time of the resource's file."""
    return os.path.getmtime(resource_filename(resource_name))
@contextmanager
def resource_update(resource_name):
    """Context manager for updating a resource through a temporary file.

    Yields the name of a temporary file created in the same directory
    (and with the same extension) as the target; on successful exit the
    temporary file replaces the target, otherwise it is removed.
    Assets are read-only and cannot be updated.
    """
    if resource_name.startswith(ASSET_SCHEME):
        raise ValueError(f'updating an asset is unsupported: {resource_name}')
    filename = resource_filename(resource_name)
    directory = os.path.dirname(filename)
    extension = os.path.splitext(filename)[1]
    # Create the temporary file alongside the target so the final move
    # stays on the same filesystem.
    tempfile = NamedTemporaryFile(delete=False, dir=directory,
                                  suffix=extension or None)
    try:
        tempfile.close()
        yield tempfile.name
        shutil.move(tempfile.name, filename)
    finally:
        # Clean up the temporary file if the move did not happen.
        if os.path.exists(tempfile.name):
            os.unlink(tempfile.name)
| 1,682
|
Python
|
.py
| 41
| 35.195122
| 78
| 0.721372
|
openstenoproject/plover
| 2,318
| 281
| 179
|
GPL-2.0
|
9/5/2024, 5:12:22 PM (Europe/Amsterdam)
|
18,678
|
steno_dictionary.py
|
openstenoproject_plover/plover/steno_dictionary.py
|
# Copyright (c) 2013 Hesky Fisher.
# See LICENSE.txt for details.
"""StenoDictionary class and related functions.
A steno dictionary maps sequences of steno strokes to translations.
"""
import collections
import os
from plover.resource import ASSET_SCHEME, resource_filename, resource_timestamp, resource_update
class StenoDictionary:
    """A steno dictionary.

    This dictionary maps immutable sequences to translations and tracks the
    length of the longest key.

    Attributes:
    longest_key -- A read only property holding the length of the longest key.
    timestamp -- File last modification time, used to detect external changes.
    """

    # False if class support creation.
    readonly = False

    def __init__(self):
        self._dict = {}
        self._longest_key = 0
        # Reverse mapping: translation -> list of keys producing it.
        self.reverse = collections.defaultdict(list)
        # Case-insensitive reverse dict
        self.casereverse = collections.defaultdict(list)
        self.filters = []
        self.timestamp = 0
        self.readonly = False
        self.enabled = True
        self.path = None

    def __str__(self):
        return '%s(%r)' % (self.__class__.__name__, self.path)

    def __repr__(self):
        return str(self)

    @classmethod
    def create(cls, resource):
        # Create a new (writable) dictionary backed by `resource`.
        assert not resource.startswith(ASSET_SCHEME)
        if cls.readonly:
            raise ValueError('%s does not support creation' % cls.__name__)
        d = cls()
        d.path = resource
        return d

    @classmethod
    def load(cls, resource):
        # Load an existing dictionary; it is marked read-only when the
        # class is read-only, the resource is an asset, or the file is
        # not writable.
        filename = resource_filename(resource)
        # Timestamp is taken before loading so changes made while
        # loading are detected as "outdated" later on.
        timestamp = resource_timestamp(filename)
        d = cls()
        d._load(filename)
        if (cls.readonly or
            resource.startswith(ASSET_SCHEME) or
            not os.access(filename, os.W_OK)):
            d.readonly = True
        d.path = resource
        d.timestamp = timestamp
        return d

    def save(self):
        assert not self.readonly
        with resource_update(self.path) as temp_path:
            self._save(temp_path)
        self.timestamp = resource_timestamp(self.path)

    def _load(self, filename):
        # Format-specific loading, implemented by subclasses.
        raise NotImplementedError()

    def _save(self, filename):
        # Format-specific saving, implemented by subclasses.
        raise NotImplementedError()

    @property
    def longest_key(self):
        """The length of the longest key in the dict."""
        return self._longest_key

    def __len__(self):
        return self._dict.__len__()

    def __iter__(self):
        return self._dict.__iter__()

    def __getitem__(self, key):
        return self._dict.__getitem__(key)

    def clear(self):
        assert not self.readonly
        self._dict.clear()
        self.reverse.clear()
        self.casereverse.clear()
        self._longest_key = 0

    def items(self):
        return self._dict.items()

    def update(self, *args, **kwargs):
        assert not self.readonly
        iterable_list = [
            a.items() if isinstance(a, (dict, StenoDictionary))
            else a for a in args
        ]
        if kwargs:
            iterable_list.append(kwargs.items())
        if not self._dict:
            # Fast path for an empty dictionary: build the main dict in
            # one go and then rebuild the reverse maps and longest key.
            reverse = self.reverse
            casereverse = self.casereverse
            longest_key = self._longest_key
            assert not (reverse or casereverse or longest_key)
            self._dict = dict(*iterable_list)
            for key, value in self._dict.items():
                reverse[value].append(key)
                casereverse[value.lower()].append(value)
                key_len = len(key)
                if key_len > longest_key:
                    longest_key = key_len
            self._longest_key = longest_key
        else:
            # Slow path: go through __setitem__ to keep invariants.
            for iterable in iterable_list:
                for key, value in iterable:
                    self[key] = value

    def __setitem__(self, key, value):
        assert not self.readonly
        # Remove first so the reverse maps stay consistent.
        if key in self:
            del self[key]
        self._longest_key = max(self._longest_key, len(key))
        self._dict[key] = value
        self.reverse[value].append(key)
        self.casereverse[value.lower()].append(value)

    def get(self, key, fallback=None):
        return self._dict.get(key, fallback)

    def __delitem__(self, key):
        assert not self.readonly
        value = self._dict.pop(key)
        self.reverse[value].remove(key)
        self.casereverse[value.lower()].remove(value)
        # Recompute the longest key if we may have removed it.
        if len(key) == self.longest_key:
            if self._dict:
                self._longest_key = max(len(x) for x in self._dict)
            else:
                self._longest_key = 0

    def __contains__(self, key):
        return self.get(key) is not None

    def reverse_lookup(self, value):
        return set(self.reverse.get(value, ()))

    def casereverse_lookup(self, value):
        return set(self.casereverse.get(value, ()))
class StenoDictionaryCollection:
    """An ordered collection of steno dictionaries.

    Earlier dictionaries take priority over later ones for lookups, and
    the collection-wide filters can hide individual entries.
    """

    def __init__(self, dicts=None):
        """Initialize from an optional list of dictionaries.

        `dicts` defaults to `None` (meaning empty) rather than `[]` to
        avoid the mutable-default-argument pitfall; passing a list keeps
        the original behavior (the list is copied by `set_dicts`).
        """
        self.dicts = []
        self.filters = []
        self.set_dicts([] if dicts is None else dicts)

    @property
    def longest_key(self):
        # Longest key over all *enabled* dictionaries (0 when empty).
        return max((d.longest_key for d in self.dicts if d.enabled), default=0)

    def set_dicts(self, dicts):
        # Keep a shallow copy so later mutation of the caller's list
        # does not affect the collection.
        self.dicts = dicts[:]

    def _lookup(self, key, dicts=None, filters=()):
        # Return the first (highest-priority) unfiltered match, or None.
        if dicts is None:
            dicts = self.dicts
        key_len = len(key)
        if key_len > self.longest_key:
            return None
        for d in dicts:
            if not d.enabled:
                continue
            if key_len > d.longest_key:
                continue
            value = d.get(key)
            if value:
                if not any(f(key, value) for f in filters):
                    return value

    def _lookup_from_all(self, key, dicts=None, filters=()):
        ''' Key lookup from all dictionaries

        Returns list of (value, dictionary) tuples
        '''
        if dicts is None:
            dicts = self.dicts
        key_len = len(key)
        if key_len > self.longest_key:
            return None
        values = []
        for d in dicts:
            if not d.enabled:
                continue
            if key_len > d.longest_key:
                continue
            value = d.get(key)
            if value:
                if not any(f(key, value) for f in filters):
                    values.append((value, d))
        return values

    def __str__(self):
        return 'StenoDictionaryCollection' + repr(tuple(self.dicts))

    def __repr__(self):
        return str(self)

    def lookup(self, key):
        """Filtered lookup: first match not hidden by a filter."""
        return self._lookup(key, filters=self.filters)

    def raw_lookup(self, key):
        """Unfiltered lookup: first match, ignoring filters."""
        return self._lookup(key)

    def lookup_from_all(self, key):
        """Filtered lookup returning (value, dictionary) for every match."""
        return self._lookup_from_all(key, filters=self.filters)

    def raw_lookup_from_all(self, key):
        """Unfiltered lookup returning (value, dictionary) for every match."""
        return self._lookup_from_all(key)

    def reverse_lookup(self, value):
        """Return all keys mapping to `value`, honoring priority."""
        keys = set()
        for n, d in enumerate(self.dicts):
            if not d.enabled:
                continue
            # Ignore key if it's overridden by a higher priority dictionary.
            keys.update(k for k in d.reverse_lookup(value)
                        if self._lookup(k, dicts=self.dicts[:n]) is None)
        return keys

    def casereverse_lookup(self, value):
        """Return all translations case-insensitively matching `value`."""
        keys = set()
        for d in self.dicts:
            if not d.enabled:
                continue
            keys.update(d.casereverse_lookup(value))
        return keys

    def first_writable(self):
        '''Return the first writable dictionary.'''
        for d in self.dicts:
            if not d.readonly:
                return d
        raise KeyError('no writable dictionary')

    def set(self, key, value, path=None):
        """Set `key` in the dictionary at `path` (or the first writable one)."""
        if path is None:
            d = self.first_writable()
        else:
            d = self[path]
        d[key] = value

    def save(self, path_list=None):
        '''Save the dictionaries in <path_list>.

        If <path_list> is None, all writable dictionaries are saved'''
        if path_list is None:
            dict_list = [d for d in self if not d.readonly]
        else:
            dict_list = [self[path] for path in path_list]
        for d in dict_list:
            assert not d.readonly
            d.save()

    def get(self, path):
        """Return the dictionary with `path`, or None if absent."""
        for d in self.dicts:
            if d.path == path:
                return d

    def __getitem__(self, path):
        d = self.get(path)
        if d is None:
            raise KeyError(repr(path))
        return d

    def __iter__(self):
        # Iterating the collection yields the dictionaries' paths.
        for d in self.dicts:
            yield d.path

    def add_filter(self, f):
        self.filters.append(f)

    def remove_filter(self, f):
        self.filters.remove(f)
| 8,634
|
Python
|
.py
| 243
| 25.950617
| 96
| 0.575114
|
openstenoproject/plover
| 2,318
| 281
| 179
|
GPL-2.0
|
9/5/2024, 5:12:22 PM (Europe/Amsterdam)
|
18,679
|
orthography.py
|
openstenoproject_plover/plover/orthography.py
|
# Copyright (c) 2010-2011 Joshua Harlan Lifton.
# See LICENSE.txt for details.
"""Functions that implement some English orthographic rules."""
from plover import system
def make_candidates_from_rules(word, suffix, check=lambda x: True):
    """Expand every matching orthography rule for `word` + `suffix`.

    Only expansions accepted by `check` are returned.
    """
    joined = word + " ^ " + suffix
    candidates = []
    for pattern, template in system.ORTHOGRAPHY_RULES:
        match = pattern.match(joined)
        if match is None:
            continue
        expanded = match.expand(template)
        if check(expanded):
            candidates.append(expanded)
    return candidates
def _add_suffix(word, suffix):
    """Join `suffix` to `word`, applying the orthography rules."""
    in_dict_f = lambda candidate: candidate in system.ORTHOGRAPHY_WORDS
    candidates = []
    # Alias rules (alternate spellings of the suffix), dictionary-checked.
    alias = system.ORTHOGRAPHY_RULES_ALIASES.get(suffix)
    if alias is not None:
        candidates.extend(make_candidates_from_rules(word, alias, in_dict_f))
    # Try a simple join if it is in the dictionary.
    simple = word + suffix
    if in_dict_f(simple):
        candidates.append(simple)
    # Try rules with dict lookup.
    candidates.extend(make_candidates_from_rules(word, suffix, in_dict_f))
    if candidates:
        # Prefer the most prominent word; ties keep generation order
        # (equivalent to a stable sort followed by taking the head).
        return min(candidates, key=lambda c: system.ORTHOGRAPHY_WORDS[c])
    # Try rules without dict lookup.
    fallback = make_candidates_from_rules(word, suffix)
    if fallback:
        return fallback[0]
    # If all else fails then just do a simple join.
    return simple
def add_suffix(word, suffix):
    """Add a suffix to a word by applying the orthography rules.

    Arguments:

    word -- A word

    suffix -- The suffix to add
    """
    # Only the first space-delimited token is orthographically joined;
    # the remainder is appended verbatim.
    first, sep, remainder = suffix.partition(' ')
    return _add_suffix(word, first) + sep + remainder
| 1,814
|
Python
|
.py
| 45
| 33.466667
| 77
| 0.677927
|
openstenoproject/plover
| 2,318
| 281
| 179
|
GPL-2.0
|
9/5/2024, 5:12:22 PM (Europe/Amsterdam)
|
18,680
|
registry.py
|
openstenoproject_plover/plover/registry.py
|
from collections import namedtuple
import pkg_resources
from plover.oslayer.config import PLUGINS_PLATFORM
from plover import log
class Plugin:
    """A named plugin of a given type, wrapping the registered object."""

    def __init__(self, plugin_type, name, obj):
        self.plugin_type = plugin_type
        self.name = name
        self.obj = obj
        # Surface the wrapped object's docstring for help/tooling.
        self.__doc__ = obj.__doc__ or ''

    def __str__(self):
        return f'{self.plugin_type}:{self.name}'
# A distribution (installed package) and the set of plugins it provides.
PluginDistribution = namedtuple('PluginDistribution', 'dist plugins')
class Registry:
    """Registry of available plugins, indexed by type and (lowercased) name."""

    PLUGIN_TYPES = (
        'command',
        'dictionary',
        'extension',
        'gui',
        'gui.qt.machine_option',
        'gui.qt.tool',
        'machine',
        'macro',
        'meta',
        'system',
    )

    def __init__(self, suppress_errors=True):
        # plugin_type -> {lowercase name -> Plugin}
        self._plugins = {}
        # str(distribution) -> PluginDistribution
        self._distributions = {}
        self._suppress_errors = suppress_errors
        for plugin_type in self.PLUGIN_TYPES:
            self._plugins[plugin_type] = {}

    def register_plugin(self, plugin_type, name, obj):
        """Register `obj` as a plugin; lookups are case-insensitive."""
        plugin = Plugin(plugin_type, name, obj)
        self._plugins[plugin_type][name.lower()] = plugin
        return plugin

    def register_plugin_from_entrypoint(self, plugin_type, entrypoint):
        """Load and register the plugin provided by `entrypoint`.

        Load errors are logged, and re-raised unless `suppress_errors`
        was set at construction time.
        """
        log.info('%s: %s (from %s in %s)', plugin_type, entrypoint.name,
                 entrypoint.dist, entrypoint.dist.location)
        try:
            obj = entrypoint.load()
        except Exception:
            # Fix: catch `Exception` rather than using a bare `except`,
            # so KeyboardInterrupt/SystemExit still propagate.
            log.error('error loading %s plugin: %s (from %s)', plugin_type,
                      entrypoint.name, entrypoint.module_name, exc_info=True)
            if not self._suppress_errors:
                raise
        else:
            plugin = self.register_plugin(plugin_type, entrypoint.name, obj)
            # Keep track of distributions providing plugins.
            dist_id = str(entrypoint.dist)
            dist = self._distributions.get(dist_id)
            if dist is None:
                dist = PluginDistribution(entrypoint.dist, set())
                self._distributions[dist_id] = dist
            dist.plugins.add(plugin)

    def get_plugin(self, plugin_type, plugin_name):
        """Return the registered plugin; raise KeyError when missing."""
        return self._plugins[plugin_type][plugin_name.lower()]

    def list_plugins(self, plugin_type):
        """Return the plugins of `plugin_type`, sorted by name."""
        return sorted(self._plugins[plugin_type].values(),
                      key=lambda p: p.name)

    def list_distributions(self):
        """Return plugin-providing distributions, sorted by identifier."""
        return [dist for dist_id, dist in sorted(self._distributions.items())]

    def update(self):
        """(Re)scan installed entry points and register all plugins."""
        # Is support for the QT GUI available?
        try:
            pkg_resources.load_entry_point('plover', 'plover.gui', 'qt')
        except (pkg_resources.ResolutionError, ImportError):
            has_gui_qt = False
        else:
            has_gui_qt = True
        # Register available plugins.
        for plugin_type in self.PLUGIN_TYPES:
            if plugin_type.startswith('gui.qt.') and not has_gui_qt:
                continue
            entrypoint_type = f'plover.{plugin_type}'
            for entrypoint in pkg_resources.iter_entry_points(entrypoint_type):
                if 'gui_qt' in entrypoint.extras and not has_gui_qt:
                    continue
                self.register_plugin_from_entrypoint(plugin_type, entrypoint)
            # Also scan platform-specific entry points, when applicable.
            if PLUGINS_PLATFORM is None:
                continue
            entrypoint_type = f'plover.{PLUGINS_PLATFORM}.{plugin_type}'
            for entrypoint in pkg_resources.iter_entry_points(entrypoint_type):
                self.register_plugin_from_entrypoint(plugin_type, entrypoint)
# Module-level singleton used throughout Plover.
registry = Registry()
| 3,544
|
Python
|
.py
| 85
| 31.505882
| 79
| 0.602792
|
openstenoproject/plover
| 2,318
| 281
| 179
|
GPL-2.0
|
9/5/2024, 5:12:22 PM (Europe/Amsterdam)
|
18,681
|
misc.py
|
openstenoproject_plover/plover/misc.py
|
# Copyright (c) 2016 Open Steno Project
# See LICENSE.txt for details.
import os
from plover.oslayer.config import CONFIG_DIR
from plover.resource import ASSET_SCHEME
def popcount_8(v):
    """Population count for an 8 bit integer"""
    assert 0 <= v <= 255
    # Count the set bits via the binary string representation; for
    # values in [0, 255] this is equivalent to the SWAR bit tricks.
    return bin(v).count('1')
def expand_path(path):
    ''' Expand path.

    - if starting with "~/", it is substituted with the user home directory
    - if relative, it is resolved relative to CONFIG_DIR
    '''
    # Assets are virtual resources: return them untouched.
    if path.startswith(ASSET_SCHEME):
        return path
    expanded = os.path.expanduser(path)
    # `os.path.join` leaves absolute paths unchanged.
    return normalize_path(os.path.join(CONFIG_DIR, expanded))
def shorten_path(path):
    ''' Shorten path.
    - if the path is below CONFIG_DIR, a relative path to it is returned
    - if path is below the user home directory, "~/" is substituted to it
    Note: relative paths are automatically assumed to be relative to CONFIG_DIR.
    '''
    # Asset resources are opaque identifiers: return unchanged.
    if path.startswith(ASSET_SCHEME):
        return path
    # Canonicalize first so prefix comparisons below are reliable.
    path = normalize_path(os.path.join(CONFIG_DIR, path))
    config_dir = normalize_path(CONFIG_DIR)
    # Ensure the prefix ends with a separator so 'foo' does not match 'foobar'.
    if not config_dir.endswith(os.sep):
        config_dir += os.sep
    if path.startswith(config_dir):
        return path[len(config_dir):]
    # Note: CONFIG_DIR takes precedence over the home directory.
    home_dir = normalize_path(os.path.expanduser('~'))
    if not home_dir.endswith(os.sep):
        home_dir += os.sep
    if path.startswith(home_dir):
        return os.path.join('~', path[len(home_dir):])
    return path
def normalize_path(path):
    '''Return the canonical form of *path* (case-normalized on Windows).

    Asset resources are returned unchanged.
    '''
    if not path.startswith(ASSET_SCHEME):
        path = os.path.realpath(path)
        path = os.path.normcase(path)
    return path
def boolean(value):
    """Coerce *value* to a bool.

    Strings are matched case-insensitively against the usual spellings
    ('1'/'yes'/'true'/'on' and '0'/'no'/'false'/'off'); any other string
    raises ValueError. Non-strings go through bool().
    """
    if not isinstance(value, str):
        return bool(value)
    lowered = value.lower()
    if lowered in ('1', 'yes', 'true', 'on'):
        return True
    if lowered in ('0', 'no', 'false', 'off'):
        return False
    raise ValueError(value)
def to_surrogate_pair(char):
    """Return the UTF-16 code units of *char* as a list of ints.

    BMP code points map to themselves; astral code points are split into a
    high/low surrogate pair.
    """
    units = []
    for code_point in map(ord, char):
        if code_point < 0x10000:
            units.append(code_point)
        else:
            high, low = divmod(code_point - 0x10000, 0x400)
            units.append(0xD800 + high)
            units.append(0xDC00 + low)
    return units
| 2,455
|
Python
|
.py
| 67
| 30.268657
| 83
| 0.628788
|
openstenoproject/plover
| 2,318
| 281
| 179
|
GPL-2.0
|
9/5/2024, 5:12:22 PM (Europe/Amsterdam)
|
18,682
|
translation.py
|
openstenoproject_plover/plover/translation.py
|
# Copyright (c) 2013 Hesky Fisher
# See LICENSE.txt for details.
"""Stenography translation.
This module handles translating streams of strokes in translations. Two classes
compose this module:
Translation -- A data model class that encapsulates a sequence of Stroke objects
in the context of a particular dictionary. The dictionary in question maps
stroke sequences to strings, which are typically words or phrases, but could
also be meta commands.
Translator -- A state machine that takes in a single Stroke object at a time and
emits one or more Translation objects based on a greedy conversion algorithm.
"""
from collections import namedtuple
import re
from plover.steno import Stroke
from plover.steno_dictionary import StenoDictionaryCollection
from plover.registry import registry
from plover import system
# Matches either an already-escaped sequence (backslash + n/r/t) or a raw
# newline/carriage-return/tab character.
_ESCAPE_RX = re.compile('(\\\\[nrt]|[\n\r\t])')
# Raw control characters become their two-character escape; pre-existing
# two-character escapes get their backslash doubled so the round-trip with
# unescape_translation() is lossless.
_ESCAPE_REPLACEMENTS = {
    '\n': r'\n',
    '\r': r'\r',
    '\t': r'\t',
    r'\n': r'\\n',
    r'\r': r'\\r',
    r'\t': r'\\t',
}
def escape_translation(translation):
    """Return *translation* with control characters escaped for display/storage."""
    return _ESCAPE_RX.sub(lambda m: _ESCAPE_REPLACEMENTS[m.group(0)], translation)
# Matches \n/\r/\t optionally preceded by another backslash (the group
# distinguishes a doubled backslash from a plain escape).
_UNESCAPE_RX = re.compile(r'((?<!\\)|\\)\\([nrt])')
_UNESCAPE_REPLACEMENTS = {
    r'\\n': r'\n',
    r'\\r': r'\r',
    r'\\t': r'\t',
    r'\n' : '\n',
    r'\r' : '\r',
    r'\t' : '\t',
}
def unescape_translation(translation):
    """Inverse of escape_translation(): restore raw control characters."""
    return _UNESCAPE_RX.sub(lambda m: _UNESCAPE_REPLACEMENTS[m.group(0)], translation)
_LEGACY_MACROS_ALIASES = {
    '{*}': 'retro_toggle_asterisk',
    '{*!}': 'retro_delete_space',
    '{*?}': 'retro_insert_space',
    '{*+}': 'repeat_last_stroke',
}
_MACRO_RX = re.compile(r'=\w+(:|$)')
Macro = namedtuple('Macro', 'name stroke cmdline')
def _mapping_to_macro(mapping, stroke):
    '''Return a macro/stroke if mapping is one, or None otherwise.'''
    if mapping is None:
        # An untranslated correction stroke triggers the `undo` macro.
        return Macro('undo', stroke, '') if stroke.is_correction else None
    name = _LEGACY_MACROS_ALIASES.get(mapping)
    if name is not None:
        return Macro(name, stroke, '')
    if _MACRO_RX.match(mapping):
        # `=name[:cmdline]` syntax.
        name, _sep, cmdline = mapping[1:].partition(':')
        return Macro(name, stroke, cmdline)
    return None
class Translation:
    """The mapping between a sequence of Strokes and a text string.

    Instances flow out of translation and into formatting. Attributes:

    strokes -- the Stroke objects the translation is derived from.
    rtfcre -- tuple of the strokes' RTF/CRE strings; used as the dictionary
        lookup key and for equality.
    english -- the dictionary mapping for `rtfcre`, or None if unmapped.
    replaced -- translations superseded by this one; restored on undo.
    formatting -- formatter-maintained sticky state and undo information.
    """

    def __init__(self, outline, translation):
        """Associate *outline* (a list of Stroke objects) with *translation*
        (a mapping string, or None when the outline is unmapped).
        """
        self.strokes = outline
        self.rtfcre = tuple(stroke.rtfcre for stroke in outline)
        self.english = translation
        self.replaced = []
        self.formatting = []
        self.is_retrospective_command = False

    def __eq__(self, other):
        # Identity of a translation is its outline plus its mapping.
        return (self.rtfcre, self.english) == (other.rtfcre, other.english)

    def __ne__(self, other):
        return not self == other

    def __str__(self):
        if self.english is None:
            translation = 'None'
        else:
            escaped = escape_translation(self.english).replace('"', r'\"')
            translation = '"%s"' % escaped
        return 'Translation(%s : %s)' % (self.rtfcre, translation)

    def __repr__(self):
        return str(self)

    def __len__(self):
        return 0 if self.strokes is None else len(self.strokes)

    def has_undo(self):
        """Return True if undoing this translation would change the output."""
        # If there is no formatting then we're not dealing with a formatter
        # so all translations can be undone.
        # TODO: combos are not undoable but in some contexts they appear
        # as text. Should we provide a way to undo those? or is backspace
        # enough?
        if not self.formatting or self.replaced:
            return True
        return any(a.text or a.prev_replace for a in self.formatting)
class Translator:
    """Converts a stenotype key stream to a translation stream.
    An instance of this class serves as a state machine for processing key
    presses as they come off a stenotype machine. Key presses arrive in batches,
    each batch representing a single stenotype chord. The Translator class
    receives each chord as a Stroke and adds the Stroke to an internal,
    length-limited FIFO, which is then translated into a sequence of Translation
    objects. The resulting sequence of Translations is compared to those
    previously emitted by the state machine and a sequence of new Translations
    (some corrections and some new) is emitted.
    The internal Stroke FIFO is translated in a greedy fashion; the Translator
    finds a translation for the longest sequence of Strokes that starts with the
    oldest Stroke in the FIFO before moving on to newer Strokes that haven't yet
    been translated. In practical terms, this means that corrections are needed
    for cases in which a Translation comprises two or more Strokes, at least the
    first of which is a valid Translation in and of itself.
    For example, consider the case in which the first Stroke can be translated
    as 'cat'. In this case, a Translation object representing 'cat' will be
    emitted as soon as the Stroke is processed by the Translator. If the next
    Stroke is such that combined with the first they form 'catalogue', then the
    Translator will first issue a correction for the initial 'cat' Translation
    and then issue a new Translation for 'catalogue'.
    A Translator takes input via the translate method and provides translation
    output to every function that has registered via the add_callback method.
    """
    def __init__(self):
        self._undo_length = 0
        self._dictionary = None
        self.set_dictionary(StenoDictionaryCollection())
        self._listeners = set()
        self._state = _State()
        # Output scheduled for the next flush(): translations to retract,
        # and a count of new entries at the tail of the state history.
        self._to_undo = []
        self._to_do = 0
    def translate(self, stroke):
        """Process a single stroke."""
        self.translate_stroke(stroke)
        self.flush()
    def set_dictionary(self, d):
        """Set the dictionary."""
        self._dictionary = d
    def get_dictionary(self):
        return self._dictionary
    def add_listener(self, callback):
        """Add a listener for translation outputs.
        Arguments:
        callback -- A function that takes: a list of translations to undo, a
        list of new translations to render, and a translation that is the
        context for the new translations.
        """
        self._listeners.add(callback)
    def remove_listener(self, callback):
        """Remove a listener added by add_listener."""
        self._listeners.remove(callback)
    def set_min_undo_length(self, n):
        """Set the minimum number of strokes that can be undone.
        The actual number may be larger depending on the translations in the
        dictionary.
        """
        self._undo_length = n
        self._resize_translations()
    def flush(self, extra_translations=None):
        '''Process translations scheduled for undoing/doing.
        Arguments:
        extra_translations -- Extra translations to add to the list
        of translation to do. Note: those will
        not be saved to the state history.
        '''
        if self._to_do:
            # `prev` is the context preceding the new translations.
            prev = self._state.prev(self._to_do)
            do = self._state.translations[-self._to_do:]
        else:
            prev = self._state.prev()
            do = []
        if extra_translations is not None:
            do.extend(extra_translations)
        undo = self._to_undo
        self._to_undo = []
        self._to_do = 0
        if undo or do:
            self._output(undo, do, prev)
        self._resize_translations()
    def _output(self, undo, do, prev):
        # Notify all registered listeners; iteration order is unspecified.
        for callback in self._listeners:
            callback(undo, do, prev)
    def _resize_translations(self):
        # Keep enough history for the longest possible dictionary match
        # and for the configured undo depth.
        self._state.restrict_size(max(self._dictionary.longest_key,
                                      self._undo_length))
    def get_state(self):
        """Get the state of the translator."""
        return self._state
    def set_state(self, state):
        """Set the state of the translator."""
        self._state = state
    def clear_state(self):
        """Reset the state of the translator."""
        self._state = _State()
    def translate_stroke(self, stroke):
        """Process a stroke.
        See the class documentation for details of how Stroke objects
        are converted to Translation objects.
        Arguments:
        stroke -- The Stroke object to process.
        """
        max_len = self._dictionary.longest_key
        mapping = self._lookup_with_prefix(max_len, self._state.translations, [stroke])
        # Macros (undo, retro commands, ...) bypass normal translation.
        macro = _mapping_to_macro(mapping, stroke)
        if macro is not None:
            self.translate_macro(macro)
            return
        t = (
            # No prefix lookups (note we avoid looking up [stroke] again).
            self._find_longest_match(2, max_len, stroke) or
            # Return [stroke] result if mapped.
            (mapping is not None and Translation([stroke], mapping)) or
            # No direct match, try with suffixes.
            self._find_longest_match(1, max_len, stroke, system.SUFFIX_KEYS) or
            # Fallback to untranslate.
            Translation([stroke], None)
        )
        self.translate_translation(t)
    def translate_macro(self, macro):
        macro_fn = registry.get_plugin('macro', macro.name).obj
        macro_fn(self, macro.stroke, macro.cmdline)
    def translate_translation(self, t):
        # Retract whatever this translation supersedes, then emit it.
        self._undo(*t.replaced)
        self._do(t)
    def untranslate_translation(self, t):
        # Inverse of translate_translation: retract `t`, restore what it replaced.
        self._undo(t)
        self._do(*t.replaced)
    def _undo(self, *translations):
        # Pop from the state history, newest first; a translation that was
        # still pending output is simply cancelled, otherwise it is
        # scheduled for undo output.
        for t in reversed(translations):
            assert t == self._state.translations.pop()
            if self._to_do:
                self._to_do -= 1
            else:
                self._to_undo.insert(0, t)
    def _do(self, *translations):
        # Append to the state history and schedule for output.
        self._state.translations.extend(translations)
        self._to_do += len(translations)
    def _find_longest_match(self, min_len, max_len, stroke, suffixes=()):
        '''Find mapping with the longest series of strokes.
        min_len -- Minimum number of strokes involved.
        max_len -- Maximum number of strokes involved.
        stroke -- The latest stroke.
        suffixes -- List of suffix keys to try.
        Return the corresponding translation, or None if no match is found.
        Note: the code either look for a direct match (empty suffix
        list), or assume the last stroke contains an implicit suffix
        and look for a corresponding match, but not both.
        '''
        if suffixes:
            # Implicit suffix lookup, determine possible suffixes.
            suffixes = self._lookup_involved_suffixes(stroke, suffixes)
            if not suffixes:
                # No suffix involved, abort.
                return None
        # Figure out how much of the translation buffer can be involved in this
        # stroke and build the stroke list for translation.
        num_strokes = 1
        translation_count = 0
        for t in reversed(self._state.translations):
            num_strokes += len(t)
            if num_strokes > max_len:
                break
            translation_count += 1
        translation_index = len(self._state.translations) - translation_count
        translations = self._state.translations[translation_index:]
        # The new stroke can either create a new translation or replace
        # existing translations by matching a longer entry in the
        # dictionary.
        for i in range(len(translations)+1):
            # Try the longest candidate first: replace all of `translations`,
            # then progressively fewer.
            replaced = translations[i:]
            strokes = [s for t in replaced for s in t.strokes]
            strokes.append(stroke)
            if len(strokes) < min_len:
                continue
            mapping = self._lookup_with_prefix(max_len, translations[:i], strokes, suffixes)
            if mapping is not None:
                t = Translation(strokes, mapping)
                t.replaced = replaced
                return t
        return None
    def _lookup_strokes(self, strokes):
        '''Look for a matching translation.
        strokes -- a list of Stroke instances.
        Return the resulting mapping.
        '''
        return self._dictionary.lookup(tuple(s.rtfcre for s in strokes))
    def _lookup_with_suffix(self, strokes, suffixes=()):
        '''Look for a matching translation.
        suffixes -- A list of (suffix stroke, suffix mapping) pairs to try.
        If the suffix list is empty, look for a direct match.
        Otherwise, assume the last stroke contains an implicit suffix,
        and look for a corresponding match.
        '''
        if not suffixes:
            # No suffix, do a regular lookup.
            return self._lookup_strokes(strokes)
        for suffix_stroke, suffix_mapping in suffixes:
            # Strip the suffix keys from the last stroke and look up the rest.
            assert suffix_stroke in strokes[-1]
            main_mapping = self._lookup_strokes(strokes[:-1] + [strokes[-1] - suffix_stroke])
            if main_mapping is not None:
                return main_mapping + ' ' + suffix_mapping
        return None
    def _lookup_involved_suffixes(self, stroke, suffixes):
        '''Find possible implicit suffixes for a stroke.
        stroke -- The stroke to check for implicit suffixes.
        suffixes -- List of supported suffix keys.
        Return a list of (suffix_stroke, suffix_mapping) pairs.
        '''
        possible_suffixes = []
        for suffix_stroke in map(Stroke, suffixes):
            if suffix_stroke not in stroke:
                continue
            # Only suffix keys that are themselves mapped are candidates.
            suffix_mapping = self._lookup_strokes((suffix_stroke,))
            if suffix_mapping is None:
                continue
            possible_suffixes.append((suffix_stroke, suffix_mapping))
        return possible_suffixes
    def lookup(self, strokes, suffixes=()):
        # Public lookup: direct match first, then implicit-suffix matches.
        result = self._lookup_strokes(strokes)
        if result is not None:
            return result
        suffixes = self._lookup_involved_suffixes(strokes[-1], suffixes)
        if not suffixes:
            return None
        return self._lookup_with_suffix(strokes, suffixes)
    @staticmethod
    def _previous_word_is_finished(last_translations):
        # True when the formatter state says the last output ended a word
        # (or when there is no usable formatting information).
        if not last_translations:
            return True
        formatting = last_translations[-1].formatting
        if not formatting:
            return True
        return formatting[-1].word_is_finished
    def _lookup_with_prefix(self, max_len, last_translations, strokes, suffixes=()):
        # Try a word-initial lookup (PREFIX_STROKE prepended) when a new word
        # is starting; fall back to a plain lookup.
        if len(strokes) < max_len and self._previous_word_is_finished(last_translations):
            mapping = self._lookup_with_suffix([Stroke.PREFIX_STROKE] + strokes, suffixes)
            if mapping is not None:
                return mapping
        if len(strokes) <= max_len:
            return self._lookup_with_suffix(strokes, suffixes)
        return None
class _State:
    """Current state of the translator state machine.

    Attributes:
    translations -- all previous translations that are still undoable.
    tail -- the oldest translation still saved but no longer undoable.
    """

    def __init__(self):
        self.translations = []
        self.tail = None

    def prev(self, count=None):
        """Return the most recent translations (excluding the last *count*
        entries when given), falling back to the tail, or None."""
        recent = self.translations if count is None else self.translations[:-count]
        if recent:
            return recent
        if self.tail is not None:
            return [self.tail]
        return None

    def restrict_size(self, n):
        """Drop history so that at least *n* strokes (whole translations)
        are kept; the newest dropped translation becomes the tail."""
        strokes_seen = 0
        keep = 0
        for translation in reversed(self.translations):
            strokes_seen += len(translation)
            keep += 1
            if strokes_seen >= n:
                break
        cut = len(self.translations) - keep
        if cut:
            self.tail = self.translations[cut - 1]
        del self.translations[:cut]
| 17,317
|
Python
|
.py
| 399
| 34.694236
| 93
| 0.636266
|
openstenoproject/plover
| 2,318
| 281
| 179
|
GPL-2.0
|
9/5/2024, 5:12:22 PM (Europe/Amsterdam)
|
18,683
|
loading_manager.py
|
openstenoproject_plover/plover/dictionary/loading_manager.py
|
# Copyright (c) 2013 Hesky Fisher
# See LICENSE.txt for details.
"""Centralized place for dictionary loading operation."""
import threading
import time
from plover.dictionary.base import load_dictionary
from plover.exception import DictionaryLoaderException
from plover.resource import resource_timestamp
from plover import log
class DictionaryLoadingManager:
    """Cache of per-filename dictionary loading operations."""

    def __init__(self):
        self.dictionaries = {}

    def __len__(self):
        return len(self.dictionaries)

    def __getitem__(self, filename):
        return self.dictionaries[filename].get()

    def __contains__(self, filename):
        return filename in self.dictionaries

    def start_loading(self, filename):
        """Return the loading operation for *filename*, starting a new one
        if none is cached or the cached one is outdated."""
        cached = self.dictionaries.get(filename)
        if cached is not None and not cached.needs_reloading():
            return cached
        action = 'loading' if cached is None else 'reloading'
        log.info('%s dictionary: %s', action, filename)
        operation = DictionaryLoadingOperation(filename)
        self.dictionaries[filename] = operation
        return operation

    def unload_outdated(self):
        """Forget cached operations whose backing resource changed."""
        outdated = [filename
                    for filename, operation in self.dictionaries.items()
                    if operation.needs_reloading()]
        for filename in outdated:
            del self.dictionaries[filename]

    def load(self, filenames):
        """Load *filenames* (reusing cached operations) and return results
        in the same order."""
        start_time = time.time()
        self.dictionaries = {f: self.start_loading(f) for f in filenames}
        results = [self.dictionaries[f].get() for f in filenames]
        log.info('loaded %u dictionaries in %.3fs',
                 len(results), time.time() - start_time)
        return results
class DictionaryLoadingOperation:
    """Load a single dictionary on a background thread.

    The load starts eagerly in the constructor; get() joins the thread and
    returns either the loaded dictionary or a DictionaryLoaderException on
    failure. Either way, the result carries a `timestamp` attribute with the
    resource timestamp observed at load time (None if unavailable).
    """

    def __init__(self, filename):
        self.loading_thread = threading.Thread(target=self.load)
        self.filename = filename
        self.result = None
        self.loading_thread.start()

    def needs_reloading(self):
        """Return True if the backing resource changed since the load.

        NOTE(review): this reads self.result.timestamp, so it assumes the
        load already finished (callers go through get() first) — verify.
        """
        try:
            new_timestamp = resource_timestamp(self.filename)
        except Exception:
            # Bad resource name, permission denied, path
            # does not exist, ...
            # (Was a bare `except:`, which also swallowed KeyboardInterrupt
            # and SystemExit.)
            new_timestamp = None
        # If no change in timestamp: don't reload.
        if new_timestamp == self.result.timestamp:
            return False
        # If we could not get the new timestamp:
        # the dictionary is not available anymore,
        # attempt to reload to notify the user.
        if new_timestamp is None:
            return True
        # If we're here, and no previous timestamp exists,
        # then the file was previously inaccessible, reload.
        if self.result.timestamp is None:
            return True
        # Otherwise, just compare timestamps.
        return self.result.timestamp < new_timestamp

    def load(self):
        """Thread target: load the dictionary, storing either the dictionary
        or a DictionaryLoaderException, stamped with the resource timestamp."""
        timestamp = None
        try:
            timestamp = resource_timestamp(self.filename)
            self.result = load_dictionary(self.filename)
        except Exception as e:
            log.debug('loading dictionary %s failed', self.filename, exc_info=True)
            self.result = DictionaryLoaderException(self.filename, e)
        self.result.timestamp = timestamp

    def get(self):
        """Wait for the load to finish and return its result."""
        self.loading_thread.join()
        return self.result
| 3,149
|
Python
|
.py
| 79
| 31.278481
| 89
| 0.645478
|
openstenoproject/plover
| 2,318
| 281
| 179
|
GPL-2.0
|
9/5/2024, 5:12:22 PM (Europe/Amsterdam)
|
18,684
|
json_dict.py
|
openstenoproject_plover/plover/dictionary/json_dict.py
|
# Copyright (c) 2013 Hesky Fisher
# See LICENSE.txt for details.
"""Parsing a json formatted dictionary.
"""
try:
import simplejson as json
except ImportError:
import json
from plover.dictionary.helpers import StenoNormalizer
from plover.steno_dictionary import StenoDictionary
from plover.steno import steno_to_sort_key
class JsonDictionary(StenoDictionary):
    """Steno dictionary stored as a JSON object of steno -> translation."""

    def _load(self, filename):
        """Read *filename*, decoding as UTF-8 with a Latin-1 fallback, and
        load its mappings (normalizing each steno key)."""
        with open(filename, 'rb') as fp:
            raw = fp.read()
        for encoding in ('utf-8', 'latin-1'):
            try:
                contents = raw.decode(encoding)
            except UnicodeDecodeError:
                continue
            break
        else:
            raise ValueError('\'%s\' encoding could not be determined' % (filename,))
        mappings = dict(json.loads(contents))
        with StenoNormalizer(filename) as normalize_steno:
            self.update((normalize_steno(steno), translation)
                        for steno, translation in mappings.items())

    def _save(self, filename):
        """Write the mappings as UTF-8 JSON, sorted in steno order."""
        entries = [('/'.join(steno), translation)
                   for steno, translation in self.items()]
        entries.sort(key=lambda entry: steno_to_sort_key(entry[0], strict=False))
        with open(filename, 'w', encoding='utf-8', newline='\n') as fp:
            json.dump(dict(entries), fp, ensure_ascii=False,
                      indent=0, separators=(',', ': '))
            fp.write('\n')
| 1,359
|
Python
|
.py
| 34
| 31.5
| 85
| 0.614275
|
openstenoproject/plover
| 2,318
| 281
| 179
|
GPL-2.0
|
9/5/2024, 5:12:22 PM (Europe/Amsterdam)
|
18,685
|
rtfcre_parse.py
|
openstenoproject_plover/plover/dictionary/rtfcre_parse.py
|
from collections import deque
import sys
import re
from rtf_tokenize import RtfTokenizer
from plover import log
class RtfParseError(Exception):
    """Parse error at a given position (0-based args, 1-based in the message)."""

    def __init__(self, lnum, cnum, fmt, *fmt_args):
        location = 'line %u, column %u: ' % (lnum + 1, cnum + 1)
        super().__init__(location + fmt % fmt_args)
class BadRtfError(Exception):
    """Raised when the input does not look like an RTF document at all."""

    def __init__(self, fmt, *fmt_args):
        super().__init__(fmt % fmt_args)
def finalize_translation(text):
    """Post-process a completed translation string.

    Isolated leading punctuation becomes an explicit `{.}`-style atom, and
    runs of two or more leading/trailing spaces are wrapped in `{^...^}`
    attach operators.
    """
    if not text:
        return text
    # caseCATalyst doesn't put punctuation in \cxp: treat any isolated
    # punctuation at the beginning of the translation as special.
    if text[0] in '.?!:;,' and text[1:] in ('', ' '):
        return '{' + text[0] + '}' + text[1:]
    leading = len(text) - len(text.lstrip())
    if leading > 1:
        text = '{^' + text[:leading] + '^}' + text[leading:]
    trailing = len(text) - len(text.rstrip())
    if trailing > 1:
        text = text[:-trailing] + '{^' + text[-trailing:] + '^}'
    return text
def parse_rtfcre(text, normalize=lambda s: s, skip_errors=True):
    """Yield (steno, translation) pairs parsed from RTF/CRE document *text*.

    normalize -- callable applied to each raw steno string before yielding.
    skip_errors -- when true, recoverable errors are logged and parsing
    continues; when false, RtfParseError is raised instead.
    Raises BadRtfError if the document header is not RTF.
    """
    not_text = r'\{}'
    style_rx = re.compile('s[0-9]+')
    tokenizer = RtfTokenizer(text)
    next_token = tokenizer.next_token
    rewind_token = tokenizer.rewind_token
    # Check header.
    if next_token() != '{' or next_token() != r'\rtf1':
        raise BadRtfError('invalid header')
    # Parse header/document.
    # g_destination/g_text track the current group; enclosing groups are
    # saved on group_stack and restored on '}'.
    g_destination, g_text = 'rtf1', ''
    group_stack = deque()
    stylesheet = {}
    steno = None
    while True:
        token = next_token()
        # EOF.
        if token is None:
            err = RtfParseError(tokenizer.lnum, tokenizer.cnum, 'unexpected end of file')
            if not skip_errors:
                raise err
            log.error('%s', err)
            break
        # Group start.
        if token == '{':
            # Always rewind the last token?
            rewind = False
            # Is it an ignored group?
            is_ignored = False
            destination = None
            token = next_token()
            # Ignored?
            if token == r'\*':
                token = next_token()
                is_ignored = True
            # Destination?
            if token[0] == '\\':
                destination = token[1:]
                # Steno.
                if destination == 'cxs':
                    if group_stack:
                        err = RtfParseError(tokenizer.lnum, tokenizer.cnum, 'starting new mapping, but previous is unfinished')
                        if not skip_errors:
                            raise err
                        log.error('%s', err)
                        # Simulate missing group end(s): push the tokens just
                        # read back, preceded by enough '}' to close every
                        # open group, then re-process from the top.
                        assert group_stack[0][0] == 'rtf1'
                        rewind_token(token)
                        if is_ignored:
                            rewind_token(r'\*')
                        rewind_token('{')
                        for __ in range(len(group_stack)):
                            rewind_token('}')
                        continue
                    # A new \cxs group finishes the previous mapping.
                    if steno is not None:
                        yield normalize(steno), finalize_translation(g_text)
                        steno = None
                    is_ignored = False
                    # Reset text.
                    g_text = ''
                elif destination in {
                    # Fingerspelling.
                    'cxfing',
                    # Stenovations extensions...
                    'cxsvatdictflags',
                    # Plover macro.
                    'cxplovermacro',
                    # Plover meta.
                    'cxplovermeta',
                }:
                    is_ignored = False
                elif style_rx.fullmatch(destination):
                    pass
                else:
                    # In the case of e.g. `{\par...`,
                    # `\par` must be handled as a
                    # control word.
                    rewind = True
            else:
                rewind = True
            if is_ignored:
                # Skip ignored content.
                stack_depth = 1
                while True:
                    token = next_token()
                    if token is None:
                        err = RtfParseError(tokenizer.lnum, tokenizer.cnum, 'unexpected end of file')
                        if not skip_errors:
                            raise err
                        log.error('%s', err)
                        break
                    if token == '{':
                        stack_depth += 1
                    elif token == '}':
                        stack_depth -= 1
                        if not stack_depth:
                            break
                if stack_depth:
                    break
                continue
            group_stack.append((g_destination, g_text))
            g_destination, g_text = destination, ''
            if rewind:
                rewind_token(token)
            continue
        # Group end.
        if token == '}':
            if not group_stack:
                # Closing the root group: only EOF is valid afterwards.
                token = next_token()
                if token is None:
                    # The end...
                    break
                err = RtfParseError(tokenizer.lnum, tokenizer.cnum, 'expected end of file, got: %r', token[0])
                if not skip_errors:
                    raise err
                log.error('%s', err)
                rewind_token(token)
                continue
            # Steno.
            if g_destination == 'cxs':
                steno = g_text
                text = ''
            # Punctuation.
            elif g_destination == 'cxp':
                text = g_text.strip()
                if text in {'.', '!', '?', ',', ';', ':'}:
                    text = '{' + text + '}'
                elif text == "'":
                    text = "{^'}"
                elif text in ('-', '/'):
                    text = '{^' + text + '^}'
                else:
                    # Show unknown punctuation as given.
                    text = '{^' + g_text + '^}'
            # Stenovations extensions...
            elif g_destination == 'cxsvatdictflags':
                if 'N' in g_text:
                    text = '{-|}'
                else:
                    text = ''
            # Fingerspelling.
            elif g_destination == 'cxfing':
                text = '{&' + g_text + '}'
            # Plover macro.
            elif g_destination == 'cxplovermacro':
                text = '=' + g_text
            # Plover meta.
            elif g_destination == 'cxplovermeta':
                text = '{' + g_text + '}'
            # Style declaration.
            elif (g_destination is not None and
                  style_rx.fullmatch(g_destination) and
                  group_stack[-1][0] == 'stylesheet'):
                # NOTE(review): this branch does not assign `text`, so the
                # `g_text += text` below reuses the previous iteration's
                # value — verify this is intended for stylesheet groups.
                stylesheet[g_destination] = g_text
            else:
                text = g_text
            # Restore the enclosing group and append this group's rendering.
            g_destination, g_text = group_stack.pop()
            g_text += text
            continue
        # Control char/word.
        if token[0] == '\\':
            ctrl = token[1:]
            text = {
                # Ignore.
                '*': '',
                # Hard space.
                '~': '{^ ^}',
                # Non-breaking hyphen.
                '_': '{^-^}',
                # Escaped newline: \par.
                '': '\n\n',
                '\n': '\n\n',
                '\r': '\n\n',
                # Escaped characters.
                '\\': '\\',
                '{': '{',
                '}': '}',
                '-': '-',
                # Line break.
                'line': '\n',
                # Paragraph break.
                'par': '\n\n',
                # Tab.
                'tab': '\t',
                # Force Cap.
                'cxfc': '{-|}',
                # Force Lower Case.
                'cxfl': '{>}',
            }.get(ctrl)
            if text is not None:
                g_text += text
            # Delete Spaces.
            elif ctrl == 'cxds':
                # Look ahead to decide between bare attach, infix and prefix.
                token = next_token()
                if token is None or token[0] in not_text:
                    g_text += '{^}'
                    rewind_token(token)
                else:
                    text = token
                    token = next_token()
                    if token == r'\cxds':
                        # Infix
                        g_text += '{^' + text + '^}'
                    else:
                        # Prefix.
                        g_text += '{^' + text + '}'
                        rewind_token(token)
            # Delete Last Stroke.
            elif ctrl == 'cxdstroke':
                g_text = '=undo'
            # Fingerspelling.
            elif ctrl == 'cxfing':
                token = next_token()
                if token is None or token[0] in not_text:
                    err = RtfParseError(tokenizer.lnum, tokenizer.cnum, 'expected text, got: %r', token)
                    if not skip_errors:
                        raise err
                    log.error('%s', err)
                    rewind_token(token)
                else:
                    g_text += '{&' + token + '}'
            elif style_rx.fullmatch(ctrl):
                # Workaround for caseCATalyst declaring
                # new styles without a preceding \par.
                if not g_text.endswith('\n\n'):
                    g_text += '\n\n'
                # Indent continuation styles.
                if stylesheet.get(ctrl, '').startswith('Contin'):
                    g_text += '    '
            continue
        # Text.
        text = token
        token = next_token()
        if token == r'\cxds':
            # Suffix.
            text = '{' + text + '^}'
        else:
            rewind_token(token)
        g_text += text
    # Emit the last pending mapping, if any.
    if steno is not None:
        yield normalize(steno), finalize_translation(g_text)
def main(todo, filename):
    """Debugging helper: run the tokenizer/parser over *filename*.

    todo -- one of 'tokenize', 'parse', 'dump_tokenize', 'dump_parse';
    anything else raises ValueError.
    """
    with open(filename, 'rb') as fp:
        text = fp.read().decode('cp1252')
    if todo == 'tokenize':
        tokenizer = RtfTokenizer(text)
        while tokenizer.next_token() is not None:
            pass
        return
    if todo == 'parse':
        for __ in parse_rtfcre(text):
            pass
        return
    if todo == 'dump_tokenize':
        tokenizer = RtfTokenizer(text)
        while True:
            token = tokenizer.next_token()
            if token is None:
                return
            print('%3u:%-3u %r' % (tokenizer.lnum+1, tokenizer.cnum+1, token))
    if todo == 'dump_parse':
        for mapping in parse_rtfcre(text):
            print(mapping)
        return
    raise ValueError(todo)
if __name__ == '__main__':
    assert len(sys.argv) == 3
    main(sys.argv[1], sys.argv[2])
| 10,771
|
Python
|
.py
| 293
| 21.836177
| 127
| 0.416515
|
openstenoproject/plover
| 2,318
| 281
| 179
|
GPL-2.0
|
9/5/2024, 5:12:22 PM (Europe/Amsterdam)
|
18,686
|
__init__.py
|
openstenoproject_plover/plover/dictionary/__init__.py
|
# Copyright (c) 2013 Hesky Fisher
# See LICENSE.txt for details.
"""Various dictionary formats."""
| 100
|
Python
|
.py
| 3
| 32
| 33
| 0.75
|
openstenoproject/plover
| 2,318
| 281
| 179
|
GPL-2.0
|
9/5/2024, 5:12:22 PM (Europe/Amsterdam)
|
18,687
|
base.py
|
openstenoproject_plover/plover/dictionary/base.py
|
# Copyright (c) 2013 Hesky Fisher
# See LICENSE.txt for details.
# TODO: maybe move this code into the StenoDictionary itself. The current saver
# structure is odd and awkward.
# TODO: write tests for this file
"""Common elements to all dictionary formats."""
from os.path import splitext
import functools
import threading
from plover.registry import registry
def _get_dictionary_class(filename):
    """Return the dictionary class registered for *filename*'s extension.

    Raises ValueError listing the supported extensions when none matches.
    """
    extension = splitext(filename)[1].lower()[1:]
    try:
        plugin = registry.get_plugin('dictionary', extension)
    except KeyError:
        supported = ', '.join(p.name for p in registry.list_plugins('dictionary'))
        raise ValueError(
            'Unsupported extension: %s. Supported extensions: %s' %
            (extension, supported))
    return plugin.obj
def _locked(fn):
    """Decorate *fn* so concurrent calls are serialized by a per-function lock.

    The wrapped function's return value is now propagated (the previous
    implementation silently discarded it, which was harmless for `save`,
    returning None, but wrong for any other use).
    """
    lock = threading.Lock()
    @functools.wraps(fn)
    def wrapper(*args, **kwargs):
        with lock:
            return fn(*args, **kwargs)
    return wrapper
def _threaded(fn):
    """Decorate *fn* to run asynchronously in a freshly started thread.

    Now returns the started `threading.Thread` so callers may join on it;
    existing callers that ignore the return value are unaffected.
    """
    @functools.wraps(fn)
    def wrapper(*args, **kwargs):
        t = threading.Thread(target=fn, args=args, kwargs=kwargs)
        t.start()
        return t
    return wrapper
def create_dictionary(resource, threaded_save=True):
    '''Create a new dictionary.
    The format is inferred from the extension.
    Note: the file is not created! The resulting dictionary save
    method must be called to finalize the creation on disk.
    '''
    dictionary = _get_dictionary_class(resource).create(resource)
    if not threaded_save:
        return dictionary
    # Serialize saves and run them off the calling thread.
    dictionary.save = _threaded(_locked(dictionary.save))
    return dictionary
def load_dictionary(resource, threaded_save=True):
    '''Load a dictionary from a file.
    The format is inferred from the extension.
    '''
    dictionary = _get_dictionary_class(resource).load(resource)
    # Read-only dictionaries are never saved, so no wrapping is needed.
    if threaded_save and not dictionary.readonly:
        dictionary.save = _threaded(_locked(dictionary.save))
    return dictionary
| 1,865
|
Python
|
.py
| 51
| 30.862745
| 80
| 0.683509
|
openstenoproject/plover
| 2,318
| 281
| 179
|
GPL-2.0
|
9/5/2024, 5:12:22 PM (Europe/Amsterdam)
|
18,688
|
helpers.py
|
openstenoproject_plover/plover/dictionary/helpers.py
|
from plover import _, log
from plover.misc import shorten_path
from plover.steno import normalize_steno
class StenoNormalizer:
    """Context manager that normalizes steno strings while counting failures.

    Used during dictionary loads: entering yields the `normalize` callable;
    exiting logs a single warning if any entries had invalid steno.
    """

    def __init__(self, dictionary_path):
        self._dictionary_path = dictionary_path
        self._errors_count = 0

    def normalize(self, steno):
        """Return the normalized steno tuple, falling back to a plain
        '/'-split (and counting the error) when normalization fails."""
        try:
            result = normalize_steno(steno)
        except ValueError:
            self._errors_count += 1
            result = tuple(steno.split('/'))
        return result

    def __enter__(self):
        return self.normalize

    def __exit__(self, exc_type, exc_value, traceback):
        # Only warn on a clean exit with at least one invalid entry.
        if exc_type is not None or not self._errors_count:
            return
        log.warning(_('dictionary `%s` loaded with %u invalid steno errors'),
                    shorten_path(self._dictionary_path), self._errors_count)
| 772
|
Python
|
.py
| 19
| 32.157895
| 81
| 0.633199
|
openstenoproject/plover
| 2,318
| 281
| 179
|
GPL-2.0
|
9/5/2024, 5:12:22 PM (Europe/Amsterdam)
|
18,689
|
rtfcre_dict.py
|
openstenoproject_plover/plover/dictionary/rtfcre_dict.py
|
# Copyright (c) 2013 Hesky Fisher
# See LICENSE.txt for details.
#
# TODO: Convert non-ascii characters to UTF8
# TODO: What does ^ mean in Eclipse?
# TODO: What does #N mean in Eclipse?
# TODO: convert supported commands from Eclipse
"""Parsing an RTF/CRE dictionary.
RTF/CRE spec:
https://web.archive.org/web/20201017075356/http://www.legalxml.org/workgroups/substantive/transcripts/cre-spec.htm
"""
import re
import string
from plover import __version__ as plover_version
from plover.dictionary.helpers import StenoNormalizer
from plover.formatting import ATOM_RE
from plover.steno_dictionary import StenoDictionary
from .rtfcre_parse import parse_rtfcre
# RTF header written when saving: RTF 1, ANSI charset, CRE revision 100,
# Plover (with its version) as the creating system, one 'Normal' style.
HEADER = (r'{\rtf1\ansi{\*\cxrev100}'
          r'\cxdict{\*\cxsystem Plover %s}'
          r'{\stylesheet{\s0 Normal;}}') % plover_version
class RegexFormatter:
    """Apply the first matching (pattern, replacement-template) spec to a string.

    All patterns are combined into one alternation; `Match.lastindex` is then
    used to find which alternative matched, via the `_format_for_lastindex`
    table (index 0 is a placeholder since `lastindex` is 1-based). Each table
    entry is (needed group numbers, replacement template); matched groups are
    passed through `escape_fn` before being substituted into the template.
    """

    def __init__(self, spec_list, escape_fn):
        self._escape_fn = escape_fn
        self._format_for_lastindex = [None]
        pattern_list = []
        for pattern, replacement in spec_list:
            # Offset of this pattern's groups within the combined regex.
            num_groups = len(self._format_for_lastindex)
            pattern_groups = re.compile(pattern).groups
            if pattern_groups:
                # Collect the combined-regex group numbers referenced by the
                # replacement template's {N} fields.
                needed = []
                for token in string.Formatter().parse(replacement):
                    field_name = token[1]
                    if not field_name:
                        continue
                    group = int(field_name)
                    assert 0 <= group <= pattern_groups
                    needed.append(group + num_groups)
            else:
                # Wrap group-less patterns so every alternative sets lastindex.
                pattern = '(' + pattern + ')'
                pattern_groups = 1
                needed = []
            # One table entry per group, so any lastindex inside this
            # pattern's group range maps back to its replacement.
            for n in range(pattern_groups):
                self._format_for_lastindex.append((needed, replacement))
            pattern_list.append(pattern)
        self._format_rx = re.compile('|'.join(pattern_list))

    def format(self, s):
        # Return the formatted replacement, or None if no spec matches *s*.
        m = self._format_rx.fullmatch(s)
        if m is None:
            return None
        needed, replacement = self._format_for_lastindex[m.lastindex]
        return replacement.format(*(self._escape_fn(m.group(g)) for g in needed))
class TranslationFormatter:
    """Convert a Plover translation string to RTF/CRE markup for saving.

    Plover metas with a standard RTF/CRE equivalent are mapped to the
    matching control words; anything else is wrapped in a custom
    `cxplovermeta`/`cxplovermacro` group so it round-trips on load.
    """

    # Characters that must be escaped/rewritten for RTF output.
    TO_ESCAPE = (
        (r'([\\{}])', r'\\\1' ),
        (r'\n\n' , r'\\par ' ),
        (r'\n' , r'\\line '),
        (r'\t' , r'\\tab ' ),
    )
    # Per-atom rewrites, applied when the whole translation did not
    # match one of TRANSLATIONS_FORMATTERS.
    ATOMS_FORMATTERS = (
        # Note order matters!
        (r'{\.}' , r'{{\cxp. }}' ),
        (r'{!}' , r'{{\cxp! }}' ),
        (r'{\?}' , r'{{\cxp? }}' ),
        (r'{\,}' , r'{{\cxp, }}' ),
        (r'{:}' , r'{{\cxp: }}' ),
        (r'{;}' , r'{{\cxp; }}' ),
        (r'{\^ \^}' , r'\~' ),
        (r'{\^-\^}' , r'\_' ),
        (r'{\^\^?}' , r'{{\cxds}}' ),
        (r'{\^([^^}]*)\^}' , r'{{\cxds {0}\cxds}}' ),
        (r'{\^([^^}]*)}' , r'{{\cxds {0}}}' ),
        (r'{([^^}]*)\^}' , r'{{{0}\cxds}}' ),
        (r'{-\|}' , r'\cxfc ' ),
        (r'{>}' , r'\cxfl ' ),
        (r'{ }' , r' ' ),
        (r'{&([^}]+)}' , r'{{\cxfing {0}}}' ),
        # Fallback: keep unknown metas in a custom Plover group.
        (r'{(.*)}' , r'{{\*\cxplovermeta {0}}}' ),
    )
    # Whole-translation rewrites (retro commands and macros).
    TRANSLATIONS_FORMATTERS = (
        (r'{\*}' , r'{{\*\cxplovermacro retrospective_toggle_asterisk}}'),
        (r'{\*!}' , r'{{\*\cxplovermacro retrospective_delete_space}}'),
        (r'{\*\?}' , r'{{\*\cxplovermacro retrospective_insert_space}}'),
        (r'{\*\+}' , r'{{\*\cxplovermacro repeat_last_stroke}}'),
        (r'=undo' , r'\cxdstroke' ),
        (r'=(\w+(?::.*)?)' , r'{{\*\cxplovermacro {0}}}' ),
    )

    def __init__(self):
        # Pre-compile the escape patterns once.
        self._to_escape = [
            (re.compile(pattern), replacement)
            for pattern, replacement in self.TO_ESCAPE
        ]
        self._atom_formatter = RegexFormatter(self.ATOMS_FORMATTERS, self.escape)
        self._translation_formatter = RegexFormatter(self.TRANSLATIONS_FORMATTERS, self.escape)

    def escape(self, text):
        """Escape RTF special characters and whitespace controls in `text`."""
        for rx, replacement in self._to_escape:
            text = rx.sub(replacement, text)
        return text

    def format(self, translation):
        """Return the RTF/CRE form of `translation`."""
        # First try to match the translation as a whole (macros etc.).
        s = self._translation_formatter.format(translation)
        if s is not None:
            return s
        # Otherwise, rewrite it atom by atom.
        parts = []
        for atom in ATOM_RE.findall(translation):
            atom = atom.strip()
            if not atom:
                continue
            s = self._atom_formatter.format(atom)
            if s is None:
                # Plain text atom: just escape it.
                s = self.escape(atom)
            parts.append(s)
        return ''.join(parts)
class RtfDictionary(StenoDictionary):
    """Steno dictionary backed by an RTF/CRE file."""

    def _load(self, filename):
        # RTF/CRE files use the Windows-1252 code page.
        with open(filename, 'rb') as fp:
            text = fp.read().decode('cp1252')
        with StenoNormalizer(filename) as normalize_steno:
            self.update(parse_rtfcre(text, normalize=normalize_steno))

    def _save(self, filename):
        translation_formatter = TranslationFormatter()
        # CRLF line endings and cp1252 encoding, per the RTF/CRE spec.
        with open(filename, 'w', encoding='cp1252', newline='\r\n') as fp:
            print(HEADER, file=fp)
            for s, t in self.items():
                s = '/'.join(s)
                t = translation_formatter.format(t)
                # Each entry: steno group followed by its translation.
                entry = r'{\*\cxs %s}%s' % (s, t)
                print(entry, file=fp)
            # Close the root RTF group.
            print('}', file=fp)
| 5,899
|
Python
|
.py
| 128
| 36.664063
| 114
| 0.455036
|
openstenoproject/plover
| 2,318
| 281
| 179
|
GPL-2.0
|
9/5/2024, 5:12:22 PM (Europe/Amsterdam)
|
18,690
|
undo.py
|
openstenoproject_plover/plover/macro/undo.py
|
from plover.translation import Translation
from plover.oslayer.config import PLATFORM
# Translation used to erase the last word via the OS when Plover's own
# buffer is exhausted: a word-wise backspace (Alt on macOS, Control
# elsewhere), with `{^}` suppressing the next implicit space.
if PLATFORM == 'mac':
    BACK_STRING = '{#Alt_L(BackSpace)}{^}'
else:
    BACK_STRING = '{#Control_L(BackSpace)}{^}'
def undo(translator, stroke, cmdline):
    """Macro: undo the most recent undoable stroke.

    Pops translations off the history (newest first) until one that can
    actually be undone has been removed. If the whole buffer is drained
    without finding one, emit an OS-level "delete last word" instead,
    without recording it in the translator history.
    """
    assert not cmdline
    history = translator.get_state().translations
    for translation in reversed(history):
        translator.untranslate_translation(translation)
        if translation.has_undo():
            return
    translator.flush([Translation([stroke], BACK_STRING)])
| 647
|
Python
|
.py
| 16
| 35.5
| 70
| 0.69586
|
openstenoproject/plover
| 2,318
| 281
| 179
|
GPL-2.0
|
9/5/2024, 5:12:22 PM (Europe/Amsterdam)
|
18,691
|
repeat.py
|
openstenoproject_plover/plover/macro/repeat.py
|
from plover.steno import Stroke
def last_stroke(translator, stroke, cmdline):
    """Macro: repeat the most recent stroke in the translator history."""
    assert not cmdline
    history = translator.get_state().translations
    if not history:
        # Nothing to repeat.
        return
    repeated = Stroke(history[-1].strokes[-1].steno_keys)
    translator.translate_stroke(repeated)
| 325
|
Python
|
.py
| 9
| 31.222222
| 60
| 0.744409
|
openstenoproject/plover
| 2,318
| 281
| 179
|
GPL-2.0
|
9/5/2024, 5:12:22 PM (Europe/Amsterdam)
|
18,692
|
retro.py
|
openstenoproject_plover/plover/macro/retro.py
|
from plover.translation import Translation
from plover.steno import Stroke
from plover import system
def toggle_asterisk(translator, stroke, cmdline):
    """Macro: re-stroke the previous stroke with its asterisk toggled."""
    assert not cmdline
    history = translator.get_state().translations
    if not history:
        return
    previous = history[-1]
    translator.untranslate_translation(previous)
    keys = set(previous.strokes[-1].steno_keys)
    # Flip the asterisk: remove it if present, add it otherwise.
    keys.symmetric_difference_update({'*'})
    translator.translate_stroke(Stroke(keys))
def delete_space(translator, stroke, cmdline):
    """Macro: retroactively join the last two translations without a space."""
    assert not cmdline
    # Retrospective delete space
    translations = translator.get_state().translations
    if len(translations) < 2:
        return
    replaced = translations[-2:]
    # Don't stack retrospective commands on top of each other.
    if replaced[1].is_retrospective_command:
        return
    english = []
    for t in replaced:
        if t.english is not None:
            english.append(t.english)
        elif len(t.rtfcre) == 1 and t.rtfcre[0].isdigit():
            # Untranslated single-digit strokes are kept as glued numbers.
            english.append('{&%s}' % t.rtfcre[0])
    if len(english) > 1:
        # `{^~|^}` joins the two outputs with no space, carrying the
        # attached formatting through.
        t = Translation([stroke], '{^~|^}'.join(english))
        t.replaced = replaced
        t.is_retrospective_command = True
        translator.translate_translation(t)
def insert_space(translator, stroke, cmdline):
    """Macro: retroactively split the last translation with a space."""
    assert not cmdline
    # Retrospective insert space
    translations = translator.get_state().translations
    if not translations:
        return
    replaced = translations[-1]
    # Don't stack retrospective commands on top of each other.
    if replaced.is_retrospective_command:
        return
    lookup_stroke = replaced.strokes[-1]
    # Re-translate what the last translation had itself replaced...
    english = [t.english or '/'.join(t.rtfcre)
               for t in replaced.replaced]
    if english:
        # ...then append the last stroke's own translation (looked up
        # without suffix folding), falling back to its raw steno.
        english.append(
            translator.lookup([lookup_stroke], system.SUFFIX_KEYS)
            or lookup_stroke.rtfcre
        )
        t = Translation([stroke], ' '.join(english))
        t.replaced = [replaced]
        t.is_retrospective_command = True
        translator.translate_translation(t)
| 2,004
|
Python
|
.py
| 58
| 27.827586
| 66
| 0.658939
|
openstenoproject/plover
| 2,318
| 281
| 179
|
GPL-2.0
|
9/5/2024, 5:12:22 PM (Europe/Amsterdam)
|
18,693
|
set_config.py
|
openstenoproject_plover/plover/command/set_config.py
|
import ast
def set_config(engine, cmdline):
    """
    Set one or more Plover config options upon executing a stroke pattern.
    Syntax:
    {PLOVER:SET_CONFIG:option:value}
    {PLOVER:SET_CONFIG:option1:value1,option2:value2,...}
    Example usage:
    "O*EP": "{PLOVER:SET_CONFIG:'translation_frame_opacity':100}",
    "STA*RT": "{PLOVER:SET_CONFIG:'start_attached':True,'start_capitalized':True}",
    Be careful with nested quotes. Plover's JSON dictionaries use double quotes
    by default, so use single quotes for config option names and other strings.
    Raises ValueError if the command line cannot be parsed as a dict.
    """
    # Each config setting can be processed as a key:value pair in a dict.
    # The engine.config property setter will update all settings at once.
    engine.config = _cmdline_to_dict(cmdline)
def _cmdline_to_dict(cmdline):
""" Add braces and parse the entire command line as a Python dict literal. """
try:
opt_dict = ast.literal_eval('{'+cmdline+'}')
assert isinstance(opt_dict, dict)
return opt_dict
except (AssertionError, SyntaxError, ValueError) as e:
raise ValueError('Bad command string "%s" for PLOVER:SET_CONFIG.\n' % cmdline
+ 'See for reference:\n\n' + set_config.__doc__) from e
| 1,243
|
Python
|
.py
| 25
| 43.44
| 85
| 0.685384
|
openstenoproject/plover
| 2,318
| 281
| 179
|
GPL-2.0
|
9/5/2024, 5:12:22 PM (Europe/Amsterdam)
|
18,694
|
about_dialog.py
|
openstenoproject_plover/plover/gui_qt/about_dialog.py
|
import re
from PyQt5.QtWidgets import QDialog
import plover
from plover.gui_qt.about_dialog_ui import Ui_AboutDialog
class AboutDialog(QDialog, Ui_AboutDialog):
    """"About Plover" dialog: renders the package metadata as HTML."""

    ROLE = 'about'

    def __init__(self, engine):
        super().__init__()
        self.setupUi(self)
        credits = plover.__credits__
        # Turn bare "<url>" markers in the credits into clickable links.
        credits = re.sub(r'<([^>]*)>', r'<a href="\1">\1</a>', credits)
        credits = credits.replace('\n', '<br/>')
        self.text.setHtml(
            '''
            <style>
            h1 {text-align:center;}
            h2 {text-align:center;}
            p {text-align:center;}
            </style>
            <p><img src="%(icon)s"/></p>
            <h1>%(name)s %(version)s</h1>
            <p>%(description)s</p>
            <p><i>Copyright %(copyright)s</i></p>
            <p>License: <a href="%(license_url)s">%(license)s</a></p>
            <p>Project Homepage: <a href='%(url)s'>%(url)s</a></p>
            <h2>Credits:</h2>
            <p>%(credits)s</p>
            ''' % {
                'icon'       : ':/plover.png',
                'name'       : plover.__name__.capitalize(),
                'version'    : plover.__version__,
                'description': plover.__long_description__,
                'copyright'  : plover.__copyright__.replace('(C)', '©'),
                'license'    : plover.__license__,
                'license_url': 'https://www.gnu.org/licenses/gpl-2.0-standalone.html',
                'url'        : plover.__download_url__,
                'credits'    : credits,
            })
| 1,555
|
Python
|
.py
| 38
| 29.394737
| 86
| 0.473824
|
openstenoproject/plover
| 2,318
| 281
| 179
|
GPL-2.0
|
9/5/2024, 5:12:22 PM (Europe/Amsterdam)
|
18,695
|
suggestions_dialog.py
|
openstenoproject_plover/plover/gui_qt/suggestions_dialog.py
|
import re
from PyQt5.QtCore import Qt
from PyQt5.QtGui import (
QCursor,
QFont,
)
from PyQt5.QtWidgets import (
QAction,
QFontDialog,
QMenu,
)
from plover import _
from plover.suggestions import Suggestion
from plover.formatting import RetroFormatter
from plover.gui_qt.suggestions_dialog_ui import Ui_SuggestionsDialog
from plover.gui_qt.utils import ToolBar
from plover.gui_qt.tool import Tool
class SuggestionsDialog(Tool, Ui_SuggestionsDialog):

    # i18n: Widget: “SuggestionsDialog”, tooltip.
    __doc__ = _('Suggest possible strokes for the last written words.')

    TITLE = _('Suggestions')
    ICON = ':/lightbulb.svg'
    ROLE = 'suggestions'
    SHORTCUT = 'Ctrl+J'

    # Splits output into word / punctuation chunks (trailing spaces kept).
    WORD_RX = re.compile(r'(?:\w+|[^\w\s]+)\s*')

    STYLE_TRANSLATION, STYLE_STROKES = range(2)

    # Anatomy of the text document:
    # - "root" frame:
    #   - 0+ "suggestions" frames
    #     - 1+ "translation" frames
    #       - 1-10 "strokes" frames

    def __init__(self, engine):
        super().__init__(engine)
        self.setupUi(self)
        self._last_suggestions = None
        # Toolbar.
        self.layout().addWidget(ToolBar(
            self.action_ToggleOnTop,
            self.action_SelectFont,
            self.action_Clear,
        ))
        self.action_Clear.setEnabled(False)
        # Font popup menu.
        self._font_menu = QMenu()
        # i18n: Widget: “SuggestionsDialog”, “font” menu.
        self._font_menu_text = QAction(_('&Text'), self._font_menu)
        # i18n: Widget: “SuggestionsDialog”, “font” menu.
        self._font_menu_strokes = QAction(_('&Strokes'), self._font_menu)
        self._font_menu.addActions([self._font_menu_text, self._font_menu_strokes])
        engine.signal_connect('translated', self.on_translation)
        self.suggestions.setFocus()
        self.restore_state()
        self.finished.connect(self.save_state)

    def _get_font(self, name):
        # Fonts are properties of the suggestions widget.
        return getattr(self.suggestions, name)

    def _set_font(self, name, font):
        setattr(self.suggestions, name, font)

    def _restore_state(self, settings):
        for name in (
            'text_font',
            'strokes_font',
        ):
            font_string = settings.value(name)
            if font_string is None:
                continue
            font = QFont()
            if not font.fromString(font_string):
                continue
            self._set_font(name, font)
        ontop = settings.value('ontop', None, bool)
        if ontop is not None:
            self.action_ToggleOnTop.setChecked(ontop)
            self.on_toggle_ontop(ontop)

    def _save_state(self, settings):
        for name in (
            'text_font',
            'strokes_font',
        ):
            font = self._get_font(name)
            font_string = font.toString()
            settings.setValue(name, font_string)
        ontop = bool(self.windowFlags() & Qt.WindowStaysOnTopHint)
        settings.setValue('ontop', ontop)

    def _show_suggestions(self, suggestion_list):
        self.suggestions.append(suggestion_list)
        self.action_Clear.setEnabled(True)

    @staticmethod
    def tails(ls):
        ''' Return all tail combinations (a la Haskell)
        tails :: [x] -> [[x]]
        >>> list(tails('abcd'))
        ['abcd', 'bcd', 'cd', 'd']
        '''
        for i in range(len(ls)):
            yield ls[i:]

    def on_translation(self, old, new):
        # Check for new output.
        for a in reversed(new):
            if a.text and not a.text.isspace():
                break
        else:
            return
        # Get the last 10 words.
        with self._engine:
            last_translations = self._engine.translator_state.translations
            retro_formatter = RetroFormatter(last_translations)
            split_words = retro_formatter.last_words(10, rx=self.WORD_RX)
        # Look up every suffix phrase of the last words.
        suggestion_list = []
        for phrase in self.tails(split_words):
            phrase = ''.join(phrase)
            suggestion_list.extend(self._engine.get_suggestions(phrase))
        if not suggestion_list and split_words:
            # No hits at all: show the last word with no suggestions.
            suggestion_list = [Suggestion(split_words[-1], [])]
        # Skip duplicate updates.
        if suggestion_list and suggestion_list != self._last_suggestions:
            self._last_suggestions = suggestion_list
            self._show_suggestions(suggestion_list)

    def on_select_font(self):
        action = self._font_menu.exec_(QCursor.pos())
        if action is None:
            return
        if action == self._font_menu_text:
            name = 'text_font'
            font_options = ()
        elif action == self._font_menu_strokes:
            name = 'strokes_font'
            # Steno strokes align best in a monospaced font.
            font_options = (QFontDialog.MonospacedFonts,)
        font = self._get_font(name)
        font, ok = QFontDialog.getFont(font, self, '', *font_options)
        if ok:
            self._set_font(name, font)

    def on_toggle_ontop(self, ontop):
        flags = self.windowFlags()
        if ontop:
            flags |= Qt.WindowStaysOnTopHint
        else:
            flags &= ~Qt.WindowStaysOnTopHint
        self.setWindowFlags(flags)
        self.show()

    def on_clear(self):
        self.action_Clear.setEnabled(False)
        self._last_suggestions = None
        self.suggestions.clear()
| 5,294
|
Python
|
.py
| 142
| 28.239437
| 83
| 0.599765
|
openstenoproject/plover
| 2,318
| 281
| 179
|
GPL-2.0
|
9/5/2024, 5:12:22 PM (Europe/Amsterdam)
|
18,696
|
add_translation_dialog.py
|
openstenoproject_plover/plover/gui_qt/add_translation_dialog.py
|
from PyQt5.QtWidgets import QDialogButtonBox
from plover import _
from plover.gui_qt.add_translation_dialog_ui import Ui_AddTranslationDialog
from plover.gui_qt.tool import Tool
class AddTranslationDialog(Tool, Ui_AddTranslationDialog):

    # i18n: Widget: “AddTranslationDialog”, tooltip.
    __doc__ = _('Add a new translation to the dictionary.')

    TITLE = _('Add Translation')
    ICON = ':/translation_add.svg'
    ROLE = 'add_translation'
    SHORTCUT = 'Ctrl+N'

    def __init__(self, engine, dictionary_path=None):
        super().__init__(engine)
        self.setupUi(self)
        self.add_translation.select_dictionary(dictionary_path)
        # Only enable the OK button while the steno mapping is valid.
        self.add_translation.mappingValid.connect(self.on_mapping_valid)
        self.on_mapping_valid(self.add_translation.mapping_is_valid)
        engine.signal_connect('config_changed', self.on_config_changed)
        self.on_config_changed(engine.config)
        self.installEventFilter(self)
        self.restore_state()
        self.finished.connect(self.save_state)

    def on_mapping_valid(self, valid):
        self.buttonBox.button(QDialogButtonBox.Ok).setEnabled(valid)

    def on_config_changed(self, config_update):
        if 'translation_frame_opacity' in config_update:
            opacity = config_update.get('translation_frame_opacity')
            if opacity is None:
                return
            # Config value is a percentage; Qt expects 0.0-1.0.
            assert 0 <= opacity <= 100
            self.setWindowOpacity(opacity / 100.0)

    def accept(self):
        # Persist the new entry before closing.
        self.add_translation.save_entry()
        super().accept()

    def reject(self):
        # Let the widget restore the engine state it hijacked.
        self.add_translation.reject()
        super().reject()
| 1,639
|
Python
|
.py
| 37
| 36.513514
| 75
| 0.68242
|
openstenoproject/plover
| 2,318
| 281
| 179
|
GPL-2.0
|
9/5/2024, 5:12:22 PM (Europe/Amsterdam)
|
18,697
|
paper_tape.py
|
openstenoproject_plover/plover/gui_qt/paper_tape.py
|
import time
from PyQt5.QtCore import (
QAbstractListModel,
QMimeData,
QModelIndex,
Qt,
)
from PyQt5.QtGui import QFont
from PyQt5.QtWidgets import (
QFileDialog,
QFontDialog,
QMessageBox,
)
from wcwidth import wcwidth
from plover import _, system
from .paper_tape_ui import Ui_PaperTape
from .utils import ActionCopyViewSelectionToClipboard, ToolBar
from .tool import Tool
STYLE_PAPER, STYLE_RAW = (
# i18n: Paper tape style.
_('Paper'),
# i18n: Paper tape style.
_('Raw'),
)
TAPE_STYLES = (STYLE_PAPER, STYLE_RAW)
class TapeModel(QAbstractListModel):
    """List model holding the strokes shown by the paper tape tool."""

    def __init__(self):
        super().__init__()
        self._stroke_list = []
        # Current display style: one of TAPE_STYLES.
        self._style = None
        # Per-system key data, (re)initialized by `reset`.
        self._all_keys = None
        self._numbers = None

    def rowCount(self, parent):
        return 0 if parent.isValid() else len(self._stroke_list)

    @property
    def style(self):
        return self._style

    @style.setter
    def style(self, style):
        assert style in TAPE_STYLES
        # Changing the style changes every row's display text.
        self.layoutAboutToBeChanged.emit()
        self._style = style
        self.layoutChanged.emit()

    def _paper_format(self, stroke):
        # `* 1` makes a shallow copy of the filler list so it can be
        # patched in place.
        text = self._all_keys_filler * 1
        keys = stroke.steno_keys[:]
        if any(key in self._numbers for key in keys):
            # A number key implies the number sign.
            keys.append('#')
        for key in keys:
            index = system.KEY_ORDER[key]
            text[index] = self._all_keys[index]
        return ''.join(text)

    @staticmethod
    def _raw_format(stroke):
        return stroke.rtfcre

    def data(self, index, role):
        if not index.isValid():
            return None
        stroke = self._stroke_list[index.row()]
        if role == Qt.DisplayRole:
            if self._style == STYLE_PAPER:
                return self._paper_format(stroke)
            if self._style == STYLE_RAW:
                return self._raw_format(stroke)
        if role == Qt.AccessibleTextRole:
            return stroke.rtfcre
        return None

    def reset(self):
        """Clear the tape, reload the system's key data; return the key set."""
        self.modelAboutToBeReset.emit()
        self._all_keys = ''.join(key.strip('-') for key in system.KEYS)
        # Filler padded to each key's display width (via wcwidth).
        self._all_keys_filler = [
            ' ' * wcwidth(k)
            for k in self._all_keys
        ]
        self._numbers = set(system.NUMBERS.values())
        self._stroke_list.clear()
        self.modelReset.emit()
        return self._all_keys

    def append(self, stroke):
        row = len(self._stroke_list)
        self.beginInsertRows(QModelIndex(), row, row)
        self._stroke_list.append(stroke)
        self.endInsertRows()

    def mimeTypes(self):
        return ['text/plain']

    def mimeData(self, indexes):
        # Copy: displayed text of the selected rows, one per line.
        data = QMimeData()
        data.setText('\n'.join(filter(None, (
            self.data(index, Qt.DisplayRole)
            for index in indexes
        ))))
        return data
class PaperTape(Tool, Ui_PaperTape):

    # i18n: Widget: “PaperTape”, tooltip.
    __doc__ = _('Paper tape display of strokes.')

    TITLE = _('Paper Tape')
    ICON = ':/tape.svg'
    ROLE = 'paper_tape'
    SHORTCUT = 'Ctrl+T'

    def __init__(self, engine):
        super().__init__(engine)
        self.setupUi(self)
        self._model = TapeModel()
        self.header.setContentsMargins(4, 0, 0, 0)
        self.styles.addItems(TAPE_STYLES)
        self.tape.setModel(self._model)
        self.tape.setSelectionMode(self.tape.ExtendedSelection)
        self._copy_action = ActionCopyViewSelectionToClipboard(self.tape)
        self.tape.addAction(self._copy_action)
        # Toolbar.
        self.layout().addWidget(ToolBar(
            self.action_ToggleOnTop,
            self.action_SelectFont,
            self.action_Clear,
            self.action_Save,
        ))
        # Nothing to clear/save until at least one stroke is shown.
        self.action_Clear.setEnabled(False)
        self.action_Save.setEnabled(False)
        engine.signal_connect('config_changed', self.on_config_changed)
        self.on_config_changed(engine.config)
        engine.signal_connect('stroked', self.on_stroke)
        self.tape.setFocus()
        self.restore_state()
        self.finished.connect(self.save_state)

    def _restore_state(self, settings):
        # The style is persisted as an index into TAPE_STYLES.
        style = settings.value('style', None, int)
        if style is not None:
            style = TAPE_STYLES[style]
            self.styles.setCurrentText(style)
            self.on_style_changed(style)
        font_string = settings.value('font')
        if font_string is not None:
            font = QFont()
            if font.fromString(font_string):
                self.header.setFont(font)
                self.tape.setFont(font)
        ontop = settings.value('ontop', None, bool)
        if ontop is not None:
            self.action_ToggleOnTop.setChecked(ontop)
            self.on_toggle_ontop(ontop)

    def _save_state(self, settings):
        settings.setValue('style', TAPE_STYLES.index(self._style))
        settings.setValue('font', self.tape.font().toString())
        ontop = bool(self.windowFlags() & Qt.WindowStaysOnTopHint)
        settings.setValue('ontop', ontop)

    def on_config_changed(self, config):
        if 'system_name' in config:
            # New steno system, new key layout: reset the model and
            # update the header with the new key set.
            all_keys = self._model.reset()
            self.header.setText(all_keys)

    @property
    def _scroll_at_end(self):
        scrollbar = self.tape.verticalScrollBar()
        return scrollbar.value() == scrollbar.maximum()

    @property
    def _style(self):
        return self.styles.currentText()

    def on_stroke(self, stroke):
        # Only auto-scroll if the view was already scrolled to the end.
        scroll_at_end = self._scroll_at_end
        self._model.append(stroke)
        if scroll_at_end:
            self.tape.scrollToBottom()
        self.action_Clear.setEnabled(True)
        self.action_Save.setEnabled(True)

    def on_style_changed(self, style):
        assert style in TAPE_STYLES
        scroll_at_end = self._scroll_at_end
        self._model.style = style
        # The key header only makes sense for the "paper" style.
        self.header.setVisible(style == STYLE_PAPER)
        if scroll_at_end:
            self.tape.scrollToBottom()

    def on_select_font(self):
        font, ok = QFontDialog.getFont(self.tape.font(), self, '',
                                       QFontDialog.MonospacedFonts)
        if ok:
            self.header.setFont(font)
            self.tape.setFont(font)

    def on_toggle_ontop(self, ontop):
        flags = self.windowFlags()
        if ontop:
            flags |= Qt.WindowStaysOnTopHint
        else:
            flags &= ~Qt.WindowStaysOnTopHint
        self.setWindowFlags(flags)
        self.show()

    def on_clear(self):
        flags = self.windowFlags()
        msgbox = QMessageBox()
        msgbox.setText(_('Do you want to clear the paper tape?'))
        msgbox.setStandardButtons(QMessageBox.Yes | QMessageBox.No)
        # Make sure the message box ends up above the paper tape!
        msgbox.setWindowFlags(msgbox.windowFlags() | (flags & Qt.WindowStaysOnTopHint))
        if QMessageBox.Yes != msgbox.exec_():
            return
        # Note: a dead `self._strokes = []` assignment was removed here;
        # the attribute was never read — the strokes live in `self._model`.
        self.action_Clear.setEnabled(False)
        self.action_Save.setEnabled(False)
        self._model.reset()

    def on_save(self):
        filename_suggestion = 'steno-notes-%s.txt' % time.strftime('%Y-%m-%d-%H-%M')
        filename = QFileDialog.getSaveFileName(
            self, _('Save Paper Tape'), filename_suggestion,
            # i18n: Paper tape, "save" file picker.
            _('Text files (*.txt)'),
        )[0]
        if not filename:
            return
        with open(filename, 'w') as fp:
            for row in range(self._model.rowCount(self._model.index(-1, -1))):
                print(self._model.data(self._model.index(row, 0), Qt.DisplayRole), file=fp)
| 7,633
|
Python
|
.py
| 209
| 27.779904
| 91
| 0.607854
|
openstenoproject/plover
| 2,318
| 281
| 179
|
GPL-2.0
|
9/5/2024, 5:12:22 PM (Europe/Amsterdam)
|
18,698
|
log_qt.py
|
openstenoproject_plover/plover/gui_qt/log_qt.py
|
import logging
from PyQt5.QtCore import QObject, pyqtSignal
from plover import log
class NotificationHandler(QObject, logging.Handler):
    """Logging handler relaying log records to the GUI as a Qt signal."""

    # (level, formatted message) pair, emitted once per handled record.
    emitSignal = pyqtSignal(int, str)

    def __init__(self):
        super().__init__()
        # Only relay warnings and above.
        self.setLevel(log.WARNING)
        self.setFormatter(log.NoExceptionTracebackFormatter('%(levelname)s: %(message)s'))

    def emit(self, record):
        level = record.levelno
        message = self.format(record)
        self.emitSignal.emit(level, message)
| 501
|
Python
|
.py
| 13
| 32.307692
| 90
| 0.702083
|
openstenoproject/plover
| 2,318
| 281
| 179
|
GPL-2.0
|
9/5/2024, 5:12:22 PM (Europe/Amsterdam)
|
18,699
|
add_translation_widget.py
|
openstenoproject_plover/plover/gui_qt/add_translation_widget.py
|
from collections import namedtuple
from html import escape as html_escape
from os.path import split as os_path_split
from PyQt5.QtCore import QEvent, pyqtSignal
from PyQt5.QtWidgets import QApplication, QWidget
from plover import _
from plover.misc import shorten_path
from plover.steno import normalize_steno, sort_steno_strokes
from plover.engine import StartingStrokeState
from plover.translation import escape_translation, unescape_translation
from plover.formatting import RetroFormatter
from plover.resource import resource_filename
from plover.gui_qt.add_translation_widget_ui import Ui_AddTranslationWidget
from plover.gui_qt.steno_validator import StenoValidator
class AddTranslationWidget(QWidget, Ui_AddTranslationWidget):

    # i18n: Widget: “AddTranslationWidget”, tooltip.
    __doc__ = _('Add a new translation to the dictionary.')

    # Engine state snapshot: the active dictionary filter, translator
    # state and starting-stroke state, swapped as focus moves between
    # the strokes and translation fields.
    EngineState = namedtuple('EngineState', 'dictionary_filter translator starting_stroke')

    # Emitted when the validity of the steno mapping changes.
    mappingValid = pyqtSignal(bool)

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.setupUi(self)
        engine = QApplication.instance().engine
        self._engine = engine
        self._dictionaries = []
        self._reverse_order = False
        self._selected_dictionary = None
        self._mapping_is_valid = False
        engine.signal_connect('config_changed', self.on_config_changed)
        self.on_config_changed(engine.config)
        engine.signal_connect('dictionaries_loaded', self.on_dictionaries_loaded)
        self.on_dictionaries_loaded(self._engine.dictionaries)
        # HTML templates used to highlight strokes/translations in the
        # info labels.
        self._special_fmt = (
            '<span style="' +
            'font-family:monospace;' +
            '">%s</span>'
        )
        self._special_fmt_bold = (
            '<span style="' +
            'font-family:monospace;' +
            'font-weight:bold;' +
            '">%s</span>'
        )
        self.strokes.setValidator(StenoValidator())
        # Track focus changes to swap engine states (see eventFilter).
        self.strokes.installEventFilter(self)
        self.translation.installEventFilter(self)
        with engine:
            # Pre-populate the strokes or translations with last stroke/word.
            last_translations = engine.translator_state.translations
            translation = None
            for t in reversed(last_translations):
                # Find the last undoable stroke.
                if t.has_undo():
                    translation = t
                    break
            # Is it a raw stroke?
            if translation is not None and not translation.english:
                # Yes.
                self.strokes.setText(translation.formatting[0].text)
                self.on_strokes_edited()
                self.strokes.selectAll()
            else:
                # No, grab the last-formatted word.
                retro_formatter = RetroFormatter(last_translations)
                last_words = retro_formatter.last_words(strip=True)
                if last_words:
                    self.translation.setText(last_words[0])
                    self.on_translation_edited()
            # Snapshot the current engine state, then derive two fresh
            # states: one for stroking steno, one for typing translations.
            self._original_state = self.EngineState(None,
                                                    engine.translator_state,
                                                    engine.starting_stroke_state)
            engine.clear_translator_state()
            self._strokes_state = self.EngineState(self._dictionary_filter,
                                                   engine.translator_state,
                                                   StartingStrokeState(True, False, '/'))
            engine.clear_translator_state()
            self._translations_state = self.EngineState(None,
                                                        engine.translator_state,
                                                        StartingStrokeState(True, False, ' '))
        self._engine_state = self._original_state
        self._focus = None

    @property
    def mapping_is_valid(self):
        return self._mapping_is_valid

    def select_dictionary(self, dictionary_path):
        self._selected_dictionary = dictionary_path
        self._update_items()

    def eventFilter(self, watched, event):
        # Swap engine states as focus moves between the two fields.
        if event.type() == QEvent.FocusIn:
            if watched == self.strokes:
                self._focus_strokes()
            elif watched == self.translation:
                self._focus_translation()
        elif event.type() == QEvent.FocusOut:
            if watched in (self.strokes, self.translation):
                self._unfocus()
        return False

    def _set_engine_state(self, state):
        with self._engine as engine:
            prev_state = self._engine_state
            if prev_state is not None and prev_state.dictionary_filter is not None:
                engine.remove_dictionary_filter(prev_state.dictionary_filter)
            engine.translator_state = state.translator
            engine.starting_stroke_state = state.starting_stroke
            if state.dictionary_filter is not None:
                engine.add_dictionary_filter(state.dictionary_filter)
            self._engine_state = state

    @staticmethod
    def _dictionary_filter(key, value):
        # Allow undo...
        if value == '=undo':
            return False
        # ...and translations with special entries. Do this by looking for
        # braces but take into account escaped braces and slashes.
        escaped = value.replace('\\\\', '').replace('\\{', '')
        special = '{#' in escaped or '{PLOVER:' in escaped
        return not special

    def _unfocus(self):
        self._unfocus_strokes()
        self._unfocus_translation()

    def _focus_strokes(self):
        if self._focus == 'strokes':
            return
        self._unfocus_translation()
        self._set_engine_state(self._strokes_state)
        self._focus = 'strokes'

    def _unfocus_strokes(self):
        if self._focus != 'strokes':
            return
        self._set_engine_state(self._original_state)
        self._focus = None

    def _focus_translation(self):
        if self._focus == 'translation':
            return
        self._unfocus_strokes()
        self._set_engine_state(self._translations_state)
        self._focus = 'translation'

    def _unfocus_translation(self):
        if self._focus != 'translation':
            return
        self._set_engine_state(self._original_state)
        self._focus = None

    def _strokes(self):
        # Normalize the steno input, preserving a leading `/` prefix.
        strokes = self.strokes.text().strip()
        has_prefix = strokes.startswith('/')
        strokes = '/'.join(strokes.replace('/', ' ').split())
        if has_prefix:
            strokes = '/' + strokes
        strokes = normalize_steno(strokes)
        return strokes

    def _translation(self):
        translation = self.translation.text().strip()
        return unescape_translation(translation)

    def _update_items(self, dictionaries=None, reverse_order=None):
        # Rebuild the dictionary combo-box, keeping the selection.
        if dictionaries is not None:
            self._dictionaries = dictionaries
        if reverse_order is not None:
            self._reverse_order = reverse_order
        iterable = self._dictionaries
        if self._reverse_order:
            iterable = reversed(iterable)
        self.dictionary.clear()
        for d in iterable:
            item = shorten_path(d.path)
            if not d.enabled:
                # i18n: Widget: “AddTranslationWidget”.
                item = _('{dictionary} (disabled)').format(dictionary=item)
            self.dictionary.addItem(item)
        selected_index = 0
        if self._selected_dictionary is None:
            # No user selection, select first enabled dictionary.
            for n, d in enumerate(self._dictionaries):
                if d.enabled:
                    selected_index = n
                    break
        else:
            # Keep user selection.
            for n, d in enumerate(self._dictionaries):
                if d.path == self._selected_dictionary:
                    selected_index = n
                    break
        if self._reverse_order:
            selected_index = self.dictionary.count() - selected_index - 1
        self.dictionary.setCurrentIndex(selected_index)

    def on_dictionaries_loaded(self, dictionaries):
        # We only care about loaded writable dictionaries.
        dictionaries = [
            d
            for d in dictionaries.dicts
            if not d.readonly
        ]
        if dictionaries != self._dictionaries:
            self._update_items(dictionaries=dictionaries)

    def on_config_changed(self, config_update):
        if 'classic_dictionaries_display_order' in config_update:
            self._update_items(reverse_order=config_update['classic_dictionaries_display_order'])

    def on_dictionary_selected(self, index):
        if self._reverse_order:
            index = len(self._dictionaries) - index - 1
        self._selected_dictionary = self._dictionaries[index].path

    def _format_label(self, fmt, strokes, translation=None, filename=None):
        # Fill `fmt` with HTML-escaped, monospace-styled values.
        if strokes:
            strokes = ', '.join(self._special_fmt % html_escape('/'.join(s))
                                for s in sort_steno_strokes(strokes))
        if translation:
            translation = self._special_fmt_bold % html_escape(escape_translation(translation))
        if filename:
            filename = html_escape(filename)
        return fmt.format(strokes=strokes, translation=translation, filename=filename)

    def on_strokes_edited(self):
        mapping_is_valid = self.strokes.hasAcceptableInput()
        if mapping_is_valid != self._mapping_is_valid:
            self._mapping_is_valid = mapping_is_valid
            self.mappingValid.emit(mapping_is_valid)
        if not mapping_is_valid:
            return
        strokes = self._strokes()
        if strokes:
            # Show what the strokes currently map to, in every dictionary.
            translations = self._engine.raw_lookup_from_all(strokes)
            if translations:
                # i18n: Widget: “AddTranslationWidget”.
                info = self._format_label(_('{strokes} maps to '), (strokes,))
                entries = [
                    self._format_label(
                        ('• ' if i else '') + '<bf>{translation}<bf/>\t((unknown))',
                        None,
                        translation,
                        os_path_split(resource_filename(dictionary.path))[1]
                    ) for i, (translation, dictionary) in enumerate(translations)
                ]
                if (len(entries) > 1):
                    # i18n: Widget: “AddTranslationWidget”.
                    entries.insert(1, '<br />' + _('Overwritten entries:'))
                info += '<br />'.join(entries)
            else:
                info = self._format_label(
                    # i18n: Widget: “AddTranslationWidget”.
                    _('{strokes} is not mapped in any dictionary'),
                    (strokes, )
                )
        else:
            info = ''
        self.strokes_info.setText(info)

    def on_translation_edited(self):
        translation = self._translation()
        if translation:
            # Show existing strokes for this translation, if any.
            strokes = self._engine.reverse_lookup(translation)
            if strokes:
                # i18n: Widget: “AddTranslationWidget”.
                fmt = _('{translation} is mapped to: {strokes}')
            else:
                # i18n: Widget: “AddTranslationWidget”.
                fmt = _('{translation} is not in the dictionary')
            info = self._format_label(fmt, strokes, translation)
        else:
            info = ''
        self.translation_info.setText(info)

    def save_entry(self):
        self._unfocus()
        strokes = self._strokes()
        translation = self._translation()
        if strokes and translation:
            index = self.dictionary.currentIndex()
            if self._reverse_order:
                index = -index - 1
            dictionary = self._dictionaries[index]
            # Remember the entry being overwritten (if any).
            old_translation = self._engine.dictionaries[dictionary.path].get(strokes)
            self._engine.add_translation(strokes, translation,
                                         dictionary_path=dictionary.path)
            return dictionary, strokes, old_translation, translation

    def reject(self):
        # Restore the engine state captured at construction time.
        self._unfocus()
        self._set_engine_state(self._original_state)
| 12,331
|
Python
|
.py
| 273
| 32.684982
| 97
| 0.584237
|
openstenoproject/plover
| 2,318
| 281
| 179
|
GPL-2.0
|
9/5/2024, 5:12:22 PM (Europe/Amsterdam)
|