Dataset schema (one record per file; rows 6,600 through 6,627 follow):

| column | dtype | stats |
|---|---|---|
| id | int64 | 0 to 458k |
| file_name | string | lengths 4 to 119 |
| file_path | string | lengths 14 to 227 |
| content | string | lengths 24 to 9.96M |
| size | int64 | 24 to 9.96M |
| language | string | 1 distinct value |
| extension | string | 14 distinct values |
| total_lines | int64 | 1 to 219k |
| avg_line_length | float64 | 2.52 to 4.63M |
| max_line_length | int64 | 5 to 9.91M |
| alphanum_fraction | float64 | 0 to 1 |
| repo_name | string | lengths 7 to 101 |
| repo_stars | int64 | 100 to 139k |
| repo_forks | int64 | 0 to 26.4k |
| repo_open_issues | int64 | 0 to 2.27k |
| repo_license | string | 12 distinct values |
| repo_extraction_date | string | 433 distinct values |
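For orientation, here is a minimal sketch of filtering records with this schema programmatically. It assumes only that each record is a plain dict keyed by the columns above; the `iter_shinken_cfg_rows` helper and the sample row are hypothetical illustrations, not part of the dataset.

```python
# Minimal sketch: filtering records shaped like the schema above.
# Assumes rows are plain dicts with the listed keys; the sample row
# below is hypothetical and only mirrors the fields shown in this dump.

def iter_shinken_cfg_rows(rows):
    """Yield rows from the shinken repo that look like small config files."""
    for row in rows:
        if row["repo_name"] != "shinken-solutions/shinken":
            continue
        if row["file_name"].endswith(".cfg") and row["total_lines"] < 200:
            yield row

rows = [
    {
        "id": 6600,
        "file_name": "shinken_contactgroup_nomembers.cfg",
        "repo_name": "shinken-solutions/shinken",
        "total_lines": 118,
    },
]

for r in iter_shinken_cfg_rows(rows):
    print(r["id"], r["file_name"])
```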

id: 6,600 · file_name: shinken_contactgroup_nomembers.cfg
file_path: shinken-solutions_shinken/test/etc/shinken_contactgroup_nomembers.cfg
content:

accept_passive_host_checks=1
accept_passive_service_checks=1
additional_freshness_latency=15
admin_email=shinken@localhost
admin_pager=shinken@localhost
auto_reschedule_checks=0
auto_rescheduling_interval=30
auto_rescheduling_window=180
cached_host_check_horizon=15
cached_service_check_horizon=15
cfg_file=contactgroup_nomembers/hosts.cfg
cfg_file=standard/services.cfg
cfg_file=contactgroup_nomembers/contacts.cfg
cfg_file=standard/commands.cfg
cfg_file=standard/timeperiods.cfg
cfg_file=standard/hostgroups.cfg
cfg_file=standard/servicegroups.cfg
cfg_file=standard/shinken-specific.cfg
check_external_commands=1
check_for_orphaned_hosts=1
check_for_orphaned_services=1
check_host_freshness=0
check_result_path=var/spool/checkresults
check_result_reaper_frequency=10
check_service_freshness=1
command_check_interval=-1
command_file=var/shinken.cmd
daemon_dumps_core=0
date_format=iso8601
debug_file=var/shinken.debug
debug_level=112
debug_verbosity=1
enable_embedded_perl=0
enable_environment_macros=1
enable_event_handlers=1
enable_flap_detection=0
enable_notifications=1
enable_predictive_host_dependency_checks=1
enable_predictive_service_dependency_checks=1
event_broker_options=-1
event_handler_timeout=30
execute_host_checks=1
execute_service_checks=1
external_command_buffer_slots=4096
high_host_flap_threshold=20
high_service_flap_threshold=20
host_check_timeout=30
host_freshness_check_interval=60
host_inter_check_delay_method=s
illegal_macro_output_chars=`~\$&|'"<>
illegal_object_name_chars=`~!\$%^&*|'"<>?,()=
interval_length=60
lock_file=var/shinken.pid
log_archive_path=var/archives
log_event_handlers=1
log_external_commands=1
log_file=var/shinken.log
log_host_retries=1
log_initial_states=1
log_notifications=1
log_passive_checks=1
log_rotation_method=d
log_service_retries=1
low_host_flap_threshold=5
low_service_flap_threshold=5
max_check_result_file_age=3600
max_check_result_reaper_time=30
max_concurrent_checks=0
max_debug_file_size=1000000
max_host_check_spread=30
max_service_check_spread=30
shinken_group=shinken
shinken_user=shinken
notification_timeout=30
object_cache_file=var/objects.cache
obsess_over_hosts=0
obsess_over_services=0
ocsp_timeout=5
#p1_file=/tmp/test_shinken/plugins/p1.pl
p1_file=/usr/local/shinken/bin/p1.pl
passive_host_checks_are_soft=0
perfdata_timeout=5
precached_object_file=var/objects.precache
process_performance_data=1
resource_file=resource.cfg
retain_state_information=1
retained_contact_host_attribute_mask=0
retained_contact_service_attribute_mask=0
retained_host_attribute_mask=0
retained_process_host_attribute_mask=0
retained_process_service_attribute_mask=0
retained_service_attribute_mask=0
retention_update_interval=60
service_check_timeout=60
service_freshness_check_interval=60
service_inter_check_delay_method=s
service_interleave_factor=s
##shinken_group=shinken
##shinken_user=shinken
#shinken_group=shinken
#shinken_user=shinken
sleep_time=0.25
soft_state_dependencies=0
state_retention_file=var/retention.dat
status_file=var/status.dat
status_update_interval=5
temp_file=tmp/shinken.tmp
temp_path=var/tmp
translate_passive_host_checks=0
use_aggressive_host_checking=0
use_embedded_perl_implicitly=0
use_large_installation_tweaks=0
use_regexp_matching=0
use_retained_program_state=1
use_retained_scheduling_info=1
use_syslog=0
use_true_regexp_matching=0
enable_problem_impacts_states_change=1

size: 3,367 · language: Python · extension: .tac · total_lines: 118 · avg_line_length: 27.533898 · max_line_length: 45 · alphanum_fraction: 0.857187
repo_name: shinken-solutions/shinken · repo_stars: 1,133 · repo_forks: 337 · repo_open_issues: 226 · repo_license: AGPL-3.0 · repo_extraction_date: 9/5/2024, 5:09:53 PM (Europe/Amsterdam)

id: 6,601 · file_name: shinken_contactgroups_plus_inheritance.cfg
file_path: shinken-solutions_shinken/test/etc/shinken_contactgroups_plus_inheritance.cfg
content:

accept_passive_host_checks=1
accept_passive_service_checks=1
additional_freshness_latency=15
admin_email=shinken@localhost
admin_pager=shinken@localhost
auto_reschedule_checks=0
auto_rescheduling_interval=30
auto_rescheduling_window=180
cached_host_check_horizon=15
cached_service_check_horizon=15
cfg_file=contactgroups_plus_inheritance/hosts.cfg
cfg_file=contactgroups_plus_inheritance/services.cfg
cfg_file=contactgroups_plus_inheritance/contacts.cfg
cfg_file=contactgroups_plus_inheritance/commands.cfg
cfg_file=standard/timeperiods.cfg
cfg_file=contactgroups_plus_inheritance/hostgroups.cfg
cfg_file=standard/servicegroups.cfg
cfg_file=standard/shinken-specific.cfg
check_external_commands=1
check_for_orphaned_hosts=1
check_for_orphaned_services=1
check_host_freshness=0
check_result_path=var/spool/checkresults
check_result_reaper_frequency=10
check_service_freshness=1
command_check_interval=-1
command_file=var/shinken.cmd
daemon_dumps_core=0
date_format=iso8601
debug_file=var/shinken.debug
debug_level=112
debug_verbosity=1
enable_embedded_perl=0
enable_environment_macros=1
enable_event_handlers=1
enable_flap_detection=0
enable_notifications=1
enable_predictive_host_dependency_checks=1
enable_predictive_service_dependency_checks=1
event_broker_options=-1
event_handler_timeout=30
execute_host_checks=1
execute_service_checks=1
external_command_buffer_slots=4096
high_host_flap_threshold=20
high_service_flap_threshold=20
host_check_timeout=30
host_freshness_check_interval=60
host_inter_check_delay_method=s
illegal_macro_output_chars=`~\$&|'"<>
illegal_object_name_chars=`~!\$%^&*|'"<>?,()=
interval_length=60
lock_file=var/shinken.pid
log_archive_path=var/archives
log_event_handlers=1
log_external_commands=1
log_file=var/shinken.log
log_host_retries=1
log_initial_states=0
log_notifications=1
log_passive_checks=1
log_rotation_method=d
log_service_retries=1
low_host_flap_threshold=5
low_service_flap_threshold=5
max_check_result_file_age=3600
max_check_result_reaper_time=30
max_concurrent_checks=0
max_debug_file_size=1000000
max_host_check_spread=30
max_service_check_spread=30
shinken_group=shinken
shinken_user=shinken
notification_timeout=30
object_cache_file=var/objects.cache
obsess_over_hosts=0
obsess_over_services=0
ocsp_timeout=5
#p1_file=/tmp/test_shinken/plugins/p1.pl
p1_file=/usr/local/shinken/bin/p1.pl
passive_host_checks_are_soft=0
perfdata_timeout=5
precached_object_file=var/objects.precache
process_performance_data=1
resource_file=resource.cfg
retain_state_information=1
retained_contact_host_attribute_mask=0
retained_contact_service_attribute_mask=0
retained_host_attribute_mask=0
retained_process_host_attribute_mask=0
retained_process_service_attribute_mask=0
retained_service_attribute_mask=0
retention_update_interval=60
service_check_timeout=60
service_freshness_check_interval=60
service_inter_check_delay_method=s
service_interleave_factor=s
##shinken_group=shinken
##shinken_user=shinken
#shinken_group=shinken
#shinken_user=shinken
sleep_time=0.25
soft_state_dependencies=0
state_retention_file=var/retention.dat
status_file=var/status.dat
status_update_interval=5
temp_file=tmp/shinken.tmp
temp_path=var/tmp
translate_passive_host_checks=0
use_aggressive_host_checking=0
use_embedded_perl_implicitly=0
use_large_installation_tweaks=0
use_regexp_matching=0
use_retained_program_state=1
use_retained_scheduling_info=1
use_syslog=0
use_true_regexp_matching=0
enable_problem_impacts_states_change=1
no_event_handlers_during_downtimes=1

size: 3,486 · language: Python · extension: .tac · total_lines: 119 · avg_line_length: 28.294118 · max_line_length: 54 · alphanum_fraction: 0.858331
repo_name: shinken-solutions/shinken · repo_stars: 1,133 · repo_forks: 337 · repo_open_issues: 226 · repo_license: AGPL-3.0 · repo_extraction_date: 9/5/2024, 5:09:53 PM (Europe/Amsterdam)

id: 6,602 · file_name: contacts.cfg
file_path: shinken-solutions_shinken/test/etc/objects_and_notifways/contacts.cfg
content:

define contactgroup{
contactgroup_name test_contact
alias test_contacts_alias
members test_contact
}
define contact{
contact_name test_contact
alias test_contact_alias
service_notification_period 24x7
host_notification_period 24x7
service_notification_options w,u,c,r,f
host_notification_options d,u,r,f,s
service_notification_commands notify-service
host_notification_commands notify-host
email nobody@localhost
can_submit_commands 1
}
# And a contact with notif ways
define contact{
contact_name test_contact_nw
alias test_contact_alias
email nobody@localhost
notificationways email_in_day,sms_the_night
}
#Email the whole 24x7 is ok
define notificationway{
notificationway_name email_in_day
service_notification_period 24x7
host_notification_period 24x7
service_notification_options w,u,c,r,f
host_notification_options d,u,r,f,s
service_notification_commands notify-service
host_notification_commands notify-host
}
define timeperiod{
timeperiod_name night
}
#But SMS only the night
define notificationway{
notificationway_name sms_the_night
service_notification_period night
host_notification_period night
service_notification_options c ; so only CRITICAL
host_notification_options d ; and DOWN
service_notification_commands notify-service
host_notification_commands notify-host
}

size: 1,782 · language: Python · extension: .tac · total_lines: 47 · avg_line_length: 33.042553 · max_line_length: 62 · alphanum_fraction: 0.621089
repo_name: shinken-solutions/shinken · repo_stars: 1,133 · repo_forks: 337 · repo_open_issues: 226 · repo_license: AGPL-3.0 · repo_extraction_date: 9/5/2024, 5:09:53 PM (Europe/Amsterdam)

id: 6,603 · file_name: contacts.cfg
file_path: shinken-solutions_shinken/test/etc/dependencies/contacts.cfg
content:

define contactgroup{
contactgroup_name test_contact
alias test_contacts_alias
members test_contact
}
define contact{
contact_name test_contact
alias test_contact_alias
service_notification_period 24x7
host_notification_period 24x7
service_notification_options w,u,c,r
host_notification_options d,r
service_notification_commands notify-service
host_notification_commands notify-host
email nobody@localhost
}

size: 593 · language: Python · extension: .tac · total_lines: 16 · avg_line_length: 33.0625 · max_line_length: 54 · alphanum_fraction: 0.59792
repo_name: shinken-solutions/shinken · repo_stars: 1,133 · repo_forks: 337 · repo_open_issues: 226 · repo_license: AGPL-3.0 · repo_extraction_date: 9/5/2024, 5:09:53 PM (Europe/Amsterdam)

id: 6,604 · file_name: contacts.cfg
file_path: shinken-solutions_shinken/test/etc/problem_impact/contacts.cfg
content:

define contactgroup{
contactgroup_name test_contact
alias test_contacts_alias
members test_contact
}
define contact{
contact_name test_contact
alias test_contact_alias
service_notification_period 24x7
host_notification_period 24x7
service_notification_options w,u,c,r
host_notification_options d,r
service_notification_commands notify-service
host_notification_commands notify-host
email nobody@localhost
min_criticity 5
}

size: 619 · language: Python · extension: .tac · total_lines: 17 · avg_line_length: 32.294118 · max_line_length: 54 · alphanum_fraction: 0.595674
repo_name: shinken-solutions/shinken · repo_stars: 1,133 · repo_forks: 337 · repo_open_issues: 226 · repo_license: AGPL-3.0 · repo_extraction_date: 9/5/2024, 5:09:53 PM (Europe/Amsterdam)

id: 6,605 · file_name: contacts.cfg
file_path: shinken-solutions_shinken/test/etc/escalations/contacts.cfg
content:

define contactgroup{
contactgroup_name test_contact
alias test_contacts_alias
members test_contact
}
define contact{
contact_name test_contact
alias test_contact_alias
service_notification_period 24x7
host_notification_period 24x7
service_notification_options w,u,c,r,f
host_notification_options d,u,r,f,s
service_notification_commands notify-service
host_notification_commands notify-host
email nobody@localhost
can_submit_commands 1
}
define contact{
contact_name level1
alias level1
service_notification_period 24x7
host_notification_period 24x7
service_notification_options w,u,c,r,f
host_notification_options d,u,r,f,s
service_notification_commands notify-service
host_notification_commands notify-host
email nobody@localhost
can_submit_commands 1
}
define contact{
contact_name level2
alias level2
service_notification_period 24x7
host_notification_period 24x7
service_notification_options w,u,c,r,f
host_notification_options d,u,r,f,s
service_notification_commands notify-service
host_notification_commands notify-host
email nobody@localhost
can_submit_commands 1
}
define contact{
contact_name level3
alias level3
service_notification_period 24x7
host_notification_period 24x7
service_notification_options w,u,c,r,f
host_notification_options d,u,r,f,s
service_notification_commands notify-service
host_notification_commands notify-host
email nobody@localhost
can_submit_commands 1
}
# The first escalation level goes from level1 to level2, from nb=2 to 4
define escalation{
escalation_name ToLevel2
first_notification 2
last_notification 4
notification_interval 1
escalation_period 24x7 ;optional, if none, always true
escalation_options d,u,r,w,c ;optional, if none, all states (d,u,r,w,c)
contacts level2
}
# Then go to level3 after >=5
define escalation{
escalation_name ToLevel3
first_notification 5
last_notification 0
notification_interval 1
escalation_period 24x7 ;optional, if none, always true
escalation_options d,u,r,w,c ;optional, if none, all states (d,u,r,w,c)
contacts level3
}
# Now the same, but time based
define escalation{
escalation_name ToLevel2-time
first_notification_time 60 ; at 1 hour, go here
last_notification_time 120 ; after 2 hours, stop here
notification_interval 1
escalation_period 24x7 ;optional, if none, always true
escalation_options d,u,r,w,c ;optional, if none, all states (d,u,r,w,c)
contacts level2
}
# Now the same, but time based
define escalation{
escalation_name ToLevel3-time
first_notification_time 120 ; at 2 hours, go here
last_notification_time 0 ; after, still go here
escalation_period 24x7 ;optional, if none, always true
escalation_options d,u,r,w,c ;optional, if none, all states (d,u,r,w,c)
contacts level3
}
# Now the same, but time based
define escalation{
escalation_name ToLevel2-shortinterval
first_notification_time 1 ; at 1 hour, go here
last_notification_time 120 ; after 2 hours, stop here
notification_interval 2 ; WILL BE EACH 10s (interval_length will be put at 5s)
escalation_period 24x7 ;optional, if none, always true
escalation_options d,u,r,w,c ;optional, if none, all states (d,u,r,w,c)
contacts level2
}
# Now the same, but time based
define escalation{
escalation_name ToLevel3-shortinterval
first_notification_time 4 ; at 1 hour, go here
last_notification_time 120 ; after 2 hours, stop here
notification_interval 1 ; WILL BE EACH 10s (interval_length will be put at 5s)
escalation_period 24x7 ;optional, if none, always true
escalation_options d,u,r,w,c ;optional, if none, all states (d,u,r,w,c)
contacts level3
}

size: 4,919 · language: Python · extension: .tac · total_lines: 112 · avg_line_length: 38.642857 · max_line_length: 99 · alphanum_fraction: 0.587633
repo_name: shinken-solutions/shinken · repo_stars: 1,133 · repo_forks: 337 · repo_open_issues: 226 · repo_license: AGPL-3.0 · repo_extraction_date: 9/5/2024, 5:09:53 PM (Europe/Amsterdam)
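The escalation definitions in the record above encode a count-based hand-off: ToLevel2 covers notifications 2 through 4 and ToLevel3 takes over from 5 on, with last_notification 0 meaning no upper bound. Below is a standalone sketch of that selection rule, written for illustration only; it mirrors the configured thresholds and is not Shinken's scheduler code.

```python
# Standalone illustration of the count-based escalation rules defined above:
# ToLevel2 covers notifications 2..4, ToLevel3 covers 5.. (0 = no upper bound).

ESCALATIONS = [
    {"name": "ToLevel2", "first": 2, "last": 4, "contacts": ["level2"]},
    {"name": "ToLevel3", "first": 5, "last": 0, "contacts": ["level3"]},
]

def contacts_for(notif_number):
    """Return the escalation contacts active for the n-th notification."""
    active = []
    for esc in ESCALATIONS:
        upper_ok = esc["last"] == 0 or notif_number <= esc["last"]
        if esc["first"] <= notif_number and upper_ok:
            active.extend(esc["contacts"])
    return active

for n in range(1, 7):
    print(n, contacts_for(n) or ["(default contacts)"])
```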

id: 6,606 · file_name: contacts.cfg
file_path: shinken-solutions_shinken/test/etc/linkify_template/contacts.cfg
content:

define contactgroup{
contactgroup_name test_contact
alias test_contacts_alias
members test_contact
}
define contact{
contact_name test_contact
alias test_contact_alias
service_notification_period 24x7
host_notification_period 24x7
service_notification_options w,u,c,r,f
host_notification_options d,u,r,f,s
service_notification_commands notify-service
host_notification_commands notify-host
email nobody@localhost
can_submit_commands 1
}
define contact{
contact_name contact_tpl
alias contact_tpl
service_notification_period 24x7
host_notification_period 24x7
service_notification_options w,u,c,r,f
host_notification_options d,u,r,f,s
service_notification_commands notify-service
host_notification_commands notify-host
email nobody@localhost
can_submit_commands 1
register 0
}

size: 1,141 · language: Python · extension: .tac · total_lines: 30 · avg_line_length: 33.766667 · max_line_length: 54 · alphanum_fraction: 0.587015
repo_name: shinken-solutions/shinken · repo_stars: 1,133 · repo_forks: 337 · repo_open_issues: 226 · repo_license: AGPL-3.0 · repo_extraction_date: 9/5/2024, 5:09:53 PM (Europe/Amsterdam)

id: 6,607 · file_name: contacts.cfg
file_path: shinken-solutions_shinken/test/etc/livestatus_authuser/contacts.cfg
content:

define contact {
name generic-contact
service_notification_period 24x7
host_notification_period 24x7
service_notification_options w,u,c,r,f
host_notification_options d,u,r,f,s
service_notification_commands notify-service
host_notification_commands notify-host
register 0
}
define contact {
contact_name oradba1
use generic-contact
}
define contact {
contact_name oradba2
use generic-contact
}
define contact {
contact_name mydba1
use generic-contact
}
define contact {
contact_name mydba2
use generic-contact
}
define contact {
contact_name web1
use generic-contact
}
define contact {
contact_name web2
use generic-contact
}
define contact {
contact_name cc1
use generic-contact
}
define contact {
contact_name cc2
use generic-contact
}
define contact {
contact_name cc3
use generic-contact
}
define contact {
contact_name adm1
use generic-contact
}
define contact {
contact_name adm2
use generic-contact
}
define contact {
contact_name adm3
use generic-contact
}
define contact {
contact_name bill
use generic-contact
}
define contact {
contact_name steve
use generic-contact
}

size: 1,865 · language: Python · extension: .tac · total_lines: 66 · avg_line_length: 25.939394 · max_line_length: 48 · alphanum_fraction: 0.485426
repo_name: shinken-solutions/shinken · repo_stars: 1,133 · repo_forks: 337 · repo_open_issues: 226 · repo_license: AGPL-3.0 · repo_extraction_date: 9/5/2024, 5:09:53 PM (Europe/Amsterdam)

id: 6,608 · file_name: contactgroups.cfg
file_path: shinken-solutions_shinken/test/etc/livestatus_authuser/contactgroups.cfg
content:

define contactgroup {
contactgroup_name oradba
members oradba1,oradba2
}
define contactgroup {
contactgroup_name mydba
members mydba1,mydba2
}
define contactgroup {
contactgroup_name web
members web1,web2
}
define contactgroup {
contactgroup_name cc
members cc1,cc2,cc3
}
define contactgroup {
contactgroup_name adm
members adm1,adm2,adm3
}
define contactgroup {
contactgroup_name winadm
members bill, steve
}

size: 656 · language: Python · extension: .tac · total_lines: 24 · avg_line_length: 25.083333 · max_line_length: 48 · alphanum_fraction: 0.535144
repo_name: shinken-solutions/shinken · repo_stars: 1,133 · repo_forks: 337 · repo_open_issues: 226 · repo_license: AGPL-3.0 · repo_extraction_date: 9/5/2024, 5:09:53 PM (Europe/Amsterdam)

id: 6,609 · file_name: contacts.cfg
file_path: shinken-solutions_shinken/test/etc/exclude_include_services/contacts.cfg
content:

define contactgroup{
contactgroup_name admins
alias admins_alias
members admin
}
define contact{
contact_name admin
alias admin_alias
service_notification_period 24x7
host_notification_period 24x7
service_notification_options w,u,c,r,f
host_notification_options d,u,r,f,s
service_notification_commands notify-service
host_notification_commands notify-host
email nobody@localhost
can_submit_commands 1
}

size: 630 · language: Python · extension: .tac · total_lines: 17 · avg_line_length: 32.941176 · max_line_length: 52 · alphanum_fraction: 0.552288
repo_name: shinken-solutions/shinken · repo_stars: 1,133 · repo_forks: 337 · repo_open_issues: 226 · repo_license: AGPL-3.0 · repo_extraction_date: 9/5/2024, 5:09:53 PM (Europe/Amsterdam)

id: 6,610 · file_name: contacts.cfg
file_path: shinken-solutions_shinken/test/etc/standard/contacts.cfg
content:

define contactgroup{
contactgroup_name test_contact
alias test_contacts_alias
members test_contact
}
define contact{
contact_name test_contact
alias test_contact_alias
service_notification_period 24x7
host_notification_period 24x7
service_notification_options w,u,c,r,f
host_notification_options d,u,r,f,s
service_notification_commands notify-service
host_notification_commands notify-host
email nobody@localhost
can_submit_commands 1
contactgroups another_contact_test
}

size: 697 · language: Python · extension: .tac · total_lines: 18 · avg_line_length: 34.555556 · max_line_length: 56 · alphanum_fraction: 0.587021
repo_name: shinken-solutions/shinken · repo_stars: 1,133 · repo_forks: 337 · repo_open_issues: 226 · repo_license: AGPL-3.0 · repo_extraction_date: 9/5/2024, 5:09:53 PM (Europe/Amsterdam)

id: 6,611 · file_name: contacts.cfg
file_path: shinken-solutions_shinken/test/etc/notif_macros/contacts.cfg
content:

define contactgroup{
contactgroup_name test_contact
alias test_contacts_alias
members test_contact
}
define contact{
contact_name test_contact
alias test_contact_alias
service_notification_period 24x7
host_notification_period 24x7
service_notification_options w,u,c,r,f
host_notification_options d,u,r,f,s
service_notification_commands macros_check!toto
host_notification_commands notify-host
can_submit_commands 1
email monemail@masociete.domain
_TESTC sender@masociete.domain
}

size: 679 · language: Python · extension: .tac · total_lines: 18 · avg_line_length: 33.555556 · max_line_length: 54 · alphanum_fraction: 0.610606
repo_name: shinken-solutions/shinken · repo_stars: 1,133 · repo_forks: 337 · repo_open_issues: 226 · repo_license: AGPL-3.0 · repo_extraction_date: 9/5/2024, 5:09:53 PM (Europe/Amsterdam)

id: 6,612 · file_name: contacts.cfg
file_path: shinken-solutions_shinken/test/etc/contactgroup_nomembers/contacts.cfg
content:

define contactgroup{
contactgroup_name test_contact
alias test_contacts_alias
members test_contact
}
define contact{
contact_name test_contact
alias test_contact_alias
service_notification_period 24x7
host_notification_period 24x7
service_notification_options w,u,c,r,f
host_notification_options d,u,r,f,s
service_notification_commands notify-service
host_notification_commands notify-host
email nobody@localhost
can_submit_commands 1
}
define contactgroup{
contactgroup_name test_contact_nomember
alias test_contacts_alias_nomember
}

size: 772 · language: Python · extension: .tac · total_lines: 21 · avg_line_length: 32.761905 · max_line_length: 56 · alphanum_fraction: 0.601604
repo_name: shinken-solutions/shinken · repo_stars: 1,133 · repo_forks: 337 · repo_open_issues: 226 · repo_license: AGPL-3.0 · repo_extraction_date: 9/5/2024, 5:09:53 PM (Europe/Amsterdam)

id: 6,613 · file_name: contacts.cfg
file_path: shinken-solutions_shinken/test/etc/uknown_event_handler/contacts.cfg
content:

define contactgroup{
contactgroup_name test_contact
alias test_contacts_alias
members test_contact
}
define contact{
contact_name test_contact
alias test_contact_alias
service_notification_period 24x7
host_notification_period 24x7
service_notification_options w,u,c,r,f
host_notification_options d,u,r,f,s
service_notification_commands notify-service
host_notification_commands notify-host
email nobody@localhost
can_submit_commands 1
}

size: 640 · language: Python · extension: .tac · total_lines: 17 · avg_line_length: 33.529412 · max_line_length: 54 · alphanum_fraction: 0.590032
repo_name: shinken-solutions/shinken · repo_stars: 1,133 · repo_forks: 337 · repo_open_issues: 226 · repo_license: AGPL-3.0 · repo_extraction_date: 9/5/2024, 5:09:53 PM (Europe/Amsterdam)

id: 6,614 · file_name: contacts.cfg
file_path: shinken-solutions_shinken/test/etc/core/contacts.cfg
content:

# This is a default admin
# CHANGE ITS PASSWORD!
define contact{
use generic-contact
contact_name admin
email shinken@localhost
pager 0600000000 ; contact phone number
password admin
is_admin 1
}
# This is a default guest user
# CHANGE ITS PASSWORD or remove it
define contact{
use generic-contact
contact_name guest
email guest@localhost
password guest
can_submit_commands 0
}

size: 524 · language: Python · extension: .tac · total_lines: 19 · avg_line_length: 24.105263 · max_line_length: 55 · alphanum_fraction: 0.603586
repo_name: shinken-solutions/shinken · repo_stars: 1,133 · repo_forks: 337 · repo_open_issues: 226 · repo_license: AGPL-3.0 · repo_extraction_date: 9/5/2024, 5:09:53 PM (Europe/Amsterdam)

id: 6,615 · file_name: contactgroups.cfg
file_path: shinken-solutions_shinken/test/etc/core/contactgroups.cfg
content:

# Create some contact groups
define contactgroup{
contactgroup_name admins
alias admins
members admin
}
define contactgroup{
contactgroup_name users
alias users
members admin
}

size: 260 · language: Python · extension: .tac · total_lines: 11 · avg_line_length: 20.181818 · max_line_length: 30 · alphanum_fraction: 0.597561
repo_name: shinken-solutions/shinken · repo_stars: 1,133 · repo_forks: 337 · repo_open_issues: 226 · repo_license: AGPL-3.0 · repo_extraction_date: 9/5/2024, 5:09:53 PM (Europe/Amsterdam)

id: 6,616 · file_name: contacts.cfg
file_path: shinken-solutions_shinken/test/etc/property_override/contacts.cfg
content:

define contactgroup{
contactgroup_name admins
alias admins_alias
members admin
}
define contact{
contact_name admin
alias admin_alias
service_notification_period 24x7
host_notification_period 24x7
service_notification_options w,u,c,r,f
host_notification_options d,u,r,f,s
service_notification_commands notify-service
host_notification_commands notify-host
email nobody@localhost
can_submit_commands 1
}

size: 630 · language: Python · extension: .tac · total_lines: 17 · avg_line_length: 32.941176 · max_line_length: 52 · alphanum_fraction: 0.552288
repo_name: shinken-solutions/shinken · repo_stars: 1,133 · repo_forks: 337 · repo_open_issues: 226 · repo_license: AGPL-3.0 · repo_extraction_date: 9/5/2024, 5:09:53 PM (Europe/Amsterdam)

id: 6,617 · file_name: contacts.cfg
file_path: shinken-solutions_shinken/test/etc/contactgroups_plus_inheritance/contacts.cfg
content:

define contactgroup{
contactgroup_name test_contact_1
alias test_contacts_alias_1
members test_contact_1
}
define contactgroup{
contactgroup_name test_contact_2
alias test_contacts_alias_2
members test_contact_2
}
define contact{
contact_name test_contact_1
alias test_contact_alias_1
service_notification_period 24x7
host_notification_period 24x7
service_notification_options w,u,c,r,f
host_notification_options d,u,r,f,s
service_notification_commands notify-service
host_notification_commands notify-host
email nobody@localhost
can_submit_commands 1
}
define contact{
contact_name test_contact_2
alias test_contact_alias_2
service_notification_period 24x7
host_notification_period 24x7
service_notification_options w,u,c,r,f
host_notification_options d,u,r,f,s
service_notification_commands notify-service
host_notification_commands notify-host
email nobody@localhost
can_submit_commands 1
}

size: 1,305 · language: Python · extension: .tac · total_lines: 34 · avg_line_length: 34.117647 · max_line_length: 56 · alphanum_fraction: 0.588608
repo_name: shinken-solutions/shinken · repo_stars: 1,133 · repo_forks: 337 · repo_open_issues: 226 · repo_license: AGPL-3.0 · repo_extraction_date: 9/5/2024, 5:09:53 PM (Europe/Amsterdam)

id: 6,618 · file_name: contacts.cfg
file_path: shinken-solutions_shinken/test/etc/notif_way/contacts.cfg
content:

define contactgroup{
contactgroup_name test_contact
alias test_contacts_alias
members test_contact
}
define contactgroup{
contactgroup_name test_contact_template
alias test_contacts_template
members test_contact_template_1,test_contact_template_2
}
define contact{
name contact_template
host_notifications_enabled 1
service_notifications_enabled 1
email shinken@localhost
notificationways email_in_work
can_submit_commands 1
register 0
}
define contact{
contact_name test_contact
alias test_contact_alias
email nobody@localhost
can_submit_commands 1
notificationways email_in_day,sms_the_night
}
define contact{
contact_name test_contact_simple
alias test_contact_simple
service_notification_period 24x7
host_notification_period 24x7
#no w here, for tests
service_notification_options u,c,r,f
#and no flapping hehe
host_notification_options d,u,r,s
service_notification_commands notify-service
host_notification_commands notify-host
email nobody@localhost
can_submit_commands 1
}
define contact{
use contact_template
contact_name test_contact_template_1
alias test_contact_alias_3
service_notification_period 24x7
host_notification_period 24x7
service_notification_options w,u,c,r,f
host_notification_options d,u,r,f,s
service_notification_commands notify-service
host_notification_commands notify-host
email nobody@localhost
can_submit_commands 1
}
define contact{
use contact_template
contact_name test_contact_template_2
alias test_contact_alias_4
service_notification_period 24x7
host_notification_period 24x7
service_notification_options w,u,c,r,f
host_notification_options d,u,r,f,s
service_notification_commands notify-service-sms
host_notification_commands notify-host-sms
email nobody@localhost
can_submit_commands 1
}
#Email the whole 24x7 is ok
define notificationway{
notificationway_name email_in_day
service_notification_period 24x7
host_notification_period 24x7
service_notification_options w,u,c,r,f
host_notification_options d,u,r,f,s
service_notification_commands notify-service
host_notification_commands notify-host
}
#But SMS only the night
define notificationway{
notificationway_name sms_the_night
service_notification_period night
host_notification_period night
service_notification_options c
host_notification_options d
service_notification_commands notify-service-sms
host_notification_commands notify-host-sms
min_criticity 5
}
define notificationway{
notificationway_name email_in_work
service_notification_period work
host_notification_period work
service_notification_options w,u,c,r,f
host_notification_options d,u,r,f,s
service_notification_commands notify-service-work
host_notification_commands notify-host-work
}

size: 3,798 · language: Python · extension: .tac · total_lines: 96 · avg_line_length: 34.46875 · max_line_length: 75 · alphanum_fraction: 0.602005
repo_name: shinken-solutions/shinken · repo_stars: 1,133 · repo_forks: 337 · repo_open_issues: 226 · repo_license: AGPL-3.0 · repo_extraction_date: 9/5/2024, 5:09:53 PM (Europe/Amsterdam)

id: 6,619 · file_name: contacts.cfg
file_path: shinken-solutions_shinken/test/etc/notif_too_much/contacts.cfg
content:

define contactgroup{
contactgroup_name test_contact
alias test_contacts_alias
members test_contact
}
define contact{
contact_name test_contact
alias test_contact_alias
email nobody@localhost
notificationways email_in_day,never
}
define notificationway{
notificationway_name email_in_day
service_notification_period 24x7
host_notification_period 24x7
service_notification_options w,u,c,r,f
host_notification_options d,u,r,f,s
service_notification_commands notify-service
host_notification_commands notify-host
}
define notificationway{
notificationway_name never
service_notification_period none
host_notification_period none
service_notification_options w,u,c,r,f
host_notification_options d,u,r,f,s
service_notification_commands notify-service2
host_notification_commands notify-host
}

size: 1,118 · language: Python · extension: .tac · total_lines: 29 · avg_line_length: 32.965517 · max_line_length: 54 · alphanum_fraction: 0.613678
repo_name: shinken-solutions/shinken · repo_stars: 1,133 · repo_forks: 337 · repo_open_issues: 226 · repo_license: AGPL-3.0 · repo_extraction_date: 9/5/2024, 5:09:53 PM (Europe/Amsterdam)

id: 6,620 · file_name: contactdowntime.py
file_path: shinken-solutions_shinken/shinken/contactdowntime.py
content:

#!/usr/bin/env python
# -*- coding: utf-8 -*-

# Copyright (C) 2009-2014:
#     Gabes Jean, naparuba@gmail.com
#     Gerhard Lausser, Gerhard.Lausser@consol.de
#     Gregory Starck, g.starck@gmail.com
#     Hartmut Goebel, h.goebel@goebel-consult.de
#
# This file is part of Shinken.
#
# Shinken is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Shinken is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with Shinken. If not, see <http://www.gnu.org/licenses/>.

from __future__ import absolute_import, division, print_function, unicode_literals

import time

from shinken.log import logger

""" TODO: Add some comment about this class for the doc"""


class ContactDowntime(object):
    id = 1

    # Just to list the properties we will send as pickle
    # to other daemons, so all but NOT REF
    properties = {
        # 'activate_me': None,
        # 'entry_time': None,
        # 'fixed': None,
        'start_time': None,
        # 'duration': None,
        # 'trigger_id': None,
        'end_time': None,
        # 'real_end_time': None,
        'author': None,
        'comment': None,
        'is_in_effect': None,
        # 'has_been_triggered': None,
        'can_be_deleted': None,
    }

    # Schedule a contact downtime. It's far easier than a host/service
    # one because we just have a beginning and an end. That's all for running.
    # We also keep an author and a comment for logging purposes.
    def __init__(self, ref, start_time, end_time, author, comment):
        self.id = self.__class__.id
        self.__class__.id += 1
        self.ref = ref  # pointer to the srv or host we apply to
        self.start_time = start_time
        self.end_time = end_time
        self.author = author
        self.comment = comment
        self.is_in_effect = False
        self.can_be_deleted = False
        # self.add_automatic_comment()

    # Check if we came into the activation of this downtime
    def check_activation(self):
        now = time.time()
        was_is_in_effect = self.is_in_effect
        self.is_in_effect = (self.start_time <= now <= self.end_time)
        logger.debug("CHECK ACTIVATION:%s", self.is_in_effect)

        # Raise a log entry when we get in the downtime
        if not was_is_in_effect and self.is_in_effect:
            self.enter()

        # Same for exit purpose
        if was_is_in_effect and not self.is_in_effect:
            self.exit()

    def in_scheduled_downtime(self):
        return self.is_in_effect

    # The referenced host/service object enters now a (or another) scheduled
    # downtime. Write a log message only if it was not already in a downtime
    def enter(self):
        self.ref.raise_enter_downtime_log_entry()

    # The end of the downtime was reached.
    def exit(self):
        self.ref.raise_exit_downtime_log_entry()
        self.can_be_deleted = True

    # A scheduled downtime was prematurely canceled
    def cancel(self):
        self.is_in_effect = False
        self.ref.raise_cancel_downtime_log_entry()
        self.can_be_deleted = True

    def __getstate__(self):
        # print("Asking a getstate for a downtime on", self.ref.get_dbg_name())
        cls = self.__class__
        # id is not in *_properties
        res = [self.id]
        for prop in cls.properties:
            res.append(getattr(self, prop))
        # We reverse because we want to recreate
        # by checking properties in the same order
        res.reverse()
        return res

    # Inverted function of getstate
    def __setstate__(self, state):
        cls = self.__class__
        self.id = state.pop()
        for prop in cls.properties:
            val = state.pop()
            setattr(self, prop, val)
        if self.id >= cls.id:
            cls.id = self.id + 1

size: 4,248 · language: Python · extension: .tac · total_lines: 106 · avg_line_length: 33.556604 · max_line_length: 82 · alphanum_fraction: 0.63903
repo_name: shinken-solutions/shinken · repo_stars: 1,133 · repo_forks: 337 · repo_open_issues: 226 · repo_license: AGPL-3.0 · repo_extraction_date: 9/5/2024, 5:09:53 PM (Europe/Amsterdam)
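Given `ContactDowntime` above, here is a minimal usage sketch. The `StubRef` class is a hypothetical stand-in for the host/service object, which in Shinken provides the `raise_*_downtime_log_entry` methods.

```python
import time

# Import path follows the file_path shown in the record above.
from shinken.contactdowntime import ContactDowntime

class StubRef(object):
    """Hypothetical stand-in for the host/service the downtime applies to."""
    def raise_enter_downtime_log_entry(self):
        print("entered downtime")
    def raise_exit_downtime_log_entry(self):
        print("exited downtime")
    def raise_cancel_downtime_log_entry(self):
        print("downtime cancelled")

now = time.time()
dt = ContactDowntime(StubRef(), now - 1, now + 60, "admin", "maintenance window")
dt.check_activation()              # start_time <= now <= end_time, so enter() fires
print(dt.in_scheduled_downtime())  # True
dt.cancel()                        # prematurely end it
print(dt.can_be_deleted)           # True
```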

id: 6,621 · file_name: contact.py
file_path: shinken-solutions_shinken/shinken/objects/contact.py
content:

#!/usr/bin/python
# -*- coding: utf-8 -*-

# Copyright (C) 2009-2014:
#     Gabes Jean, naparuba@gmail.com
#     Gerhard Lausser, Gerhard.Lausser@consol.de
#     Gregory Starck, g.starck@gmail.com
#     Hartmut Goebel, h.goebel@goebel-consult.de
#
# This file is part of Shinken.
#
# Shinken is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Shinken is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with Shinken. If not, see <http://www.gnu.org/licenses/>.

from __future__ import absolute_import, division, print_function, unicode_literals

from shinken.objects.item import Item, Items
from shinken.util import strip_and_uniq
from shinken.property import BoolProp, IntegerProp, StringProp, ListProp
from shinken.log import logger, naglog_result

_special_properties = (
    'service_notification_commands', 'host_notification_commands',
    'service_notification_period', 'host_notification_period',
    'service_notification_options', 'host_notification_options',
    'host_notification_commands', 'contact_name'
)

_simple_way_parameters = (
    'service_notification_period', 'host_notification_period',
    'service_notification_options', 'host_notification_options',
    'service_notification_commands', 'host_notification_commands',
    'min_business_impact'
)


class Contact(Item):
    id = 1  # zero is always special in database, so we do not take risks here
    my_type = 'contact'

    properties = Item.properties.copy()
    properties.update({
        'contact_name': StringProp(fill_brok=['full_status']),
        'alias': StringProp(default='none', fill_brok=['full_status']),
        'contactgroups': ListProp(default=[], fill_brok=['full_status']),
        'host_notifications_enabled': BoolProp(default=True, fill_brok=['full_status']),
        'service_notifications_enabled': BoolProp(default=True, fill_brok=['full_status']),
        'host_notification_period': StringProp(fill_brok=['full_status']),
        'service_notification_period': StringProp(fill_brok=['full_status']),
        'host_notification_options': ListProp(default=[''], fill_brok=['full_status'],
                                              split_on_coma=True),
        'service_notification_options': ListProp(default=[''], fill_brok=['full_status'],
                                                 split_on_coma=True),
        # To be consistent with notificationway object attributes
        'host_notification_commands': ListProp(fill_brok=['full_status']),
        'service_notification_commands': ListProp(fill_brok=['full_status']),
        'min_business_impact': IntegerProp(default=0, fill_brok=['full_status']),
        'email': StringProp(default='none', fill_brok=['full_status']),
        'pager': StringProp(default='none', fill_brok=['full_status']),
        'address1': StringProp(default='none', fill_brok=['full_status']),
        'address2': StringProp(default='none', fill_brok=['full_status']),
        'address3': StringProp(default='none', fill_brok=['full_status']),
        'address4': StringProp(default='none', fill_brok=['full_status']),
        'address5': StringProp(default='none', fill_brok=['full_status']),
        'address6': StringProp(default='none', fill_brok=['full_status']),
        'can_submit_commands': BoolProp(default=False, fill_brok=['full_status']),
        'is_admin': BoolProp(default=False, fill_brok=['full_status']),
        'expert': BoolProp(default=False, fill_brok=['full_status']),
        'retain_status_information': BoolProp(default=True, fill_brok=['full_status']),
        'notificationways': ListProp(default=[], fill_brok=['full_status']),
        'password': StringProp(default='NOPASSWORDSET', fill_brok=['full_status']),
    })

    running_properties = Item.running_properties.copy()
    running_properties.update({
        'modified_attributes': IntegerProp(default=0, fill_brok=['full_status'], retention=True),
        'downtimes': StringProp(default=[], fill_brok=['full_status'], retention=True),
    })

    # This table is used to transform old parameter names into new ones,
    # from the Nagios2 format to the Nagios3 one,
    # or Shinken deprecated names like criticity
    old_properties = {
        'min_criticity': 'min_business_impact',
    }

    macros = {
        'CONTACTNAME': 'contact_name',
        'CONTACTALIAS': 'alias',
        'CONTACTEMAIL': 'email',
        'CONTACTPAGER': 'pager',
        'CONTACTADDRESS1': 'address1',
        'CONTACTADDRESS2': 'address2',
        'CONTACTADDRESS3': 'address3',
        'CONTACTADDRESS4': 'address4',
        'CONTACTADDRESS5': 'address5',
        'CONTACTADDRESS6': 'address6',
        'CONTACTGROUPNAME': 'get_groupname',
        'CONTACTGROUPNAMES': 'get_groupnames'
    }

    # For debugging purpose only (nice name)
    def get_name(self):
        try:
            return self.contact_name
        except AttributeError:
            return 'UnnamedContact'

    # Search for notification_options with state and if t is
    # in service_notification_period
    def want_service_notification(self, t, state, type, business_impact, cmd=None):
        if not self.service_notifications_enabled:
            return False

        # If we are in downtime, we do not want notifications
        for dt in self.downtimes:
            if dt.is_in_effect:
                return False

        # Now the rest is for sub notificationways. If one is OK, we are ok
        # We will filter in another phase
        for nw in self.notificationways:
            nw_b = nw.want_service_notification(t, state, type, business_impact, cmd)
            if nw_b:
                return True

        # Oh... no one is ok for it? so no, sorry
        return False

    # Search for notification_options with state and if t is in
    # host_notification_period
    def want_host_notification(self, t, state, type, business_impact, cmd=None):
        if not self.host_notifications_enabled:
            return False

        # If we are in downtime, we do not want notifications
        for dt in self.downtimes:
            if dt.is_in_effect:
                return False

        # Now it's all for sub notificationways. If one is OK, we are OK
        # We will filter in another phase
        for nw in self.notificationways:
            nw_b = nw.want_host_notification(t, state, type, business_impact, cmd)
            if nw_b:
                return True

        # Oh, nobody... so NO :)
        return False

    # Call to get our commands to launch a Notification
    def get_notification_commands(self, type):
        r = []
        # service_notification_commands for service
        notif_commands_prop = type + '_notification_commands'
        for nw in self.notificationways:
            r.extend(getattr(nw, notif_commands_prop))
        return r

    # Check if required props are set:
    # contacts OR contactgroups is needed
    def is_correct(self):
        state = True
        cls = self.__class__

        # All of the above are checks in the notificationways part
        for prop, entry in cls.properties.items():
            if prop not in _special_properties:
                if not hasattr(self, prop) and entry.required:
                    logger.error("[contact::%s] %s property not set", self.get_name(), prop)
                    state = False  # Bad boy...

        # There is a case where there is no nw: when there is no special_prop defined
        # at all!!
        if self.notificationways == []:
            for p in _special_properties:
                if not hasattr(self, p):
                    logger.error("[contact::%s] %s property is missing", self.get_name(), p)
                    state = False

        if hasattr(self, 'contact_name'):
            for c in cls.illegal_object_name_chars:
                if c in self.contact_name:
                    logger.error("[contact::%s] %s character not allowed in contact_name",
                                 self.get_name(), c)
                    state = False
        else:
            if hasattr(self, 'alias'):  # take the alias if we miss the contact_name
                self.contact_name = self.alias

        return state

    # Raise a log entry when a downtime begins
    # CONTACT DOWNTIME ALERT:
    # test_contact;STARTED; Contact has entered a period of scheduled downtime
    def raise_enter_downtime_log_entry(self):
        naglog_result('info', "CONTACT DOWNTIME ALERT: %s;STARTED; Contact has "
                              "entered a period of scheduled downtime" % self.get_name())

    # Raise a log entry when a downtime has finished
    # CONTACT DOWNTIME ALERT:
    # test_contact;STOPPED; Contact has exited from a period of scheduled downtime
    def raise_exit_downtime_log_entry(self):
        naglog_result('info', "CONTACT DOWNTIME ALERT: %s;STOPPED; Contact has "
                              "exited from a period of scheduled downtime" % self.get_name())

    # Raise a log entry when a downtime prematurely ends
    # CONTACT DOWNTIME ALERT:
    # test_contact;CANCELLED; Contact has entered a period of scheduled downtime
    def raise_cancel_downtime_log_entry(self):
        naglog_result('info', "CONTACT DOWNTIME ALERT: %s;CANCELLED; Scheduled "
                              "downtime for contact has been cancelled." % self.get_name())


class Contacts(Items):
    name_property = "contact_name"
    inner_class = Contact

    def linkify(self, timeperiods, commands, notificationways):
        # self.linkify_with_timeperiods(timeperiods, 'service_notification_period')
        # self.linkify_with_timeperiods(timeperiods, 'host_notification_period')
        # self.linkify_command_list_with_commands(commands, 'service_notification_commands')
        # self.linkify_command_list_with_commands(commands, 'host_notification_commands')
        self.linkify_with_notificationways(notificationways)

    # We've got a notificationways property with comma-separated contact names
    # and we want to have a list of NotificationWay objects
    def linkify_with_notificationways(self, notificationways):
        for i in self:
            if not hasattr(i, 'notificationways'):
                continue
            new_notificationways = []
            for nw_name in strip_and_uniq(i.notificationways):
                nw = notificationways.find_by_name(nw_name)
                if nw is not None:
                    new_notificationways.append(nw)
                else:
                    err = "The 'notificationways' of the %s '%s' named '%s' is unknown!" %\
                        (i.__class__.my_type, i.get_name(), nw_name)
                    i.configuration_errors.append(err)
            # Get the list, but first make elements uniq
            i.notificationways = list(set(new_notificationways))

    def late_linkify_c_by_commands(self, commands):
        for i in self:
            for nw in i.notificationways:
                nw.late_linkify_nw_by_commands(commands)

    # We look for the contacts property in contacts and explode it
    def explode(self, contactgroups, notificationways):
        # The contactgroups property needs to be filled to get the information
        self.apply_partial_inheritance('contactgroups')

        # _special properties may come from a template, so
        # import them before we grok ourselves
        for prop in _special_properties:
            if prop == 'contact_name':
                continue
            self.apply_partial_inheritance(prop)

        # Register ourselves into the contactgroups we are in
        for c in self:
            if not (hasattr(c, 'contact_name') and hasattr(c, 'contactgroups')):
                continue
            for cg in c.contactgroups:
                contactgroups.add_member(c.contact_name, cg.strip())

        # Now create a notification way with the simple parameters of the
        # contacts
        for c in self:
            need_notificationway = False
            params = {}
            for p in _simple_way_parameters:
                if hasattr(c, p):
                    need_notificationway = True
                    params[p] = getattr(c, p)
                else:  # put a default text value
                    # Remove the value and put a default value
                    setattr(c, p, c.properties[p].default)

            if need_notificationway:
                # print("Create notif way with", params)
                cname = getattr(c, 'contact_name', getattr(c, 'alias', ''))
                nw_name = cname + '_inner_notificationway'
                notificationways.new_inner_member(nw_name, params)

                if not hasattr(c, 'notificationways'):
                    c.notificationways = [nw_name]
                else:
                    c.notificationways = list(c.notificationways)
                    c.notificationways.append(nw_name)

size: 13,347 · language: Python · extension: .tac · total_lines: 261 · avg_line_length: 41.321839 · max_line_length: 97 · alphanum_fraction: 0.6312
repo_name: shinken-solutions/shinken · repo_stars: 1,133 · repo_forks: 337 · repo_open_issues: 226 · repo_license: AGPL-3.0 · repo_extraction_date: 9/5/2024, 5:09:53 PM (Europe/Amsterdam)
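The decision in `Contact.want_service_notification` above works as follows: a disabled flag or any in-effect downtime vetoes the notification, otherwise the contact's notification ways are ORed together and the first accepting one wins. Here is a self-contained sketch of that logic, with hypothetical stub classes in place of Shinken's real objects.

```python
# Standalone sketch of the decision logic in Contact.want_service_notification.
# StubDowntime and StubNotifWay are hypothetical simplifications.

class StubDowntime(object):
    def __init__(self, in_effect):
        self.is_in_effect = in_effect

class StubNotifWay(object):
    def __init__(self, accepts):
        self.accepts = accepts
    def want_service_notification(self, t, state, type, business_impact, cmd=None):
        return self.accepts

def want_service_notification(enabled, downtimes, notificationways,
                              t, state, type, business_impact, cmd=None):
    if not enabled:
        return False
    for dt in downtimes:          # a single active downtime silences the contact
        if dt.is_in_effect:
            return False
    for nw in notificationways:   # the first accepting notification way wins
        if nw.want_service_notification(t, state, type, business_impact, cmd):
            return True
    return False

ways = [StubNotifWay(False), StubNotifWay(True)]
print(want_service_notification(True, [], ways, 0, 'CRITICAL', 'PROBLEM', 3))  # True
print(want_service_notification(True, [StubDowntime(True)], ways,
                                0, 'CRITICAL', 'PROBLEM', 3))                  # False
```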

id: 6,622 · file_name: contactgroup.py
file_path: shinken-solutions_shinken/shinken/objects/contactgroup.py
content:

#!/usr/bin/python
# -*- coding: utf-8 -*-

# Copyright (C) 2009-2014:
#     Gabes Jean, naparuba@gmail.com
#     Gerhard Lausser, Gerhard.Lausser@consol.de
#     Gregory Starck, g.starck@gmail.com
#     Hartmut Goebel, h.goebel@goebel-consult.de
#
# This file is part of Shinken.
#
# Shinken is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Shinken is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with Shinken. If not, see <http://www.gnu.org/licenses/>.

# Contactgroups are groups for contacts.
# They are just used for the config read and exploded by elements

from __future__ import absolute_import, division, print_function, unicode_literals

from shinken.objects.itemgroup import Itemgroup, Itemgroups
from shinken.property import IntegerProp, StringProp
from shinken.log import logger


class Contactgroup(Itemgroup):
    id = 1
    my_type = 'contactgroup'

    properties = Itemgroup.properties.copy()
    properties.update({
        'id': IntegerProp(default=0, fill_brok=['full_status']),
        'contactgroup_name': StringProp(fill_brok=['full_status']),
        'contactgroup_members': StringProp(fill_brok=['full_status']),
        'alias': StringProp(fill_brok=['full_status']),
    })

    macros = {
        'CONTACTGROUPALIAS': 'alias',
        'CONTACTGROUPMEMBERS': 'get_members'
    }

    def get_contacts(self):
        if getattr(self, 'members', None) is not None:
            return [m for m in self.members]
        else:
            return []

    def get_name(self):
        return getattr(self, 'contactgroup_name', 'UNNAMED-CONTACTGROUP')

    def get_contactgroup_members(self):
        if self.has('contactgroup_members'):
            return [m.strip() for m in self.contactgroup_members.split(',')]
        else:
            return []

    # We fill properties with template ones if needed.
    # Because the contactgroup we call may not have its members,
    # we call get_contacts_by_explosion on it
    def get_contacts_by_explosion(self, contactgroups):
        # First we tag the cg so it will not be exploded
        # if a son of it already called it
        self.already_explode = True

        # Now the recursive part.
        # rec_tag is set to False for every CG we explode,
        # so if it is True here, there must be a loop in the CG
        # calls... not GOOD!
        if self.rec_tag:
            logger.error("[contactgroup::%s] got a loop in contactgroup definition",
                         self.get_name())
            if self.has('members'):
                return self.members
            else:
                return ''
        # Ok, not a loop, we tag it and continue
        self.rec_tag = True
        cg_mbrs = self.get_contactgroup_members()
        for cg_mbr in cg_mbrs:
            cg = contactgroups.find_by_name(cg_mbr.strip())
            if cg is not None:
                value = cg.get_contacts_by_explosion(contactgroups)
                if value is not None:
                    self.add_string_member(value)
        if self.has('members'):
            return self.members
        else:
            return ''


class Contactgroups(Itemgroups):
    name_property = "contactgroup_name"  # is used for finding contactgroups
    inner_class = Contactgroup

    def get_members_by_name(self, cgname):
        cg = self.find_by_name(cgname)
        if cg is None:
            return []
        return cg.get_contacts()

    def add_contactgroup(self, cg):
        self.add_item(cg)

    def linkify(self, contacts):
        self.linkify_cg_by_cont(contacts)

    # We just search for each contactgroup the contact objects
    # and replace the names by them
    def linkify_cg_by_cont(self, contacts):
        for cg in self:
            mbrs = cg.get_contacts()

            # The new member list, as objects
            new_mbrs = []
            for mbr in mbrs:
                mbr = mbr.strip()  # protect with strip at the beginning so we don't care about spaces
                if mbr == '':  # void entry, skip this
                    continue
                m = contacts.find_by_name(mbr)
                # Maybe the contact is missing; if so, it must be put in unknown_members
                if m is not None:
                    new_mbrs.append(m)
                else:
                    cg.add_string_unknown_member(mbr)

            # Make members uniq
            new_mbrs = list(set(new_mbrs))

            # We found the objects, we replace the names
            cg.replace_members(new_mbrs)

    # Add a contact string to a contactgroup member list;
    # if the contactgroup does not exist, create it
    def add_member(self, cname, cgname):
        cg = self.find_by_name(cgname)
        # if the cg does not exist, create it
        if cg is None:
            cg = Contactgroup({'contactgroup_name': cgname, 'alias': cgname, 'members': cname})
            self.add_contactgroup(cg)
        else:
            cg.add_string_member(cname)

    # Used to fill members with contactgroup_members
    def explode(self):
        # We do not want the same cg to be exploded again and again,
        # so we tag it
        for tmp_cg in self.items.values():
            tmp_cg.already_explode = False

        for cg in self.items.values():
            if cg.has('contactgroup_members') and not cg.already_explode:
                # get_contacts_by_explosion is a recursive
                # function, so we must tag the cgs so we do not loop
                for tmp_cg in self.items.values():
                    tmp_cg.rec_tag = False
                cg.get_contacts_by_explosion(self)

        # We clean the tags
        for tmp_cg in self.items.values():
            if hasattr(tmp_cg, 'rec_tag'):
                del tmp_cg.rec_tag
            del tmp_cg.already_explode

size: 6,188 · language: Python · extension: .tac · total_lines: 146 · avg_line_length: 33.472603 · max_line_length: 98 · alphanum_fraction: 0.616933
repo_name: shinken-solutions/shinken · repo_stars: 1,133 · repo_forks: 337 · repo_open_issues: 226 · repo_license: AGPL-3.0 · repo_extraction_date: 9/5/2024, 5:09:53 PM (Europe/Amsterdam)
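The `explode`/`get_contacts_by_explosion` pair above recursively pulls members of sub-groups into their parent while guarding against definition loops with `rec_tag`. Below is a compact standalone sketch of the same idea, using hypothetical dict-based group data rather than Shinken's real objects.

```python
# Standalone sketch of the contactgroup_members explosion above: members of
# sub-groups are pulled into the parent, and a seen-set guards against cycles
# (the role played by rec_tag in the Shinken code). Group data is hypothetical.

def explode(groups, name, rec_seen=None):
    """Return the full member set of `name`, following contactgroup_members."""
    rec_seen = rec_seen or set()
    if name in rec_seen:  # loop in the contactgroup definition: stop here
        print("loop detected in contactgroup %s" % name)
        return set()
    rec_seen.add(name)
    g = groups[name]
    members = set(g.get("members", []))
    for sub in g.get("contactgroup_members", []):
        members |= explode(groups, sub, rec_seen)
    return members

groups = {
    "all": {"members": [], "contactgroup_members": ["oradba", "web"]},
    "oradba": {"members": ["oradba1", "oradba2"]},
    "web": {"members": ["web1", "web2"]},
}
print(sorted(explode(groups, "all")))  # ['oradba1', 'oradba2', 'web1', 'web2']
```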

id: 6,623 · file_name: generic-contact.cfg
file_path: shinken-solutions_shinken/etc/templates/generic-contact.cfg
content:

# Contact definition
# By default the contact will be notified by email
define contact{
name generic-contact
host_notifications_enabled 1
service_notifications_enabled 1
email shinken@localhost
can_submit_commands 1
notificationways email
register 0
}

size: 341 · language: Python · extension: .tac · total_lines: 11 · avg_line_length: 28 · max_line_length: 55 · alphanum_fraction: 0.651515
repo_name: shinken-solutions/shinken · repo_stars: 1,133 · repo_forks: 337 · repo_open_issues: 226 · repo_license: AGPL-3.0 · repo_extraction_date: 9/5/2024, 5:09:53 PM (Europe/Amsterdam)

id: 6,624 · file_name: contactgroup.rst
file_path: shinken-solutions_shinken/doc/source/08_configobjects/contactgroup.rst
content:

.. _configobjects/contactgroup:
========================
Contact Group Definition
========================
Description
============
A contact group definition is used to group one or more :ref:`contacts <configobjects/contact>` together for the purpose of sending out alert/recovery :ref:`notifications <thebasics/notifications>`.
Definition Format:
===================
Bold directives are required, while the others are optional.
===================== =======================
define contactgroup{
**contactgroup_name** **contactgroup_name**
**alias**             **alias**
members               *contacts*
contactgroup_members  *contactgroups*
}
===================== =======================
Example Definition:
====================
::

  define contactgroup{
      contactgroup_name       novell-admins
      alias                   Novell Administrators
      members                 jdoe,rtobert,tzach
  }
Directive Descriptions:
========================
contactgroup_name
  This directive is a short name used to identify the contact group.

alias
  This directive is used to define a longer name or description used to identify the contact group.

members
  This directive is used to define a list of the *short names* of :ref:`contacts <configobjects/contact>` that should be included in this group. Multiple contact names should be separated by commas. This directive may be used as an alternative to (or in addition to) using the *contactgroups* directive in :ref:`contact <configobjects/contact>` definitions.

contactgroup_members
  This optional directive can be used to include contacts from other "sub" contact groups in this contact group. Specify a comma-delimited list of short names of other contact groups whose members should be included in this group.

size: 1,881 · language: Python · extension: .tac · total_lines: 36 · avg_line_length: 47.305556 · max_line_length: 357 · alphanum_fraction: 0.642036
repo_name: shinken-solutions/shinken · repo_stars: 1,133 · repo_forks: 337 · repo_open_issues: 226 · repo_license: AGPL-3.0 · repo_extraction_date: 9/5/2024, 5:09:53 PM (Europe/Amsterdam)

id: 6,625 · file_name: contact.rst
file_path: shinken-solutions_shinken/doc/source/08_configobjects/contact.rst
content:

.. _configobjects/contact:
===================
Contact Definition
===================
Description
============
A contact definition is used to identify someone who should be contacted in the event of a problem on your network. The different arguments to a contact definition are described below.
Definition Format
==================
Bold directives are required, while the others are optional.
================================= =====================================
define contact{
**contact_name**                  ***contact_name***
alias                             *alias*
contactgroups                     *contactgroup_names*
**host_notifications_enabled**    **[0/1]**
**service_notifications_enabled** **[0/1]**
**host_notification_period**      ***timeperiod_name***
**service_notification_period**   ***timeperiod_name***
**host_notification_options**     **[d,u,r,f,s,n]**
**service_notification_options**  **[w,u,c,r,f,s,n]**
**host_notification_commands**    ***command_name***
**service_notification_commands** ***command_name***
email                             *email_address*
pager                             *pager_number or pager_email_gateway*
address*x*                        *additional_contact_address*
can_submit_commands               [0/1]
is_admin                          [0/1]
retain_status_information         [0/1]
retain_nonstatus_information      [0/1]
min_business_impact               [0/1/2/3/4/5]
}
================================= =====================================
Example Definition
===================
::

  define contact{
      contact_name                  jdoe
      alias                         John Doe
      host_notifications_enabled    1
      service_notifications_enabled 1
      service_notification_period   24x7
      host_notification_period      24x7
      service_notification_options  w,u,c,r
      host_notification_options     d,u,r
      service_notification_commands notify-service-by-email
      host_notification_commands    notify-host-by-email
      email                         jdoe@localhost.localdomain
      pager                         555-5555@pagergateway.localhost.localdomain
      address1                      xxxxx.xyyy@icq.com
      address2                      555-555-5555
      can_submit_commands           1
  }
Directive Descriptions
=======================
contact_name
This directive is used to define a short name used to identify the contact. It is referenced in :ref:`contact group <configobjects/contactgroup>` definitions. Under the right circumstances, the $CONTACTNAME$ :ref:`macro <thebasics/macros>` will contain this value.
alias
This directive is used to define a longer name or description for the contact. Under the rights circumstances, the $CONTACTALIAS$ :ref:`macro <thebasics/macros>` will contain this value. If not specified, the *contact_name* will be used as the alias.
contactgroups
This directive is used to identify the *short name(s)* of the :ref:`contactgroup(s) <configobjects/contactgroup>` that the contact belongs to. Multiple contactgroups should be separated by commas. This directive may be used as an alternative to (or in addition to) using the *members* directive in :ref:`contactgroup <configobjects/contactgroup>` definitions.
host_notifications_enabled
This directive is used to determine whether or not the contact will receive notifications about host problems and recoveries. Values :
* 0 = don't send notifications
* 1 = send notifications
service_notifications_enabled
This directive is used to determine whether or not the contact will receive notifications about service problems and recoveries. Values:
* 0 = don't send notifications
* 1 = send notifications
host_notification_period
This directive is used to specify the short name of the :ref:`time period <configobjects/timeperiod>` during which the contact can be notified about host problems or recoveries. You can think of this as an “on call" time for host notifications for the contact. Read the documentation on :ref:`time periods <thebasics/timeperiods>` for more information on how this works and potential problems that may result from improper use.
service_notification_period
This directive is used to specify the short name of the :ref:`time period <configobjects/timeperiod>` during which the contact can be notified about service problems or recoveries. You can think of this as an “on call" time for service notifications for the contact. Read the documentation on :ref:`time periods <thebasics/timeperiods>` for more information on how this works and potential problems that may result from improper use.
host_notification_commands
This directive is used to define a list of the *short names* of the :ref:`commands <configobjects/command>` used to notify the contact of a *host* problem or recovery. Multiple notification commands should be separated by commas. All notification commands are executed when the contact needs to be notified. The maximum amount of time that a notification command can run is controlled by the :ref:`notification_timeout <configuration/configmain-advanced#notification_timeout>` option.
host_notification_options
This directive is used to define the host states for which notifications can be sent out to this contact. Valid options are a combination of one or more of the following:
* d = notify on DOWN host states
* u = notify on UNREACHABLE host states
* r = notify on host recoveries (UP states)
* f = notify when the host starts and stops :ref:`flapping <advanced/flapping>`,
* s = send notifications when host or service :ref:`scheduled downtime <advanced/downtime>` starts and ends. If you specify **n** (none) as an option, the contact will not receive any type of host notifications.
service_notification_options
This directive is used to define the service states for which notifications can be sent out to this contact. Valid options are a combination of one or more of the following:
* w = notify on WARNING service states
* u = notify on UNKNOWN service states
* c = notify on CRITICAL service states
* r = notify on service recoveries (OK states)
* f = notify when the service starts and stops :ref:`flapping <advanced/flapping>`.
* n = (none) : the contact will not receive any type of service notifications.
service_notification_commands
This directive is used to define a list of the *short names* of the :ref:`commands <configobjects/command>` used to notify the contact of a *service* problem or recovery. Multiple notification commands should be separated by commas. All notification commands are executed when the contact needs to be notified. The maximum amount of time that a notification command can run is controlled by the :ref:`notification_timeout <configuration/configmain-advanced#notification_timeout>` option.
email
This directive is used to define an email address for the contact. Depending on how you configure your notification commands, it can be used to send out an alert email to the contact. Under the right circumstances, the $CONTACTEMAIL$ :ref:`macro <thebasics/macros>` will contain this value.
pager
This directive is used to define a pager number for the contact. It can also be an email address to a pager gateway (e.g. *pagejoe@pagenet.com*). Depending on how you configure your notification commands, it can be used to send out an alert page to the contact. Under the right circumstances, the $CONTACTPAGER$ :ref:`macro <thebasics/macros>` will contain this value.
address*x*
Address directives are used to define additional "addresses" for the contact. These addresses can be anything - cell phone numbers, instant messaging addresses, etc. Depending on how you configure your notification commands, they can be used to send out an alert to the contact. Up to six addresses can be defined using these directives (*address1* through *address6*). The $CONTACTADDRESS*x*$ :ref:`macro <thebasics/macros>` will contain this value.
can_submit_commands
This directive is used to determine whether or not the contact can submit :ref:`external commands <advanced/extcommands>` to Shinken from the WebUI. Values:
* 0 = don't allow contact to submit commands
* 1 = allow contact to submit commands.
is_admin
This directive is used to determine whether or not the contact can see all objects in the :ref:`WebUI <integration/webui>`. Values:
* 0 = normal user, can only see the objects they are defined as a contact for
* 1 = allow contact to see all objects
retain_status_information
This directive is used to determine whether or not status-related information about the contact is retained across program restarts. This is only useful if you have enabled state retention using the :ref:`retain_state_information <configuration/configmain-advanced#retain_state_information>` directive. Values:
* 0 = disable status information retention
* 1 = enable status information retention.
retain_nonstatus_information
This directive is used to determine whether or not non-status information about the contact is retained across program restarts. This is only useful if you have enabled state retention using the :ref:`retain_state_information <configuration/configmain-advanced#retain_state_information>` directive. Values:
* 0 = disable non-status information retention
* 1 = enable non-status information retention
min_business_impact
This directive is used to define the minimum business impact (criticity) level of a service/host for which the contact will be notified. Please see :ref:`root_problems_and_impacts <architecture/problems-and-impacts>` for more details. Valid values are:
* 0 = less important
* 1 = more important than 0
* 2 = more important than 1
* 3 = more important than 2
* 4 = more important than 3
* 5 = most important
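Putting these directives together, a minimal contact definition might look like the following sketch (the group, command and time period names are illustrative and must already exist in your own configuration):

define contact{
    contact_name                    jdoe
    alias                           John Doe
    contactgroups                   admins
    host_notifications_enabled      1
    service_notifications_enabled   1
    host_notification_period        24x7
    service_notification_period     24x7
    host_notification_options       d,u,r
    service_notification_options    w,u,c,r
    host_notification_commands      notify-host-by-email
    service_notification_commands   notify-service-by-email
    email                           jdoe@localhost
    can_submit_commands             1
    min_business_impact             3
    }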
[6,626] contactgroups.cfg :: shinken-solutions_shinken/for_fedora/etc/contactgroups.cfg (repo shinken-solutions/shinken, AGPL-3.0)
# Create some contact groups
define contactgroup{
contactgroup_name admins
alias admins
members nagiosadmin
}
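As a hedged illustration of how such a group is consumed, a host definition would typically reference it through its contact_groups directive (the host shown here is hypothetical and omits the other mandatory host directives):

define host{
    host_name       srv-web-1
    address         192.0.2.10
    contact_groups  admins
    }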
[6,627] dnsrecon.py :: darkoperator_dnsrecon/dnsrecon.py (repo darkoperator/dnsrecon, GPL-2.0)
#!/usr/bin/env python3
# Note: This script runs dnsrecon
from dnsrecon import __main__
if __name__ == '__main__':
__main__.main()
[6,628] parser.py :: darkoperator_dnsrecon/tools/parser.py (repo darkoperator/dnsrecon, GPL-2.0)
#!/usr/bin/env python3
# DNSRecon Data Parser
#
# Copyright (C) 2012 Carlos Perez
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; Applies version 2 of the License.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
__version__ = '0.0.7'
__author__ = 'Carlos Perez, Carlos_Perez@darkoperator.com'
import csv
import getopt
import os
import re
import sys
import xml.etree.ElementTree as cElementTree
from netaddr import *
# Function Definitions
# ------------------------------------------------------------------------------
def print_status(message=''):
print(f'\033[1;34m[*]\033[1;m {message}')
def print_good(message=''):
print(f'\033[1;32m[*]\033[1;m {message}')
def print_error(message=''):
print(f'\033[1;31m[-]\033[1;m {message}')
def print_debug(message=''):
print(f'\033[1;31m[!]\033[1;m {message}')
def print_line(message=''):
print(f'{message}')
def process_range(arg):
"""
This function will take a string representation of a range for IPv4 or IPv6 in
CIDR or Range format and return a list of IPs.
"""
try:
        ip_list = []  # default to an empty, iterable result
range_vals = []
if re.match(r'\S*/\S*', arg):
ip_list = IPNetwork(arg)
range_vals.extend(arg.split('-'))
if len(range_vals) == 2:
            ip_list = IPRange(range_vals[0], range_vals[1])  # an IPRange is iterable just like an IPNetwork
except Exception:
        print_error(f'Range provided is not valid: {arg}')
return []
return ip_list
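# Illustrative examples (not part of the original script):
#   process_range('192.0.2.0/30')        -> IPNetwork('192.0.2.0/30')
#   process_range('192.0.2.1-192.0.2.5') -> IPRange('192.0.2.1', '192.0.2.5')
# Both results are iterable, so ip_filter.extend(...) expands them into single IPs.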
def xml_parse(xm_file, ifilter, tfilter, nfilter, list):
"""
Function for parsing XML files created by DNSRecon and apply filters.
"""
iplist = []
for event, elem in cElementTree.iterparse(xm_file):
# Check if it is a record
if elem.tag == 'record':
# Check that it is a RR Type that has an IP Address
if 'address' in elem.attrib:
# Check if the IP is in the filter list of IPs to ignore
if (len(ifilter) == 0 or IPAddress(elem.attrib['address']) in ifilter) and (elem.attrib['address'] != 'no_ip'):
# Check if the RR Type against the types
if re.match(tfilter, elem.attrib['type'], re.I):
# Process A, AAAA and PTR Records
if re.search(r'PTR|^[A]$|AAAA', elem.attrib['type']) and re.search(nfilter, elem.attrib['name'], re.I):
if list:
if elem.attrib['address'] not in iplist:
                                    iplist.append(elem.attrib['address'])  # collect for the unique IP list printed at the end
else:
print_good(f"{elem.attrib['type']} {elem.attrib['name']} {elem.attrib['address']}")
# Process NS Records
elif re.search(r'NS', elem.attrib['type']) and re.search(nfilter, elem.attrib['target'], re.I):
if list:
if elem.attrib['address'] not in iplist:
iplist.append(elem.attrib['address'])
else:
print_good(f"{elem.attrib['type']} {elem.attrib['target']} {elem.attrib['address']}")
# Process SOA Records
elif re.search(r'SOA', elem.attrib['type']) and re.search(nfilter, elem.attrib['mname'], re.I):
if list:
if elem.attrib['address'] not in iplist:
iplist.append(elem.attrib['address'])
else:
print_good(f"{elem.attrib['type']} {elem.attrib['mname']} {elem.attrib['address']}")
# Process MS Records
elif re.search(r'MX', elem.attrib['type']) and re.search(nfilter, elem.attrib['exchange'], re.I):
if list:
if elem.attrib['address'] not in iplist:
iplist.append(elem.attrib['address'])
else:
print_good(f"{elem.attrib['type']} {elem.attrib['exchange']} {elem.attrib['address']}")
# Process SRV Records
elif re.search(r'SRV', elem.attrib['type']) and re.search(nfilter, elem.attrib['target'], re.I):
if list:
if elem.attrib['address'] not in iplist:
iplist.append(elem.attrib['address'])
else:
print_good(
'{0} {1} {2} {3} {4}'.format(
elem.attrib['type'],
elem.attrib['name'],
elem.attrib['address'],
elem.attrib['target'],
elem.attrib['port'],
)
)
else:
if re.match(tfilter, elem.attrib['type'], re.I):
# Process TXT and SPF Records
if re.search(r'TXT|SPF', elem.attrib['type']):
if not list:
print_good('{0} {1}'.format(elem.attrib['type'], elem.attrib['strings']))
# Process IPs in a list
if len(iplist) > 0:
try:
for ip in filter(None, iplist):
print_line(ip)
except OSError:
sys.exit(0)
def csv_parse(csv_file, ifilter, tfilter, nfilter, list):
"""
Function for parsing CSV files created by DNSRecon and apply filters.
"""
iplist = []
reader = csv.reader(open(csv_file), delimiter=',')
next(reader)
for row in reader:
# Check if IP is in the filter list of addresses to ignore
if ((len(ifilter) == 0) or (IPAddress(row[2]) in ifilter)) and (row[2] != 'no_ip'):
# Check Host Name regex and type list
if re.search(tfilter, row[0], re.I) and re.search(nfilter, row[1], re.I):
if list:
if row[2] not in iplist:
                        iplist.append(row[2])  # track seen IPs so each prints only once
                        print(row[2])
else:
print_good(' '.join(row))
# Process IPs for target list if available
# if len(iplist) > 0:
# for ip in filter(None, iplist):
# print_line(ip)
def extract_hostnames(file):
host_names = []
hostname_pattern = re.compile('(^[^.]*)')
file_type = detect_type(file)
if file_type == 'xml':
for event, elem in cElementTree.iterparse(file):
# Check if it is a record
if elem.tag == 'record':
# Check that it is a RR Type that has an IP Address
if 'address' in elem.attrib:
# Process A, AAAA and PTR Records
if re.search(r'PTR|^[A]$|AAAA', elem.attrib['type']):
host_names.append(re.search(hostname_pattern, elem.attrib['name']).group(1))
# Process NS Records
elif re.search(r'NS', elem.attrib['type']):
host_names.append(re.search(hostname_pattern, elem.attrib['target']).group(1))
# Process SOA Records
elif re.search(r'SOA', elem.attrib['type']):
host_names.append(re.search(hostname_pattern, elem.attrib['mname']).group(1))
# Process MX Records
elif re.search(r'MX', elem.attrib['type']):
host_names.append(re.search(hostname_pattern, elem.attrib['exchange']).group(1))
# Process SRV Records
elif re.search(r'SRV', elem.attrib['type']):
host_names.append(re.search(hostname_pattern, elem.attrib['target']).group(1))
elif file_type == 'csv':
reader = csv.reader(open(file), delimiter=',')
        next(reader)  # skip the CSV header row
for row in reader:
host_names.append(re.search(hostname_pattern, row[1]).group(1))
host_names = list(set(host_names))
# Return list with no empty values
    return list(filter(None, host_names))
def detect_type(file):
"""
Function for detecting the file type by checking the first line of the file.
Returns xml, csv or None.
"""
ftype = None
    # Get the first line of the file for checking
    with open(file) as fd:
        first_line = fd.readline()
    # Determine the file type based on the first line's content
    if re.search('(xml version)', first_line):
        ftype = 'xml'
    elif re.search(r'\w*,[^,]*,[^,]*', first_line):
ftype = 'csv'
else:
raise Exception('Unsupported File Type')
return ftype
def usage():
print(f'Version: {__version__}')
print('DNSRecon output file parser')
print('Usage: parser.py <options>\n')
print('Options:')
print(' -h, --help Show this help message and exit')
print(' -f, --file <file> DNSRecon XML or CSV output file to parse.')
print(' -l, --list Output an unique IP List that can be used with other tools.')
print(' -i, --ips <ranges> IP Ranges in a comma separated list each in formats (first-last)')
print(' or in (range/bitmask) for ranges to be included from output.')
print(' For A, AAAA, NS, MX, SOA, SRV and PTR Records.')
print(' -t, --type <type> Resource Record Types as a regular expression to filter output.')
print(' For A, AAAA, NS, MX, SOA, TXT, SPF, SRV and PTR Records.')
print(' -s, --str <regex> Regular expression between quotes for filtering host names on.')
print(' For A, AAAA, NS, MX, SOA, SRV and PTR Records.')
print(' -n, --name Return list of unique host names.')
print(' For A, AAAA, NS, MX, SOA, SRV and PTR Records.')
sys.exit(0)
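# Example invocations (the file names are hypothetical):
#   python3 parser.py -f dnsrecon_output.xml -l
#   python3 parser.py -f dnsrecon_output.csv -t 'A|AAAA' -s 'mail'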
def main():
#
# Option Variables
#
ip_filter = []
name_filter = '(.*)'
type_filter = '(.*)'
target_list = False
file = None
names = False
#
# Define options
#
try:
options, args = getopt.getopt(
sys.argv[1:],
'hi:t:s:lf:n',
['help', 'ips=', 'type=', 'str=', 'list', 'file=', 'name'],
)
except getopt.GetoptError as error:
print_error('Wrong Option Provided!')
print_error(error)
return
#
# Parse options
#
for opt, arg in options:
if opt in ('-t', '--type'):
type_filter = arg
elif opt in ('-i', '--ips'):
ipranges = arg.split(',')
for r in ipranges:
ip_filter.extend(process_range(r))
elif opt in ('-s', '--str'):
name_filter = f'({arg})'
elif opt in ('-l', '--list'):
target_list = True
elif opt in ('-f', '--file'):
# Check if the dictionary file exists
if os.path.isfile(arg):
file = arg
else:
print_error(f'File {arg} does not exist!')
exit(1)
elif opt in ('-r', '--range'):
ip_list = []
ip_range = process_range(arg)
if len(ip_range) > 0:
ip_list.extend(ip_range)
else:
sys.exit(1)
elif opt in ('-n', '--name'):
names = True
elif opt in '-h':
usage()
# start execution based on options
if file:
if names:
try:
found_names = extract_hostnames(file)
found_names.sort()
for n in found_names:
print_line(n)
except OSError:
sys.exit(0)
else:
file_type = detect_type(file)
if file_type == 'xml':
xml_parse(file, ip_filter, type_filter, name_filter, target_list)
elif file_type == 'csv':
csv_parse(file, ip_filter, type_filter, name_filter, target_list)
else:
print_error('A DNSRecon XML or CSV output file must be provided to be parsed')
usage()
if __name__ == '__main__':
main()
[6,629] tld_downloader.py :: darkoperator_dnsrecon/tools/tld_downloader.py (repo darkoperator/dnsrecon, GPL-2.0)
#!/usr/bin/python3
# Read the TLD data from https://tld-list.com/df/tld-list-details.json
import argparse
import json
def read_tld_data(file_path):
with open(file_path) as f:
return json.load(f)
def filter_and_split_tlds(tld_data, tld_type):
sponsored = []
unsponsored = []
for k, v in tld_data.items():
        if tld_type is None or v['type'] == tld_type:  # a None filter keeps every TLD type
if v.get('sponsor'):
sponsored.append(k)
else:
unsponsored.append(k)
return sponsored, unsponsored
def main():
parser = argparse.ArgumentParser(description='Filter TLD data from a local file')
parser.add_argument('--file', default='tld-list-details.json', help='Input file containing TLD data')
parser.add_argument('--type', choices=['gTLD', 'ccTLD'], help='Filter TLDs by type')
parser.add_argument('--output', default='filtered_tld_list', help='Base name for output files')
args = parser.parse_args()
tld_data = read_tld_data(args.file)
if args.type:
sponsored, unsponsored = filter_and_split_tlds(tld_data, args.type)
else:
sponsored, unsponsored = filter_and_split_tlds(tld_data, None)
with open(f'{args.output}_sponsored.json', 'w') as f:
json.dump(sponsored, f, indent=4)
with open(f'{args.output}_unsponsored.json', 'w') as f:
json.dump(unsponsored, f, indent=4)
print(f'Sponsored TLDs have been saved to {args.output}_sponsored.json')
print(f'Unsponsored TLDs have been saved to {args.output}_unsponsored.json')
if __name__ == '__main__':
main()
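# Example invocation (assumes tld-list-details.json was downloaded beforehand):
#   python3 tld_downloader.py --type gTLD --output my_tlds
# which writes my_tlds_sponsored.json and my_tlds_unsponsored.json.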
[6,630] test_dnshelper.py :: darkoperator_dnsrecon/tests/test_dnshelper.py (repo darkoperator/dnsrecon, GPL-2.0)
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Unit test for DNSRecon's dnshelper library
# Author: Filippo Lauria (@filippolauria)
#
# Copyright (C) 2023 Carlos Perez
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; Applies version 2 of the License.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
from dnsrecon.lib.dnshelper import DnsHelper
from netaddr import IPAddress
from re import match
class Test_Lib_dnshelper:
def test_get_a(self):
helper = DnsHelper("google.com")
records = helper.get_a("ipv4.google.com")
for record in records:
assert record[0] in ["A", "CNAME"]
def test_get_aaaa(self):
helper = DnsHelper("google.com")
records = helper.get_aaaa("ipv6.google.com")
for record in records:
assert record[0] in ["AAAA", "CNAME"]
def test_get_mx(self):
helper = DnsHelper("google.com")
records = helper.get_mx()
for record in records:
assert record[0] == "MX"
def test_get_ip(self):
helper = DnsHelper("google.com")
records = helper.get_ip("google.com")
for record in records:
ip = IPAddress(record[2])
assert ip.version in [4, 6] # ~ redundant
def test_get_txt(self):
helper = DnsHelper("gmail.com")
records = helper.get_txt()
for record in records:
assert record[0] == "TXT"
def test_get_ns(self):
helper = DnsHelper("zonetransfer.me")
records = helper.get_ns()
for record in records:
assert record[0] == "NS"
def test_get_soa(self):
helper = DnsHelper("zonetransfer.me")
records = helper.get_soa()
for record in records:
assert record[0] == "SOA"
def test_get_srv(self):
helper = DnsHelper("nsztm1.digi.ninja")
records = helper.get_srv("_sip._tcp.zonetransfer.me")
for record in records:
assert record[0] == "SRV"
def test_zone_transfer(self):
helper = DnsHelper("zonetransfer.me")
records = helper.zone_transfer()
assert len(records) >= 135
def test_get_ptr(self):
helper = DnsHelper("zonetransfer.me")
records = helper.get_ptr("51.79.37.18")
assert len(records) == 1 and match(r"^.+\.megacorpone\.com$", records[0][1])
[6,631] test_dnsrecon.py :: darkoperator_dnsrecon/tests/test_dnsrecon.py (repo darkoperator/dnsrecon, GPL-2.0)
from unittest.mock import patch, MagicMock
from dnsrecon import cli
def test_check_wildcard():
with patch('dnsrecon.lib.dnshelper.DnsHelper') as mock_dns_helper:
mock_instance = mock_dns_helper.return_value
mock_instance.check_wildcard.return_value = (True, ["192.0.2.1"])
result = cli.check_wildcard(mock_instance, "zonetransfer.me")
assert result == set() # The function returns an empty set
def test_expand_range():
input_range = "192.0.2.0"
end_ip = "192.0.2.3"
result = cli.expand_range(input_range, end_ip)
assert len(result) == 4
assert "192.0.2.0" in result
assert "192.0.2.3" in result
def test_brute_domain():
with patch('dnsrecon.lib.dnshelper.DnsHelper') as mock_dns_helper, \
patch('builtins.input', return_value='y'): # Mock user input
mock_instance = mock_dns_helper.return_value
mock_instance.get_a.return_value = ["192.0.2.1"]
mock_instance.get_aaaa.return_value = ["2001:db8::1"]
mock_instance.check_wildcard.return_value = (False, [])
# Mock the generate_testname function to return a valid string
with patch('dnsrecon.cli.generate_testname', return_value='testname.zonetransfer.me'):
result = cli.brute_domain(mock_instance, "zonetransfer.me", ["subdomain"])
assert isinstance(result, list)
def test_general_enum():
with patch('dnsrecon.lib.dnshelper.DnsHelper') as mock_dns_helper:
mock_instance = mock_dns_helper.return_value
mock_instance.get_a.return_value = ["192.0.2.1"]
mock_instance.get_aaaa.return_value = ["2001:db8::1"]
mock_instance.get_mx.return_value = ["mail.zonetransfer.me"]
mock_instance.get_txt.return_value = ["txt.zonetransfer.me"]
mock_instance.get_ns.return_value = ["ns.zonetransfer.me"]
mock_instance.get_soa.return_value = ["soa.zonetransfer.me"]
mock_instance.get_srv.return_value = ["srv.zonetransfer.me"]
mock_instance.get_spf.return_value = ["spf.zonetransfer.me"]
mock_instance.get_nsec.return_value = ["nsec.zonetransfer.me"]
mock_instance.get_nsec3.return_value = ["nsec3.zonetransfer.me"]
mock_instance.get_nsec3param.return_value = ["nsec3param.zonetransfer.me"]
mock_instance.get_ds.return_value = ["ds.zonetransfer.me"]
mock_instance.get_dnskey.return_value = ["dnskey.zonetransfer.me"]
mock_instance.get_rrsig.return_value = ["rrsig.zonetransfer.me"]
result = cli.general_enum(mock_instance, "zonetransfer.me", True, True, True, True, True, True, True, 5.0)
assert result is None # The function doesn't return anything
def test_get_nsec_type():
with patch('dnsrecon.lib.dnshelper.DnsHelper') as mock_dns_helper:
mock_instance = mock_dns_helper.return_value
mock_instance._res = MagicMock()
mock_instance._res.nameservers = ["8.8.8.8"]
mock_instance._res.timeout = 2.0
mock_answer = MagicMock()
mock_answer.authority = []
with patch('dnsrecon.cli.get_a_answer', return_value=mock_answer):
result = cli.get_nsec_type("zonetransfer.me", mock_instance)
assert result is None
[6,632] cli.py :: darkoperator_dnsrecon/dnsrecon/cli.py (repo darkoperator/dnsrecon, GPL-2.0)
#!/usr/bin/env python3
# DNSRecon
#
# Copyright (C) 2023 Carlos Perez
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; Applies version 2 of the License.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
__version__ = '1.3.0'
__author__ = 'Carlos Perez, Carlos_Perez@darkoperator.com'
__name__ = 'cli'
__doc__ = """
DNSRecon https://www.darkoperator.com
by Carlos Perez, Darkoperator
"""
import datetime
import json
import os
import sqlite3
import sys
from argparse import ArgumentError, ArgumentParser, RawTextHelpFormatter
from concurrent import futures
from pathlib import Path
from random import SystemRandom
from string import ascii_letters, digits
from xml.dom import minidom
from xml.etree import ElementTree
from xml.etree.ElementTree import Element
import dns.flags
import dns.message
import dns.query
import dns.rdata
import dns.rdatatype
import dns.resolver
import dns.reversename
import dns.zone
import netaddr
from dns.dnssec import algorithm_to_text
from loguru import logger
from dnsrecon.lib.bingenum import *
from dnsrecon.lib.crtenum import scrape_crtsh
from dnsrecon.lib.dnshelper import DnsHelper
from dnsrecon.lib.tlds import TLDS
from dnsrecon.lib.whois import *
from dnsrecon.lib.yandexenum import *
# Global Variables for Brute force Threads
brtdata = []
CONFIG = {'disable_check_recursion': False, 'disable_check_bindversion': False}
DATA_DIR = Path(__file__).parent / 'data'
def process_range(arg):
"""
This function will take a string representation of a range for IPv4 or IPv6 in
CIDR or Range format and return a list of individual IP addresses.
"""
ip_list = []
ranges_raw_list = list(set(arg.strip().split(',')))
for entry in ranges_raw_list:
try:
if re.match(r'\S*/\S*', entry):
# If CIDR, expand to individual IPs
ip_list.extend(list(netaddr.IPNetwork(entry)))
elif re.match(r'\S*-\S*', entry):
# If range, expand to individual IPs
start, end = entry.split('-')
ip_list.extend(list(netaddr.iter_iprange(start, end)))
else:
logger.error(f'Range: {entry} provided is not valid')
except Exception as e:
logger.error(f'Error processing range: {entry} - {e}')
return ip_list
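# Illustrative examples (not part of the original source):
#   process_range('192.0.2.0/30')        -> the four IPs 192.0.2.0 .. 192.0.2.3
#   process_range('192.0.2.1-192.0.2.3') -> the three IPs in that inclusive range
# Both notations can be mixed in a single comma-separated argument.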
def process_spf_data(res, data):
"""
    This function will take the text of a TXT or SPF record, extract the
    IPv4 and IPv6 addresses and ranges, recursively process any "include"
    records, and return a list of IP addresses for the records specified
    in the SPF record.
"""
# Declare lists that will be used in the function.
ipv4 = []
ipv6 = []
includes = []
ip_list = []
    # check first if it is an SPF record
if not re.search(r'v=spf', data):
return
# Parse the record for IPv4 Ranges, individual IPs and include TXT Records.
ipv4.extend(re.findall(r'ip4:(\S*)', ''.join(data)))
ipv6.extend(re.findall(r'ip6:(\S*)', ''.join(data)))
# Create a list of IPNetwork objects.
for ip in ipv4:
for i in IPNetwork(ip):
ip_list.append(i)
for ip in ipv6:
for i in IPNetwork(ip):
ip_list.append(i)
# Extract and process include values.
includes.extend(re.findall(r'include:(\S*)', ''.join(data)))
for inc_ranges in includes:
for spr_rec in res.get_txt(inc_ranges):
spf_data = process_spf_data(res, spr_rec[2])
if spf_data is not None:
ip_list.extend(spf_data)
# Return a list of IP Addresses
return [str(ip) for ip in ip_list]
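# Illustrative example (hypothetical record): given TXT data such as
#   'v=spf1 ip4:192.0.2.0/30 include:_spf.example.net -all'
# the function expands 192.0.2.0/30 into its four addresses and recursively
# resolves the TXT records of _spf.example.net for any further ranges.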
def expand_cidr(cidr_to_expand):
"""
Function to expand a given CIDR and return an Array of IP Addresses that
form the range covered by the CIDR.
"""
return IPNetwork(cidr_to_expand)
def expand_range(startip, endip):
"""
Function to expand a given range and return an Array of IP Addresses that
form the range.
"""
return IPRange(startip, endip)
def range2cidr(ip1, ip2):
"""
Function to return the maximum CIDR given a range of IP's
"""
r1 = IPRange(ip1, ip2)
return str(r1.cidrs()[-1])
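# Example: range2cidr('192.0.2.0', '192.0.2.255') returns '192.0.2.0/24',
# the last (and, for an aligned range like this, the only) covering CIDR.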
def write_to_file(data, target_file):
"""
Function for writing returned data to a file
"""
with open(target_file, 'w') as fd:
fd.write(data)
def generate_testname(name_len, name_suffix):
"""
    Generate a random test name for use by the wildcard resolution and
    NXDOMAIN hijacking checks.
"""
testname = SystemRandom().sample(ascii_letters + digits, name_len)
return ''.join(testname) + '.' + name_suffix
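# Example: generate_testname(12, 'example.com') might return a name such as
# 'aB3xYk9Qw2Zp.example.com' (twelve distinct random alphanumeric characters,
# since SystemRandom.sample draws without replacement).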
def check_wildcard(res, domain_trg):
"""
Function for checking if Wildcard resolution is configured for a Domain
"""
testname = generate_testname(12, domain_trg)
ips = res.get_a(testname)
if not ips:
return None
wildcard_set = set()
logger.debug('Wildcard resolution is enabled on this domain')
for ip in ips:
logger.debug(f'It is resolving to {ip[2]}')
wildcard_set.add(ip[2])
logger.debug('All queries will resolve to this list of addresses!!')
return wildcard_set
def check_nxdomain_hijack(nameserver):
"""
Function for checking if a name server performs NXDOMAIN hijacking
"""
testname = generate_testname(20, 'com')
res = dns.resolver.Resolver(configure=False)
res.nameservers = [nameserver]
res.timeout = 5.0
address = []
for record_type in ('A', 'AAAA'):
try:
answers = res.resolve(testname, record_type, tcp=True)
except (
dns.resolver.NoNameservers,
dns.resolver.NXDOMAIN,
dns.exception.Timeout,
dns.resolver.NoAnswer,
socket.error,
dns.query.BadResponse,
):
continue
if answers:
for ardata in answers.response.answer:
for rdata in ardata:
if rdata.rdtype == 5:
target_ = rdata.target.to_text()
if target_.endswith('.'):
target_ = target_[:-1]
address.append(target_)
else:
address.append(rdata.address)
if not address:
return False
addresses = ', '.join(address)
logger.error(f'Nameserver {nameserver} performs NXDOMAIN hijacking')
logger.error(f'It resolves nonexistent domains to {addresses}')
logger.error('This server has been removed from the name server list!')
return True
def brute_tlds(res, domain, verbose=False, thread_num=None):
"""
This function performs a check of a given domain for known TLD values.
    Prints and returns a list of the records found.
"""
global brtdata
brtdata = []
# Define TLDs and Country Code TLDs
itld = ['arpa']
# Generic TLD
gtld = TLDS.generic_tlds()
# Generic restricted TLD
grtld = ['biz', 'name', 'pro']
# Sponsored TLD
stld = TLDS.sponsored_tlds()
# Country Code TLD
cctld = TLDS.country_codes()
domain_main = domain.split('.')[0]
# Combine all TLDs
total_tlds = list(set(itld + gtld + grtld + stld))
# Let the user know how long it could take
total_combinations = len(cctld) * len(total_tlds)
duration = time.strftime('%H:%M:%S', time.gmtime(total_combinations / 3))
logger.info(f'The operation could take up to: {duration}')
found_tlds = []
try:
with futures.ThreadPoolExecutor(max_workers=thread_num) as executor:
future_results = {}
# Nested loop to combine cctld and total_tlds
for cc in cctld:
for tld in total_tlds:
full_domain = f'{domain_main}.{cc}.{tld}'
future_results[executor.submit(res.get_ip, full_domain)] = full_domain
if verbose:
logger.info(f'Queuing: {full_domain}')
# Display results as soon as threads are completed
for future in futures.as_completed(future_results):
full_domain = future_results[future]
try:
res_ = future.result()
if res_:
for type_, name_, addr_ in res_:
if type_ in ['A', 'AAAA']:
logger.info(f'\t {type_} {name_} {addr_}')
found_tlds.append({'type': type_, 'name': name_, 'address': addr_})
except Exception as e:
logger.error(f'Error resolving domain {full_domain}: {e}')
except Exception as e:
logger.error(f'Error during brute force: {e}')
logger.info(f'{len(found_tlds)} Records Found')
return found_tlds
def brute_srv(res, domain, verbose=False, thread_num=None):
"""
Brute-force most common SRV records for a given Domain. Returns an Array with
records found.
"""
global brtdata
brtdata = []
returned_records = []
srvrcd = [
'_gc._tcp.',
'_kerberos._tcp.',
'_kerberos._udp.',
'_ldap._tcp.',
'_test._tcp.',
'_sips._tcp.',
'_sip._udp.',
'_sip._tcp.',
'_aix._tcp.',
'_aix._tcp.',
'_finger._tcp.',
'_ftp._tcp.',
'_http._tcp.',
'_nntp._tcp.',
'_telnet._tcp.',
'_whois._tcp.',
'_h323cs._tcp.',
'_h323cs._udp.',
'_h323be._tcp.',
'_h323be._udp.',
'_h323ls._tcp.',
'_https._tcp.',
'_h323ls._udp.',
'_sipinternal._tcp.',
'_sipinternaltls._tcp.',
'_sip._tls.',
'_sipfederationtls._tcp.',
'_jabber._tcp.',
'_xmpp-server._tcp.',
'_xmpp-client._tcp.',
'_imap.tcp.',
'_certificates._tcp.',
'_crls._tcp.',
'_pgpkeys._tcp.',
'_pgprevokations._tcp.',
'_cmp._tcp.',
'_svcp._tcp.',
'_crl._tcp.',
'_ocsp._tcp.',
'_PKIXREP._tcp.',
'_smtp._tcp.',
'_hkp._tcp.',
'_hkps._tcp.',
'_jabber._udp.',
'_xmpp-server._udp.',
'_xmpp-client._udp.',
'_jabber-client._tcp.',
'_jabber-client._udp.',
'_kerberos.tcp.dc._msdcs.',
'_ldap._tcp.ForestDNSZones.',
'_ldap._tcp.dc._msdcs.',
'_ldap._tcp.pdc._msdcs.',
'_ldap._tcp.gc._msdcs.',
'_kerberos._tcp.dc._msdcs.',
'_kpasswd._tcp.',
'_kpasswd._udp.',
'_imap._tcp.',
'_imaps._tcp.',
'_submission._tcp.',
'_pop3._tcp.',
'_pop3s._tcp.',
'_caldav._tcp.',
'_caldavs._tcp.',
'_carddav._tcp.',
'_carddavs._tcp.',
'_x-puppet._tcp.',
'_x-puppet-ca._tcp.',
'_autodiscover._tcp.',
]
try:
with futures.ThreadPoolExecutor(max_workers=thread_num) as executor:
if verbose:
for srvtype in srvrcd:
srvtype_domain = srvtype + domain
logger.info(f'Trying {srvtype_domain}...')
future_results = {executor.submit(res.get_srv, srvtype + domain): srvtype for srvtype in srvrcd}
# Display logs as soon as a thread is finished
for future in futures.as_completed(future_results):
res = future.result()
for type_, name_, target_, addr_, port_, priority_ in res:
returned_records.append(
{
'type': type_,
'name': name_,
'target': target_,
'address': addr_,
'port': port_,
}
)
logger.info(f'\t {type_} {name_} {target_} {addr_} {port_}')
except Exception as e:
logger.error(e)
if len(returned_records) > 0:
logger.info(f'{len(returned_records)} Records Found')
else:
logger.error(f'No SRV Records Found for {domain}')
return returned_records
def brute_reverse(res, ip_list, verbose=False, thread_num=None):
"""
    Reverse look-up brute force for a given CIDR (e.g. 192.168.1.1/24). Returns
    an Array of found records.
"""
global brtdata
brtdata = []
returned_records = []
# Ensure that ip_list contains individual IPs instead of IPNetwork or IPRange objects.
expanded_ips = []
for entry in ip_list:
if isinstance(entry, netaddr.IPNetwork) or isinstance(entry, netaddr.IPRange):
expanded_ips.extend(list(entry))
else:
expanded_ips.append(entry)
start_ip = expanded_ips[0]
end_ip = expanded_ips[-1]
logger.info(f'Performing Reverse Lookup from {start_ip} to {end_ip}')
ip_group_size = 255
for ip_group in [expanded_ips[j : j + ip_group_size] for j in range(0, len(expanded_ips), ip_group_size)]:
try:
if verbose:
for ip in ip_group:
logger.info(f'Trying {ip}')
with futures.ThreadPoolExecutor(max_workers=thread_num) as executor:
# Submit each IP for reverse lookup, converting to string as necessary
future_results = {executor.submit(res.get_ptr, str(ip)): ip for ip in ip_group}
# Display logs as soon as a thread is finished
for future in futures.as_completed(future_results):
ip_address = future_results[future]
try:
res_ = future.result()
if res_:
for type_, name_, addr_ in res_:
returned_records.append({'type': type_, 'name': name_, 'address': addr_})
logger.info(f'\t {type_} {name_} {addr_}')
except Exception as e:
logger.error(f'Error resolving IP {ip_address}: {e}')
except Exception as e:
logger.error(f'Error with thread executor: {e}')
logger.info(f'{len(returned_records)} Records Found')
return returned_records
def brute_domain(
res,
dictfile,
dom,
filter_=None,
verbose=False,
ignore_wildcard=False,
thread_num=None,
):
"""
Main Function for domain brute forcing
"""
global brtdata
brtdata = []
# Check if wildcard resolution is enabled
wildcard_set = check_wildcard(res, dom)
if wildcard_set and not ignore_wildcard:
logger.info('Do you wish to continue? [Y/n]')
i = input().lower().strip()
if i not in ['y', 'yes']:
logger.error('Domain bruteforcing aborted.')
return None
found_hosts = []
# Check if the Dictionary file exists
if os.path.isfile(dictfile):
with open(dictfile) as fd:
targets = [f'{line.strip()}.{dom.strip()}' for line in fd]
if verbose:
for target in targets:
logger.info(f'Trying {target}')
with futures.ThreadPoolExecutor(max_workers=thread_num) as executor:
future_results = {executor.submit(res.get_ip, target): target for target in targets}
# Display logs as soon as a thread is finished
for future in futures.as_completed(future_results):
res = future.result()
for type_, name_, address_or_target_ in res:
print_and_append = False
found_dict = {'type': type_, 'name': name_}
if type_ in ['A', 'AAAA']:
# Filter Records if filtering was enabled
if filter_:
if not wildcard_set or address_or_target_ not in wildcard_set:
print_and_append = True
found_dict['address'] = address_or_target_
else:
print_and_append = True
found_dict['address'] = address_or_target_
elif type_ == 'CNAME':
print_and_append = True
found_dict['target'] = address_or_target_
if print_and_append:
logger.info(f'\t {type_} {name_} {address_or_target_}')
found_hosts.append(found_dict)
brtdata.append(res)
logger.info(f'{len(found_hosts)} Records Found')
return found_hosts
def in_cache(res, dict_file, ns):
"""
    Function for Cache Snooping: it checks whether records of a specific type
    for a given domain are present in the cache of the given NS server.
"""
found_records = []
with open(dict_file) as f:
for zone in f:
dom_to_query = zone.strip()
query = dns.message.make_query(dom_to_query, dns.rdatatype.A, dns.rdataclass.IN)
query.flags ^= dns.flags.RD
answer = res.query(query, ns)
for an in answer.answer:
for rcd in an:
if rcd.rdtype not in [1, 5]:
continue
found_record = {'name': an.name, 'ttl': an.ttl}
status = f'\tName: {an.name} TTL: {an.ttl} '
if rcd.rdtype == 1:
found_record['type'] = 'A'
found_record['address'] = rcd.address
status += f'Address: {rcd.address} Type: A'
elif rcd.rdtype == 5:
found_record['type'] = 'CNAME'
found_record['target'] = rcd.target
status += f'Target: {rcd.target} Type: CNAME'
logger.info(status)
found_records.append(found_record)
return found_records
def se_result_process(res, se_entries):
"""
This function processes the results returned from a Search Engine and does
an A and AAAA query for the IP of the found host. Prints and returns a dictionary
with all the results found.
"""
if not se_entries:
return None
resolved_se_entries = []
for se_entry in se_entries:
for type_, name_, address_or_target_ in res.get_ip(se_entry):
if type_ not in ['A', 'CNAME']:
continue
logger.info(f'\t {type_} {name_} {address_or_target_}')
resolved_se_entry = {'type': type_, 'name': name_}
if type_ == 'A':
resolved_se_entry['address'] = address_or_target_
elif type_ == 'CNAME':
resolved_se_entry['target'] = address_or_target_
resolved_se_entries.append(resolved_se_entry)
logger.info(f'{len(resolved_se_entries)} Records Found')
return resolved_se_entries
def get_whois_nets_iplist(ip_list):
"""
    This function will perform whois queries against a list of IPs, extract
    the net ranges and, when available, the organization name of each, and
    remove any duplicate entries.
"""
seen = {}
idfun = repr
found_nets = []
for ip in ip_list:
if ip != 'no_ip':
# Find appropriate Whois Server for the IP
whois_server = get_whois(ip)
# If we get a Whois server Process get the whois and process.
if whois_server:
whois_data = whois(ip, whois_server)
arin_style = re.search('NetRange', whois_data)
ripe_apic_style = re.search('netname', whois_data)
if arin_style or ripe_apic_style:
net = get_whois_nets(whois_data)
if net:
for network in net:
org = get_whois_orgname(whois_data)
found_nets.append(
{
'start': network[0],
'end': network[1],
'orgname': ''.join(org),
}
)
else:
for line in whois_data.splitlines():
recordentrie = re.match(r'^(.*)\s\S*-\w*\s\S*\s(\S*\s-\s\S*)', line)
if recordentrie:
org = recordentrie.group(1)
net = get_whois_nets(recordentrie.group(2))
for network in net:
found_nets.append(
{
'start': network[0],
'end': network[1],
'orgname': ''.join(org),
}
)
# Remove Duplicates
return [seen.setdefault(idfun(e), e) for e in found_nets if idfun(e) not in seen]
def whois_ips(res, ip_list):
"""
    This function will process the results of the whois lookups, present the
    user with the list of net ranges found, and ask whether a reverse lookup
    should be performed on any of the ranges or on all of them.
"""
found_records = []
logger.info('Performing Whois lookup against records found.')
list_whois = get_whois_nets_iplist(unique(ip_list))
if len(list_whois) > 0:
logger.info('The following IP Ranges were found:')
for i in range(len(list_whois)):
logger.info(
'\t {0} {1}-{2} {3}'.format(
str(i) + ')',
list_whois[i]['start'],
list_whois[i]['end'],
list_whois[i]['orgname'],
)
)
logger.info('What Range do you wish to do a Reverse Lookup for?')
logger.info('number, comma separated list, a for all or n for none')
val = sys.stdin.readline()[:-1]
answer = str(val).split(',')
if 'a' in answer:
for i in range(len(list_whois)):
logger.info('Performing Reverse Lookup of range {0}-{1}'.format(list_whois[i]['start'], list_whois[i]['end']))
found_records.append(brute_reverse(res, expand_range(list_whois[i]['start'], list_whois[i]['end'])))
elif 'n' in answer:
logger.info('No Reverse Lookups will be performed.')
else:
for a in answer:
net_selected = list_whois[int(a)]
logger.info(net_selected['orgname'])
logger.info('Performing Reverse Lookup of range {0}-{1}'.format(net_selected['start'], net_selected['end']))
found_records.append(brute_reverse(res, expand_range(net_selected['start'], net_selected['end'])))
else:
logger.error('No IP Ranges were found in the Whois query results')
return found_records
def prettify(elem):
"""
Return a pretty-printed XML string for the Element.
"""
rough_string = ElementTree.tostring(elem, 'utf-8')
reparsed = minidom.parseString(rough_string)
return reparsed.toprettyxml(indent=' ')
def dns_record_from_dict(record_dict_list, scan_info, domains):
"""
Saves DNS Records to XML Given a list of dictionaries each representing
a record to be saved, returns the XML Document formatted.
"""
xml_doc = Element('dnsrecon')
# Add scan information
scanelem = Element('scaninfo')
scanelem.attrib['arguments'] = scan_info[0]
scanelem.attrib['time'] = scan_info[1]
xml_doc.append(scanelem)
for domain in domains:
domelem = Element('domain')
domelem.attrib['domain_name'] = domain
xml_doc.append(domelem)
# Filter records for the current domain
domain_records = [r for r in record_dict_list if r.get('domain') == domain]
for r in domain_records:
elem = Element('record')
for k, v in r.items():
if k != 'domain': # Domain already represented by domelem
elem.attrib[k] = str(v)
domelem.append(elem)
return prettify(xml_doc)
def create_db(db):
"""
This function will create the specified database if not present, and it will create
the table needed for storing the data returned by the modules.
"""
# Connect to the DB
con = sqlite3.connect(db)
# Create SQL Queries to be used in the script
make_table = """CREATE TABLE data (
serial integer Primary Key Autoincrement,
domain TEXT(256),
type TEXT(8),
name TEXT(32),
address TEXT(32),
target TEXT(32),
port TEXT(8),
text TEXT(256),
zt_dns TEXT(32)
)"""
# Set the cursor for connection
con.isolation_level = None
cur = con.cursor()
# Connect and create table
cur.execute("SELECT name FROM sqlite_master WHERE type='table' AND name='data';")
if cur.fetchone() is None:
cur.execute(make_table)
con.commit()
else:
pass
def make_csv(data):
csv_data = 'Domain,Type,Name,Address,Target,Port,String\n'
for record_tmp in data:
record = record_tmp
# make sure that we are working with a dictionary.
if not isinstance(record, dict):
# the representation of data[i] is a list of one dictionary
# we want to exploit this dictionary
record = record_tmp[0]
domain = record.get('domain', '')
type_ = record['type'].upper()
csv_data += f'{domain},{type_},'
if type_ in ['PTR', 'A', 'AAAA', 'NS', 'SOA', 'MX']:
if type_ in ['PTR', 'A', 'AAAA']:
csv_data += record.get('name', '')
elif type_ == 'NS':
csv_data += record.get('target', '')
elif type_ == 'SOA':
csv_data += record.get('mname', '')
elif type_ == 'MX':
csv_data += record.get('exchange', '')
csv_data += ',' + record.get('address', '') + (',' * 3) + '\n'
elif type_ in ['TXT', 'SPF']:
if 'zone_server' not in record:
if type_ == 'SPF':
csv_data += record.get('domain', '')
else:
csv_data += record.get('name', '')
csv_data += (',' * 4) + f"'{record.get('strings', '')}'\n"
elif type_ == 'SRV':
items = [
record.get('name', ''),
record.get('address', ''),
record.get('target', ''),
record.get('port', ''),
]
csv_data += ','.join(items) + ',\n'
elif type_ == 'CNAME':
csv_data += record.get('name', '') + (',' * 2)
if 'target' in record:
csv_data += record['target']
csv_data += (',' * 2) + '\n'
else:
# Handle not common records
del record['type']
s = '; '.join([f'{k}={v}' for k, v in record.items()])
csv_data += (',' * 4) + f"'{s}'\n"
return csv_data
def write_json(jsonfile, data, scan_info):
"""
Function to write DNS Records SOA, PTR, NS, A, AAAA, MX, TXT, SPF and SRV to
JSON file.
"""
scaninfo = {'type': 'ScanInfo', 'arguments': scan_info[0], 'date': scan_info[1]}
data.insert(0, scaninfo)
json_data = json.dumps(data, sort_keys=True, indent=4, separators=(',', ': '))
write_to_file(json_data, jsonfile)
def write_db(db, data):
"""
Function to write DNS Records SOA, PTR, NS, A, AAAA, MX, TXT, SPF and SRV to
DB.
"""
con = sqlite3.connect(db)
# Set the cursor for connection
con.isolation_level = None
cur = con.cursor()
# Normalize the dictionary data
for n in data:
if re.match(r'PTR|^[A]$|AAAA', n['type']):
query = (
'insert into data( domain, type, name, address ) '
+ 'values( "{domain}", "{type}", "{name}","{address}" )'.format(**n)
)
elif re.match(r'NS$', n['type']):
query = (
'insert into data( domain, type, name, address ) '
+ 'values( "{domain}", "{type}", "{target}", "{address}" )'.format(**n)
)
elif re.match(r'SOA', n['type']):
query = (
'insert into data( domain, type, name, address ) '
+ 'values( "{domain}", "{type}", "{mname}", "{address}" )'.format(**n)
)
elif re.match(r'MX', n['type']):
query = (
'insert into data( domain, type, name, address ) '
+ 'values( "{domain}", "{type}", "{exchange}", "{address}" )'.format(**n)
)
elif re.match(r'TXT', n['type']):
query = 'insert into data( domain, type, text) ' + 'values( "{domain}", "{type}","{strings}" )'.format(**n)
elif re.match(r'SPF', n['type']):
query = 'insert into data( domain, type, text) ' + 'values( "{domain}", "{type}","{strings}" )'.format(**n)
elif re.match(r'SRV', n['type']):
query = (
'insert into data( domain, type, name, target, address, port ) '
+ 'values( "{domain}", "{type}", "{name}" , "{target}", "{address}" ,"{port}" )'.format(**n)
)
elif re.match(r'CNAME', n['type']):
query = (
'insert into data( domain, type, name, target ) '
+ 'values( "{domain}", "{type}", "{name}" , "{target}" )'.format(**n)
)
else:
# Handle not common records
t = n['type']
del n['type']
record_data = ''.join([f'{key}={value},' for key, value in n.items()])
            query = 'insert into data(domain, type, text) values ("{0}", "{1}", "{2}")'.format(
                n.get('domain', ''), t, record_data
            )
# Execute Query and commit
cur.execute(query)
con.commit()
def get_nsec_type(domain, res):
target = '0.' + domain
answer = get_a_answer(res, target, res._res.nameservers[0], res._res.timeout)
for a in answer.authority:
if a.rdtype == 50:
return 'NSEC3'
elif a.rdtype == 47:
return 'NSEC'
def dns_sec_check(domain, res):
"""
Check if a zone is configured for DNSSEC and if so is NSEC or NSEC3 is used.
"""
try:
answer = res.resolve(domain, 'DNSKEY', res._res.nameservers[0])
logger.info(f'DNSSEC is configured for {domain}')
nsectype = get_nsec_type(domain, res)
logger.info('DNSKEYs:')
for rdata in answer:
            if rdata.flags == 256:
                key_type = 'ZSK'
            elif rdata.flags == 257:
                key_type = 'KSK'
            else:
                key_type = 'UNKNOWN'
logger.info(f'\t{nsectype} {key_type} {algorithm_to_text(rdata.algorithm)} {dns.rdata._hexify(rdata.key)}')
except dns.resolver.NXDOMAIN:
logger.error(f'Could not resolve domain: {domain}')
sys.exit(1)
except dns.resolver.NoNameservers:
logger.error(f'All nameservers failed to answer the DNSSEC query for {domain}')
except dns.exception.Timeout:
logger.error('A timeout error occurred please make sure you can reach the target DNS Servers')
logger.error(f'directly and requests are not being filtered. Increase the timeout from {res._res.timeout} second')
logger.error('to a higher number with --lifetime <time> option.')
sys.exit(1)
except dns.resolver.NoAnswer:
logger.error(f'No answer for DNSSEC query for {domain}')
def check_bindversion(res, ns_server, timeout):
"""
Check if the version of Bind can be queried for.
"""
version = ''
if not CONFIG or not CONFIG.get('disable_check_bindversion', False):
request = dns.message.make_query('version.bind', 'txt', 'ch')
try:
response = res.query(request, ns_server, timeout=timeout, one_rr_per_rrset=True)
if len(response.answer) > 0:
version = response.answer[0].to_text().split(' ')[-1]
logger.info(f'\t Bind Version for {ns_server} {version}')
except (
dns.resolver.NXDOMAIN,
dns.exception.Timeout,
dns.resolver.NoAnswer,
socket.error,
dns.query.BadResponse,
):
pass
return version
def check_recursive(res, ns_server, timeout):
"""
Check if an NS Server is recursive.
"""
is_recursive = False
if not CONFIG or not CONFIG.get('disable_check_recursion', False):
query = dns.message.make_query('www.google.com.', dns.rdatatype.NS)
try:
response = res.query(query, ns_server, timeout)
recursion_flag_pattern = r'\.*RA\.*'
flags = dns.flags.to_text(response.flags)
result = re.findall(recursion_flag_pattern, flags)
if result:
logger.error(f'\t Recursion enabled on NS Server {ns_server}')
is_recursive = True
except (socket.error, dns.exception.Timeout):
pass
return is_recursive
def general_enum(
res,
domain,
do_axfr,
do_bing,
do_yandex,
do_spf,
do_whois,
do_crt,
zw,
request_timeout,
thread_num=None,
):
"""
Function for performing general enumeration of a domain. It gets SOA, NS, MX
    A, AAAA and SRV records for a given domain. It will first try a Zone Transfer;
    if that is not successful, it will try individual record type enumeration.
"""
returned_records = []
# Var for SPF Record Range Reverse Look-up
found_spf_ranges = []
# Var to hold the IP Addresses that will be queried in Whois
ip_for_whois = []
# Check if wildcards are enabled on the target domain
check_wildcard(res, domain)
# To identify when the records come from a Zone Transfer
from_zt = None
# Perform test for Zone Transfer against all NS servers of a Domain
if do_axfr:
zonerecs = res.zone_transfer()
if zonerecs is not None:
returned_records.extend(res.zone_transfer())
if len(returned_records) == 0:
from_zt = True
# If a Zone Transfer was possible there is no need to enumerate the rest
if from_zt is None:
# Check if DNSSEC is configured
dns_sec_check(domain, res)
# Enumerate SOA Record
try:
found_soa_records = res.get_soa()
for found_soa_record in found_soa_records:
logger.info(f'\t {found_soa_record[0]} {found_soa_record[1]} {found_soa_record[2]}')
# Save dictionary of returned record
returned_records.extend(
[
{
'domain': domain,
'type': found_soa_record[0],
'mname': found_soa_record[1],
'address': found_soa_record[2],
}
]
)
ip_for_whois.append(found_soa_record[2])
except Exception:
logger.info(found_soa_records)
if found_soa_records == []:
logger.error(f'No SOA records found for {domain}')
else:
logger.error(f'Could not Resolve SOA Record for {domain}')
# Enumerate Name Servers
try:
for ns_rcrd in res.get_ns():
logger.info(f'\t {ns_rcrd[0]} {ns_rcrd[1]} {ns_rcrd[2]}')
# Save dictionary of returned record
recursive = check_recursive(res, ns_rcrd[2], res._res.timeout)
bind_ver = check_bindversion(res, ns_rcrd[2], res._res.timeout)
returned_records.extend(
[
{
'domain': domain,
'type': ns_rcrd[0],
'target': ns_rcrd[1],
'address': ns_rcrd[2],
'recursive': str(recursive),
'Version': bind_ver,
}
]
)
ip_for_whois.append(ns_rcrd[2])
except dns.resolver.NoAnswer:
logger.error(f'Could not Resolve NS Records for {domain}')
except dns.resolver.NoNameservers:
logger.error(f'All nameservers failed to answer the NS query for {domain}')
sys.exit(1)
# Enumerate MX Records
try:
for mx_rcrd in res.get_mx():
logger.info(f'\t {mx_rcrd[0]} {mx_rcrd[1]} {mx_rcrd[2]}')
# Save dictionary of returned record
returned_records.extend(
[
{
'domain': domain,
'type': mx_rcrd[0],
'exchange': mx_rcrd[1],
'address': mx_rcrd[2],
}
]
)
ip_for_whois.append(mx_rcrd[2])
except dns.resolver.NoAnswer:
logger.error(f'Could not Resolve MX Records for {domain}')
except dns.resolver.NoNameservers:
logger.error(f'All nameservers failed to answer the MX query for {domain}')
# Enumerate A Record for the targeted Domain
for a_rcrd in res.get_ip(domain):
logger.info(f'\t {a_rcrd[0]} {a_rcrd[1]} {a_rcrd[2]}')
# Save dictionary of returned record
returned_records.extend(
[
{
'domain': domain,
'type': a_rcrd[0],
'name': a_rcrd[1],
'address': a_rcrd[2],
}
]
)
ip_for_whois.append(a_rcrd[2])
# Enumerate SFP and TXT Records for the target domain
text_data = ''
spf_text_data = res.get_spf()
# Save dictionary of returned record
if spf_text_data is not None:
for s in spf_text_data:
logger.info(f'\t {s[0]} {s[1]}')
text_data = s[1]
returned_records.extend([{'domain': domain, 'type': s[0], 'strings': s[1]}])
txt_text_data = res.get_txt()
# Save dictionary of returned record
if txt_text_data is not None:
for t in txt_text_data:
logger.info(f'\t {t[0]} {t[1]} {t[2]}')
text_data += t[2]
returned_records.extend([{'domain': domain, 'type': t[0], 'name': t[1], 'strings': t[2]}])
domainkey_text_data = res.get_txt('_domainkey.' + domain)
# Save dictionary of returned record
if domainkey_text_data is not None:
for t in domainkey_text_data:
logger.info(f'\t {t[0]} {t[1]} {t[2]}')
text_data += t[2]
returned_records.extend([{'domain': domain, 'type': t[0], 'name': t[1], 'strings': t[2]}])
# Process SPF records if selected
if do_spf and len(text_data) > 0:
logger.info('Expanding IP ranges found in DNS and TXT records for Reverse Look-up')
processed_spf_data = process_spf_data(res, text_data)
if processed_spf_data is not None:
found_spf_ranges.extend(processed_spf_data)
if len(found_spf_ranges) > 0:
logger.info('Performing Reverse Look-up of SPF Ranges')
returned_records.extend(brute_reverse(res, unique(found_spf_ranges)))
else:
logger.info('No IP Ranges were found in SPF and TXT Records')
# Enumerate SRV Records for the targeted Domain
logger.info('Enumerating SRV Records')
srv_rcd = brute_srv(res, domain, thread_num=thread_num)
if srv_rcd:
for r in srv_rcd:
ip_for_whois.append(r['address'])
returned_records.extend(
[
{
'domain': domain,
'type': r['type'],
'name': r['name'],
'target': r['target'],
'address': r['address'],
'port': r['port'],
}
]
)
# Do Bing Search enumeration if selected
if do_bing:
logger.info('Performing Bing Search Enumeration')
bing_rcd = se_result_process(res, scrape_bing(domain))
if bing_rcd:
for r in bing_rcd:
                    if 'address' in r:  # check each record dict, not the list itself
ip_for_whois.append(r['address'])
returned_records.extend(bing_rcd)
# Do Yandex Search enumeration if selected
if do_yandex:
logger.info('Performing Yandex Search Enumeration')
            yandex_rcd = se_result_process(res, scrape_yandex(domain))  # the Yandex helper from dnsrecon.lib.yandexenum; the Bing scraper does not belong here
if yandex_rcd:
for r in yandex_rcd:
                    if 'address' in r:
ip_for_whois.append(r['address'])
returned_records.extend(yandex_rcd)
if do_crt:
logger.info('Performing Crt.sh Search Enumeration')
crt_rcd = se_result_process(res, scrape_crtsh(domain))
if crt_rcd:
for r in crt_rcd:
                if 'address' in r:
ip_for_whois.append(r['address'])
returned_records.extend(crt_rcd)
if do_whois:
whois_rcd = whois_ips(res, ip_for_whois)
if whois_rcd:
for r in whois_rcd:
returned_records.extend(r)
if zw:
zone_info = ds_zone_walk(res, domain, request_timeout)
if zone_info:
returned_records.extend(zone_info)
return returned_records
def query_ds(res, target, ns, timeout=5.0):
"""
Function for performing DS Record queries. Returns answer object. Since a
timeout will break the DS NSEC chain of a zone walk, it will exit if a timeout
happens.
"""
try:
query = dns.message.make_query(target, dns.rdatatype.DS, dns.rdataclass.IN)
query.flags += dns.flags.CD
query.use_edns(edns=True, payload=4096)
query.want_dnssec(True)
answer = res.query(query, ns, timeout)
except dns.exception.Timeout:
logger.error('A timeout error occurred please make sure you can reach the target DNS Servers')
logger.error(f'directly and requests are not being filtered. Increase the timeout from {timeout} second')
logger.error('to a higher number with --lifetime <time> option.')
sys.exit(1)
except Exception:
logger.error(f'Unexpected error: {sys.exc_info()[0]}')
raise
return answer
def get_constants(prefix):
"""
Create a dictionary mapping socket module constants to their names.
"""
return dict((getattr(socket, n), n) for n in dir(socket) if n.startswith(prefix))
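# Example: get_constants('AF_') maps numeric socket constants back to their
# names, e.g. 2 -> 'AF_INET' and 10 -> 'AF_INET6' on Linux (exact values
# differ per platform).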
def socket_resolv(target):
"""
    Resolve a target to its IPv4 and IPv6 addresses using the socket library.
"""
found_recs = []
families = get_constants('AF_')
types = get_constants('SOCK_')
try:
for response in socket.getaddrinfo(target, 0):
# Unpack the response tuple
family, socktype, proto, canonname, sockaddr = response
if families[family] == 'AF_INET' and types[socktype] == 'SOCK_DGRAM':
found_recs.append(['A', target, sockaddr[0]])
elif families[family] == 'AF_INET6' and types[socktype] == 'SOCK_DGRAM':
found_recs.append(['AAAA', target, sockaddr[0]])
except Exception:
return found_recs
return found_recs
def lookup_next(target, res):
"""
Try to get the most accurate information for the record found.
"""
DnsHelper(target)
returned_records = []
if re.search(r'^_[A-Za-z0-9_-]*._[A-Za-z0-9_-]*.', target, re.I):
srv_answer = res.get_srv(target)
if len(srv_answer) > 0:
for r in srv_answer:
logger.info('\t {0}'.format(' '.join(r)))
returned_records.append(
{
'type': r[0],
'name': r[1],
'target': r[2],
'address': r[3],
'port': r[4],
}
)
    elif re.search(r'(_autodiscover\.|_spf\.|_domainkey\.)', target, re.I):
txt_answer = res.get_txt(target)
if len(txt_answer) > 0:
for r in txt_answer:
logger.info('\t {0}'.format(' '.join(r)))
returned_records.append({'type': r[0], 'name': r[1], 'strings': r[2]})
else:
txt_answer = res.get_txt(target)
if len(txt_answer) > 0:
for r in txt_answer:
logger.info('\t {0}'.format(' '.join(r)))
returned_records.append({'type': r[0], 'name': r[1], 'strings': r[2]})
else:
logger.info(f'\t A {target} no_ip')
returned_records.append({'type': 'A', 'name': target, 'address': 'no_ip'})
else:
a_answer = res.get_ip(target)
if len(a_answer) > 0:
for r in a_answer:
logger.info(f'\t {r[0]} {r[1]} {r[2]}')
if r[0] == 'CNAME':
returned_records.append({'type': r[0], 'name': r[1], 'target': r[2]})
else:
returned_records.append({'type': r[0], 'name': r[1], 'address': r[2]})
else:
a_answer = socket_resolv(target)
if len(a_answer) > 0:
for r in a_answer:
logger.info(f'\t {r[0]} {r[1]} {r[2]}')
returned_records.append({'type': r[0], 'name': r[1], 'address': r[2]})
else:
logger.info(f'\t A {target} no_ip')
returned_records.append({'type': 'A', 'name': target, 'address': 'no_ip'})
return returned_records
def get_a_answer(res, target, ns, timeout):
query = dns.message.make_query(target, dns.rdatatype.A, dns.rdataclass.IN)
query.flags += dns.flags.CD
query.use_edns(edns=True, payload=4096)
query.want_dnssec(True)
answer = res.query(query, ns, timeout)
return answer
def get_next(res, target, ns, timeout):
next_host = None
response = get_a_answer(res, target, ns, timeout)
for a in response.authority:
if a.rdtype == 47:
for r in a:
next_host = r.next.to_text()[:-1]
return next_host
def ds_zone_walk(res, domain, lifetime):
"""
    Perform a DNSSEC zone walk using the NSEC records found in the authority
    and additional records sections of responses to find the next host to
    query in the zone.
"""
logger.info(f'Performing NSEC Zone Walk for {domain}')
logger.info(f'Getting SOA record for {domain}')
nameserver = ''
try:
target_soas = res.get_soa()
if target_soas:
first_ns = target_soas[0]
if first_ns:
nameserver = first_ns[2]
if nameserver:
logger.info(f'Name Server {nameserver} will be used')
res = DnsHelper(domain, nameserver, lifetime)
if not nameserver:
logger.error('This zone appears to be misconfigured, no SOA record found.')
except Exception as err:
logger.error(f'Exception while trying to determine the SOA records for domain {domain}: {err}')
timeout = res._res.timeout
records = []
transformations = [
# Send the hostname as-is
lambda h, hc, dc: h,
# Prepend a zero as a subdomain
lambda h, hc, dc: f'0.{h}',
# Append a hyphen to the host portion
lambda h, hc, dc: f'{hc}-.{dc}' if hc else None,
# Double the last character of the host portion
lambda h, hc, dc: f'{hc}{hc[-1]}.{dc}' if hc else None,
]
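    # Worklist-driven walk of the zone: seed the queue with the apex, query each
    # permuted name, and let every NSEC answer reveal the next signed name.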
pending = {domain}
finished = set()
try:
while pending:
# Get the next pending hostname
hostname = pending.pop()
finished.add(hostname)
# Get all the records we can for the hostname
records.extend(lookup_next(hostname, res))
# Arrange the arguments for the transformations
fields = re.search(r'^(^[^.]*)\.(\S+\.\S*)$', hostname)
domain_portion = hostname
if fields and fields.group(2):
domain_portion = fields.group(2)
host_portion = ''
if fields and fields.group(1):
host_portion = fields.group(1)
params = [hostname, host_portion, domain_portion]
walk_filter = '.' + domain_portion
walk_filter_offset = len(walk_filter) + 1
for transformation in transformations:
# Apply the transformation
target = transformation(*params)
if not target:
continue
# Perform a DNS query for the target and process the response
if not nameserver:
response = get_a_answer(res, target, res._res.nameservers[0], timeout)
else:
response = get_a_answer(res, target, nameserver, timeout)
for a in response.authority:
                    if a.rdtype != 47:  # only NSEC (rdatatype 47) records matter here
continue
# NSEC records give two results:
# 1) The previous existing hostname that is signed
# 2) The subsequent existing hostname that is signed
# Add the latter to our list of pending hostnames
for r in a:
# As an optimization Cloudflare (and perhaps others)
# return '\000.' instead of NODATA when a record doesn't
# exist. Detect this and avoid becoming tarpitted while
# permuting the namespace.
if r.next.to_text()[:5] == '\\000.':
continue
# Avoid walking outside of the target domain. This
# happens with certain misconfigured domains.
if r.next.to_text()[-walk_filter_offset:-1] == walk_filter:
pending.add(r.next.to_text()[:-1])
# Ensure nothing pending has already been queried
pending -= finished
except KeyboardInterrupt:
logger.error('You have pressed Ctrl + C. Saving found records.')
except dns.exception.Timeout:
        logger.error('A timeout error occurred while performing the zone walk. Please make')
        logger.error('sure you can reach the target DNS servers directly and requests')
        logger.error('are not being filtered. Increase the timeout to a higher number')
        logger.error('with the --lifetime <time> option.')
except EOFError:
        logger.error(f'SOA nameserver {nameserver} failed to answer the DNSSEC query for {target}')
    except socket.error:
        logger.error(f'SOA nameserver {nameserver} failed to answer the DNSSEC query for {domain}')
# Give a summary of the walk
if len(records) > 0:
logger.info(f'{len(records)} records found')
else:
logger.error('Zone could not be walked')
return records
def main():
#
# Option Variables
#
output_file = None
xfr = None
bing = False
yandex = False
spf_enum = False
do_whois = False
do_crt = False
# By default, thread_num will be None
thread_num = None
results_db = None
zonewalk = False
csv_file = None
json_file = None
wildcard_filter = False
verbose = False
ignore_wildcardrr = False
# Initialize ns_server as an empty list
ns_server = []
#
# Define options
#
parser = ArgumentParser(formatter_class=RawTextHelpFormatter)
try:
parser.add_argument('-d', '--domain', type=str, dest='domain', help='Target domain.')
parser.add_argument(
'-iL',
'--input-list',
type=str,
dest='input_list',
help='File containing a list of domains to perform DNS enumeration on, one per line.',
)
parser.add_argument(
'-n',
'--name_server',
type=str,
dest='ns_server',
help='Domain server to use. If none is given, the SOA of the target will be used. Multiple servers can be specified using a comma separated list.',
)
parser.add_argument(
'-r',
'--range',
type=str,
dest='range',
help='IP range for reverse lookup brute force in formats (first-last) or in (range/bitmask).',
)
parser.add_argument(
'-D',
'--dictionary',
type=str,
dest='dictionary',
help='Dictionary file of subdomain and hostnames to use for brute force.',
)
parser.add_argument(
'-f',
help='Filter out of brute force domain lookup, records that resolve to the wildcard defined IP address when saving records.',
action='store_true',
)
parser.add_argument('-a', help='Perform AXFR with standard enumeration.', action='store_true')
parser.add_argument(
'-s',
help='Perform a reverse lookup of IPv4 ranges in the SPF record with standard enumeration.',
action='store_true',
)
parser.add_argument(
'-b',
help='Perform Bing enumeration with standard enumeration.',
action='store_true',
)
parser.add_argument(
'-y',
help='Perform Yandex enumeration with standard enumeration.',
action='store_true',
)
parser.add_argument(
'-k',
help='Perform crt.sh enumeration with standard enumeration.',
action='store_true',
)
parser.add_argument(
'-w',
help='Perform deep whois record analysis and reverse lookup of IP ranges found through Whois when doing a standard enumeration.',
action='store_true',
)
parser.add_argument(
'-z',
help='Performs a DNSSEC zone walk with standard enumeration.',
action='store_true',
)
parser.add_argument(
'--threads',
type=int,
dest='threads',
help='Number of threads to use in reverse lookups, forward lookups, brute force and SRV record enumeration.',
)
parser.add_argument(
'--lifetime',
type=float,
dest='lifetime',
default=3.0,
help='Time to wait for a server to respond to a query. default is 3.0',
)
parser.add_argument(
'--loglevel',
type=str,
choices=['DEBUG', 'INFO', 'WARNING', 'ERROR', 'CRITICAL'],
default='INFO',
help='Log level to use. default is INFO',
)
parser.add_argument(
'--tcp',
dest='tcp',
help='Use TCP protocol to make queries.',
action='store_true',
)
parser.add_argument('--db', type=str, dest='db', help='SQLite 3 file to save found records.')
parser.add_argument('-x', '--xml', type=str, dest='xml', help='XML file to save found records.')
parser.add_argument(
'-c',
'--csv',
type=str,
dest='csv',
help='Save output to a comma separated value file.',
)
        parser.add_argument('-j', '--json', type=str, dest='json', help='Save output to a JSON file.')
parser.add_argument(
'--iw',
help='Continue brute forcing a domain even if a wildcard record is discovered.',
action='store_true',
)
parser.add_argument(
'--disable_check_nxdomain', help='Disables check for NXDOMAIN hijacking on name servers.', action='store_true'
)
parser.add_argument(
'--disable_check_recursion',
help='Disables check for recursion on name servers',
action='store_true',
)
parser.add_argument(
'--disable_check_bindversion',
help='Disables check for BIND version on name servers',
action='store_true',
)
parser.add_argument('-V', '--version', help='DNSrecon version', action='store_true')
parser.add_argument('-v', '--verbose', help='Enable verbosity', action='store_true')
parser.add_argument(
'-t',
'--type',
type=str,
dest='type',
help="""Type of enumeration to perform.
Possible types:
std: SOA, NS, A, AAAA, MX and SRV.
rvl: Reverse lookup of a given CIDR or IP range.
brt: Brute force domains and hosts using a given dictionary.
srv: SRV records.
axfr: Test all NS servers for a zone transfer.
bing: Perform Bing search for subdomains and hosts.
yand: Perform Yandex search for subdomains and hosts.
crt: Perform crt.sh search for subdomains and hosts.
snoop: Perform cache snooping against all NS servers for a given domain, testing
all with file containing the domains, file given with -D option.
tld: Remove the TLD of given domain and test against all TLDs registered in IANA.
zonewalk: Perform a DNSSEC zone walk using NSEC records.""",
)
arguments = parser.parse_args()
logger.remove()
logger.add(sys.stderr, format='{time} {level} {message}', level=arguments.loglevel)
logger.add('~/.config/dnsrecon/dnsrecon.log', rotation='100 MB', compression='tar.gz')
except SystemExit:
# Handle exit() from passing --help
raise
except ArgumentError as e:
logger.error(f'Wrong Option Provided!: {e}')
parser.print_help()
sys.exit(1)
# Ensure that both --domain and --input-list are not used simultaneously
if arguments.domain and arguments.input_list:
logger.error('Cannot specify both --domain and --input-list options simultaneously.')
sys.exit(1)
# if no arguments have been provided,
# we exit and print program usage
    if len(sys.argv) <= 1:
parser.print_usage()
sys.exit(0)
# a "map" that specifies if a type of scan needs
# the domain and the dictionary
type_map = {
'axfr': {'domain': True, 'dictionary': False},
'std': {'domain': True, 'dictionary': False},
'srv': {'domain': True, 'dictionary': False},
'tld': {'domain': True, 'dictionary': False},
'bing': {'domain': True, 'dictionary': False},
'yand': {'domain': True, 'dictionary': False},
'crt': {'domain': True, 'dictionary': False},
'rvl': {'domain': False, 'dictionary': False},
'zonewalk': {'domain': True, 'dictionary': False},
'brt': {'domain': True, 'dictionary': True},
'snoop': {'domain': False, 'dictionary': True},
}
valid_types = type_map.keys()
#
# Parse options
#
# if user requests tool version, we print it and exit
if arguments.version:
print(f'DNSRecon version {__version__} https://www.darkoperator.com')
sys.exit(0)
# validating type param which is in the form: type1,type2,...,typeN
# if the pattern is not correct or if there is an unknown type we exit
type_arg = arguments.type
types = []
if type_arg:
type_arg = type_arg.lower().strip()
# we create a dynamic regex specifying min and max type length
# and max number of possible scan types
min_type_len = len(min(valid_types, key=len))
max_type_len = len(max(valid_types, key=len))
type_len = len(valid_types)
dynamic_regex = f'^([a-z]{{{min_type_len},{max_type_len}}},?){{,{type_len}}}$'
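        # e.g. with types 3-8 characters long and 11 valid types this becomes
        # '^([a-z]{3,8},?){,11}$', which accepts strings such as 'std,brt,srv'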
type_match = re.match(dynamic_regex, type_arg)
if not type_match:
logger.error('This type of scan is not valid')
sys.exit(1)
incorrect_types = [t for t in type_arg.split(',') if t not in valid_types]
if incorrect_types:
incorrect_types_str = ','.join(incorrect_types)
logger.error(f'This type of scan is not in the list: {incorrect_types_str}')
sys.exit(1)
types = list(set(type_arg.split(',')))
# validating range
rvl_ip_list = []
if arguments.range:
rvl_ip_list = process_range(arguments.range)
# if the provided range is not valid, we exit
if not rvl_ip_list:
logger.error('Invalid Address/CIDR or Address Range provided.')
sys.exit(1)
# otherwise, we update a type list
if 'rvl' not in types:
types.append('rvl')
# Read list of domains if input_list is provided
domain_list = []
if arguments.input_list:
if not os.path.isfile(arguments.input_list):
logger.error(f"Input list file '{arguments.input_list}' does not exist.")
sys.exit(1)
with open(arguments.input_list) as f:
domain_list = [line.strip() for line in f if line.strip()]
if not domain_list:
logger.error(f"No domains found in the input list file '{arguments.input_list}'.")
sys.exit(1)
elif arguments.domain:
domain_list = [arguments.domain]
else:
# If no domain or input list is provided, exit
logger.error('A domain name or an input list of domains is required.')
sys.exit(1)
# if types are empty but domain_list is not, default to 'std'
if not types and domain_list:
types = ['std']
# Check if the Dictionary file exists
dictionary_required = []
if types:
# combining the types and the type_map, we obtain
# dictionary_required, which is a list of bool
# where True means that a dictionary file is required
dictionary_required = [type_map[t]['dictionary'] for t in types]
else:
# Handle cases where types might not be defined
dictionary_required = [False]
dictionary = ''
if any(dictionary_required):
# we generate a list of possible dictionary files
dictionaries = ['/etc/dnsrecon/namelist.txt', str(DATA_DIR / 'namelist.txt')]
# if the user has provided a custom dictionary file,
# we insert it as the first entry of the list
if arguments.dictionary:
arguments.dictionary = arguments.dictionary.strip()
dictionaries.insert(0, arguments.dictionary)
else:
logger.info('No dictionary file has been specified.')
        # we pick the first valid dictionary file
        # among those in the list
for dict_ in dictionaries:
if os.path.isfile(dict_):
dictionary = dict_
break
# if we don't have a valid dictionary file, we exit
if not dictionary:
logger.error('No valid dictionary files have been specified or found within the tool')
sys.exit(1)
dict_type = 'user' if arguments.dictionary == dictionary else 'tool'
logger.info(f'Using the dictionary file: {dictionary} (provided by {dict_type})')
if arguments.threads:
thread_num = int(arguments.threads)
request_timeout = float(arguments.lifetime)
output_file = arguments.xml
results_db = arguments.db
csv_file = arguments.csv
json_file = arguments.json
    # this flag indicates whether the program needs to write output
do_output = bool(output_file or results_db or csv_file or json_file)
verbose = arguments.verbose
ignore_wildcardrr = arguments.iw
CONFIG['disable_check_recursion'] = arguments.disable_check_recursion
CONFIG['disable_check_bindversion'] = arguments.disable_check_bindversion
xfr = arguments.a
bing = arguments.b
yandex = arguments.y
do_crt = arguments.k
do_whois = arguments.w
zonewalk = arguments.z
spf_enum = arguments.s
wildcard_filter = arguments.f
proto = 'tcp' if arguments.tcp else 'udp'
# Initialize an empty list to hold all records
all_returned_records = []
# Iterate over each domain and perform enumeration
for domain in domain_list:
logger.info(f'Starting enumeration for domain: {domain}')
# Initialize the resolver for the current domain
res = DnsHelper(domain, ns_server, request_timeout, proto)
scan_info = [' '.join(sys.argv), str(datetime.datetime.now())]
for type_ in types:
# Check if the scan type requires a domain
if type_map[type_]['domain'] and not domain:
logger.error(f'{type_}: No Domain to target specified!')
sys.exit(1)
try:
# Perform the scan based on type_
if type_ == 'axfr':
zonercds = res.zone_transfer()
if not zonercds:
logger.error(f'{type_}: No records were returned.')
continue
all_returned_records.extend(zonercds)
elif type_ == 'std':
logger.info(f'{type_}: Performing General Enumeration against: {domain}...')
std_enum_records = general_enum(
res,
domain,
xfr,
bing,
yandex,
spf_enum,
do_whois,
do_crt,
zonewalk,
request_timeout,
thread_num=thread_num,
)
if do_output and std_enum_records:
all_returned_records.extend(std_enum_records)
elif type_ == 'rvl':
if not rvl_ip_list:
logger.error(f'{type_}: Invalid Address/CIDR or Address Range provided.')
continue
rvl_enum_records = brute_reverse(res, rvl_ip_list, verbose, thread_num=thread_num)
if do_output:
all_returned_records.extend(rvl_enum_records)
elif type_ == 'brt':
logger.info(f'{type_}: Performing host and subdomain brute force against {domain}...')
brt_enum_records = brute_domain(
res,
dictionary,
domain,
wildcard_filter,
verbose,
ignore_wildcardrr,
thread_num=thread_num,
)
if do_output and brt_enum_records:
all_returned_records.extend(brt_enum_records)
elif type_ == 'srv':
logger.info(f'{type_}: Enumerating Common SRV Records against {domain}...')
srv_enum_records = brute_srv(res, domain, verbose, thread_num=thread_num)
if do_output:
all_returned_records.extend(srv_enum_records)
elif type_ == 'tld':
logger.info(f'{type_}: Performing TLD Brute force Enumeration against {domain}...')
tld_enum_records = brute_tlds(res, domain, verbose, thread_num=thread_num)
if do_output:
all_returned_records.extend(tld_enum_records)
elif type_ == 'bing':
logger.info(f'{type_}: Performing Bing Search Enumeration against {domain}...')
bing_enum_records = se_result_process(res, scrape_bing(domain))
if bing_enum_records is not None and do_output:
all_returned_records.extend(bing_enum_records)
elif type_ == 'yand':
logger.info(f'{type_}: Performing Yandex Search Enumeration against {domain}...')
yandex_enum_records = se_result_process(res, scrape_yandex(domain))
if yandex_enum_records is not None and do_output:
all_returned_records.extend(yandex_enum_records)
elif type_ == 'crt':
logger.info(f'{type_}: Performing Crt.sh Search Enumeration against {domain}...')
crt_enum_records = se_result_process(res, scrape_crtsh(domain))
if crt_enum_records is not None and do_output:
all_returned_records.extend(crt_enum_records)
else:
                        logger.error('No records returned from crt.sh enumeration')
elif type_ == 'snoop':
if not (dictionary and ns_server):
logger.error(f'{type_}: A dictionary file and at least one Name Server have to be specified!')
continue
logger.info(f'{type_}: Performing Cache Snooping against NS Server: {ns_server[0]}...')
cache_enum_records = in_cache(res, dictionary, ns_server[0])
if do_output:
all_returned_records.extend(cache_enum_records)
elif type_ == 'zonewalk':
zonewalk_result = ds_zone_walk(res, domain, request_timeout)
if do_output:
all_returned_records.extend(zonewalk_result)
else:
logger.error(f'{type_}: This type of scan is not in the list.')
except dns.resolver.NXDOMAIN:
logger.error(f'Could not resolve domain: {domain}')
continue # Continue with the next domain
except dns.exception.Timeout:
logger.error(
f"""A timeout error occurred.
Please make sure you can reach the target DNS Servers directly and requests are not being filtered.
Increase the timeout from {request_timeout} seconds to a higher number with --lifetime <time> option."""
)
continue # Continue with the next domain
logger.info(f'Completed enumeration for domain: {domain}\n')
# After processing all domains, handle output
if do_output:
# XML Output
if output_file:
logger.info(f'Saving records to XML file: {output_file}')
xml_enum_doc = dns_record_from_dict(all_returned_records, scan_info, domain_list)
write_to_file(xml_enum_doc, output_file)
# SQLite DB Output
if results_db:
logger.info(f'Saving records to SQLite3 file: {results_db}')
create_db(results_db)
write_db(results_db, all_returned_records)
# CSV Output
if csv_file:
logger.info(f'Saving records to CSV file: {csv_file}')
write_to_file(make_csv(all_returned_records), csv_file)
# JSON Output
if json_file:
logger.info(f'Saving records to JSON file: {json_file}')
write_json(json_file, all_returned_records, scan_info)
sys.exit(0)
| 72,440 | Python | .py | 1,706 | 31 | 159 | 0.553845 | darkoperator/dnsrecon | 2,582 | 529 | 13 | GPL-2.0 | 9/5/2024, 5:10:01 PM (Europe/Amsterdam) |
6,633 | yandexenum.py | darkoperator_dnsrecon/dnsrecon/lib/yandexenum.py |
#!/usr/bin/env python3
# Copyright (C) 2020 Cristiano Maruti (twitter: @cmaruti)
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; Applies version 2 of the License.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
import re
import time
import urllib
import urllib.request
from loguru import logger
__name__ = 'yandexenum'
url_opener = urllib.request.FancyURLopener
class AppURLopener(url_opener):
version = """Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36
(KHTML, like Gecko) Chrome/42.0.2311.135 Safari/537.36 Edge/12.246"""
def scrape_yandex(dom):
"""
    Function for enumerating sub-domains and hosts by scraping Yandex.
"""
results = []
searches = ['1', '2', '3', '4', '5', '10', '20', '30']
urllib._urlopener = AppURLopener()
for _ in searches:
url = 'https://yandex.com/search/?text=site%3A' + dom
try:
sock = urllib.request.urlopen(url, timeout=10)
data = sock.read().decode('utf-8')
sock.close()
except Exception as e:
logger.error(e)
return []
if re.search('enter_captcha_value', data):
logger.error("Yandex has detected the search as 'bot activity, stopping search...")
return unique(results)
results.extend(re.findall(r'([a-zA-Z0-9\-\.]+' + dom + ')/?', data))
time.sleep(10)
return unique(results)
def unique(seq, idfun=repr):
"""
Function to remove duplicates in an array. Returns array with duplicates
removed.
"""
seen = {}
return [seen.setdefault(idfun(e), e) for e in seq if idfun(e) not in seen]
| 2,215 | Python | .py | 54 | 35.796296 | 95 | 0.664026 | darkoperator/dnsrecon | 2,582 | 529 | 13 | GPL-2.0 | 9/5/2024, 5:10:01 PM (Europe/Amsterdam) |
6,634 | crtenum.py | darkoperator_dnsrecon/dnsrecon/lib/crtenum.py |
# Copyright (C) 2010 Carlos Perez
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; Applies version 2 of the License.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
from urllib.error import HTTPError, URLError
from urllib.request import Request, urlopen
from loguru import logger
from lxml import etree
__name__ = 'crtenum'
def scrape_crtsh(dom):
"""
Function for enumerating subdomains by scraping crt.sh.
"""
results = []
headers = {
'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/129.0.0.0 Safari/537.3'
}
url = f'https://crt.sh/?q=%25.{dom}'
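    # '%25' is a URL-encoded '%', so the query sent to crt.sh is '%.<domain>',
    # a wildcard that matches certificates issued for any subdomain.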
req = Request(url=url, headers=headers)
try:
resp = urlopen(req, timeout=30)
data = resp.read()
except HTTPError as e:
logger.error(f'Bad http status from crt.sh: "{e.code}"')
return results
except URLError as e:
logger.error(f'Connection with crt.sh failed. Reason: "{e.reason}"')
return results
root = etree.HTML(data)
tbl = root.xpath('//table/tr/td/table/tr/td[5]')
if len(tbl) < 1:
logger.error('Certificates for subdomains not found')
return results
for ent in tbl:
sub_dom = ent.text
if not sub_dom.endswith('.' + dom):
continue
if sub_dom.startswith('*.'):
logger.info(f'\t {sub_dom} wildcard')
continue
if sub_dom not in results:
results.append(sub_dom)
return results
| 2,072 | Python | .py | 53 | 33.830189 | 134 | 0.667496 | darkoperator/dnsrecon | 2,582 | 529 | 13 | GPL-2.0 | 9/5/2024, 5:10:01 PM (Europe/Amsterdam) |
6,635 | dnshelper.py | darkoperator_dnsrecon/dnsrecon/lib/dnshelper.py |
#!/usr/bin/env python3
# Copyright (C) 2020 Carlos Perez
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; Applies version 2 of the License.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
import random
import socket
import dns.message
import dns.query
import dns.resolver
import dns.reversename
from dns.dnssec import algorithm_to_text
from dns.zone import *
from loguru import logger
DNS_PORT_NUMBER = 53
DNS_QUERY_TIMEOUT = 4.0
def strip_last_dot(addr_):
"""
Util function that strips the last dot from an address (if any)
"""
return addr_[:-1] if addr_.endswith('.') else addr_
class DnsHelper:
def __init__(self, domain, ns_server=None, request_timeout=3.0, proto='tcp'):
self._domain = domain
self._proto = proto
self._is_tcp = proto == 'tcp'
configure = not ns_server
self._res = dns.resolver.Resolver(configure=configure)
if ns_server:
if isinstance(ns_server, str):
ns_server = [ns_server]
self._res.nameservers = ns_server
if len(ns_server) > 1:
self._res.rotate = True
# Set timing
self._res.timeout = request_timeout
self._res.lifetime = request_timeout
def check_tcp_dns(self, address):
"""
Function to check if a server is listening at port 53 TCP. This will aid
in IDS/IPS detection since a AXFR will not be tried if port 53 is found to
be closed.
"""
        try:
            # A context manager guarantees the probe socket is closed again.
            with socket.socket() as sock:
                sock.settimeout(DNS_QUERY_TIMEOUT)
                sock.connect((address, DNS_PORT_NUMBER))
        except Exception:
            return False
        return True
def get_answers(self, type_, addr_):
"""
Function that wraps the resolve() function with all the specific
exceptions it could raise and the socket.error exception
https://dnspython.readthedocs.io/en/latest/resolver-class.html#dns.resolver.Resolver.resolve
"""
try:
return self._res.resolve(addr_, type_, tcp=self._is_tcp)
except (
OSError,
dns.exception.Timeout,
dns.resolver.NXDOMAIN,
dns.resolver.YXDOMAIN,
dns.resolver.NoAnswer,
dns.resolver.NoNameservers,
dns.name.EmptyLabel,
):
return None
def resolve(self, target, type_, ns=None):
"""
Function for performing general resolution types returning the RDATA
"""
configure = not ns
res = dns.resolver.Resolver(configure=configure)
if ns:
res.nameservers = [ns]
answers = res.query(target, type_, tcp=self._is_tcp)
return answers
def query(
self,
q,
where,
timeout=None,
port=53,
af=None,
source=None,
source_port=0,
one_rr_per_rrset=False,
):
if isinstance(where, list):
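            # When several name servers are supplied, pick one at random so
            # queries (and any rate limiting) are spread across them.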
random.shuffle(where)
target_server = where[0]
else:
target_server = where
if self._is_tcp:
return dns.query.tcp(
q,
target_server,
timeout,
port,
af,
source,
source_port,
one_rr_per_rrset,
)
else:
return dns.query.udp(
q,
target_server,
timeout,
port,
af,
source,
source_port,
False,
one_rr_per_rrset,
)
def get_a(self, host_trg):
"""
Function for resolving the A Record for a given host. Returns an Array of
the IP Address it resolves to. It will also return CNAME data.
"""
answers = self.get_answers('A', host_trg)
if not answers:
return []
result = []
for answer in answers.response.answer:
for rdata in answer:
if rdata.rdtype == 5:
target_ = strip_last_dot(rdata.target.to_text())
result.append(['CNAME', host_trg, target_])
host_trg = target_
else:
result.append(['A', host_trg, rdata.address])
return result
def get_aaaa(self, host_trg):
"""
Function for resolving the AAAA Record for a given host. Returns an Array of
the IP Address it resolves to. It will also return CNAME data.
"""
answers = self.get_answers('AAAA', host_trg)
if not answers:
return []
result = []
for answer in answers.response.answer:
for rdata in answer:
if rdata.rdtype == 5:
target_ = strip_last_dot(rdata.target.to_text())
result.append(['CNAME', host_trg, target_])
host_trg = target_
else:
result.append(['AAAA', host_trg, rdata.address])
return result
def get_ip(self, hostname):
"""
Function resolves a host name to its given A and/or AAAA record.
Returns Array of found hosts and IPv4 or IPv6 Address.
"""
found_ip_add = []
found_ip_add.extend(self.get_a(hostname))
found_ip_add.extend(self.get_aaaa(hostname))
return found_ip_add
def get_mx(self):
"""
Function for MX Record resolving. Returns all MX records. Returns also the IP
address of the host both in IPv4 and IPv6. Returns an Array
"""
answers = self.get_answers('MX', self._domain)
if not answers:
return []
answer_types = ['A', 'AAAA']
result = []
for answer_type in answer_types:
for answer in answers:
exchange_ = strip_last_dot(answer.exchange.to_text())
a_or_aaaa_answers = self.get_answers(answer_type, exchange_)
if not a_or_aaaa_answers:
continue
for a_or_aaaa_answer in a_or_aaaa_answers:
result.append(['MX', exchange_, a_or_aaaa_answer.address, answer.preference])
return result
def get_ns(self):
"""
Function for NS Record resolving. Returns all NS records. Returns also the IP
address of the host both in IPv4 and IPv6. Returns an Array.
"""
answers = self.get_answers('NS', self._domain)
if not answers:
return []
result = []
for answer in answers:
target_ = strip_last_dot(answer.target.to_text())
addresses = self.get_ip(target_)
for type_, name_, addr_ in addresses:
if type_ in ['A', 'AAAA']:
result.append(['NS', target_, addr_])
return result
def get_soa(self):
"""
Function for SOA Record resolving. Returns all SOA records. Returns also the IP
address of the host both in IPv4 and IPv6. Returns an Array.
"""
queryfunc = dns.query.tcp if self._is_tcp else dns.query.udp
try:
querymsg = dns.message.make_query(self._domain, dns.rdatatype.SOA)
response = queryfunc(querymsg, self._res.nameservers[0], self._res.timeout)
except (
OSError,
dns.exception.Timeout,
dns.resolver.NXDOMAIN,
dns.resolver.YXDOMAIN,
dns.resolver.NoAnswer,
dns.resolver.NoNameservers,
dns.query.BadResponse,
) as e:
logger.error(f'Exception "{e}" while resolving SOA record.')
logger.error(f'Error while resolving SOA while using {self._res.nameservers[0]} as nameserver.')
return []
        # consider both the authority and answer sections of the response
        sections = []
        if len(response.authority) > 0:
            sections.append(response.authority)
        if len(response.answer) > 0:
            sections.append(response.answer)
        if not sections:
            return []
result = []
record_types = ['A', 'AAAA']
for section in sections:
for record in section:
if not isinstance(record[0], dns.rdtypes.ANY.SOA.SOA):
continue
mname_ = strip_last_dot(record[0].mname.to_text())
for record_type in record_types:
a_or_aaaa_answers = self.get_answers(record_type, mname_)
if not a_or_aaaa_answers:
continue
for a_or_aaaa_answer in a_or_aaaa_answers:
result.append(['SOA', mname_, a_or_aaaa_answer.address])
return result
def get_spf(self):
"""
Function for SPF Record resolving returns the string with the SPF definition.
Prints the string for the SPF Record and Returns the string
"""
answers = self.get_answers('SPF', self._domain)
if not answers:
return []
result = []
for answer in answers:
strings_ = bytes.join(b'', answer.strings).decode('utf-8', errors='ignore')
result.append(['SPF', strings_])
return result
def get_txt(self, target=None):
"""
Function for TXT Record resolving returns the string.
"""
if target is None:
target = self._domain
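        # Besides the target itself, also query its _dmarc subdomain, where
        # DMARC policy TXT records are published.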
targets = [target, '_dmarc.' + target]
result = []
for target_ in targets:
answers = self.get_answers('TXT', target_)
if not answers:
continue
for answer in answers:
strings_ = bytes.join(b'', answer.strings).decode('utf-8', errors='ignore')
result.append(['TXT', target_, strings_])
return result
def get_ptr(self, ipaddress):
"""
        Function for resolving a PTR record given its IPv4 or IPv6 address.
"""
reversename_ = dns.reversename.from_address(ipaddress)
answers = self.get_answers('PTR', reversename_)
if not answers:
return []
result = []
for answer in answers:
target_ = strip_last_dot(answer.target.to_text())
result.append(['PTR', target_, ipaddress])
return result
def get_srv(self, host):
"""
Function for resolving SRV Records.
"""
answers = self.get_answers('SRV', host)
if not answers:
return []
result = []
for answer in answers:
target_ = strip_last_dot(answer.target.to_text())
a_or_aaaa_answers = self.get_ip(target_)
for type_, hostname_, addr_ in a_or_aaaa_answers:
if type_ in ['A', 'AAAA']:
result.append(
[
'SRV',
host,
target_,
addr_,
str(answer.port),
str(answer.weight),
]
)
return result
def get_nsec(self, host):
"""
Function for querying for a NSEC record and retrieving the rdata object.
This function is used mostly for performing a Zone Walk against a zone.
"""
return self.get_answers('NSEC', host)
def from_wire(self, xfr, zone_factory=Zone, relativize=True):
"""
        Method for turning the data returned from a DNS AXFR into RRsets. Unlike
        the method included with dnspython, this one does not perform a check_origin on the zone data.
"""
z = None
for r in xfr:
if z is None:
if relativize:
origin = r.origin
else:
origin = r.answer[0].name
rdclass = r.answer[0].rdclass
z = zone_factory(origin, rdclass, relativize=relativize)
for rrset in r.answer:
znode = z.nodes.get(rrset.name)
if not znode:
znode = z.node_factory()
z.nodes[rrset.name] = znode
zrds = znode.find_rdataset(rrset.rdclass, rrset.rdtype, rrset.covers, True)
zrds.update_ttl(rrset.ttl)
for rd in rrset:
try:
rd.choose_relativity(z.origin, relativize)
except AttributeError:
pass
zrds.add(rd)
return z
def zone_transfer(self):
"""
Function for testing for zone transfers for a given Domain, it will parse the
output by record type.
"""
        # if anyone reports a record not parsed, I will add it; the list is long.
        # I tried to include those I thought were the most common.
zone_records = []
ns_records = []
logger.info(f'Checking for Zone Transfer for {self._domain} name servers')
# Find SOA for Domain
logger.info('Resolving SOA Record')
try:
soa_srvs = self.get_soa()
for type_, name_, addr_ in soa_srvs:
logger.info(f'\t {type_} {name_} {addr_}')
ns_records.append(addr_)
except Exception:
logger.error('Could not obtain the domains SOA Record.')
return
# Find NS for Domain
logger.info('Resolving NS Records')
try:
ns_srvs = []
ns_srvs = self.get_ns()
logger.info('NS Servers found:')
for type_, name_, addr_ in ns_srvs:
logger.info(f'\t {type_} {name_} {addr_}')
ns_records.append(addr_)
except Exception as e:
logger.error(f'Could not Resolve NS Records: {e}')
# Remove duplicates
logger.info('Removing any duplicate NS server IP Addresses...')
ns_records = list(set(ns_records))
# Test each NS Server
for ns_srv in ns_records:
logger.info(' ')
logger.info(f'Trying NS server {ns_srv}')
if not self.check_tcp_dns(ns_srv):
logger.error(f'Zone Transfer Failed for {ns_srv}!')
logger.error('Port 53 TCP is being filtered')
zone_records.append({'type': 'info', 'zone_transfer': 'failed', 'ns_server': ns_srv})
continue
logger.info(f'{ns_srv} Has port 53 TCP Open')
try:
zone = self.from_wire(dns.query.xfr(ns_srv, self._domain))
logger.info('Zone Transfer was successful!!')
zone_records.append({'type': 'info', 'zone_transfer': 'success', 'ns_server': ns_srv})
for name, rdataset in zone.iterate_rdatasets(dns.rdatatype.SOA):
for rdata in rdataset:
mname = strip_last_dot(rdata.mname.to_text())
for type_, name_, addr_ in self.get_ip(mname):
if type_ in ['A', 'AAAA']:
logger.info(f'\t SOA {mname} {addr_}')
zone_records.append(
{
'zone_server': ns_srv,
'type': 'SOA',
'mname': mname,
'address': addr_,
}
)
for name, rdataset in zone.iterate_rdatasets(dns.rdatatype.NS):
for rdata in rdataset:
                        # Check if target is only the host name or a full FQDN.
                        # If only a hostname, we will append the domain name of the
                        # zone being transferred.
target = rdata.target.to_text()
if target.count('.') == 0:
target = target + '.' + self._domain
else:
target = strip_last_dot(target)
for type_, name_, addr_ in self.get_ip(target):
if type_ in ['A', 'AAAA']:
logger.info(f'\t NS {target} {addr_}')
zone_records.append(
{
'zone_server': ns_srv,
'type': 'NS',
'target': target,
'address': addr_,
}
)
for name, rdataset in zone.iterate_rdatasets(dns.rdatatype.TXT):
for rdata in rdataset:
s = '; '.join([string.decode() for string in rdata.strings])
logger.info(f'\t TXT {s}')
zone_records.append({'zone_server': ns_srv, 'type': 'TXT', 'strings': s})
for name, rdataset in zone.iterate_rdatasets(dns.rdatatype.SPF):
for rdata in rdataset:
s = '; '.join([string.decode() for string in rdata.strings])
logger.info(f'\t SPF {s}')
zone_records.append({'zone_server': ns_srv, 'type': 'SPF', 'strings': s})
for name, rdataset in zone.iterate_rdatasets(dns.rdatatype.PTR):
for rdata in rdataset:
target = rdata.target.to_text() + '.' + self._domain
for type_, name_, addr_ in self.get_ip(target):
if type_ in ['A', 'AAAA']:
logger.info(f'\t PTR {target} {addr_}')
zone_records.append(
{
'zone_server': ns_srv,
'type': 'PTR',
'name': target,
'address': addr_,
}
)
for name, rdataset in zone.iterate_rdatasets(dns.rdatatype.MX):
for rdata in rdataset:
exchange = strip_last_dot(rdata.exchange.to_text())
for type_, name_, addr_ in self.get_ip(exchange):
fqdn_ = str(name) + '.' + self._domain
if type_ in ['A', 'AAAA']:
logger.info(f'\t MX {fqdn_} {exchange} {addr_}')
zone_records.append(
{
'zone_server': ns_srv,
'type': 'MX',
'name': fqdn_,
'exchange': exchange,
'address': addr_,
}
)
for name, rdataset in zone.iterate_rdatasets(dns.rdatatype.AAAA):
fqdn_ = str(name) + '.' + self._domain
for rdata in rdataset:
logger.info(f'\t AAAA {fqdn_} {rdata.address}')
zone_records.append(
{
'zone_server': ns_srv,
'type': 'AAAA',
'name': fqdn_,
'address': rdata.address,
}
)
for name, rdataset in zone.iterate_rdatasets(dns.rdatatype.A):
fqdn_ = str(name) + '.' + self._domain
for rdata in rdataset:
logger.info(f'\t A {fqdn_} {rdata.address}')
zone_records.append(
{
'zone_server': ns_srv,
'type': 'A',
'name': fqdn_,
'address': rdata.address,
}
)
for name, rdataset in zone.iterate_rdatasets(dns.rdatatype.CNAME):
fqdn_ = str(name) + '.' + self._domain
for rdata in rdataset:
target = strip_last_dot(rdata.target.to_text())
for type_, name_, addr_ in self.get_ip(target):
if type_ in ['A', 'AAAA']:
logger.info(f'\t CNAME {fqdn_} {target} {addr_}')
zone_records.append(
{
'zone_server': ns_srv,
'type': 'CNAME',
'name': fqdn_,
'target': target,
'address': addr_,
}
)
for name, rdataset in zone.iterate_rdatasets(dns.rdatatype.SRV):
fqdn_ = str(name) + '.' + self._domain
for rdata in rdataset:
target = strip_last_dot(rdata.target.to_text())
weight_ = str(rdata.weight)
port_ = str(rdata.port)
ip_list = self.get_ip(rdata.target.to_text())
if not ip_list:
logger.info(f'\t SRV {fqdn_} {target} {port_} {weight_} no_ip')
zone_records.append(
{
'zone_server': ns_srv,
'type': 'SRV',
'name': fqdn_,
'target': target,
'address': 'no_ip',
'port': port_,
'weight': weight_,
}
)
continue
for type_, name_, addr_ in ip_list:
if type_ in ['A', 'AAAA']:
logger.info(f'\t SRV {fqdn_} {target} {port_} {weight_} {addr_}')
zone_records.append(
{
'zone_server': ns_srv,
'type': 'SRV',
'name': fqdn_,
'target': target,
'address': addr_,
'port': port_,
'weight': weight_,
}
)
for name, rdataset in zone.iterate_rdatasets(dns.rdatatype.HINFO):
for rdata in rdataset:
cpu_ = rdata.cpu.decode()
os_ = rdata.os.decode()
logger.info(f'\t HINFO {cpu_} {os_}')
zone_records.append(
{
'zone_server': ns_srv,
'type': 'HINFO',
'cpu': cpu_,
'os': os_,
}
)
for name, rdataset in zone.iterate_rdatasets(dns.rdatatype.WKS):
for rdata in rdataset:
addr_ = rdata.address
bitmap_ = rdata.bitmap
proto_ = rdata.protocol
logger.info(f'\t WKS {addr_} {bitmap_} {proto_}')
zone_records.append(
{
'zone_server': ns_srv,
'type': 'WKS',
'address': addr_,
'bitmap': bitmap_,
'protocol': proto_,
}
)
for name, rdataset in zone.iterate_rdatasets(dns.rdatatype.RP):
for rdata in rdataset:
mbox_ = rdata.mbox.to_text()
txt_ = rdata.txt.to_text()
logger.info(f'\t RP {mbox_} {txt_}')
zone_records.append(
{
'zone_server': ns_srv,
'type': 'RP',
'mbox': mbox_,
'txt': txt_,
}
)
for name, rdataset in zone.iterate_rdatasets(dns.rdatatype.AFSDB):
for rdata in rdataset:
subtype_ = str(rdata.subtype)
hostname_ = rdata.hostname.to_text()
logger.info(f'\t AFSDB {subtype_} {hostname_}')
zone_records.append(
{
'zone_server': ns_srv,
'type': 'AFSDB',
'subtype': subtype_,
'hostname': hostname_,
}
)
for name, rdataset in zone.iterate_rdatasets(dns.rdatatype.LOC):
for rdata in rdataset:
coordinates_ = rdata.to_text()
logger.info(f'\t LOC {coordinates_}')
zone_records.append(
{
'zone_server': ns_srv,
'type': 'LOC',
'coordinates': coordinates_,
}
)
for name, rdataset in zone.iterate_rdatasets(dns.rdatatype.X25):
for rdata in rdataset:
addr_ = rdata.address
logger.info(f'\t X25 {addr_}')
zone_records.append({'zone_server': ns_srv, 'type': 'X25', 'address': addr_})
for name, rdataset in zone.iterate_rdatasets(dns.rdatatype.ISDN):
for rdata in rdataset:
addr_ = rdata.address
logger.info(f'\t ISDN {addr_}')
zone_records.append({'zone_server': ns_srv, 'type': 'ISDN', 'address': addr_})
                for name, rdataset in zone.iterate_rdatasets(dns.rdatatype.RT):
                    for rdata in rdataset:
                        # RT rdata only carries an exchange and a preference
                        exchange = strip_last_dot(rdata.exchange.to_text())
                        pref_ = str(rdata.preference)
                        logger.info(f'\t RT {exchange} {pref_}')
                        zone_records.append(
                            {
                                'zone_server': ns_srv,
                                'type': 'RT',
                                'exchange': exchange,
                                'preference': pref_,
                            }
                        )
for name, rdataset in zone.iterate_rdatasets(dns.rdatatype.NSAP):
for rdata in rdataset:
addr_ = rdata.address
logger.info(f'\t NSAP {addr_}')
zone_records.append({'zone_server': ns_srv, 'type': 'NSAP', 'address': addr_})
for name, rdataset in zone.iterate_rdatasets(dns.rdatatype.NAPTR):
for rdata in rdataset:
flags_ = rdata.flags.decode()
order_ = str(rdata.order)
pref_ = str(rdata.preference)
regexp_ = rdata.regexp.decode()
replacement_ = rdata.replacement.to_text()
service_ = rdata.service.decode()
logger.info(f'\t NAPTR {flags_} {order_} {pref_} {regexp_} {replacement_} {service_}')
zone_records.append(
{
'zone_server': ns_srv,
'type': 'NAPTR',
'order': order_,
'preference': pref_,
'regex': regexp_,
'replacement': replacement_,
'service': service_,
}
)
for name, rdataset in zone.iterate_rdatasets(dns.rdatatype.CERT):
for rdata in rdataset:
algo_ = algorithm_to_text(rdata.algorithm)
cert_ = rdata.certificate
cert_type_ = rdata.certificate_type
key_tag_ = rdata.key_tag
logger.info(f'\t CERT {rdata.to_text()}')
zone_records.append(
{
'zone_server': ns_srv,
'type': 'CERT',
'algorithm': algo_,
'certificate': cert_,
'certificate_type': cert_type_,
'key_tag': key_tag_,
}
)
for name, rdataset in zone.iterate_rdatasets(dns.rdatatype.SIG):
for rdata in rdataset:
algo_ = algorithm_to_text(rdata.algorithm)
expiration_ = rdata.expiration
                        inception_ = rdata.inception
key_tag_ = rdata.key_tag
labels_ = rdata.labels
original_ttl_ = rdata.original_ttl
signature_ = rdata.signature
signer_ = str(rdata.signer)
type_covered_ = rdata.type_covered
logger.info(
f'\t SIG {algo_} {expiration_} {inception_} {key_tag_} {labels_} {original_ttl_} {signature_} {signer_} {type_covered_}'
)
zone_records.append(
{
'zone_server': ns_srv,
'type': 'SIG',
'algorithm': algo_,
'expiration': expiration_,
'inception': inception_,
'key_tag': key_tag_,
'labels': labels_,
'original_ttl': original_ttl_,
'signature': signature_,
'signer': signer_,
'type_covered': type_covered_,
}
)
for name, rdataset in zone.iterate_rdatasets(dns.rdatatype.RRSIG):
for rdata in rdataset:
algo_ = algorithm_to_text(rdata.algorithm)
expiration_ = rdata.expiration
                        inception_ = rdata.inception
key_tag_ = rdata.key_tag
labels_ = rdata.labels
original_ttl_ = rdata.original_ttl
signature_ = rdata.signature
signer_ = str(rdata.signer)
type_covered_ = rdata.type_covered
logger.info(
f'\t RRSIG {algo_} {expiration_} {inception_} {key_tag_} {labels_} {original_ttl_} {signature_} {signer_} {type_covered_}'
)
zone_records.append(
{
'zone_server': ns_srv,
'type': 'RRSIG',
'algorithm': algo_,
'expiration': expiration_,
'inception': inception_,
'key_tag': key_tag_,
'labels': labels_,
'original_ttl': original_ttl_,
'signature': signature_,
'signer': signer_,
'type_covered': type_covered_,
}
)
for name, rdataset in zone.iterate_rdatasets(dns.rdatatype.DNSKEY):
for rdata in rdataset:
algo_ = algorithm_to_text(rdata.algorithm)
flags_ = rdata.flags
key_ = dns.rdata._hexify(rdata.key)
proto_ = rdata.protocol
logger.info(f'\t DNSKEY {algo_} {flags_} {key_} {proto_}')
zone_records.append(
{
'zone_server': ns_srv,
'type': 'DNSKEY',
'algorithm': algo_,
'flags': flags_,
'key': key_,
'protocol': proto_,
}
)
for name, rdataset in zone.iterate_rdatasets(dns.rdatatype.DS):
for rdata in rdataset:
algo_ = algorithm_to_text(rdata.algorithm)
digest_ = dns.rdata._hexify(rdata.digest)
digest_type_ = rdata.digest_type
key_tag_ = rdata.key_tag
logger.info(f'\t DS {algo_} {digest_} {digest_type_} {key_tag_}')
zone_records.append(
{
'zone_server': ns_srv,
'type': 'DS',
'algorithm': algo_,
'digest': digest_,
'digest_type': digest_type_,
'key_tag': key_tag_,
}
)
for name, rdataset in zone.iterate_rdatasets(dns.rdatatype.NSEC):
for rdata in rdataset:
next_ = rdata.next.to_text()
logger.info(f'\t NSEC {next_}')
zone_records.append({'zone_server': ns_srv, 'type': 'NSEC', 'next': next_})
for name, rdataset in zone.iterate_rdatasets(dns.rdatatype.NSEC3):
for rdata in rdataset:
algo_ = algorithm_to_text(rdata.algorithm)
flags_ = rdata.flags
iterations_ = rdata.iterations
salt_ = dns.rdata._hexify(rdata.salt)
logger.info(f'\t NSEC3 {algo_} {flags_} {iterations_} {salt_}')
zone_records.append(
{
'zone_server': ns_srv,
'type': 'NSEC3',
'algorithm': algo_,
'flags': flags_,
'iterations': iterations_,
'salt': salt_,
}
)
for name, rdataset in zone.iterate_rdatasets(dns.rdatatype.NSEC3PARAM):
for rdata in rdataset:
algo_ = algorithm_to_text(rdata.algorithm)
flags_ = rdata.flags
iterations_ = rdata.iterations
salt_ = dns.rdata._hexify(rdata.salt)
logger.info(f'\t NSEC3PARAM {algo_} {flags_} {iterations_} {salt_}')
zone_records.append(
{
'zone_server': ns_srv,
'type': 'NSEC3PARAM',
'algorithm': algo_,
'flags': flags_,
'iterations': iterations_,
'salt': salt_,
}
)
for name, rdataset in zone.iterate_rdatasets(dns.rdatatype.IPSECKEY):
for rdata in rdataset:
algo_ = algorithm_to_text(rdata.algorithm)
key_ = dns.rdata._hexify(rdata.key)
gw_ = rdata.gateway
gw_type_ = rdata.gateway_type
prec_ = rdata.precedence
logger.info(f'\t IPSECKEY {algo_} {gw_} {gw_type_} {key_} {prec_}')
zone_records.append(
{
'zone_server': ns_srv,
'type': 'IPSECKEY',
'algorithm': algo_,
'gateway': gw_,
'gateway_type': gw_type_,
'key': key_,
'precedence': prec_,
}
)
except Exception as e:
logger.error(f'Zone Transfer Failed ({e})')
zone_records.append({'type': 'info', 'zone_transfer': 'failed', 'ns_server': ns_srv})
return zone_records
| 38,944 | Python | .py | 833 | 26.409364 | 150 | 0.425977 | darkoperator/dnsrecon | 2,582 | 529 | 13 | GPL-2.0 | 9/5/2024, 5:10:01 PM (Europe/Amsterdam) |
6,636 | bingenum.py | darkoperator_dnsrecon/dnsrecon/lib/bingenum.py |
# Copyright (C) 2017 Cristiano Maruti (twitter: @cmaruti)
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; Applies version 2 of the License.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
import re
import time
import urllib.request
__name__ = 'bingenum'
url_opener = urllib.request.FancyURLopener
class AppURLopener(url_opener):
version = 'Mozilla/5.0 (compatible; bingbot/2.0; +http://www.bing.com/bingbot.htm)'
def scrape_bing(dom):
"""
Function for enumerating subdomains and hosts by scraping Bing.
"""
results = []
searches = [
'10',
'20',
'30',
'40',
'50',
'60',
'70',
'80',
'90',
'100',
'110',
'120',
'130',
'140',
'150',
]
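    # Each entry is passed as Bing's 'first' parameter, i.e. the offset of the
    # first result, paging through successive pages of ten results.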
urllib._urlopener = AppURLopener()
for n in searches:
url = 'https://www.bing.com/search?q=domain%3A' + dom + '&qs=n&first=' + n
req = urllib.request.Request(
url,
data=None,
headers={
'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_9_3) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/35.0.1916.47 Safari/537.36'
},
)
sock = urllib.request.urlopen(req, timeout=10)
data = sock.read().decode('utf-8')
results.extend(re.findall(r'([a-zA-Z0-9\-.]+' + dom + ')/?', data))
sock.close()
time.sleep(5)
return unique(results)
def unique(seq, idfun=repr):
"""
Function to remove duplicates in an array. Returns array with duplicates
removed.
"""
seen = {}
return [seen.setdefault(idfun(e), e) for e in seq if idfun(e) not in seen]
| 2,251 | Python | .py | 66 | 28.19697 | 151 | 0.620341 | darkoperator/dnsrecon | 2,582 | 529 | 13 | GPL-2.0 | 9/5/2024, 5:10:01 PM (Europe/Amsterdam) |
6,637 | whois.py | darkoperator_dnsrecon/dnsrecon/lib/whois.py |
# Copyright (C) 2010 Carlos Perez
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; Applies version 2 of the License.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
import re
import socket
from netaddr import *
__name__ = 'whois.py'
WHOIS_PORT_NUMBER = 43
WHOIS_RECEIVE_BUFFER_SIZE = 4096
def get_whois(ip_addrs):
"""
    Function that returns which whois server should be queried for
    registration information. Returns whois.arin.net if the IP is not in the
    database, and None if the address is private.
"""
whois_server = None
ip = IPAddress(ip_addrs)
info_of_ip = ip.info
if ip.version == 4 and ip.is_private() is False:
for i in info_of_ip['IPv4']:
whois_server = i['whois']
if len(whois_server) == 0 and i['status'] != 'Reserved':
whois_server = 'whois.arin.net'
elif len(whois_server) == 0:
whois_server = None
return whois_server
def whois(target, whois_srv):
"""
    Performs a whois query against a whois server for a given IP, domain or
    host string and returns the answer of the query.
"""
response = ''
counter = 1
try:
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.connect((whois_srv, WHOIS_PORT_NUMBER))
if whois_srv == 'whois.arin.net':
s.send(('n ' + target + '\r\n').encode('utf-8'))
else:
s.send((target + '\r\n').encode('utf-8'))
response = ''
        while True:
            d = s.recv(WHOIS_RECEIVE_BUFFER_SIZE)
            # recv() returns bytes; decode before appending and stop once the
            # server closes the connection (empty read) or after 5 reads.
            response += d.decode('utf-8', errors='ignore')
            counter += 1
            if not d or counter == 5:
                break
s.close()
except Exception as e:
print(e)
return response
def get_whois_nets(data):
"""
Parses whois data and extracts the Network Ranges returning an array of lists
where each list has the starting and ending IP of the found range.
"""
pattern = r'([0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}) - ([0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3})'
results = re.findall(pattern, data)
return results
def get_whois_orgname(data):
org_pattern = r'OrgName\:\s*(.*)\n'
result = re.findall(org_pattern, data)
# Let's try RIPENET Format
if not result:
org_pattern = r'netname\:\s*(.*)\n'
result = re.findall(org_pattern, data)
if not result:
result.append('Not Found')
return result
| 3,006 | Python | .py | 80 | 31.625 | 116 | 0.626804 | darkoperator/dnsrecon | 2,582 | 529 | 13 | GPL-2.0 | 9/5/2024, 5:10:01 PM (Europe/Amsterdam) |
6,638 | tlds.py | darkoperator_dnsrecon/dnsrecon/lib/tlds.py |
class TLDS:
@staticmethod
def generic_tlds():
gtld_extensions = [
'aaa',
'aarp',
'abarth',
'abb',
'abbott',
'abbvie',
'abc',
'able',
'abogado',
'abudhabi',
'academy',
'accenture',
'accountant',
'accountants',
'aco',
'active',
'actor',
'adac',
'ads',
'adult',
'aeg',
'aetna',
'afamilycompany',
'afl',
'africa',
'agakhan',
'agency',
'aig',
'aigo',
'airbus',
'airforce',
'airtel',
'akdn',
'alfaromeo',
'alibaba',
'alipay',
'allfinanz',
'allstate',
'ally',
'alsace',
'alstom',
'amazon',
'americanexpress',
'americanfamily',
'amex',
'amfam',
'amica',
'amsterdam',
'analytics',
'android',
'anquan',
'anz',
'aol',
'apartments',
'app',
'apple',
'aquarelle',
'arab',
'aramco',
'archi',
'army',
'art',
'arte',
'asda',
'associates',
'athleta',
'attorney',
'auction',
'audi',
'audible',
'audio',
'auspost',
'author',
'auto',
'autos',
'avianca',
'aws',
'axa',
'azure',
'baby',
'baidu',
'banamex',
'bananarepublic',
'band',
'bank',
'bar',
'barcelona',
'barclaycard',
'barclays',
'barefoot',
'bargains',
'baseball',
'basketball',
'bauhaus',
'bayern',
'bbc',
'bbt',
'bbva',
'bcg',
'bcn',
'beats',
'beauty',
'beer',
'bentley',
'berlin',
'best',
'bestbuy',
'bet',
'bharti',
'bible',
'bid',
'bike',
'bing',
'bingo',
'bio',
'black',
'blackfriday',
'blanco',
'blockbuster',
'blog',
'bloomberg',
'blue',
'bms',
'bmw',
'bnl',
'bnpparibas',
'boats',
'boehringer',
'bofa',
'bom',
'bond',
'boo',
'book',
'booking',
'boots',
'bosch',
'bostik',
'boston',
'bot',
'boutique',
'box',
'bradesco',
'bridgestone',
'broadway',
'broker',
'brother',
'brussels',
'budapest',
'bugatti',
'build',
'builders',
'business',
'buy',
'buzz',
'bzh',
'cab',
'cafe',
'cal',
'call',
'calvinklein',
'cam',
'camera',
'camp',
'cancerresearch',
'canon',
'capetown',
'capital',
'capitalone',
'car',
'caravan',
'cards',
'care',
'career',
'careers',
'cars',
'cartier',
'casa',
'case',
'caseih',
'cash',
'casino',
'catering',
'catholic',
'cba',
'cbn',
'cbre',
'cbs',
'ceb',
'center',
'ceo',
'cern',
'cfa',
'cfd',
'chanel',
'channel',
'charity',
'chase',
'chat',
'cheap',
'chintai',
'chloe',
'christmas',
'chrome',
'chrysler',
'church',
'cipriani',
'circle',
'cisco',
'citadel',
'citi',
'citic',
'city',
'cityeats',
'claims',
'cleaning',
'click',
'clinic',
'clinique',
'clothing',
'cloud',
'club',
'clubmed',
'coach',
'codes',
'coffee',
'college',
'cologne',
'com',
'comcast',
'commbank',
'community',
'company',
'compare',
'computer',
'comsec',
'condos',
'construction',
'consulting',
'contact',
'contractors',
'cooking',
'cookingchannel',
'cool',
'corsica',
'country',
'coupon',
'coupons',
'courses',
'cpa',
'credit',
'creditcard',
'creditunion',
'cricket',
'crown',
'crs',
'cruise',
'cruises',
'csc',
'cuisinella',
'cymru',
'cyou',
'dabur',
'dad',
'dance',
'data',
'date',
'dating',
'datsun',
'day',
'dclk',
'dds',
'deal',
'dealer',
'deals',
'degree',
'delivery',
'dell',
'deloitte',
'delta',
'democrat',
'dental',
'dentist',
'desi',
'design',
'dev',
'dhl',
'diamonds',
'diet',
'digital',
'direct',
'directory',
'discount',
'discover',
'dish',
'diy',
'dnp',
'docs',
'doctor',
'dodge',
'dog',
'doha',
'domains',
'doosan',
'dot',
'download',
'drive',
'dtv',
'dubai',
'duck',
'dunlop',
'duns',
'dupont',
'durban',
'dvag',
'dvr',
'earth',
'eat',
'eco',
'edeka',
'education',
'email',
'emerck',
'energy',
'engineer',
'engineering',
'enterprises',
'epost',
'epson',
'equipment',
'ericsson',
'erni',
'esq',
'estate',
'esurance',
'etisalat',
'eurovision',
'eus',
'events',
'everbank',
'exchange',
'expert',
'exposed',
'express',
'extraspace',
'fage',
'fail',
'fairwinds',
'faith',
'family',
'fan',
'fans',
'farm',
'farmers',
'fashion',
'fast',
'fedex',
'feedback',
'ferrari',
'ferrero',
'fiat',
'fidelity',
'fido',
'film',
'final',
'finance',
'financial',
'fire',
'firestone',
'firmdale',
'fish',
'fishing',
'fit',
'fitness',
'flickr',
'flights',
'flir',
'florist',
'flowers',
'flsmidth',
'fly',
'foo',
'food',
'foodnetwork',
'football',
'ford',
'forex',
'forsale',
'forum',
'foundation',
'fox',
'free',
'fresenius',
'frl',
'frogans',
'frontdoor',
'frontier',
'ftr',
'fujitsu',
'fujixerox',
'fun',
'fund',
'furniture',
'futbol',
'fyi',
'gal',
'gallery',
'gallo',
'gallup',
'game',
'games',
'gap',
'garden',
'gay',
'gbiz',
'gdn',
'gea',
'gent',
'genting',
'george',
'ggee',
'gift',
'gifts',
'gives',
'giving',
'glade',
'glass',
'gle',
'global',
'globo',
'gmail',
'gmbh',
'gmo',
'gmx',
'godaddy',
'gold',
'goldpoint',
'golf',
'goo',
'goodhands',
'goodyear',
'goog',
'google',
'gop',
'got',
'grainger',
'graphics',
'gratis',
'green',
'gripe',
'grocery',
'group',
'guardian',
'gucci',
'guge',
'guide',
'guitars',
'guru',
'hair',
'hamburg',
'hangout',
'haus',
'hbo',
'hdfc',
'hdfcbank',
'health',
'healthcare',
'help',
'helsinki',
'here',
'hermes',
'hgtv',
'hiphop',
'hisamitsu',
'hitachi',
'hiv',
'hkt',
'hockey',
'holdings',
'holiday',
'homedepot',
'homegoods',
'homes',
'homesense',
'honda',
'honeywell',
'horse',
'hospital',
'host',
'hosting',
'hot',
'hoteles',
'hotels',
'hotmail',
'house',
'how',
'hsbc',
'htc',
'hughes',
'hyatt',
'hyundai',
'ibm',
'icbc',
'ice',
'icu',
'ieee',
'ifm',
'iinet',
'ikano',
'imamat',
'imdb',
'immo',
'immobilien',
'inc',
'industries',
'infiniti',
'info',
'ing',
'ink',
'institute',
'insurance',
'insure',
'intel',
'international',
'intuit',
'investments',
'ipiranga',
'irish',
'iselect',
'ismaili',
'ist',
'istanbul',
'itau',
'itv',
'iveco',
'iwc',
'jaguar',
'java',
'jcb',
'jcp',
'jeep',
'jetzt',
'jewelry',
'jio',
'jlc',
'jll',
'jmp',
'jnj',
'joburg',
'jot',
'joy',
'jpmorgan',
'jprs',
'juegos',
'juniper',
'kaufen',
'kddi',
'kerryhotels',
'kerrylogistics',
'kerryproperties',
'kfh',
'kia',
'kids',
'kim',
'kinder',
'kindle',
'kitchen',
'kiwi',
'koeln',
'komatsu',
'kosher',
'kpmg',
'kpn',
'krd',
'kred',
'kuokgroup',
'kyoto',
'lacaixa',
'ladbrokes',
'lamborghini',
'lamer',
'lancaster',
'lancia',
'lancome',
'land',
'landrover',
'lanxess',
'lasalle',
'lat',
'latino',
'latrobe',
'law',
'lawyer',
'lds',
'lease',
'leclerc',
'lefrak',
'legal',
'lego',
'lexus',
'lgbt',
'liaison',
'lidl',
'life',
'lifeinsurance',
'lifestyle',
'lighting',
'like',
'lilly',
'limited',
'limo',
'lincoln',
'linde',
'link',
'lipsy',
'live',
'living',
'lixil',
'llc',
'llp',
'loan',
'loans',
'locker',
'locus',
'loft',
'lol',
'london',
'lotte',
'lotto',
'love',
'lpl',
'lplfinancial',
'ltd',
'ltda',
'lundbeck',
'lupin',
'luxe',
'luxury',
'macys',
'madrid',
'maif',
'maison',
'makeup',
'man',
'management',
'mango',
'map',
'market',
'marketing',
'markets',
'marriott',
'marshalls',
'maserati',
'mattel',
'mba',
'mcd',
'mcdonalds',
'mckinsey',
'med',
'media',
'meet',
'melbourne',
'meme',
'memorial',
'men',
'menu',
'meo',
'merckmsd',
'metlife',
'miami',
'microsoft',
'mini',
'mint',
'mit',
'mitsubishi',
'mlb',
'mls',
'mma',
'mobi',
'mobile',
'mobily',
'moda',
'moe',
'moi',
'mom',
'monash',
'money',
'monster',
'montblanc',
'mopar',
'mormon',
'mortgage',
'moscow',
'moto',
'motorcycles',
'mov',
'movie',
'movistar',
'msd',
'mtn',
'mtpc',
'mtr',
'music',
'mutual',
'mutuelle',
'nab',
'nadex',
'nagoya',
'nationwide',
'natura',
'navy',
'nba',
'nec',
'net',
'netbank',
'netflix',
'network',
'neustar',
'new',
'newholland',
'news',
'next',
'nextdirect',
'nexus',
'nfl',
'ngo',
'nhk',
'nico',
'nike',
'nikon',
'ninja',
'nissan',
'nissay',
'nokia',
'northwesternmutual',
'norton',
'now',
'nowruz',
'nowtv',
'nra',
'nrw',
'ntt',
'nyc',
'obi',
'observer',
'off',
'office',
'okinawa',
'olayan',
'olayangroup',
'oldnavy',
'ollo',
'omega',
'one',
'ong',
'onl',
'online',
'onyourside',
'ooo',
'open',
'oracle',
'orange',
'org',
'organic',
'orientexpress',
'origins',
'osaka',
'otsuka',
'ott',
'ovh',
'page',
'pamperedchef',
'panasonic',
'panerai',
'paris',
'pars',
'partners',
'parts',
'party',
'passagens',
'pay',
'pccw',
'pet',
'pfizer',
'pharmacy',
'phd',
'philips',
'phone',
'photo',
'photography',
'photos',
'physio',
'piaget',
'pics',
'pictet',
'pictures',
'pid',
'pin',
'ping',
'pink',
'pioneer',
'pizza',
'place',
'play',
'playstation',
'plumbing',
'plus',
'pnc',
'pohl',
'poker',
'politie',
'porn',
'pramerica',
'praxi',
'press',
'prime',
'prod',
'productions',
'prof',
'progressive',
'promo',
'properties',
'property',
'protection',
'pru',
'prudential',
'pub',
'pwc',
'qpon',
'quebec',
'quest',
'qvc',
'racing',
'radio',
'raid',
'read',
'realestate',
'realtor',
'realty',
'recipes',
'red',
'redstone',
'redumbrella',
'rehab',
'reise',
'reisen',
'reit',
'reliance',
'ren',
'rent',
'rentals',
'repair',
'report',
'republican',
'rest',
'restaurant',
'review',
'reviews',
'rexroth',
'rich',
'richardli',
'ricoh',
'rightathome',
'ril',
'rio',
'rip',
'rmit',
'rocher',
'rocks',
'rodeo',
'rogers',
'room',
'rsvp',
'rugby',
'ruhr',
'run',
'rwe',
'ryukyu',
'saarland',
'safe',
'safety',
'sakura',
'sale',
'salon',
'samsclub',
'samsung',
'sandvik',
'sandvikcoromant',
'sanofi',
'sap',
'sapo',
'sarl',
'sas',
'save',
'saxo',
'sbi',
'sbs',
'sca',
'scb',
'schaeffler',
'schmidt',
'scholarships',
'school',
'schule',
'schwarz',
'science',
'scjohnson',
'scor',
'scot',
'search',
'seat',
'secure',
'security',
'seek',
'select',
'sener',
'services',
'ses',
'seven',
'sew',
'sex',
'sexy',
'sfr',
'shangrila',
'sharp',
'shaw',
'shell',
'shia',
'shiksha',
'shoes',
'shop',
'shopping',
'shouji',
'show',
'showtime',
'shriram',
'silk',
'sina',
'singles',
'site',
'ski',
'skin',
'sky',
'skype',
'sling',
'smart',
'smile',
'sncf',
'soccer',
'social',
'softbank',
'software',
'sohu',
'solar',
'solutions',
'song',
'sony',
'soy',
'spa',
'space',
'spiegel',
'sport',
'spot',
'spreadbetting',
'srl',
'srt',
'stada',
'staples',
'star',
'starhub',
'statebank',
'statefarm',
'statoil',
'stc',
'stcgroup',
'stockholm',
'storage',
'store',
'stream',
'studio',
'study',
'style',
'sucks',
'supplies',
'supply',
'support',
'surf',
'surgery',
'suzuki',
'swatch',
'swiftcover',
'swiss',
'sydney',
'symantec',
'systems',
'tab',
'taipei',
'talk',
'taobao',
'target',
'tatamotors',
'tatar',
'tattoo',
'tax',
'taxi',
'tci',
'tdk',
'team',
'tech',
'technology',
'telecity',
'telefonica',
'temasek',
'tennis',
'teva',
'thd',
'theater',
'theatre',
'tiaa',
'tickets',
'tienda',
'tiffany',
'tips',
'tires',
'tirol',
'tjmaxx',
'tjx',
'tkmaxx',
'tmall',
'today',
'tokyo',
'tools',
'top',
'toray',
'toshiba',
'total',
'tours',
'town',
'toyota',
'toys',
'trade',
'trading',
'training',
'travelchannel',
'travelers',
'travelersinsurance',
'trust',
'trv',
'tube',
'tui',
'tunes',
'tushu',
'tvs',
'ubank',
'ubs',
'uconnect',
'unicom',
'university',
'uno',
'uol',
'ups',
'vacations',
'vana',
'vanguard',
'vegas',
'ventures',
'verisign',
'versicherung',
'vet',
'viajes',
'video',
'vig',
'viking',
'villas',
'vin',
'vip',
'virgin',
'visa',
'vision',
'vista',
'vistaprint',
'viva',
'vivo',
'vlaanderen',
'vodka',
'volkswagen',
'volvo',
'vote',
'voting',
'voto',
'voyage',
'vuelos',
'wales',
'walmart',
'walter',
'wang',
'wanggou',
'warman',
'watch',
'watches',
'weather',
'weatherchannel',
'webcam',
'weber',
'website',
'wed',
'wedding',
'weibo',
'weir',
'whoswho',
'wien',
'wiki',
'williamhill',
'win',
'windows',
'wine',
'winners',
'wme',
'wolterskluwer',
'woodside',
'work',
'works',
'world',
'wow',
'wtc',
'wtf',
'xbox',
'xerox',
'xfinity',
'xihuan',
'xin',
'xperia',
'xyz',
'yachts',
'yahoo',
'yamaxun',
'yandex',
'yodobashi',
'yoga',
'yokohama',
'you',
'youtube',
'yun',
'zappos',
'zara',
'zero',
'zip',
'zippo',
'zone',
'zuerich',
]
return gtld_extensions
@staticmethod
def country_codes():
country_extensions = [
'ac',
'ad',
'ae',
'af',
'ag',
'ai',
'al',
'am',
'an',
'ao',
'aq',
'ar',
'as',
'at',
'au',
'aw',
'ax',
'az',
'ba',
'bb',
'bd',
'be',
'bf',
'bg',
'bh',
'bi',
'bj',
'bl',
'bm',
'bn',
'bo',
'bq',
'br',
'bs',
'bt',
'bv',
'bw',
'by',
'bz',
'ca',
'cc',
'cd',
'cf',
'cg',
'ch',
'ci',
'ck',
'cl',
'cm',
'cn',
'co',
'cr',
'cu',
'cv',
'cw',
'cx',
'cy',
'cz',
'de',
'dj',
'dk',
'dm',
'do',
'dz',
'ec',
'ee',
'eg',
'eh',
'er',
'es',
'et',
'eu',
'fi',
'fj',
'fk',
'fm',
'fo',
'fr',
'ga',
'gb',
'gd',
'ge',
'gf',
'gg',
'gh',
'gi',
'gl',
'gm',
'gn',
'gp',
'gq',
'gr',
'gs',
'gt',
'gu',
'gw',
'gy',
'hk',
'hm',
'hn',
'hr',
'ht',
'hu',
'id',
'ie',
'il',
'im',
'in',
'io',
'iq',
'ir',
'is',
'it',
'je',
'jm',
'jo',
'jp',
'ke',
'kg',
'kh',
'ki',
'km',
'kn',
'kp',
'kr',
'kw',
'ky',
'kz',
'la',
'lb',
'lc',
'li',
'lk',
'lr',
'ls',
'lt',
'lu',
'lv',
'ly',
'ma',
'mc',
'md',
'me',
'mf',
'mg',
'mh',
'mk',
'ml',
'mm',
'mn',
'mo',
'mp',
'mq',
'mr',
'ms',
'mt',
'mu',
'mv',
'mw',
'mx',
'my',
'mz',
'na',
'nc',
'ne',
'nf',
'ng',
'ni',
'nl',
'no',
'np',
'nr',
'nu',
'nz',
'om',
'pa',
'pe',
'pf',
'pg',
'ph',
'pk',
'pl',
'pm',
'pn',
'pr',
'ps',
'pt',
'pw',
'py',
'qa',
're',
'ro',
'rs',
'ru',
'rw',
'sa',
'sb',
'sc',
'sd',
'se',
'sg',
'sh',
'si',
'sj',
'sk',
'sl',
'sm',
'sn',
'so',
'sr',
'ss',
'st',
'su',
'sv',
'sx',
'sy',
'sz',
'tc',
'td',
'tf',
'tg',
'th',
'tj',
'tk',
'tl',
'tm',
'tn',
'to',
'tp',
'tr',
'tt',
'tv',
'tw',
'tz',
'ua',
'ug',
'uk',
'um',
'us',
'uy',
'uz',
'va',
'vc',
've',
'vg',
'vi',
'vn',
'vu',
'wf',
'ws',
'ye',
'yt',
'za',
'zm',
'zw',
]
return country_extensions
@staticmethod
def sponsored_tlds():
sponsored_extensions = [
'aaa',
'aarp',
'abb',
'abbott',
'abbvie',
'abc',
'able',
'abogado',
'abudhabi',
'academy',
'accenture',
'accountant',
'accountants',
'aco',
'actor',
'ads',
'adult',
'aeg',
'aetna',
'afl',
'africa',
'agakhan',
'agency',
'aig',
'airbus',
'airforce',
'airtel',
'akdn',
'alibaba',
'alipay',
'allfinanz',
'allstate',
'ally',
'alsace',
'alstom',
'amazon',
'americanexpress',
'americanfamily',
'amex',
'amfam',
'amica',
'amsterdam',
'analytics',
'android',
'anquan',
'anz',
'aol',
'apartments',
'app',
'apple',
'aquarelle',
'arab',
'aramco',
'archi',
'army',
'art',
'arte',
'asda',
'associates',
'athleta',
'attorney',
'auction',
'audi',
'audible',
'audio',
'auspost',
'author',
'auto',
'autos',
'aws',
'axa',
'azure',
'baby',
'baidu',
'banamex',
'band',
'bank',
'bar',
'barcelona',
'barclaycard',
'barclays',
'barefoot',
'bargains',
'baseball',
'basketball',
'bauhaus',
'bayern',
'bbc',
'bbt',
'bbva',
'bcg',
'bcn',
'beats',
'beauty',
'beer',
'bentley',
'berlin',
'best',
'bestbuy',
'bet',
'bharti',
'bible',
'bid',
'bike',
'bing',
'bingo',
'bio',
'black',
'blackfriday',
'blockbuster',
'blog',
'bloomberg',
'blue',
'bms',
'bmw',
'bnpparibas',
'boats',
'boehringer',
'bofa',
'bom',
'bond',
'boo',
'book',
'booking',
'bosch',
'bostik',
'boston',
'bot',
'boutique',
'box',
'bradesco',
'bridgestone',
'broadway',
'broker',
'brother',
'brussels',
'build',
'builders',
'business',
'buy',
'buzz',
'bzh',
'cab',
'cafe',
'cal',
'call',
'calvinklein',
'cam',
'camera',
'camp',
'canon',
'capetown',
'capital',
'capitalone',
'car',
'caravan',
'cards',
'care',
'career',
'careers',
'cars',
'casa',
'case',
'cash',
'casino',
'catering',
'catholic',
'cba',
'cbn',
'cbre',
'center',
'ceo',
'cern',
'cfa',
'cfd',
'chanel',
'channel',
'charity',
'chase',
'chat',
'cheap',
'chintai',
'christmas',
'chrome',
'church',
'cipriani',
'circle',
'cisco',
'citadel',
'citi',
'citic',
'city',
'claims',
'cleaning',
'click',
'clinic',
'clinique',
'clothing',
'cloud',
'club',
'clubmed',
'coach',
'codes',
'coffee',
'college',
'cologne',
'com',
'commbank',
'community',
'company',
'compare',
'computer',
'comsec',
'condos',
'construction',
'consulting',
'contact',
'contractors',
'cooking',
'cool',
'corsica',
'country',
'coupon',
'coupons',
'courses',
'cpa',
'credit',
'creditcard',
'creditunion',
'cricket',
'crown',
'crs',
'cruise',
'cruises',
'cuisinella',
'cymru',
'cyou',
'dad',
'dance',
'data',
'date',
'dating',
'datsun',
'day',
'dclk',
'dds',
'deal',
'dealer',
'deals',
'degree',
'delivery',
'dell',
'deloitte',
'delta',
'democrat',
'dental',
'dentist',
'desi',
'design',
'dev',
'dhl',
'diamonds',
'diet',
'digital',
'direct',
'directory',
'discount',
'discover',
'dish',
'diy',
'dnp',
'docs',
'doctor',
'dog',
'domains',
'dot',
'download',
'drive',
'dtv',
'dubai',
'dunlop',
'dupont',
'durban',
'dvag',
'dvr',
'earth',
'eat',
'eco',
'edeka',
'education',
'email',
'emerck',
'energy',
'engineer',
'engineering',
'enterprises',
'epson',
'equipment',
'ericsson',
'erni',
'esq',
'estate',
'eurovision',
'eus',
'events',
'exchange',
'expert',
'exposed',
'express',
'extraspace',
'fage',
'fail',
'fairwinds',
'faith',
'family',
'fan',
'fans',
'farm',
'farmers',
'fashion',
'fast',
'fedex',
'feedback',
'ferrari',
'ferrero',
'fidelity',
'fido',
'film',
'final',
'finance',
'financial',
'fire',
'firestone',
'firmdale',
'fish',
'fishing',
'fit',
'fitness',
'flickr',
'flights',
'flir',
'florist',
'flowers',
'fly',
'foo',
'food',
'football',
'ford',
'forex',
'forsale',
'forum',
'foundation',
'fox',
'free',
'fresenius',
'frl',
'frogans',
'frontier',
'ftr',
'fujitsu',
'fun',
'fund',
'furniture',
'futbol',
'fyi',
'gal',
'gallery',
'gallo',
'gallup',
'game',
'games',
'gap',
'garden',
'gay',
'gbiz',
'gdn',
'gea',
'gent',
'genting',
'george',
'ggee',
'gift',
'gifts',
'gives',
'giving',
'glass',
'gle',
'global',
'globo',
'gmail',
'gmbh',
'gmo',
'gmx',
'godaddy',
'gold',
'goldpoint',
'golf',
'goo',
'goodyear',
'goog',
'google',
'gop',
'got',
'grainger',
'graphics',
'gratis',
'green',
'gripe',
'grocery',
'group',
'gucci',
'guge',
'guide',
'guitars',
'guru',
'hair',
'hamburg',
'hangout',
'haus',
'hbo',
'hdfc',
'hdfcbank',
'health',
'healthcare',
'help',
'helsinki',
'here',
'hermes',
'hiphop',
'hisamitsu',
'hitachi',
'hiv',
'hkt',
'hockey',
'holdings',
'holiday',
'homedepot',
'homegoods',
'homes',
'homesense',
'honda',
'horse',
'hospital',
'host',
'hosting',
'hot',
'hotels',
'hotmail',
'house',
'how',
'hsbc',
'hughes',
'hyatt',
'hyundai',
'ibm',
'icbc',
'ice',
'icu',
'ieee',
'ifm',
'ikano',
'imamat',
'imdb',
'immo',
'immobilien',
'inc',
'industries',
'infiniti',
'info',
'ing',
'ink',
'institute',
'insurance',
'insure',
'international',
'intuit',
'investments',
'ipiranga',
'irish',
'ismaili',
'ist',
'istanbul',
'itau',
'itv',
'jaguar',
'java',
'jcb',
'jeep',
'jetzt',
'jewelry',
'jio',
'jll',
'jmp',
'jnj',
'joburg',
'jot',
'joy',
'jpmorgan',
'jprs',
'juegos',
'juniper',
'kaufen',
'kddi',
'kerryhotels',
'kerrylogistics',
'kerryproperties',
'kfh',
'kia',
'kids',
'kim',
'kindle',
'kitchen',
'kiwi',
'koeln',
'komatsu',
'kosher',
'kpmg',
'kpn',
'krd',
'kred',
'kuokgroup',
'kyoto',
'lacaixa',
'lamborghini',
'lamer',
'lancaster',
'land',
'landrover',
'lanxess',
'lasalle',
'lat',
'latino',
'latrobe',
'law',
'lawyer',
'lds',
'lease',
'leclerc',
'lefrak',
'legal',
'lego',
'lexus',
'lgbt',
'lidl',
'life',
'lifeinsurance',
'lifestyle',
'lighting',
'like',
'lilly',
'limited',
'limo',
'lincoln',
'link',
'lipsy',
'live',
'living',
'llc',
'llp',
'loan',
'loans',
'locker',
'locus',
'lol',
'london',
'lotte',
'lotto',
'love',
'lpl',
'lplfinancial',
'ltd',
'ltda',
'lundbeck',
'luxe',
'luxury',
'madrid',
'maif',
'maison',
'makeup',
'man',
'management',
'mango',
'map',
'market',
'marketing',
'markets',
'marriott',
'marshalls',
'mattel',
'mba',
'mckinsey',
'med',
'media',
'meet',
'melbourne',
'meme',
'memorial',
'men',
'menu',
'merckmsd',
'miami',
'microsoft',
'mini',
'mint',
'mit',
'mitsubishi',
'mlb',
'mls',
'mma',
'mobi',
'mobile',
'moda',
'moe',
'moi',
'mom',
'monash',
'money',
'monster',
'mormon',
'mortgage',
'moscow',
'moto',
'motorcycles',
'mov',
'movie',
'msd',
'mtn',
'mtr',
'music',
'nab',
'nagoya',
'navy',
'nba',
'nec',
'net',
'netbank',
'netflix',
'network',
'neustar',
'new',
'news',
'next',
'nextdirect',
'nexus',
'nfl',
'ngo',
'nhk',
'nico',
'nike',
'nikon',
'ninja',
'nissan',
'nissay',
'nokia',
'norton',
'now',
'nowruz',
'nowtv',
'nra',
'nrw',
'ntt',
'nyc',
'obi',
'observer',
'office',
'okinawa',
'olayan',
'olayangroup',
'ollo',
'omega',
'one',
'ong',
'onl',
'online',
'ooo',
'open',
'oracle',
'orange',
'org',
'organic',
'origins',
'osaka',
'otsuka',
'ott',
'ovh',
'page',
'panasonic',
'paris',
'pars',
'partners',
'parts',
'party',
'pay',
'pccw',
'pet',
'pfizer',
'pharmacy',
'phd',
'philips',
'phone',
'photo',
'photography',
'photos',
'physio',
'pics',
'pictet',
'pictures',
'pid',
'pin',
'ping',
'pink',
'pioneer',
'pizza',
'place',
'play',
'playstation',
'plumbing',
'plus',
'pnc',
'pohl',
'poker',
'politie',
'porn',
'pramerica',
'praxi',
'press',
'prime',
'prod',
'productions',
'prof',
'progressive',
'promo',
'properties',
'property',
'protection',
'pru',
'prudential',
'pub',
'pwc',
'qpon',
'quebec',
'quest',
'racing',
'radio',
'read',
'realestate',
'realtor',
'realty',
'recipes',
'red',
'redstone',
'redumbrella',
'rehab',
'reise',
'reisen',
'reit',
'reliance',
'ren',
'rent',
'rentals',
'repair',
'report',
'republican',
'rest',
'restaurant',
'review',
'reviews',
'rexroth',
'rich',
'richardli',
'ricoh',
'ril',
'rio',
'rip',
'rocks',
'rodeo',
'rogers',
'room',
'rsvp',
'rugby',
'ruhr',
'run',
'rwe',
'ryukyu',
'saarland',
'safe',
'safety',
'sakura',
'sale',
'salon',
'samsclub',
'samsung',
'sandvik',
'sandvikcoromant',
'sanofi',
'sap',
'sarl',
'sas',
'save',
'saxo',
'sbi',
'sbs',
'scb',
'schaeffler',
'schmidt',
'scholarships',
'school',
'schule',
'schwarz',
'science',
'scot',
'search',
'seat',
'secure',
'security',
'seek',
'select',
'sener',
'services',
'seven',
'sew',
'sex',
'sexy',
'sfr',
'shangrila',
'sharp',
'shell',
'shia',
'shiksha',
'shoes',
'shop',
'shopping',
'shouji',
'show',
'silk',
'sina',
'singles',
'site',
'ski',
'skin',
'sky',
'skype',
'sling',
'smart',
'smile',
'sncf',
'soccer',
'social',
'softbank',
'software',
'sohu',
'solar',
'solutions',
'song',
'sony',
'soy',
'spa',
'space',
'sport',
'spot',
'srl',
'stada',
'staples',
'star',
'statebank',
'statefarm',
'stc',
'stcgroup',
'stockholm',
'storage',
'store',
'stream',
'studio',
'study',
'style',
'sucks',
'supplies',
'supply',
'support',
'surf',
'surgery',
'suzuki',
'swatch',
'swiss',
'sydney',
'systems',
'tab',
'taipei',
'talk',
'taobao',
'target',
'tatamotors',
'tatar',
'tattoo',
'tax',
'taxi',
'tci',
'tdk',
'team',
'tech',
'technology',
'temasek',
'tennis',
'teva',
'thd',
'theater',
'theatre',
'tiaa',
'tickets',
'tienda',
'tips',
'tires',
'tirol',
'tjmaxx',
'tjx',
'tkmaxx',
'tmall',
'today',
'tokyo',
'tools',
'top',
'toray',
'toshiba',
'total',
'tours',
'town',
'toyota',
'toys',
'trade',
'trading',
'training',
'travelers',
'travelersinsurance',
'trust',
'trv',
'tube',
'tui',
'tunes',
'tushu',
'tvs',
'ubank',
'ubs',
'unicom',
'university',
'uno',
'uol',
'ups',
'vacations',
'vana',
'vanguard',
'vegas',
'ventures',
'verisign',
'versicherung',
'vet',
'viajes',
'video',
'vig',
'viking',
'villas',
'vin',
'vip',
'virgin',
'visa',
'vision',
'viva',
'vivo',
'vlaanderen',
'vodka',
'volvo',
'vote',
'voting',
'voto',
'voyage',
'wales',
'walmart',
'walter',
'wang',
'wanggou',
'watch',
'watches',
'weather',
'weatherchannel',
'webcam',
'weber',
'website',
'wed',
'wedding',
'weibo',
'weir',
'whoswho',
'wien',
'wiki',
'williamhill',
'win',
'windows',
'wine',
'winners',
'wme',
'wolterskluwer',
'woodside',
'work',
'works',
'world',
'wow',
'wtc',
'wtf',
'xbox',
'xerox',
'xihuan',
'xin',
'xyz',
'yachts',
'yahoo',
'yamaxun',
'yandex',
'yodobashi',
'yoga',
'yokohama',
'you',
'youtube',
'yun',
'zappos',
'zara',
'zero',
'zip',
'zone',
'zuerich',
]
return sponsored_extensions
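# Illustrative usage only (not part of dnsrecon): since these static
# methods return plain lists, a caller can merge them into one candidate
# set for TLD expansion of a base name. "TldHelper" is a hypothetical
# stand-in for the enclosing class, whose name is not shown here.
#
#     candidates = sorted(set(TldHelper.country_codes() +
#                             TldHelper.sponsored_tlds()))
#     targets = ['example.%s' % tld for tld in candidates]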
| 52,077
|
Python
|
.py
| 2,447
| 8.320392
| 35
| 0.262352
|
darkoperator/dnsrecon
| 2,582
| 529
| 13
|
GPL-2.0
|
9/5/2024, 5:10:01 PM (Europe/Amsterdam)
|
6,639
|
websockify.py
|
novnc_websockify/websockify.py
|
#!/usr/bin/env sh
set -e
cd "$(dirname "$0")"
exec python3 -m websockify "$@"
| 78
|
Python
|
.py
| 4
| 18.5
| 31
| 0.635135
|
novnc/websockify
| 3,872
| 766
| 31
|
LGPL-3.0
|
9/5/2024, 5:10:09 PM (Europe/Amsterdam)
|
6,640
|
setup.py
|
novnc_websockify/setup.py
|
from setuptools import setup, find_packages
version = '0.12.0'
name = 'websockify'
long_description = open("README.md").read() + "\n" + \
open("CHANGES.txt").read() + "\n"
setup(name=name,
version=version,
description="Websockify.",
long_description=long_description,
long_description_content_type="text/markdown",
classifiers=[
"Programming Language :: Python",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3 :: Only",
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.7",
"Programming Language :: Python :: 3.8",
"Programming Language :: Python :: 3.9",
"Programming Language :: Python :: 3.10",
"Programming Language :: Python :: 3.11",
"Programming Language :: Python :: 3.12",
],
keywords='noVNC websockify',
license='LGPLv3',
url="https://github.com/novnc/websockify",
author="Joel Martin",
author_email="github@martintribe.org",
packages=['websockify'],
include_package_data=True,
install_requires=[
'numpy', 'requests',
'jwcrypto',
'redis',
],
zip_safe=False,
entry_points={
'console_scripts': [
'websockify = websockify.websocketproxy:websockify_init',
]
},
)
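# Note: the console_scripts entry point above is what provides a plain
# `websockify` command after installation; setuptools generates a wrapper
# executable that simply calls websockify.websocketproxy.websockify_init().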
| 1,401
|
Python
|
.py
| 41
| 26.463415
| 69
| 0.585114
|
novnc/websockify
| 3,872
| 766
| 31
|
LGPL-3.0
|
9/5/2024, 5:10:09 PM (Europe/Amsterdam)
|
6,641
|
test_websocket.py
|
novnc_websockify/tests/test_websocket.py
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright(c)2013 NTT corp. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
""" Unit tests for websocket """
import unittest
from websockify import websocket
class FakeSocket:
def __init__(self):
self.data = b''
def send(self, buf):
self.data += buf
return len(buf)
class AcceptTestCase(unittest.TestCase):
def test_success(self):
ws = websocket.WebSocket()
sock = FakeSocket()
ws.accept(sock, {'upgrade': 'websocket',
'Sec-WebSocket-Version': '13',
'Sec-WebSocket-Key': 'DKURYVK9cRFul1vOZVA56Q=='})
self.assertEqual(sock.data[:13], b'HTTP/1.1 101 ')
self.assertTrue(b'\r\nUpgrade: websocket\r\n' in sock.data)
self.assertTrue(b'\r\nConnection: Upgrade\r\n' in sock.data)
self.assertTrue(b'\r\nSec-WebSocket-Accept: pczpYSQsvE1vBpTQYjFQPcuoj6M=\r\n' in sock.data)
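# The expected Sec-WebSocket-Accept value follows the RFC 6455 derivation:
# base64(SHA-1(client key + fixed GUID)). A minimal sketch reproducing the
# value asserted above:
#
#     import base64, hashlib
#     GUID = '258EAFA5-E914-47DA-95CA-C5AB0DC85B11'
#     key = 'DKURYVK9cRFul1vOZVA56Q=='
#     accept = base64.b64encode(
#         hashlib.sha1((key + GUID).encode()).digest()).decode()
#     # accept == 'pczpYSQsvE1vBpTQYjFQPcuoj6M='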
def test_bad_version(self):
ws = websocket.WebSocket()
sock = FakeSocket()
self.assertRaises(Exception, ws.accept,
sock, {'upgrade': 'websocket',
'Sec-WebSocket-Key': 'DKURYVK9cRFul1vOZVA56Q=='})
self.assertRaises(Exception, ws.accept,
sock, {'upgrade': 'websocket',
'Sec-WebSocket-Version': '5',
'Sec-WebSocket-Key': 'DKURYVK9cRFul1vOZVA56Q=='})
self.assertRaises(Exception, ws.accept,
sock, {'upgrade': 'websocket',
'Sec-WebSocket-Version': '20',
'Sec-WebSocket-Key': 'DKURYVK9cRFul1vOZVA56Q=='})
def test_bad_upgrade(self):
ws = websocket.WebSocket()
sock = FakeSocket()
self.assertRaises(Exception, ws.accept,
sock, {'Sec-WebSocket-Version': '13',
'Sec-WebSocket-Key': 'DKURYVK9cRFul1vOZVA56Q=='})
self.assertRaises(Exception, ws.accept,
sock, {'upgrade': 'websocket2',
'Sec-WebSocket-Version': '13',
'Sec-WebSocket-Key': 'DKURYVK9cRFul1vOZVA56Q=='})
def test_missing_key(self):
ws = websocket.WebSocket()
sock = FakeSocket()
self.assertRaises(Exception, ws.accept,
sock, {'upgrade': 'websocket',
'Sec-WebSocket-Version': '13'})
def test_protocol(self):
class ProtoSocket(websocket.WebSocket):
def select_subprotocol(self, protocol):
return 'gazonk'
ws = ProtoSocket()
sock = FakeSocket()
ws.accept(sock, {'upgrade': 'websocket',
'Sec-WebSocket-Version': '13',
'Sec-WebSocket-Key': 'DKURYVK9cRFul1vOZVA56Q==',
'Sec-WebSocket-Protocol': 'foobar gazonk'})
self.assertEqual(sock.data[:13], b'HTTP/1.1 101 ')
self.assertTrue(b'\r\nSec-WebSocket-Protocol: gazonk\r\n' in sock.data)
def test_no_protocol(self):
ws = websocket.WebSocket()
sock = FakeSocket()
ws.accept(sock, {'upgrade': 'websocket',
'Sec-WebSocket-Version': '13',
'Sec-WebSocket-Key': 'DKURYVK9cRFul1vOZVA56Q=='})
self.assertEqual(sock.data[:13], b'HTTP/1.1 101 ')
self.assertFalse(b'\r\nSec-WebSocket-Protocol:' in sock.data)
def test_missing_protocol(self):
ws = websocket.WebSocket()
sock = FakeSocket()
self.assertRaises(Exception, ws.accept,
sock, {'upgrade': 'websocket',
'Sec-WebSocket-Version': '13',
'Sec-WebSocket-Key': 'DKURYVK9cRFul1vOZVA56Q==',
'Sec-WebSocket-Protocol': 'foobar gazonk'})
def test_unsupported_protocol(self):
class ProtoSocket(websocket.WebSocket):
def select_subprotocol(self, protocol):
return 'oddball'
ws = ProtoSocket()
sock = FakeSocket()
self.assertRaises(Exception, ws.accept,
sock, {'upgrade': 'websocket',
'Sec-WebSocket-Version': '13',
'Sec-WebSocket-Key': 'DKURYVK9cRFul1vOZVA56Q==',
'Sec-WebSocket-Protocol': 'foobar gazonk'})
class PingPongTest(unittest.TestCase):
def setUp(self):
self.ws = websocket.WebSocket()
self.sock = FakeSocket()
self.ws.accept(self.sock, {'upgrade': 'websocket',
'Sec-WebSocket-Version': '13',
'Sec-WebSocket-Key': 'DKURYVK9cRFul1vOZVA56Q=='})
self.assertEqual(self.sock.data[:13], b'HTTP/1.1 101 ')
self.sock.data = b''
def test_ping(self):
self.ws.ping()
self.assertEqual(self.sock.data, b'\x89\x00')
def test_pong(self):
self.ws.pong()
self.assertEqual(self.sock.data, b'\x8a\x00')
def test_ping_data(self):
self.ws.ping(b'foo')
self.assertEqual(self.sock.data, b'\x89\x03foo')
def test_pong_data(self):
self.ws.pong(b'foo')
self.assertEqual(self.sock.data, b'\x8a\x03foo')
class HyBiEncodeDecodeTestCase(unittest.TestCase):
def test_decode_hybi_text(self):
buf = b'\x81\x85\x37\xfa\x21\x3d\x7f\x9f\x4d\x51\x58'
ws = websocket.WebSocket()
res = ws._decode_hybi(buf)
self.assertEqual(res['fin'], 1)
self.assertEqual(res['opcode'], 0x1)
self.assertEqual(res['masked'], True)
self.assertEqual(res['length'], len(buf))
self.assertEqual(res['payload'], b'Hello')
def test_decode_hybi_binary(self):
buf = b'\x82\x04\x01\x02\x03\x04'
ws = websocket.WebSocket()
res = ws._decode_hybi(buf)
self.assertEqual(res['fin'], 1)
self.assertEqual(res['opcode'], 0x2)
self.assertEqual(res['length'], len(buf))
self.assertEqual(res['payload'], b'\x01\x02\x03\x04')
def test_decode_hybi_extended_16bit_binary(self):
data = (b'\x01\x02\x03\x04' * 65) # 260 bytes > 125, so the 16-bit extended length form is used
buf = b'\x82\x7e\x01\x04' + data
ws = websocket.WebSocket()
res = ws._decode_hybi(buf)
self.assertEqual(res['fin'], 1)
self.assertEqual(res['opcode'], 0x2)
self.assertEqual(res['length'], len(buf))
self.assertEqual(res['payload'], data)
def test_decode_hybi_extended_64bit_binary(self):
data = (b'\x01\x02\x03\x04' * 65) # still 260 bytes, but framed with the 64-bit extended length form
buf = b'\x82\x7f\x00\x00\x00\x00\x00\x00\x01\x04' + data
ws = websocket.WebSocket()
res = ws._decode_hybi(buf)
self.assertEqual(res['fin'], 1)
self.assertEqual(res['opcode'], 0x2)
self.assertEqual(res['length'], len(buf))
self.assertEqual(res['payload'], data)
def test_decode_hybi_multi(self):
buf1 = b'\x01\x03\x48\x65\x6c'
buf2 = b'\x80\x02\x6c\x6f'
ws = websocket.WebSocket()
res1 = ws._decode_hybi(buf1)
self.assertEqual(res1['fin'], 0)
self.assertEqual(res1['opcode'], 0x1)
self.assertEqual(res1['length'], len(buf1))
self.assertEqual(res1['payload'], b'Hel')
res2 = ws._decode_hybi(buf2)
self.assertEqual(res2['fin'], 1)
self.assertEqual(res2['opcode'], 0x0)
self.assertEqual(res2['length'], len(buf2))
self.assertEqual(res2['payload'], b'lo')
def test_encode_hybi_basic(self):
ws = websocket.WebSocket()
res = ws._encode_hybi(0x1, b'Hello')
expected = b'\x81\x05\x48\x65\x6c\x6c\x6f'
self.assertEqual(res, expected)
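# For reference, the RFC 6455 (HyBi) frame layout exercised above: byte 0
# carries FIN plus the opcode (0x81 = final text frame, 0x82 = final binary
# frame), byte 1 carries the mask bit plus a 7-bit length, where 126 means a
# 16-bit extended length follows and 127 means a 64-bit one. A minimal
# sketch of the unmasked short-frame encoder matching test_encode_hybi_basic:
#
#     import struct
#     def encode_short_frame(opcode, payload):
#         # valid only for unmasked payloads shorter than 126 bytes
#         return struct.pack('!BB', 0x80 | opcode, len(payload)) + payload
#     assert encode_short_frame(0x1, b'Hello') == b'\x81\x05Hello'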
| 8,440
|
Python
|
.py
| 179
| 35.374302
| 99
| 0.574259
|
novnc/websockify
| 3,872
| 766
| 31
|
LGPL-3.0
|
9/5/2024, 5:10:09 PM (Europe/Amsterdam)
|
6,642
|
echo_client.py
|
novnc_websockify/tests/echo_client.py
|
#!/usr/bin/env python
import os
import sys
import optparse
import select
sys.path.insert(0,os.path.join(os.path.dirname(__file__), ".."))
from websockify.websocket import WebSocket, \
WebSocketWantReadError, WebSocketWantWriteError
parser = optparse.OptionParser(usage="%prog URL")
(opts, args) = parser.parse_args()
if len(args) == 1:
URL = args[0]
else:
parser.error("Invalid arguments")
sock = WebSocket()
print("Connecting to %s..." % URL)
sock.connect(URL)
print("Connected.")
def send(msg):
while True:
try:
sock.sendmsg(msg)
break
except WebSocketWantReadError:
msg = ''
ins, outs, excepts = select.select([sock], [], [])
if excepts: raise Exception("Socket exception")
except WebSocketWantWriteError:
msg = ''
ins, outs, excepts = select.select([], [sock], [])
if excepts: raise Exception("Socket exception")
def read():
while True:
try:
return sock.recvmsg()
except WebSocketWantReadError:
ins, outs, excepts = select.select([sock], [], [])
if excepts: raise Exception("Socket exception")
except WebSocketWantWriteError:
ins, outs, excepts = select.select([], [sock], [])
if excepts: raise Exception("Socket exception")
counter = 1
while True:
msg = "Message #%d" % counter
counter += 1
send(msg)
print("Sent message: %r" % msg)
while True:
ins, outs, excepts = select.select([sock], [], [], 1.0)
if excepts: raise Exception("Socket exception")
if ins == []:
break
while True:
msg = read()
print("Received message: %r" % msg)
if not sock.pending():
break
| 1,816
|
Python
|
.py
| 57
| 24.684211
| 64
| 0.597023
|
novnc/websockify
| 3,872
| 766
| 31
|
LGPL-3.0
|
9/5/2024, 5:10:09 PM (Europe/Amsterdam)
|
6,643
|
test_websocketserver.py
|
novnc_websockify/tests/test_websocketserver.py
|
""" Unit tests for websocketserver """
import unittest
from unittest.mock import patch, MagicMock
from websockify.websocketserver import HttpWebSocket
class HttpWebSocketTest(unittest.TestCase):
@patch("websockify.websocketserver.WebSocket.__init__", autospec=True)
def test_constructor(self, websock):
# Given
req_obj = MagicMock()
# When
sock = HttpWebSocket(req_obj)
# Then
websock.assert_called_once_with(sock)
self.assertEqual(sock.request_handler, req_obj)
@patch("websockify.websocketserver.WebSocket.__init__", MagicMock(autospec=True))
def test_send_response(self):
# Given
req_obj = MagicMock()
sock = HttpWebSocket(req_obj)
# When
sock.send_response(200, "message")
# Then
req_obj.send_response.assert_called_once_with(200, "message")
@patch("websockify.websocketserver.WebSocket.__init__", MagicMock(autospec=True))
def test_send_response_default_message(self):
# Given
req_obj = MagicMock()
sock = HttpWebSocket(req_obj)
# When
sock.send_response(200)
# Then
req_obj.send_response.assert_called_once_with(200, None)
@patch("websockify.websocketserver.WebSocket.__init__", MagicMock(autospec=True))
def test_send_header(self):
# Given
req_obj = MagicMock()
sock = HttpWebSocket(req_obj)
# When
sock.send_header("keyword", "value")
# Then
req_obj.send_header.assert_called_once_with("keyword", "value")
@patch("websockify.websocketserver.WebSocket.__init__", MagicMock(autospec=True))
def test_end_headers(self):
# Given
req_obj = MagicMock()
sock = HttpWebSocket(req_obj)
# When
sock.end_headers()
# Then
req_obj.end_headers.assert_called_once_with()
| 1,902
|
Python
|
.py
| 50
| 30.26
| 85
| 0.6503
|
novnc/websockify
| 3,872
| 766
| 31
|
LGPL-3.0
|
9/5/2024, 5:10:09 PM (Europe/Amsterdam)
|
6,644
|
test_websocketproxy.py
|
novnc_websockify/tests/test_websocketproxy.py
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright(c) 2015 Red Hat, Inc All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
""" Unit tests for websocketproxy """
import sys
import unittest
import socket
from io import StringIO
from io import BytesIO
from unittest.mock import patch, MagicMock
from websockify import websocketproxy
from websockify import token_plugins
from websockify import auth_plugins
class FakeSocket:
def __init__(self, data=b''):
self._data = data
def recv(self, amt, flags=None):
res = self._data[0:amt]
if not (flags & socket.MSG_PEEK):
self._data = self._data[amt:]
return res
def makefile(self, mode='r', buffsize=None):
if 'b' in mode:
return BytesIO(self._data)
else:
return StringIO(self._data.decode('latin_1'))
class FakeServer:
class EClose(Exception):
pass
def __init__(self):
self.token_plugin = None
self.auth_plugin = None
self.wrap_cmd = None
self.ssl_target = None
self.unix_target = None
class ProxyRequestHandlerTestCase(unittest.TestCase):
def setUp(self):
super().setUp()
self.handler = websocketproxy.ProxyRequestHandler(
FakeSocket(), "127.0.0.1", FakeServer())
self.handler.path = "https://localhost:6080/websockify?token=blah"
self.handler.headers = None
patch('websockify.websockifyserver.WebSockifyServer.socket').start()
def tearDown(self):
patch.stopall()
super().tearDown()
def test_get_target(self):
class TestPlugin(token_plugins.BasePlugin):
def lookup(self, token):
return ("some host", "some port")
host, port = self.handler.get_target(
TestPlugin(None))
self.assertEqual(host, "some host")
self.assertEqual(port, "some port")
def test_get_target_unix_socket(self):
class TestPlugin(token_plugins.BasePlugin):
def lookup(self, token):
return ("unix_socket", "/tmp/socket")
_, socket = self.handler.get_target(
TestPlugin(None))
self.assertEqual(socket, "/tmp/socket")
def test_get_target_raises_error_on_unknown_token(self):
class TestPlugin(token_plugins.BasePlugin):
def lookup(self, token):
return None
with self.assertRaises(FakeServer.EClose):
self.handler.get_target(TestPlugin(None))
@patch('websockify.websocketproxy.ProxyRequestHandler.send_auth_error', MagicMock())
def test_token_plugin(self):
class TestPlugin(token_plugins.BasePlugin):
def lookup(self, token):
return (self.source + token).split(',')
self.handler.server.token_plugin = TestPlugin("somehost,")
self.handler.validate_connection()
self.assertEqual(self.handler.server.target_host, "somehost")
self.assertEqual(self.handler.server.target_port, "blah")
@patch('websockify.websocketproxy.ProxyRequestHandler.send_auth_error', MagicMock())
def test_auth_plugin(self):
class TestPlugin(auth_plugins.BasePlugin):
def authenticate(self, headers, target_host, target_port):
if target_host == self.source:
raise auth_plugins.AuthenticationError(response_msg="some_error")
self.handler.server.auth_plugin = TestPlugin("somehost")
self.handler.server.target_host = "somehost"
self.handler.server.target_port = "someport"
with self.assertRaises(auth_plugins.AuthenticationError):
self.handler.auth_connection()
self.handler.server.target_host = "someotherhost"
self.handler.auth_connection()
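# The tests above pin down the token plugin contract: lookup(token) returns
# a (host, port) pair, or None for an unknown token (in which case the
# proxy closes the connection). An illustrative custom plugin following
# that contract -- a sketch, not one of websockify's bundled plugins:
#
#     from websockify import token_plugins
#
#     class FixedTarget(token_plugins.BasePlugin):
#         def lookup(self, token):
#             # self.source holds the constructor argument, e.g. "host:5900"
#             if token == 'known':
#                 return tuple(self.source.split(':', 1))
#             return None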
| 4,339
|
Python
|
.py
| 101
| 35.405941
| 88
| 0.670152
|
novnc/websockify
| 3,872
| 766
| 31
|
LGPL-3.0
|
9/5/2024, 5:10:09 PM (Europe/Amsterdam)
|
6,645
|
test_websockifyserver.py
|
novnc_websockify/tests/test_websockifyserver.py
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright(c)2013 NTT corp. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
""" Unit tests for websockifyserver """
import errno
import os
import logging
import select
import shutil
import socket
import ssl
from unittest.mock import patch, MagicMock, ANY
import sys
import tempfile
import unittest
import signal
from http.server import BaseHTTPRequestHandler
from io import StringIO
from io import BytesIO
from websockify import websockifyserver
def raise_oserror(*args, **kwargs):
raise OSError('fake error')
class FakeSocket:
def __init__(self, data=b''):
self._data = data
def recv(self, amt, flags=None):
res = self._data[0:amt]
if not (flags & socket.MSG_PEEK):
self._data = self._data[amt:]
return res
def makefile(self, mode='r', buffsize=None):
if 'b' in mode:
return BytesIO(self._data)
else:
return StringIO(self._data.decode('latin_1'))
class WebSockifyRequestHandlerTestCase(unittest.TestCase):
def setUp(self):
super().setUp()
self.tmpdir = tempfile.mkdtemp('-websockify-tests')
# Mock this out cause it screws tests up
patch('os.chdir').start()
def tearDown(self):
"""Called automatically after each test."""
patch.stopall()
os.rmdir(self.tmpdir)
super().tearDown()
def _get_server(self, handler_class=websockifyserver.WebSockifyRequestHandler,
**kwargs):
web = kwargs.pop('web', self.tmpdir)
return websockifyserver.WebSockifyServer(
handler_class, listen_host='localhost',
listen_port=80, key=self.tmpdir, web=web,
record=self.tmpdir, daemon=False, ssl_only=0, idle_timeout=1,
**kwargs)
@patch('websockify.websockifyserver.WebSockifyRequestHandler.send_error')
def test_normal_get_with_only_upgrade_returns_error(self, send_error):
server = self._get_server(web=None)
handler = websockifyserver.WebSockifyRequestHandler(
FakeSocket(b'GET /tmp.txt HTTP/1.1'), '127.0.0.1', server)
handler.do_GET()
send_error.assert_called_with(405)
@patch('websockify.websockifyserver.WebSockifyRequestHandler.send_error')
def test_list_dir_with_file_only_returns_error(self, send_error):
server = self._get_server(file_only=True)
handler = websockifyserver.WebSockifyRequestHandler(
FakeSocket(b'GET / HTTP/1.1'), '127.0.0.1', server)
handler.path = '/'
handler.do_GET()
send_error.assert_called_with(404)
class WebSockifyServerTestCase(unittest.TestCase):
def setUp(self):
super().setUp()
self.tmpdir = tempfile.mkdtemp('-websockify-tests')
# Mock this out cause it screws tests up
patch('os.chdir').start()
def tearDown(self):
"""Called automatically after each test."""
patch.stopall()
os.rmdir(self.tmpdir)
super().tearDown()
def _get_server(self, handler_class=websockifyserver.WebSockifyRequestHandler,
**kwargs):
return websockifyserver.WebSockifyServer(
handler_class, listen_host='localhost',
listen_port=80, key=self.tmpdir, web=self.tmpdir,
record=self.tmpdir, **kwargs)
def test_daemonize_raises_error_while_closing_fds(self):
server = self._get_server(daemon=True, ssl_only=1, idle_timeout=1)
patch('os.fork').start().return_value = 0
patch('signal.signal').start()
patch('os.setsid').start()
patch('os.close').start().side_effect = raise_oserror
self.assertRaises(OSError, server.daemonize, keepfd=None, chdir='./')
def test_daemonize_ignores_ebadf_error_while_closing_fds(self):
def raise_oserror_ebadf(fd):
raise OSError(errno.EBADF, 'fake error')
server = self._get_server(daemon=True, ssl_only=1, idle_timeout=1)
patch('os.fork').start().return_value = 0
patch('signal.signal').start()
patch('os.setsid').start()
patch('os.close').start().side_effect = raise_oserror_ebadf
patch('os.open').start().side_effect = raise_oserror
self.assertRaises(OSError, server.daemonize, keepfd=None, chdir='./')
def test_handshake_fails_on_not_ready(self):
server = self._get_server(daemon=True, ssl_only=0, idle_timeout=1)
def fake_select(rlist, wlist, xlist, timeout=None):
return ([], [], [])
patch('select.select').start().side_effect = fake_select
self.assertRaises(
websockifyserver.WebSockifyServer.EClose, server.do_handshake,
FakeSocket(), '127.0.0.1')
def test_empty_handshake_fails(self):
server = self._get_server(daemon=True, ssl_only=0, idle_timeout=1)
sock = FakeSocket('')
def fake_select(rlist, wlist, xlist, timeout=None):
return ([sock], [], [])
patch('select.select').start().side_effect = fake_select
self.assertRaises(
websockifyserver.WebSockifyServer.EClose, server.do_handshake,
sock, '127.0.0.1')
def test_handshake_policy_request(self):
# TODO(directxman12): implement
pass
def test_handshake_ssl_only_without_ssl_raises_error(self):
server = self._get_server(daemon=True, ssl_only=1, idle_timeout=1)
sock = FakeSocket(b'some initial data')
def fake_select(rlist, wlist, xlist, timeout=None):
return ([sock], [], [])
patch('select.select').start().side_effect = fake_select
self.assertRaises(
websockifyserver.WebSockifyServer.EClose, server.do_handshake,
sock, '127.0.0.1')
def test_do_handshake_no_ssl(self):
class FakeHandler:
CALLED = False
def __init__(self, *args, **kwargs):
type(self).CALLED = True
FakeHandler.CALLED = False
server = self._get_server(
handler_class=FakeHandler, daemon=True,
ssl_only=0, idle_timeout=1)
sock = FakeSocket(b'some initial data')
def fake_select(rlist, wlist, xlist, timeout=None):
return ([sock], [], [])
patch('select.select').start().side_effect = fake_select
self.assertEqual(server.do_handshake(sock, '127.0.0.1'), sock)
self.assertTrue(FakeHandler.CALLED)
def test_do_handshake_ssl(self):
# TODO(directxman12): implement this
pass
def test_do_handshake_ssl_without_ssl_raises_error(self):
# TODO(directxman12): implement this
pass
def test_do_handshake_ssl_without_cert_raises_error(self):
server = self._get_server(daemon=True, ssl_only=0, idle_timeout=1,
cert='afdsfasdafdsafdsafdsafdas')
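# 0x16 is the TLS handshake record type; a first byte of \x16 is how
# do_handshake detects that the client is attempting an SSL/TLS connection.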
sock = FakeSocket(b"\x16some ssl data")
def fake_select(rlist, wlist, xlist, timeout=None):
return ([sock], [], [])
patch('select.select').start().side_effect = fake_select
self.assertRaises(
websockifyserver.WebSockifyServer.EClose, server.do_handshake,
sock, '127.0.0.1')
def test_do_handshake_ssl_error_eof_raises_close_error(self):
server = self._get_server(daemon=True, ssl_only=0, idle_timeout=1)
sock = FakeSocket(b"\x16some ssl data")
def fake_select(rlist, wlist, xlist, timeout=None):
return ([sock], [], [])
def fake_wrap_socket(*args, **kwargs):
raise ssl.SSLError(ssl.SSL_ERROR_EOF)
class fake_create_default_context():
def __init__(self, purpose):
self.verify_mode = None
self.options = 0
def load_cert_chain(self, certfile, keyfile, password):
pass
def set_default_verify_paths(self):
pass
def load_verify_locations(self, cafile):
pass
def wrap_socket(self, *args, **kwargs):
raise ssl.SSLError(ssl.SSL_ERROR_EOF)
patch('select.select').start().side_effect = fake_select
patch('ssl.create_default_context').start().side_effect = fake_create_default_context
self.assertRaises(
websockifyserver.WebSockifyServer.EClose, server.do_handshake,
sock, '127.0.0.1')
def test_do_handshake_ssl_sets_ciphers(self):
test_ciphers = 'TEST-CIPHERS-1:TEST-CIPHER-2'
class FakeHandler:
def __init__(self, *args, **kwargs):
pass
server = self._get_server(handler_class=FakeHandler, daemon=True,
idle_timeout=1, ssl_ciphers=test_ciphers)
sock = FakeSocket(b"\x16some ssl data")
def fake_select(rlist, wlist, xlist, timeout=None):
return ([sock], [], [])
class fake_create_default_context():
CIPHERS = ''
def __init__(self, purpose):
self.verify_mode = None
self.options = 0
def load_cert_chain(self, certfile, keyfile, password):
pass
def set_default_verify_paths(self):
pass
def load_verify_locations(self, cafile):
pass
def wrap_socket(self, *args, **kwargs):
pass
def set_ciphers(self, ciphers_to_set):
fake_create_default_context.CIPHERS = ciphers_to_set
patch('select.select').start().side_effect = fake_select
patch('ssl.create_default_context').start().side_effect = fake_create_default_context
server.do_handshake(sock, '127.0.0.1')
self.assertEqual(fake_create_default_context.CIPHERS, test_ciphers)
def test_do_handshake_ssl_sets_options(self):
test_options = 0xCAFEBEEF
class FakeHandler:
def __init__(self, *args, **kwargs):
pass
server = self._get_server(handler_class=FakeHandler, daemon=True,
idle_timeout=1, ssl_options=test_options)
sock = FakeSocket(b"\x16some ssl data")
def fake_select(rlist, wlist, xlist, timeout=None):
return ([sock], [], [])
class fake_create_default_context:
OPTIONS = 0
def __init__(self, purpose):
self.verify_mode = None
self._options = 0
def load_cert_chain(self, certfile, keyfile, password):
pass
def set_default_verify_paths(self):
pass
def load_verify_locations(self, cafile):
pass
def wrap_socket(self, *args, **kwargs):
pass
def get_options(self):
return self._options
def set_options(self, val):
fake_create_default_context.OPTIONS = val
options = property(get_options, set_options)
patch('select.select').start().side_effect = fake_select
patch('ssl.create_default_context').start().side_effect = fake_create_default_context
server.do_handshake(sock, '127.0.0.1')
self.assertEqual(fake_create_default_context.OPTIONS, test_options)
def test_fallback_sigchld_handler(self):
# TODO(directxman12): implement this
pass
def test_start_server_error(self):
server = self._get_server(daemon=False, ssl_only=1, idle_timeout=1)
sock = server.socket('localhost')
def fake_select(rlist, wlist, xlist, timeout=None):
raise Exception("fake error")
patch('websockify.websockifyserver.WebSockifyServer.socket').start()
patch('websockify.websockifyserver.WebSockifyServer.daemonize').start()
patch('select.select').start().side_effect = fake_select
server.start_server()
def test_start_server_keyboardinterrupt(self):
server = self._get_server(daemon=False, ssl_only=0, idle_timeout=1)
sock = server.socket('localhost')
def fake_select(rlist, wlist, xlist, timeout=None):
raise KeyboardInterrupt
patch('websockify.websockifyserver.WebSockifyServer.socket').start()
patch('websockify.websockifyserver.WebSockifyServer.daemonize').start()
patch('select.select').start().side_effect = fake_select
server.start_server()
def test_start_server_systemexit(self):
server = self._get_server(daemon=False, ssl_only=0, idle_timeout=1)
sock = server.socket('localhost')
def fake_select(rlist, wlist, xlist, timeout=None):
sys.exit()
patch('websockify.websockifyserver.WebSockifyServer.socket').start()
patch('websockify.websockifyserver.WebSockifyServer.daemonize').start()
patch('select.select').start().side_effect = fake_select
server.start_server()
def test_socket_set_keepalive_options(self):
keepcnt = 12
keepidle = 34
keepintvl = 56
server = self._get_server(daemon=False, ssl_only=0, idle_timeout=1)
sock = server.socket('localhost',
tcp_keepcnt=keepcnt,
tcp_keepidle=keepidle,
tcp_keepintvl=keepintvl)
if hasattr(socket, 'TCP_KEEPCNT'):
self.assertEqual(sock.getsockopt(socket.SOL_TCP,
socket.TCP_KEEPCNT), keepcnt)
self.assertEqual(sock.getsockopt(socket.SOL_TCP,
socket.TCP_KEEPIDLE), keepidle)
self.assertEqual(sock.getsockopt(socket.SOL_TCP,
socket.TCP_KEEPINTVL), keepintvl)
sock = server.socket('localhost',
tcp_keepalive=False,
tcp_keepcnt=keepcnt,
tcp_keepidle=keepidle,
tcp_keepintvl=keepintvl)
if hasattr(socket, 'TCP_KEEPCNT'):
self.assertNotEqual(sock.getsockopt(socket.SOL_TCP,
socket.TCP_KEEPCNT), keepcnt)
self.assertNotEqual(sock.getsockopt(socket.SOL_TCP,
socket.TCP_KEEPIDLE), keepidle)
self.assertNotEqual(sock.getsockopt(socket.SOL_TCP,
socket.TCP_KEEPINTVL), keepintvl)
| 15,021
|
Python
|
.py
| 318
| 36.41195
| 93
| 0.617331
|
novnc/websockify
| 3,872
| 766
| 31
|
LGPL-3.0
|
9/5/2024, 5:10:09 PM (Europe/Amsterdam)
|
6,646
|
test_auth_plugins.py
|
novnc_websockify/tests/test_auth_plugins.py
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
""" Unit tests for Authentication plugins"""
from websockify.auth_plugins import BasicHTTPAuth, AuthenticationError
import unittest
class BasicHTTPAuthTestCase(unittest.TestCase):
def setUp(self):
self.plugin = BasicHTTPAuth('Aladdin:open sesame')
def test_no_auth(self):
headers = {}
self.assertRaises(AuthenticationError, self.plugin.authenticate, headers, 'localhost', '1234')
def test_invalid_password(self):
headers = {'Authorization': 'Basic QWxhZGRpbjpzZXNhbWUgc3RyZWV0'}
self.assertRaises(AuthenticationError, self.plugin.authenticate, headers, 'localhost', '1234')
def test_valid_password(self):
headers = {'Authorization': 'Basic QWxhZGRpbjpvcGVuIHNlc2FtZQ=='}
self.plugin.authenticate(headers, 'localhost', '1234')
def test_garbage_auth(self):
headers = {'Authorization': 'Basic xxxxxxxxxxxxxxxxxxxxxxxxxxxx'}
self.assertRaises(AuthenticationError, self.plugin.authenticate, headers, 'localhost', '1234')
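# For reference, the Authorization headers above are plain RFC 7617 Basic
# credentials, i.e. base64 of "user:password"; the valid one can be
# reproduced with:
#
#     import base64
#     base64.b64encode(b'Aladdin:open sesame').decode()
#     # -> 'QWxhZGRpbjpvcGVuIHNlc2FtZQ=='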
| 1,061
|
Python
|
.py
| 19
| 49.526316
| 102
| 0.739593
|
novnc/websockify
| 3,872
| 766
| 31
|
LGPL-3.0
|
9/5/2024, 5:10:09 PM (Europe/Amsterdam)
|
6,647
|
latency.py
|
novnc_websockify/tests/latency.py
|
#!/usr/bin/env python
'''
A WebSocket server that echoes back whatever it receives from the client.
Copyright 2010 Joel Martin
Licensed under LGPL version 3 (see docs/LICENSE.LGPL-3)
You can make a cert/key with openssl using:
openssl req -new -x509 -days 365 -nodes -out self.pem -keyout self.pem
as taken from http://docs.python.org/dev/library/ssl.html#certificates
'''
import os, sys, select, optparse, logging
sys.path.insert(0,os.path.join(os.path.dirname(__file__), ".."))
from websockify.websockifyserver import WebSockifyServer, WebSockifyRequestHandler
class WebSocketEcho(WebSockifyRequestHandler):
"""
WebSockets server that echoes back whatever is received from the
client. """
buffer_size = 8096
def new_websocket_client(self):
"""
Echo back whatever is received.
"""
cqueue = []
c_pend = 0
cpartial = ""
rlist = [self.request]
while True:
wlist = []
if cqueue or c_pend: wlist.append(self.request)
ins, outs, excepts = select.select(rlist, wlist, [], 1)
if excepts: raise Exception("Socket exception")
if self.request in outs:
# Send queued target data to the client
c_pend = self.send_frames(cqueue)
cqueue = []
if self.request in ins:
# Receive client data, decode it, and send it back
frames, closed = self.recv_frames()
cqueue.extend(frames)
if closed:
break
if __name__ == '__main__':
parser = optparse.OptionParser(usage="%prog [options] listen_port")
parser.add_option("--verbose", "-v", action="store_true",
help="verbose messages and per frame traffic")
parser.add_option("--cert", default="self.pem",
help="SSL certificate file")
parser.add_option("--key", default=None,
help="SSL key file (if separate from cert)")
parser.add_option("--ssl-only", action="store_true",
help="disallow non-encrypted connections")
(opts, args) = parser.parse_args()
try:
if len(args) != 1: raise ValueError
opts.listen_port = int(args[0])
except ValueError:
parser.error("Invalid arguments")
logging.basicConfig(level=logging.INFO)
opts.web = "."
server = WebSockifyServer(WebSocketEcho, **opts.__dict__)
server.start_server()
| 2,456
|
Python
|
.py
| 60
| 32.866667
| 82
| 0.629832
|
novnc/websockify
| 3,872
| 766
| 31
|
LGPL-3.0
|
9/5/2024, 5:10:09 PM (Europe/Amsterdam)
|
6,648
|
echo.py
|
novnc_websockify/tests/echo.py
|
#!/usr/bin/env python
'''
A WebSocket server that echoes back whatever it receives from the client.
Copyright 2010 Joel Martin
Licensed under LGPL version 3 (see docs/LICENSE.LGPL-3)
You can make a cert/key with openssl using:
openssl req -new -x509 -days 365 -nodes -out self.pem -keyout self.pem
as taken from http://docs.python.org/dev/library/ssl.html#certificates
'''
import os, sys, select, optparse, logging
sys.path.insert(0,os.path.join(os.path.dirname(__file__), ".."))
from websockify.websockifyserver import WebSockifyServer, WebSockifyRequestHandler
class WebSocketEcho(WebSockifyRequestHandler):
"""
WebSockets server that echoes back whatever is received from the
client. """
buffer_size = 8096
def new_websocket_client(self):
"""
Echo back whatever is received.
"""
cqueue = []
c_pend = 0
cpartial = ""
rlist = [self.request]
while True:
wlist = []
if cqueue or c_pend: wlist.append(self.request)
ins, outs, excepts = select.select(rlist, wlist, [], 1)
if excepts: raise Exception("Socket exception")
if self.request in outs:
# Send queued target data to the client
c_pend = self.send_frames(cqueue)
cqueue = []
if self.request in ins:
# Receive client data, decode it, and send it back
frames, closed = self.recv_frames()
cqueue.extend(frames)
if closed:
break
if __name__ == '__main__':
parser = optparse.OptionParser(usage="%prog [options] listen_port")
parser.add_option("--verbose", "-v", action="store_true",
help="verbose messages and per frame traffic")
parser.add_option("--cert", default="self.pem",
help="SSL certificate file")
parser.add_option("--key", default=None,
help="SSL key file (if separate from cert)")
parser.add_option("--ssl-only", action="store_true",
help="disallow non-encrypted connections")
(opts, args) = parser.parse_args()
try:
if len(args) != 1: raise ValueError
opts.listen_port = int(args[0])
except ValueError:
parser.error("Invalid arguments")
logging.basicConfig(level=logging.INFO)
opts.web = "."
server = WebSockifyServer(WebSocketEcho, **opts.__dict__)
server.start_server()
| 2,456
|
Python
|
.py
| 60
| 32.866667
| 82
| 0.629832
|
novnc/websockify
| 3,872
| 766
| 31
|
LGPL-3.0
|
9/5/2024, 5:10:09 PM (Europe/Amsterdam)
|
6,649
|
test_token_plugins.py
|
novnc_websockify/tests/test_token_plugins.py
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
""" Unit tests for Token plugins"""
import sys
import unittest
from unittest.mock import patch, mock_open, MagicMock
from jwcrypto import jwt, jwk
from websockify.token_plugins import parse_source_args, ReadOnlyTokenFile, JWTTokenApi, TokenRedis
class ParseSourceArgumentsTestCase(unittest.TestCase):
def test_parameterized(self):
params = [
('', ['']),
(':', ['', '']),
('::', ['', '', '']),
('"', ['"']),
('""', ['""']),
('"""', ['"""']),
('"localhost"', ['localhost']),
('"localhost":', ['localhost', '']),
('"localhost"::', ['localhost', '', '']),
('"local:host"', ['local:host']),
('"local:host:"pass"', ['"local', 'host', "pass"]),
('"local":"host"', ['local', 'host']),
('"local":host"', ['local', 'host"']),
('localhost:6379:1:pass"word:"my-app-namespace:dev"',
['localhost', '6379', '1', 'pass"word', 'my-app-namespace:dev']),
]
for src, args in params:
self.assertEqual(args, parse_source_args(src))
class ReadOnlyTokenFileTestCase(unittest.TestCase):
@patch('os.path.isdir', MagicMock(return_value=False))
def test_empty(self):
plugin = ReadOnlyTokenFile('configfile')
config = ""
pyopen = mock_open(read_data=config)
with patch("websockify.token_plugins.open", pyopen, create=True):
result = plugin.lookup('testhost')
pyopen.assert_called_once_with('configfile')
self.assertIsNone(result)
@patch('os.path.isdir', MagicMock(return_value=False))
def test_simple(self):
plugin = ReadOnlyTokenFile('configfile')
config = "testhost: remote_host:remote_port"
pyopen = mock_open(read_data=config)
with patch("websockify.token_plugins.open", pyopen, create=True):
result = plugin.lookup('testhost')
pyopen.assert_called_once_with('configfile')
self.assertIsNotNone(result)
self.assertEqual(result[0], "remote_host")
self.assertEqual(result[1], "remote_port")
@patch('os.path.isdir', MagicMock(return_value=False))
def test_tabs(self):
plugin = ReadOnlyTokenFile('configfile')
config = "testhost:\tremote_host:remote_port"
pyopen = mock_open(read_data=config)
with patch("websockify.token_plugins.open", pyopen, create=True):
result = plugin.lookup('testhost')
pyopen.assert_called_once_with('configfile')
self.assertIsNotNone(result)
self.assertEqual(result[0], "remote_host")
self.assertEqual(result[1], "remote_port")
class JWSTokenTestCase(unittest.TestCase):
def test_asymmetric_jws_token_plugin(self):
plugin = JWTTokenApi("./tests/fixtures/public.pem")
key = jwk.JWK()
private_key = open("./tests/fixtures/private.pem", "rb").read()
key.import_from_pem(private_key)
jwt_token = jwt.JWT({"alg": "RS256"}, {'host': "remote_host", 'port': "remote_port"})
jwt_token.make_signed_token(key)
result = plugin.lookup(jwt_token.serialize())
self.assertIsNotNone(result)
self.assertEqual(result[0], "remote_host")
self.assertEqual(result[1], "remote_port")
def test_asymmetric_jws_token_plugin_with_illegal_key_exception(self):
plugin = JWTTokenApi("wrong.pub")
key = jwk.JWK()
private_key = open("./tests/fixtures/private.pem", "rb").read()
key.import_from_pem(private_key)
jwt_token = jwt.JWT({"alg": "RS256"}, {'host': "remote_host", 'port': "remote_port"})
jwt_token.make_signed_token(key)
result = plugin.lookup(jwt_token.serialize())
self.assertIsNone(result)
@patch('time.time')
def test_jwt_valid_time(self, mock_time):
plugin = JWTTokenApi("./tests/fixtures/public.pem")
key = jwk.JWK()
private_key = open("./tests/fixtures/private.pem", "rb").read()
key.import_from_pem(private_key)
jwt_token = jwt.JWT({"alg": "RS256"}, {'host': "remote_host", 'port': "remote_port", 'nbf': 100, 'exp': 200 })
jwt_token.make_signed_token(key)
mock_time.return_value = 150
result = plugin.lookup(jwt_token.serialize())
self.assertIsNotNone(result)
self.assertEqual(result[0], "remote_host")
self.assertEqual(result[1], "remote_port")
@patch('time.time')
def test_jwt_early_time(self, mock_time):
plugin = JWTTokenApi("./tests/fixtures/public.pem")
key = jwk.JWK()
private_key = open("./tests/fixtures/private.pem", "rb").read()
key.import_from_pem(private_key)
jwt_token = jwt.JWT({"alg": "RS256"}, {'host': "remote_host", 'port': "remote_port", 'nbf': 100, 'exp': 200 })
jwt_token.make_signed_token(key)
mock_time.return_value = 50
result = plugin.lookup(jwt_token.serialize())
self.assertIsNone(result)
@patch('time.time')
def test_jwt_late_time(self, mock_time):
plugin = JWTTokenApi("./tests/fixtures/public.pem")
key = jwk.JWK()
private_key = open("./tests/fixtures/private.pem", "rb").read()
key.import_from_pem(private_key)
jwt_token = jwt.JWT({"alg": "RS256"}, {'host': "remote_host", 'port': "remote_port", 'nbf': 100, 'exp': 200 })
jwt_token.make_signed_token(key)
mock_time.return_value = 250
result = plugin.lookup(jwt_token.serialize())
self.assertIsNone(result)
def test_symmetric_jws_token_plugin(self):
plugin = JWTTokenApi("./tests/fixtures/symmetric.key")
secret = open("./tests/fixtures/symmetric.key").read()
key = jwk.JWK()
key.import_key(kty="oct",k=secret)
jwt_token = jwt.JWT({"alg": "HS256"}, {'host': "remote_host", 'port': "remote_port"})
jwt_token.make_signed_token(key)
result = plugin.lookup(jwt_token.serialize())
self.assertIsNotNone(result)
self.assertEqual(result[0], "remote_host")
self.assertEqual(result[1], "remote_port")
def test_symmetric_jws_token_plugin_with_illegal_key_exception(self):
plugin = JWTTokenApi("wrong_sauce")
secret = open("./tests/fixtures/symmetric.key").read()
key = jwk.JWK()
key.import_key(kty="oct",k=secret)
jwt_token = jwt.JWT({"alg": "HS256"}, {'host': "remote_host", 'port': "remote_port"})
jwt_token.make_signed_token(key)
result = plugin.lookup(jwt_token.serialize())
self.assertIsNone(result)
def test_asymmetric_jwe_token_plugin(self):
plugin = JWTTokenApi("./tests/fixtures/private.pem")
private_key = jwk.JWK()
public_key = jwk.JWK()
private_key_data = open("./tests/fixtures/private.pem", "rb").read()
public_key_data = open("./tests/fixtures/public.pem", "rb").read()
private_key.import_from_pem(private_key_data)
public_key.import_from_pem(public_key_data)
jwt_token = jwt.JWT({"alg": "RS256"}, {'host': "remote_host", 'port': "remote_port"})
jwt_token.make_signed_token(private_key)
jwe_token = jwt.JWT(header={"alg": "RSA-OAEP", "enc": "A256CBC-HS512"},
claims=jwt_token.serialize())
jwe_token.make_encrypted_token(public_key)
result = plugin.lookup(jwe_token.serialize())
self.assertIsNotNone(result)
self.assertEqual(result[0], "remote_host")
self.assertEqual(result[1], "remote_port")
class TokenRedisTestCase(unittest.TestCase):
def setUp(self):
try:
import redis
except ImportError:
patcher = patch.dict(sys.modules, {'redis': MagicMock()})
patcher.start()
self.addCleanup(patcher.stop)
@patch('redis.Redis')
def test_empty(self, mock_redis):
plugin = TokenRedis('127.0.0.1:1234')
instance = mock_redis.return_value
instance.get.return_value = None
result = plugin.lookup('testhost')
instance.get.assert_called_once_with('testhost')
self.assertIsNone(result)
@patch('redis.Redis')
def test_simple(self, mock_redis):
plugin = TokenRedis('127.0.0.1:1234')
instance = mock_redis.return_value
instance.get.return_value = b'{"host": "remote_host:remote_port"}'
result = plugin.lookup('testhost')
instance.get.assert_called_once_with('testhost')
self.assertIsNotNone(result)
self.assertEqual(result[0], 'remote_host')
self.assertEqual(result[1], 'remote_port')
@patch('redis.Redis')
def test_json_token_with_spaces(self, mock_redis):
plugin = TokenRedis('127.0.0.1:1234')
instance = mock_redis.return_value
instance.get.return_value = b' {"host": "remote_host:remote_port"} '
result = plugin.lookup('testhost')
instance.get.assert_called_once_with('testhost')
self.assertIsNotNone(result)
self.assertEqual(result[0], 'remote_host')
self.assertEqual(result[1], 'remote_port')
@patch('redis.Redis')
def test_text_token(self, mock_redis):
plugin = TokenRedis('127.0.0.1:1234')
instance = mock_redis.return_value
instance.get.return_value = b'remote_host:remote_port'
result = plugin.lookup('testhost')
instance.get.assert_called_once_with('testhost')
self.assertIsNotNone(result)
self.assertEqual(result[0], 'remote_host')
self.assertEqual(result[1], 'remote_port')
@patch('redis.Redis')
def test_text_token_with_spaces(self, mock_redis):
plugin = TokenRedis('127.0.0.1:1234')
instance = mock_redis.return_value
instance.get.return_value = b' remote_host:remote_port '
result = plugin.lookup('testhost')
instance.get.assert_called_once_with('testhost')
self.assertIsNotNone(result)
self.assertEqual(result[0], 'remote_host')
self.assertEqual(result[1], 'remote_port')
@patch('redis.Redis')
def test_invalid_token(self, mock_redis):
plugin = TokenRedis('127.0.0.1:1234')
instance = mock_redis.return_value
instance.get.return_value = b'{"host": "remote_host:remote_port" '
result = plugin.lookup('testhost')
instance.get.assert_called_once_with('testhost')
self.assertIsNone(result)
@patch('redis.Redis')
def test_token_without_namespace(self, mock_redis):
plugin = TokenRedis('127.0.0.1:1234')
token = 'testhost'
def mock_redis_get(key):
self.assertEqual(key, token)
return b'remote_host:remote_port'
instance = mock_redis.return_value
instance.get = mock_redis_get
result = plugin.lookup(token)
self.assertIsNotNone(result)
self.assertEqual(result[0], 'remote_host')
self.assertEqual(result[1], 'remote_port')
@patch('redis.Redis')
def test_token_with_namespace(self, mock_redis):
plugin = TokenRedis('127.0.0.1:1234:::namespace')
token = 'testhost'
def mock_redis_get(key):
self.assertEqual(key, "namespace:" + token)
return b'remote_host:remote_port'
instance = mock_redis.return_value
instance.get = mock_redis_get
result = plugin.lookup(token)
self.assertIsNotNone(result)
self.assertEqual(result[0], 'remote_host')
self.assertEqual(result[1], 'remote_port')
def test_src_only_host(self):
plugin = TokenRedis('127.0.0.1')
self.assertEqual(plugin._server, '127.0.0.1')
self.assertEqual(plugin._port, 6379)
self.assertEqual(plugin._db, 0)
self.assertEqual(plugin._password, None)
self.assertEqual(plugin._namespace, "")
def test_src_with_host_port(self):
plugin = TokenRedis('127.0.0.1:1234')
self.assertEqual(plugin._server, '127.0.0.1')
self.assertEqual(plugin._port, 1234)
self.assertEqual(plugin._db, 0)
self.assertEqual(plugin._password, None)
self.assertEqual(plugin._namespace, "")
def test_src_with_host_port_db(self):
plugin = TokenRedis('127.0.0.1:1234:2')
self.assertEqual(plugin._server, '127.0.0.1')
self.assertEqual(plugin._port, 1234)
self.assertEqual(plugin._db, 2)
self.assertEqual(plugin._password, None)
self.assertEqual(plugin._namespace, "")
def test_src_with_host_port_db_pass(self):
plugin = TokenRedis('127.0.0.1:1234:2:verysecret')
self.assertEqual(plugin._server, '127.0.0.1')
self.assertEqual(plugin._port, 1234)
self.assertEqual(plugin._db, 2)
self.assertEqual(plugin._password, 'verysecret')
self.assertEqual(plugin._namespace, "")
def test_src_with_host_port_db_pass_namespace(self):
plugin = TokenRedis('127.0.0.1:1234:2:verysecret:namespace')
self.assertEqual(plugin._server, '127.0.0.1')
self.assertEqual(plugin._port, 1234)
self.assertEqual(plugin._db, 2)
self.assertEqual(plugin._password, 'verysecret')
self.assertEqual(plugin._namespace, "namespace:")
def test_src_with_host_empty_port_empty_db_pass_no_namespace(self):
plugin = TokenRedis('127.0.0.1:::verysecret')
self.assertEqual(plugin._server, '127.0.0.1')
self.assertEqual(plugin._port, 6379)
self.assertEqual(plugin._db, 0)
self.assertEqual(plugin._password, 'verysecret')
self.assertEqual(plugin._namespace, "")
def test_src_with_host_empty_port_empty_db_empty_pass_empty_namespace(self):
plugin = TokenRedis('127.0.0.1::::')
self.assertEqual(plugin._server, '127.0.0.1')
self.assertEqual(plugin._port, 6379)
self.assertEqual(plugin._db, 0)
self.assertEqual(plugin._password, None)
self.assertEqual(plugin._namespace, "")
def test_src_with_host_empty_port_empty_db_empty_pass_no_namespace(self):
plugin = TokenRedis('127.0.0.1:::')
self.assertEqual(plugin._server, '127.0.0.1')
self.assertEqual(plugin._port, 6379)
self.assertEqual(plugin._db, 0)
self.assertEqual(plugin._password, None)
self.assertEqual(plugin._namespace, "")
def test_src_with_host_empty_port_empty_db_no_pass_no_namespace(self):
plugin = TokenRedis('127.0.0.1::')
self.assertEqual(plugin._server, '127.0.0.1')
self.assertEqual(plugin._port, 6379)
self.assertEqual(plugin._db, 0)
self.assertEqual(plugin._password, None)
self.assertEqual(plugin._namespace, "")
def test_src_with_host_empty_port_no_db_no_pass_no_namespace(self):
plugin = TokenRedis('127.0.0.1:')
self.assertEqual(plugin._server, '127.0.0.1')
self.assertEqual(plugin._port, 6379)
self.assertEqual(plugin._db, 0)
self.assertEqual(plugin._password, None)
self.assertEqual(plugin._namespace, "")
def test_src_with_host_empty_port_empty_db_empty_pass_namespace(self):
plugin = TokenRedis('127.0.0.1::::namespace')
self.assertEqual(plugin._server, '127.0.0.1')
self.assertEqual(plugin._port, 6379)
self.assertEqual(plugin._db, 0)
self.assertEqual(plugin._password, None)
self.assertEqual(plugin._namespace, "namespace:")
def test_src_with_host_empty_port_empty_db_empty_pass_nested_namespace(self):
plugin = TokenRedis('127.0.0.1::::"ns1:ns2"')
self.assertEqual(plugin._server, '127.0.0.1')
self.assertEqual(plugin._port, 6379)
self.assertEqual(plugin._db, 0)
self.assertEqual(plugin._password, None)
self.assertEqual(plugin._namespace, "ns1:ns2:")
def test_src_with_host_empty_port_db_no_pass_no_namespace(self):
plugin = TokenRedis('127.0.0.1::2')
self.assertEqual(plugin._server, '127.0.0.1')
self.assertEqual(plugin._port, 6379)
self.assertEqual(plugin._db, 2)
self.assertEqual(plugin._password, None)
self.assertEqual(plugin._namespace, "")
def test_src_with_host_port_empty_db_pass_no_namespace(self):
plugin = TokenRedis('127.0.0.1:1234::verysecret')
self.assertEqual(plugin._server, '127.0.0.1')
self.assertEqual(plugin._port, 1234)
self.assertEqual(plugin._db, 0)
self.assertEqual(plugin._password, 'verysecret')
self.assertEqual(plugin._namespace, "")
def test_src_with_host_empty_port_db_pass_no_namespace(self):
plugin = TokenRedis('127.0.0.1::2:verysecret')
self.assertEqual(plugin._server, '127.0.0.1')
self.assertEqual(plugin._port, 6379)
self.assertEqual(plugin._db, 2)
self.assertEqual(plugin._password, 'verysecret')
self.assertEqual(plugin._namespace, "")
def test_src_with_host_empty_port_db_empty_pass_no_namespace(self):
plugin = TokenRedis('127.0.0.1::2:')
self.assertEqual(plugin._server, '127.0.0.1')
self.assertEqual(plugin._port, 6379)
self.assertEqual(plugin._db, 2)
self.assertEqual(plugin._password, None)
self.assertEqual(plugin._namespace, "")
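# Illustrative sketch, not the plugin's actual implementation: the run of
# test_src_* cases above fixes TokenRedis's source-string contract,
# host[:port[:db[:password[:namespace]]]], with empty fields falling back to
# defaults and the namespace stored with a trailing ':'. A hypothetical
# stand-alone parser matching those assertions:
def _example_parse_token_redis_source(src):
    parts = src.split(':', 4)
    host = parts[0]
    port = int(parts[1]) if len(parts) > 1 and parts[1] else 6379
    db = int(parts[2]) if len(parts) > 2 and parts[2] else 0
    password = parts[3] if len(parts) > 3 and parts[3] else None
    namespace = ''
    if len(parts) > 4 and parts[4]:
        # Quoting allows colons inside the namespace (the "ns1:ns2" case).
        namespace = parts[4].strip('"') + ':'
    return host, port, db, password, namespace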
==== File: novnc_websockify/tests/load.py (repo: novnc/websockify, license: LGPL-3.0) ====
#!/usr/bin/env python
'''
WebSocket server-side load test program. Sends and receives traffic
that has a random payload (length and content) that is checksummed and
given a sequence number. Any errors are reported and counted.
'''
import sys, os, select, random, time, optparse, logging
sys.path.insert(0,os.path.join(os.path.dirname(__file__), ".."))
from websockify.websockifyserver import WebSockifyServer, WebSockifyRequestHandler
class WebSocketLoadServer(WebSockifyServer):
recv_cnt = 0
send_cnt = 0
def __init__(self, *args, **kwargs):
self.delay = kwargs.pop('delay')
WebSockifyServer.__init__(self, *args, **kwargs)
class WebSocketLoad(WebSockifyRequestHandler):
max_packet_size = 10000
def new_websocket_client(self):
print "Prepopulating random array"
self.rand_array = []
for i in range(0, self.max_packet_size):
self.rand_array.append(random.randint(0, 9))
self.errors = 0
self.send_cnt = 0
self.recv_cnt = 0
self.responder(self.request)
print "accumulated errors:", self.errors
self.errors = 0
def responder(self, client):
c_pend = 0
cqueue = []
cpartial = ""
socks = [client]
last_send = time.time() * 1000
while True:
ins, outs, excepts = select.select(socks, socks, socks, 1)
if excepts: raise Exception("Socket exception")
if client in ins:
frames, closed = self.recv_frames()
err = self.check(frames)
if err:
self.errors = self.errors + 1
                    print(err)
if closed:
break
now = time.time() * 1000
if client in outs:
if c_pend:
last_send = now
c_pend = self.send_frames()
elif now > (last_send + self.server.delay):
last_send = now
c_pend = self.send_frames([self.generate()])
def generate(self):
length = random.randint(10, self.max_packet_size)
numlist = self.rand_array[self.max_packet_size-length:]
# Error in length
#numlist.append(5)
chksum = sum(numlist)
# Error in checksum
#numlist[0] = 5
nums = "".join( [str(n) for n in numlist] )
data = "^%d:%d:%d:%s$" % (self.send_cnt, length, chksum, nums)
self.send_cnt += 1
return data
def check(self, frames):
err = ""
for data in frames:
if data.count('$') > 1:
raise Exception("Multiple parts within single packet")
if len(data) == 0:
self.traffic("_")
continue
if data[0] != "^":
err += "buf did not start with '^'\n"
continue
try:
cnt, length, chksum, nums = data[1:-1].split(':')
cnt = int(cnt)
length = int(length)
chksum = int(chksum)
except ValueError:
print "\n<BOF>" + repr(data) + "<EOF>"
err += "Invalid data format\n"
continue
if self.recv_cnt != cnt:
err += "Expected count %d but got %d\n" % (self.recv_cnt, cnt)
self.recv_cnt = cnt + 1
continue
self.recv_cnt += 1
if len(nums) != length:
err += "Expected length %d but got %d\n" % (length, len(nums))
continue
            inv = nums.translate(str.maketrans('', '', "0123456789"))
if inv:
err += "Invalid characters found: %s\n" % inv
continue
real_chksum = 0
for num in nums:
real_chksum += int(num)
if real_chksum != chksum:
err += "Expected checksum %d but real chksum is %d\n" % (chksum, real_chksum)
return err
if __name__ == '__main__':
    parser = optparse.OptionParser(usage="%prog [options] listen_port [delay]")
parser.add_option("--verbose", "-v", action="store_true",
help="verbose messages and per frame traffic")
parser.add_option("--cert", default="self.pem",
help="SSL certificate file")
parser.add_option("--key", default=None,
help="SSL key file (if separate from cert)")
parser.add_option("--ssl-only", action="store_true",
help="disallow non-encrypted connections")
(opts, args) = parser.parse_args()
    try:
        if len(args) not in [1,2]: raise ValueError
        opts.listen_port = int(args[0])
if len(args) == 2:
opts.delay = int(args[1])
else:
opts.delay = 10
except ValueError:
parser.error("Invalid arguments")
logging.basicConfig(level=logging.INFO)
opts.web = "."
server = WebSocketLoadServer(WebSocketLoad, **opts.__dict__)
server.start_server()
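# Illustrative sketch, not part of the load test program above: a stand-alone
# validator for the frame format that generate()/check() agree on,
# '^<seq>:<len>:<checksum>:<digits>$'. The function name is hypothetical.
def _example_validate_load_frame(data):
    if not (data.startswith('^') and data.endswith('$')):
        raise ValueError("missing frame delimiters")
    seq, length, chksum, digits = data[1:-1].split(':')
    if len(digits) != int(length):
        raise ValueError("length mismatch")
    if sum(int(d) for d in digits) != int(chksum):
        raise ValueError("checksum mismatch")
    return int(seq), digits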
==== File: novnc_websockify/websockify/sysloghandler.py (repo: novnc/websockify, license: LGPL-3.0) ====
import logging.handlers as handlers, socket, os, time
class WebsockifySysLogHandler(handlers.SysLogHandler):
"""
A handler class that sends proper Syslog-formatted messages,
as defined by RFC 5424.
"""
_legacy_head_fmt = '<{pri}>{ident}[{pid}]: '
_rfc5424_head_fmt = '<{pri}>1 {timestamp} {hostname} {ident} {pid} - - '
_head_fmt = _rfc5424_head_fmt
_legacy = False
_timestamp_fmt = '%Y-%m-%dT%H:%M:%SZ'
_max_hostname = 255
_max_ident = 24 #safer for old daemons
_send_length = False
_tail = '\n'
ident = None
def __init__(self, address=('localhost', handlers.SYSLOG_UDP_PORT),
facility=handlers.SysLogHandler.LOG_USER,
socktype=None, ident=None, legacy=False):
"""
Initialize a handler.
If address is specified as a string, a UNIX socket is used. To log to a
local syslogd, "WebsockifySysLogHandler(address="/dev/log")" can be
used. If facility is not specified, LOG_USER is used. If socktype is
specified as socket.SOCK_DGRAM or socket.SOCK_STREAM, that specific
socket type will be used. For Unix sockets, you can also specify a
socktype of None, in which case socket.SOCK_DGRAM will be used, falling
back to socket.SOCK_STREAM. If ident is specified, this string will be
used as the application name in all messages sent. Set legacy to True
to use the old version of the protocol.
"""
self.ident = ident
if legacy:
self._legacy = True
self._head_fmt = self._legacy_head_fmt
super().__init__(address, facility, socktype)
def emit(self, record):
"""
Emit a record.
The record is formatted, and then sent to the syslog server. If
exception information is present, it is NOT sent to the server.
"""
try:
# Gather info.
text = self.format(record).replace(self._tail, ' ')
if not text: # nothing to log
return
pri = self.encodePriority(self.facility,
self.mapPriority(record.levelname))
            timestamp = time.strftime(self._timestamp_fmt, time.gmtime())
hostname = socket.gethostname()[:self._max_hostname]
if self.ident:
ident = self.ident[:self._max_ident]
else:
ident = ''
pid = os.getpid() # shouldn't need truncation
# Format the header.
head = {
'pri': pri,
'timestamp': timestamp,
'hostname': hostname,
'ident': ident,
'pid': pid,
}
msg = self._head_fmt.format(**head).encode('ascii', 'ignore')
# Encode text as plain ASCII if possible, else use UTF-8 with BOM.
try:
msg += text.encode('ascii')
except UnicodeEncodeError:
msg += text.encode('utf-8-sig')
# Add length or tail character, if necessary.
if self.socktype != socket.SOCK_DGRAM:
if self._send_length:
msg = ('%d ' % len(msg)).encode('ascii') + msg
else:
msg += self._tail.encode('ascii')
# Send the message.
if self.unixsocket:
try:
self.socket.send(msg)
except OSError:
self._connect_unixsocket(self.address)
self.socket.send(msg)
else:
if self.socktype == socket.SOCK_DGRAM:
self.socket.sendto(msg, self.address)
else:
self.socket.sendall(msg)
except (KeyboardInterrupt, SystemExit):
raise
except:
self.handleError(record)
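# Illustrative usage sketch, not part of the module: attach the handler to a
# logger. The '/dev/log' address follows the docstring above and assumes a
# local syslog daemon listening on that socket.
if __name__ == '__main__':
    import logging
    logger = logging.getLogger('websockify')
    logger.addHandler(WebsockifySysLogHandler(address='/dev/log',
                                              ident='websockify'))
    logger.warning('sysloghandler demo message')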
==== File: novnc_websockify/websockify/websocketserver.py (repo: novnc/websockify, license: LGPL-3.0) ====
#!/usr/bin/env python
'''
Python WebSocket server base
Copyright 2011 Joel Martin
Copyright 2016-2018 Pierre Ossman
Licensed under LGPL version 3 (see docs/LICENSE.LGPL-3)
'''
import sys
from http.server import BaseHTTPRequestHandler, HTTPServer
from websockify.websocket import WebSocket, WebSocketWantReadError, WebSocketWantWriteError
class HttpWebSocket(WebSocket):
"""Class to glue websocket and http request functionality together"""
def __init__(self, request_handler):
super().__init__()
self.request_handler = request_handler
def send_response(self, code, message=None):
self.request_handler.send_response(code, message)
def send_header(self, keyword, value):
self.request_handler.send_header(keyword, value)
def end_headers(self):
self.request_handler.end_headers()
class WebSocketRequestHandlerMixIn:
"""WebSocket request handler mix-in class
    This class modifies an existing request handler to handle
WebSocket requests. The request handler will continue to function
as before, except that WebSocket requests are intercepted and the
methods handle_upgrade() and handle_websocket() are called. The
standard do_GET() will be called for normal requests.
The class instance SocketClass can be overridden with the class to
use for the WebSocket connection.
"""
SocketClass = HttpWebSocket
def handle_one_request(self):
"""Extended request handler
This is where WebSocketRequestHandler redirects requests to the
new methods. Any sub-classes must call this method in order for
the calls to function.
"""
self._real_do_GET = self.do_GET
self.do_GET = self._websocket_do_GET
try:
super().handle_one_request()
finally:
self.do_GET = self._real_do_GET
def _websocket_do_GET(self):
# Checks if it is a websocket request and redirects
self.do_GET = self._real_do_GET
if (self.headers.get('upgrade') and
self.headers.get('upgrade').lower() == 'websocket'):
self.handle_upgrade()
else:
self.do_GET()
def handle_upgrade(self):
"""Initial handler for a WebSocket request
This method is called when a WebSocket is requested. By default
it will create a WebSocket object and perform the negotiation.
The WebSocket object will then replace the request object and
handle_websocket() will be called.
"""
websocket = self.SocketClass(self)
try:
websocket.accept(self.request, self.headers)
except Exception:
exc = sys.exc_info()[1]
self.send_error(400, str(exc))
return
self.request = websocket
# Other requests cannot follow Websocket data
self.close_connection = True
self.handle_websocket()
def handle_websocket(self):
"""Handle a WebSocket connection.
This is called when the WebSocket is ready to be used. A
sub-class should perform the necessary communication here and
return once done.
"""
pass
# Convenient ready made classes
class WebSocketRequestHandler(WebSocketRequestHandlerMixIn,
BaseHTTPRequestHandler):
pass
class WebSocketServer(HTTPServer):
pass
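# Illustrative sketch, not part of the module: the mix-in contract described
# above (override handle_websocket(), after which self.request is the
# negotiated WebSocket object). A minimal echo server; per-frame
# WebSocketWantReadError/WantWriteError handling is omitted for brevity and
# blocking sockets are assumed.
if __name__ == '__main__':
    class EchoHandler(WebSocketRequestHandler):
        def handle_websocket(self):
            while True:
                msg = self.request.recvmsg()
                if msg is None:  # peer sent a close frame
                    break
                self.request.sendmsg(msg)
    WebSocketServer(('localhost', 6080), EchoHandler).serve_forever()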
==== File: novnc_websockify/websockify/websockifyserver.py (repo: novnc/websockify, license: LGPL-3.0) ====
#!/usr/bin/env python
'''
Python WebSocket server base with support for "wss://" encryption.
Copyright 2011 Joel Martin
Copyright 2016 Pierre Ossman
Licensed under LGPL version 3 (see docs/LICENSE.LGPL-3)
You can make a cert/key with openssl using:
openssl req -new -x509 -days 365 -nodes -out self.pem -keyout self.pem
as taken from http://docs.python.org/dev/library/ssl.html#certificates
'''
import os, sys, time, errno, signal, socket, select, logging
import multiprocessing
from http.server import SimpleHTTPRequestHandler
# Degraded functionality if these imports are missing
for mod, msg in [('ssl', 'TLS/SSL/wss is disabled'),
('resource', 'daemonizing is disabled')]:
try:
globals()[mod] = __import__(mod)
except ImportError:
globals()[mod] = None
print("WARNING: no '%s' module, %s" % (mod, msg))
if sys.platform == 'win32':
# make sockets pickle-able/inheritable
import multiprocessing.reduction
from websockify.websocket import WebSocketWantReadError, WebSocketWantWriteError
from websockify.websocketserver import WebSocketRequestHandlerMixIn
class CompatibleWebSocket(WebSocketRequestHandlerMixIn.SocketClass):
def select_subprotocol(self, protocols):
# Handle old websockify clients that still specify a sub-protocol
if 'binary' in protocols:
return 'binary'
else:
return ''
# HTTP handler with WebSocket upgrade support
class WebSockifyRequestHandler(WebSocketRequestHandlerMixIn, SimpleHTTPRequestHandler):
"""
WebSocket Request Handler Class, derived from SimpleHTTPRequestHandler.
Must be sub-classed with new_websocket_client method definition.
The request handler can be configured by setting optional
attributes on the server object:
* only_upgrade: If true, SimpleHTTPRequestHandler will not be enabled,
only websocket is allowed.
* verbose: If true, verbose logging is activated.
* daemon: Running as daemon, do not write to console etc
* record: Record raw frame data as JavaScript array into specified filename
* run_once: Handle a single request
* handler_id: A sequence number for this connection, appended to record filename
"""
server_version = "WebSockify"
protocol_version = "HTTP/1.1"
SocketClass = CompatibleWebSocket
# An exception while the WebSocket client was connected
class CClose(Exception):
pass
def __init__(self, req, addr, server):
# Retrieve a few configuration variables from the server
self.only_upgrade = getattr(server, "only_upgrade", False)
self.verbose = getattr(server, "verbose", False)
self.daemon = getattr(server, "daemon", False)
self.record = getattr(server, "record", False)
self.run_once = getattr(server, "run_once", False)
self.rec = None
self.handler_id = getattr(server, "handler_id", False)
self.file_only = getattr(server, "file_only", False)
self.traffic = getattr(server, "traffic", False)
self.web_auth = getattr(server, "web_auth", False)
self.host_token = getattr(server, "host_token", False)
self.logger = getattr(server, "logger", None)
if self.logger is None:
self.logger = WebSockifyServer.get_logger()
super().__init__(req, addr, server)
def log_message(self, format, *args):
self.logger.info("%s - - [%s] %s" % (self.client_address[0], self.log_date_time_string(), format % args))
#
# WebSocketRequestHandler logging/output functions
#
def print_traffic(self, token="."):
""" Show traffic flow mode. """
if self.traffic:
sys.stdout.write(token)
sys.stdout.flush()
def msg(self, msg, *args, **kwargs):
""" Output message with handler_id prefix. """
prefix = "% 3d: " % self.handler_id
self.logger.log(logging.INFO, "%s%s" % (prefix, msg), *args, **kwargs)
def vmsg(self, msg, *args, **kwargs):
""" Same as msg() but as debug. """
prefix = "% 3d: " % self.handler_id
self.logger.log(logging.DEBUG, "%s%s" % (prefix, msg), *args, **kwargs)
def warn(self, msg, *args, **kwargs):
""" Same as msg() but as warning. """
prefix = "% 3d: " % self.handler_id
self.logger.log(logging.WARN, "%s%s" % (prefix, msg), *args, **kwargs)
#
# Main WebSocketRequestHandler methods
#
def send_frames(self, bufs=None):
""" Encode and send WebSocket frames. Any frames already
        queued will be sent first. If bufs is not set then only queued
frames will be sent. Returns True if any frames could not be
fully sent, in which case the caller should call again when
the socket is ready. """
tdelta = int(time.time()*1000) - self.start_time
if bufs:
for buf in bufs:
if self.rec:
# Python 3 compatible conversion
bufstr = buf.decode('latin1').encode('unicode_escape').decode('ascii').replace("'", "\\'")
self.rec.write("'{{{0}{{{1}',\n".format(tdelta, bufstr))
self.send_parts.append(buf)
while self.send_parts:
# Send pending frames
try:
self.request.sendmsg(self.send_parts[0])
except WebSocketWantWriteError:
self.print_traffic("<.")
return True
self.send_parts.pop(0)
self.print_traffic("<")
return False
def recv_frames(self):
""" Receive and decode WebSocket frames.
Returns:
            (bufs_list, closed) where closed is False or a dict with the close 'code' and 'reason'
"""
closed = False
bufs = []
tdelta = int(time.time()*1000) - self.start_time
while True:
try:
buf = self.request.recvmsg()
except WebSocketWantReadError:
self.print_traffic("}.")
break
if buf is None:
closed = {'code': self.request.close_code,
'reason': self.request.close_reason}
return bufs, closed
self.print_traffic("}")
if self.rec:
# Python 3 compatible conversion
bufstr = buf.decode('latin1').encode('unicode_escape').decode('ascii').replace("'", "\\'")
self.rec.write("'}}{0}}}{1}',\n".format(tdelta, bufstr))
bufs.append(buf)
if not self.request.pending():
break
return bufs, closed
def send_close(self, code=1000, reason=''):
""" Send a WebSocket orderly close frame. """
self.request.shutdown(socket.SHUT_RDWR, code, reason)
def send_pong(self, data=b''):
""" Send a WebSocket pong frame. """
self.request.pong(data)
def send_ping(self, data=b''):
""" Send a WebSocket ping frame. """
self.request.ping(data)
def handle_upgrade(self):
# ensure connection is authorized, and determine the target
self.validate_connection()
self.auth_connection()
super().handle_upgrade()
def handle_websocket(self):
# Indicate to server that a Websocket upgrade was done
self.server.ws_connection = True
# Initialize per client settings
self.send_parts = []
self.recv_part = None
self.start_time = int(time.time()*1000)
# client_address is empty with, say, UNIX domain sockets
client_addr = ""
is_ssl = False
try:
client_addr = self.client_address[0]
is_ssl = self.client_address[2]
except IndexError:
pass
if is_ssl:
self.stype = "SSL/TLS (wss://)"
else:
self.stype = "Plain non-SSL (ws://)"
self.log_message("%s: %s WebSocket connection", client_addr,
self.stype)
if self.path != '/':
self.log_message("%s: Path: '%s'", client_addr, self.path)
if self.record:
# Record raw frame data as JavaScript array
fname = "%s.%s" % (self.record,
self.handler_id)
self.log_message("opening record file: %s", fname)
self.rec = open(fname, 'w+')
self.rec.write("var VNC_frame_data = [\n")
try:
self.new_websocket_client()
except self.CClose:
# Close the client
_, exc, _ = sys.exc_info()
self.send_close(exc.args[0], exc.args[1])
def do_GET(self):
if self.web_auth:
# ensure connection is authorized, this seems to apply to list_directory() as well
self.auth_connection()
if self.only_upgrade:
self.send_error(405)
else:
super().do_GET()
def list_directory(self, path):
if self.file_only:
self.send_error(404)
else:
return super().list_directory(path)
def new_websocket_client(self):
""" Do something with a WebSockets client connection. """
raise Exception("WebSocketRequestHandler.new_websocket_client() must be overloaded")
def validate_connection(self):
""" Ensure that the connection has a valid token, and set the target. """
pass
def auth_connection(self):
""" Ensure that the connection is authorized. """
pass
def do_HEAD(self):
if self.web_auth:
self.auth_connection()
if self.only_upgrade:
self.send_error(405)
else:
super().do_HEAD()
def finish(self):
if self.rec:
self.rec.write("'EOF'];\n")
self.rec.close()
super().finish()
def handle(self):
# When using run_once, we have a single process, so
# we cannot loop in BaseHTTPRequestHandler.handle; we
# must return and handle new connections
if self.run_once:
self.handle_one_request()
else:
super().handle()
def log_request(self, code='-', size='-'):
if self.verbose:
super().log_request(code, size)
class WebSockifyServer():
"""
WebSockets server class.
As an alternative, the standard library SocketServer can be used
"""
policy_response = """<cross-domain-policy><allow-access-from domain="*" to-ports="*" /></cross-domain-policy>\n"""
log_prefix = "websocket"
# An exception before the WebSocket connection was established
class EClose(Exception):
pass
class Terminate(Exception):
pass
def __init__(self, RequestHandlerClass, listen_fd=None,
listen_host='', listen_port=None, source_is_ipv6=False,
verbose=False, cert='', key='', key_password=None, ssl_only=None,
verify_client=False, cafile=None,
daemon=False, record='', web='', web_auth=False,
file_only=False,
run_once=False, timeout=0, idle_timeout=0, traffic=False,
tcp_keepalive=True, tcp_keepcnt=None, tcp_keepidle=None,
tcp_keepintvl=None, ssl_ciphers=None, ssl_options=0,
unix_listen=None, unix_listen_mode=None):
# settings
self.RequestHandlerClass = RequestHandlerClass
self.verbose = verbose
self.listen_fd = listen_fd
self.unix_listen = unix_listen
self.unix_listen_mode = unix_listen_mode
self.listen_host = listen_host
self.listen_port = listen_port
self.prefer_ipv6 = source_is_ipv6
self.ssl_only = ssl_only
self.ssl_ciphers = ssl_ciphers
self.ssl_options = ssl_options
self.verify_client = verify_client
self.daemon = daemon
self.run_once = run_once
self.timeout = timeout
self.idle_timeout = idle_timeout
self.traffic = traffic
self.file_only = file_only
self.web_auth = web_auth
self.launch_time = time.time()
self.ws_connection = False
self.handler_id = 1
self.terminating = False
self.logger = self.get_logger()
self.tcp_keepalive = tcp_keepalive
self.tcp_keepcnt = tcp_keepcnt
self.tcp_keepidle = tcp_keepidle
self.tcp_keepintvl = tcp_keepintvl
# keyfile path must be None if not specified
self.key = None
self.key_password = key_password
# Make paths settings absolute
self.cert = os.path.abspath(cert)
self.web = self.record = self.cafile = ''
if key:
self.key = os.path.abspath(key)
if web:
self.web = os.path.abspath(web)
if record:
self.record = os.path.abspath(record)
if cafile:
self.cafile = os.path.abspath(cafile)
if self.web:
os.chdir(self.web)
self.only_upgrade = not self.web
# Sanity checks
if not ssl and self.ssl_only:
raise Exception("No 'ssl' module and SSL-only specified")
if self.daemon and not resource:
raise Exception("Module 'resource' required to daemonize")
# Show configuration
self.msg("WebSocket server settings:")
if self.listen_fd != None:
self.msg(" - Listen for inetd connections")
elif self.unix_listen != None:
self.msg(" - Listen on unix socket %s", self.unix_listen)
else:
self.msg(" - Listen on %s:%s",
self.listen_host, self.listen_port)
if self.web:
if self.file_only:
self.msg(" - Web server (no directory listings). Web root: %s", self.web)
else:
self.msg(" - Web server. Web root: %s", self.web)
if ssl:
if os.path.exists(self.cert):
self.msg(" - SSL/TLS support")
if self.ssl_only:
self.msg(" - Deny non-SSL/TLS connections")
else:
self.msg(" - No SSL/TLS support (no cert file)")
else:
self.msg(" - No SSL/TLS support (no 'ssl' module)")
if self.daemon:
self.msg(" - Backgrounding (daemon)")
if self.record:
self.msg(" - Recording to '%s.*'", self.record)
#
# WebSockifyServer static methods
#
@staticmethod
def get_logger():
return logging.getLogger("%s.%s" % (
WebSockifyServer.log_prefix,
            WebSockifyServer.__name__))
@staticmethod
def socket(host, port=None, connect=False, prefer_ipv6=False,
unix_socket=None, unix_socket_mode=None, unix_socket_listen=False,
use_ssl=False, tcp_keepalive=True, tcp_keepcnt=None,
tcp_keepidle=None, tcp_keepintvl=None):
""" Resolve a host (and optional port) to an IPv4 or IPv6
address. Create a socket. Bind to it if listen is set,
otherwise connect to it. Return the socket.
"""
flags = 0
if host == '':
host = None
if connect and not (port or unix_socket):
raise Exception("Connect mode requires a port")
if use_ssl and not ssl:
raise Exception("SSL socket requested but Python SSL module not loaded.");
if not connect and use_ssl:
raise Exception("SSL only supported in connect mode (for now)")
if not connect:
flags = flags | socket.AI_PASSIVE
if not unix_socket:
addrs = socket.getaddrinfo(host, port, 0, socket.SOCK_STREAM,
socket.IPPROTO_TCP, flags)
if not addrs:
raise Exception("Could not resolve host '%s'" % host)
addrs.sort(key=lambda x: x[0])
if prefer_ipv6:
addrs.reverse()
sock = socket.socket(addrs[0][0], addrs[0][1])
if tcp_keepalive:
sock.setsockopt(socket.SOL_SOCKET, socket.SO_KEEPALIVE, 1)
if tcp_keepcnt:
sock.setsockopt(socket.SOL_TCP, socket.TCP_KEEPCNT,
tcp_keepcnt)
if tcp_keepidle:
sock.setsockopt(socket.SOL_TCP, socket.TCP_KEEPIDLE,
tcp_keepidle)
if tcp_keepintvl:
sock.setsockopt(socket.SOL_TCP, socket.TCP_KEEPINTVL,
tcp_keepintvl)
if connect:
sock.connect(addrs[0][4])
if use_ssl:
context = ssl.create_default_context()
sock = context.wrap_socket(sock, server_hostname=host)
else:
sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
sock.bind(addrs[0][4])
sock.listen(100)
else:
if unix_socket_listen:
# Make sure the socket does not already exist
try:
os.unlink(unix_socket)
except FileNotFoundError:
pass
sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
oldmask = os.umask(0o777 ^ unix_socket_mode)
try:
sock.bind(unix_socket)
finally:
os.umask(oldmask)
sock.listen(100)
else:
sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
sock.connect(unix_socket)
return sock
@staticmethod
def daemonize(keepfd=None, chdir='/'):
if keepfd is None:
keepfd = []
os.umask(0)
if chdir:
os.chdir(chdir)
else:
os.chdir('/')
os.setgid(os.getgid()) # relinquish elevations
os.setuid(os.getuid()) # relinquish elevations
# Double fork to daemonize
if os.fork() > 0: os._exit(0) # Parent exits
os.setsid() # Obtain new process group
if os.fork() > 0: os._exit(0) # Parent exits
# Signal handling
signal.signal(signal.SIGTERM, signal.SIG_IGN)
signal.signal(signal.SIGINT, signal.SIG_IGN)
# Close open files
maxfd = resource.getrlimit(resource.RLIMIT_NOFILE)[1]
if maxfd == resource.RLIM_INFINITY: maxfd = 256
for fd in reversed(range(maxfd)):
try:
if fd not in keepfd:
os.close(fd)
except OSError:
_, exc, _ = sys.exc_info()
if exc.errno != errno.EBADF: raise
# Redirect I/O to /dev/null
os.dup2(os.open(os.devnull, os.O_RDWR), sys.stdin.fileno())
os.dup2(os.open(os.devnull, os.O_RDWR), sys.stdout.fileno())
os.dup2(os.open(os.devnull, os.O_RDWR), sys.stderr.fileno())
def do_handshake(self, sock, address):
"""
do_handshake does the following:
- Peek at the first few bytes from the socket.
- If the connection is an HTTPS/SSL/TLS connection then SSL
wrap the socket.
- Read from the (possibly wrapped) socket.
        - If we have received an HTTP GET request and the webserver
functionality is enabled, answer it, close the socket and
return.
- Assume we have a WebSockets connection, parse the client
handshake data.
- Send a WebSockets handshake server response.
- Return the socket for this WebSocket client.
"""
ready = select.select([sock], [], [], 3)[0]
if not ready:
raise self.EClose("")
        # Peek, but do not read the data so that we have an opportunity
# to SSL wrap the socket first
handshake = sock.recv(1024, socket.MSG_PEEK)
#self.msg("Handshake [%s]" % handshake)
if not handshake:
raise self.EClose("")
elif handshake[0] in (22, 128):
# SSL wrap the connection
if not ssl:
raise self.EClose("SSL connection but no 'ssl' module")
if not os.path.exists(self.cert):
raise self.EClose("SSL connection but '%s' not found"
% self.cert)
retsock = None
try:
# create new-style SSL wrapping for extended features
context = ssl.create_default_context(ssl.Purpose.CLIENT_AUTH)
if self.ssl_ciphers is not None:
context.set_ciphers(self.ssl_ciphers)
context.options = self.ssl_options
context.load_cert_chain(certfile=self.cert, keyfile=self.key, password=self.key_password)
if self.verify_client:
context.verify_mode = ssl.CERT_REQUIRED
if self.cafile:
context.load_verify_locations(cafile=self.cafile)
else:
context.set_default_verify_paths()
retsock = context.wrap_socket(
sock,
server_side=True)
except ssl.SSLError:
_, x, _ = sys.exc_info()
if x.args[0] == ssl.SSL_ERROR_EOF:
if len(x.args) > 1:
raise self.EClose(x.args[1])
else:
raise self.EClose("Got SSL_ERROR_EOF")
else:
raise
elif self.ssl_only:
raise self.EClose("non-SSL connection received but disallowed")
else:
retsock = sock
# If the address is like (host, port), we are extending it
# with a flag indicating SSL. Not many other options
# available...
if len(address) == 2:
address = (address[0], address[1], (retsock != sock))
self.RequestHandlerClass(retsock, address, self)
# Return the WebSockets socket which may be SSL wrapped
return retsock
#
# WebSockifyServer logging/output functions
#
def msg(self, *args, **kwargs):
""" Output message as info """
self.logger.log(logging.INFO, *args, **kwargs)
def vmsg(self, *args, **kwargs):
""" Same as msg() but as debug. """
self.logger.log(logging.DEBUG, *args, **kwargs)
def warn(self, *args, **kwargs):
""" Same as msg() but as warning. """
self.logger.log(logging.WARN, *args, **kwargs)
#
# Events that can/should be overridden in sub-classes
#
def started(self):
""" Called after WebSockets startup """
self.vmsg("WebSockets server started")
def poll(self):
""" Run periodically while waiting for connections. """
#self.vmsg("Running poll()")
pass
def terminate(self):
if not self.terminating:
self.terminating = True
raise self.Terminate()
def multiprocessing_SIGCHLD(self, sig, stack):
# TODO: figure out a way to actually log this information without
# calling `log` in the signal handlers
multiprocessing.active_children()
def fallback_SIGCHLD(self, sig, stack):
# Reap zombies when using os.fork() (python 2.4)
# TODO: figure out a way to actually log this information without
# calling `log` in the signal handlers
try:
result = os.waitpid(-1, os.WNOHANG)
while result[0]:
self.vmsg("Reaped child process %s" % result[0])
result = os.waitpid(-1, os.WNOHANG)
except (OSError):
pass
def do_SIGINT(self, sig, stack):
# TODO: figure out a way to actually log this information without
# calling `log` in the signal handlers
self.terminate()
def do_SIGTERM(self, sig, stack):
# TODO: figure out a way to actually log this information without
# calling `log` in the signal handlers
self.terminate()
def top_new_client(self, startsock, address):
""" Do something with a WebSockets client connection. """
# handler process
client = None
try:
try:
client = self.do_handshake(startsock, address)
except self.EClose:
_, exc, _ = sys.exc_info()
# Connection was not a WebSockets connection
if exc.args[0]:
self.msg("%s: %s" % (address[0], exc.args[0]))
except WebSockifyServer.Terminate:
raise
except Exception:
_, exc, _ = sys.exc_info()
self.msg("handler exception: %s" % str(exc))
self.vmsg("exception", exc_info=True)
finally:
if client and client != startsock:
# Close the SSL wrapped socket
# Original socket closed by caller
client.close()
def get_log_fd(self):
"""
Get file descriptors for the loggers.
They should not be closed when the process is forked.
"""
descriptors = []
for handler in self.logger.parent.handlers:
if isinstance(handler, logging.FileHandler):
descriptors.append(handler.stream.fileno())
return descriptors
def start_server(self):
"""
        Daemonize if requested. Listen for connections. Run
do_handshake() method for each connection. If the connection
is a WebSockets client then call new_websocket_client() method (which must
be overridden) for each new client connection.
"""
try:
if self.listen_fd != None:
lsock = socket.fromfd(self.listen_fd, socket.AF_INET, socket.SOCK_STREAM)
elif self.unix_listen != None:
lsock = self.socket(host=None,
unix_socket=self.unix_listen,
unix_socket_mode=self.unix_listen_mode,
unix_socket_listen=True)
else:
lsock = self.socket(self.listen_host, self.listen_port, False,
self.prefer_ipv6,
tcp_keepalive=self.tcp_keepalive,
tcp_keepcnt=self.tcp_keepcnt,
tcp_keepidle=self.tcp_keepidle,
tcp_keepintvl=self.tcp_keepintvl)
except OSError as e:
self.msg("Openening socket failed: %s", str(e))
self.vmsg("exception", exc_info=True)
sys.exit()
if self.daemon:
keepfd = self.get_log_fd()
keepfd.append(lsock.fileno())
self.daemonize(keepfd=keepfd, chdir=self.web)
self.started() # Some things need to happen after daemonizing
# Allow override of signals
original_signals = {
signal.SIGINT: signal.getsignal(signal.SIGINT),
signal.SIGTERM: signal.getsignal(signal.SIGTERM),
}
if getattr(signal, 'SIGCHLD', None) is not None:
original_signals[signal.SIGCHLD] = signal.getsignal(signal.SIGCHLD)
signal.signal(signal.SIGINT, self.do_SIGINT)
signal.signal(signal.SIGTERM, self.do_SIGTERM)
# make sure that _cleanup is called when children die
# by calling active_children on SIGCHLD
if getattr(signal, 'SIGCHLD', None) is not None:
signal.signal(signal.SIGCHLD, self.multiprocessing_SIGCHLD)
last_active_time = self.launch_time
try:
while True:
try:
try:
startsock = None
pid = err = 0
child_count = 0
# Collect zombie child processes
child_count = len(multiprocessing.active_children())
time_elapsed = time.time() - self.launch_time
if self.timeout and time_elapsed > self.timeout:
self.msg('listener exit due to --timeout %s'
% self.timeout)
break
if self.idle_timeout:
idle_time = 0
if child_count == 0:
idle_time = time.time() - last_active_time
else:
idle_time = 0
last_active_time = time.time()
if idle_time > self.idle_timeout and child_count == 0:
self.msg('listener exit due to --idle-timeout %s'
% self.idle_timeout)
break
try:
self.poll()
ready = select.select([lsock], [], [], 1)[0]
if lsock in ready:
startsock, address = lsock.accept()
# Unix Socket will not report address (empty string), but address[0] is logged a bunch
if self.unix_listen != None:
address = [ self.unix_listen ]
else:
continue
except self.Terminate:
raise
except Exception:
_, exc, _ = sys.exc_info()
if hasattr(exc, 'errno'):
err = exc.errno
elif hasattr(exc, 'args'):
err = exc.args[0]
else:
err = exc[0]
if err == errno.EINTR:
self.vmsg("Ignoring interrupted syscall")
continue
else:
raise
if self.run_once:
# Run in same process if run_once
self.top_new_client(startsock, address)
if self.ws_connection :
self.msg('%s: exiting due to --run-once'
% address[0])
break
else:
self.vmsg('%s: new handler Process' % address[0])
p = multiprocessing.Process(
target=self.top_new_client,
args=(startsock, address))
p.start()
# child will not return
# parent process
self.handler_id += 1
except (self.Terminate, SystemExit, KeyboardInterrupt):
self.msg("In exit")
# terminate all child processes
if not self.run_once:
children = multiprocessing.active_children()
for child in children:
self.msg("Terminating child %s" % child.pid)
child.terminate()
break
except Exception:
exc = sys.exc_info()[1]
self.msg("handler exception: %s", str(exc))
self.vmsg("exception", exc_info=True)
finally:
if startsock:
startsock.close()
finally:
# Close listen port
self.vmsg("Closing socket listening at %s:%s",
self.listen_host, self.listen_port)
lsock.close()
# Restore signals
for sig, func in original_signals.items():
signal.signal(sig, func)
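# Illustrative sketch, not part of the module: the sub-classing contract the
# WebSockifyRequestHandler docstring describes (new_websocket_client() must
# be overridden). recv_frames()/send_frames() are the helpers defined above;
# the upper-casing handler below is hypothetical and assumes blocking sockets.
if __name__ == '__main__':
    class UpperCaseHandler(WebSockifyRequestHandler):
        def new_websocket_client(self):
            while True:
                bufs, closed = self.recv_frames()
                self.send_frames([buf.upper() for buf in bufs])
                if closed:
                    break
    WebSockifyServer(UpperCaseHandler,
                     listen_host='localhost', listen_port=6080).start_server()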
==== File: novnc_websockify/websockify/websocketproxy.py (repo: novnc/websockify, license: LGPL-3.0) ====
#!/usr/bin/env python
'''
A WebSocket to TCP socket proxy with support for "wss://" encryption.
Copyright 2011 Joel Martin
Licensed under LGPL version 3 (see docs/LICENSE.LGPL-3)
You can make a cert/key with openssl using:
openssl req -new -x509 -days 365 -nodes -out self.pem -keyout self.pem
as taken from http://docs.python.org/dev/library/ssl.html#certificates
'''
import signal, socket, optparse, time, os, sys, subprocess, logging, errno, ssl, stat
from socketserver import ThreadingMixIn
from http.server import HTTPServer
import select
from websockify import websockifyserver
from websockify import auth_plugins as auth
from urllib.parse import parse_qs, urlparse
class ProxyRequestHandler(websockifyserver.WebSockifyRequestHandler):
buffer_size = 65536
traffic_legend = """
Traffic Legend:
} - Client receive
}. - Client receive partial
{ - Target receive
> - Target send
>. - Target send partial
< - Client send
<. - Client send partial
"""
def send_auth_error(self, ex):
self.send_response(ex.code, ex.msg)
self.send_header('Content-Type', 'text/html')
for name, val in ex.headers.items():
self.send_header(name, val)
self.end_headers()
def validate_connection(self):
if not self.server.token_plugin:
return
host, port = self.get_target(self.server.token_plugin)
if host == 'unix_socket':
self.server.unix_target = port
else:
self.server.target_host = host
self.server.target_port = port
def auth_connection(self):
if not self.server.auth_plugin:
return
try:
# get client certificate data
client_cert_data = self.request.getpeercert()
# extract subject information
client_cert_subject = client_cert_data['subject']
# flatten data structure
client_cert_subject = dict([x[0] for x in client_cert_subject])
# add common name to headers (apache +StdEnvVars style)
self.headers['SSL_CLIENT_S_DN_CN'] = client_cert_subject['commonName']
except (TypeError, AttributeError, KeyError):
# not a SSL connection or client presented no certificate with valid data
pass
try:
self.server.auth_plugin.authenticate(
headers=self.headers, target_host=self.server.target_host,
target_port=self.server.target_port)
except auth.AuthenticationError:
ex = sys.exc_info()[1]
self.send_auth_error(ex)
raise
def new_websocket_client(self):
"""
Called after a new WebSocket connection has been established.
"""
# Checking for a token is done in validate_connection()
# Connect to the target
if self.server.wrap_cmd:
msg = "connecting to command: '%s' (port %s)" % (" ".join(self.server.wrap_cmd), self.server.target_port)
elif self.server.unix_target:
msg = "connecting to unix socket: %s" % self.server.unix_target
else:
msg = "connecting to: %s:%s" % (
self.server.target_host, self.server.target_port)
if self.server.ssl_target:
msg += " (using SSL)"
self.log_message(msg)
try:
tsock = websockifyserver.WebSockifyServer.socket(self.server.target_host,
self.server.target_port,
connect=True,
use_ssl=self.server.ssl_target,
unix_socket=self.server.unix_target)
except Exception as e:
self.log_message("Failed to connect to %s:%s: %s",
self.server.target_host, self.server.target_port, e)
raise self.CClose(1011, "Failed to connect to downstream server")
# Option unavailable when listening to unix socket
if not self.server.unix_listen:
self.request.setsockopt(socket.SOL_TCP, socket.TCP_NODELAY, 1)
if not self.server.wrap_cmd and not self.server.unix_target:
tsock.setsockopt(socket.SOL_TCP, socket.TCP_NODELAY, 1)
self.print_traffic(self.traffic_legend)
# Start proxying
try:
self.do_proxy(tsock)
finally:
if tsock:
tsock.shutdown(socket.SHUT_RDWR)
tsock.close()
if self.verbose:
self.log_message("%s:%s: Closed target",
self.server.target_host, self.server.target_port)
def get_target(self, target_plugin):
"""
Gets a token from either the path or the host,
depending on --host-token, and looks up a target
for that token using the token plugin. Used by
validate_connection() to set target_host and target_port.
"""
        # The token configuration files contain lines
        # in the form 'token: host:port'
if self.host_token:
# Use hostname as token
token = self.headers.get('Host')
# Remove port from hostname, as it'll always be the one where
# websockify listens (unless something between the client and
# websockify is redirecting traffic, but that's beside the point)
if token:
token = token.partition(':')[0]
else:
# Extract the token parameter from url
args = parse_qs(urlparse(self.path)[4]) # 4 is the query from url
if 'token' in args and len(args['token']):
token = args['token'][0].rstrip('\n')
else:
token = None
if token is None:
raise self.server.EClose("Token not present")
result_pair = target_plugin.lookup(token)
if result_pair is not None:
return result_pair
else:
raise self.server.EClose("Token '%s' not found" % token)
def do_proxy(self, target):
"""
Proxy client WebSocket to normal target socket.
"""
cqueue = []
c_pend = 0
tqueue = []
rlist = [self.request, target]
if self.server.heartbeat:
now = time.time()
self.heartbeat = now + self.server.heartbeat
else:
self.heartbeat = None
while True:
wlist = []
if self.heartbeat is not None:
now = time.time()
if now > self.heartbeat:
self.heartbeat = now + self.server.heartbeat
self.send_ping()
if tqueue: wlist.append(target)
if cqueue or c_pend: wlist.append(self.request)
try:
ins, outs, excepts = select.select(rlist, wlist, [], 1)
except OSError:
exc = sys.exc_info()[1]
if hasattr(exc, 'errno'):
err = exc.errno
else:
err = exc[0]
if err != errno.EINTR:
raise
else:
continue
if excepts: raise Exception("Socket exception")
if self.request in outs:
# Send queued target data to the client
c_pend = self.send_frames(cqueue)
cqueue = []
if self.request in ins:
# Receive client data, decode it, and queue for target
bufs, closed = self.recv_frames()
tqueue.extend(bufs)
if closed:
while (len(tqueue) != 0):
# Send queued client data to the target
dat = tqueue.pop(0)
sent = target.send(dat)
if sent == len(dat):
self.print_traffic(">")
else:
# requeue the remaining data
tqueue.insert(0, dat[sent:])
self.print_traffic(".>")
# TODO: What about blocking on client socket?
if self.verbose:
self.log_message("%s:%s: Client closed connection",
self.server.target_host, self.server.target_port)
raise self.CClose(closed['code'], closed['reason'])
if target in outs:
# Send queued client data to the target
dat = tqueue.pop(0)
sent = target.send(dat)
if sent == len(dat):
self.print_traffic(">")
else:
# requeue the remaining data
tqueue.insert(0, dat[sent:])
self.print_traffic(".>")
if target in ins:
# Receive target data, encode it and queue for client
buf = target.recv(self.buffer_size)
if len(buf) == 0:
# Target socket closed, flushing queues and closing client-side websocket
# Send queued target data to the client
if len(cqueue) != 0:
c_pend = True
while(c_pend):
c_pend = self.send_frames(cqueue)
cqueue = []
if self.verbose:
self.log_message("%s:%s: Target closed connection",
self.server.target_host, self.server.target_port)
raise self.CClose(1000, "Target closed")
cqueue.append(buf)
self.print_traffic("{")
class WebSocketProxy(websockifyserver.WebSockifyServer):
"""
Proxy traffic to and from a WebSockets client to a normal TCP
socket server target.
"""
buffer_size = 65536
def __init__(self, RequestHandlerClass=ProxyRequestHandler, *args, **kwargs):
# Save off proxy specific options
self.target_host = kwargs.pop('target_host', None)
self.target_port = kwargs.pop('target_port', None)
self.wrap_cmd = kwargs.pop('wrap_cmd', None)
self.wrap_mode = kwargs.pop('wrap_mode', None)
self.unix_target = kwargs.pop('unix_target', None)
self.ssl_target = kwargs.pop('ssl_target', None)
self.heartbeat = kwargs.pop('heartbeat', None)
self.token_plugin = kwargs.pop('token_plugin', None)
self.host_token = kwargs.pop('host_token', None)
self.auth_plugin = kwargs.pop('auth_plugin', None)
# Last 3 timestamps command was run
self.wrap_times = [0, 0, 0]
if self.wrap_cmd:
wsdir = os.path.dirname(sys.argv[0])
rebinder_path = [os.path.join(wsdir, "..", "lib"),
os.path.join(wsdir, "..", "lib", "websockify"),
os.path.join(wsdir, ".."),
wsdir]
self.rebinder = None
for rdir in rebinder_path:
rpath = os.path.join(rdir, "rebind.so")
if os.path.exists(rpath):
self.rebinder = rpath
break
if not self.rebinder:
raise Exception("rebind.so not found, perhaps you need to run make")
self.rebinder = os.path.abspath(self.rebinder)
self.target_host = "127.0.0.1" # Loopback
# Find a free high port
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.bind(('', 0))
self.target_port = sock.getsockname()[1]
sock.close()
# Insert rebinder at the head of the (possibly empty) LD_PRELOAD pathlist
ld_preloads = filter(None, [ self.rebinder, os.environ.get("LD_PRELOAD", None) ])
os.environ.update({
"LD_PRELOAD": os.pathsep.join(ld_preloads),
"REBIND_OLD_PORT": str(kwargs['listen_port']),
"REBIND_NEW_PORT": str(self.target_port)})
super().__init__(RequestHandlerClass, *args, **kwargs)
def run_wrap_cmd(self):
self.msg("Starting '%s'", " ".join(self.wrap_cmd))
self.wrap_times.append(time.time())
self.wrap_times.pop(0)
self.cmd = subprocess.Popen(
self.wrap_cmd, env=os.environ, preexec_fn=_subprocess_setup)
self.spawn_message = True
def started(self):
"""
Called after Websockets server startup (i.e. after daemonize)
"""
# Need to call wrapped command after daemonization so we can
# know when the wrapped command exits
if self.wrap_cmd:
dst_string = "'%s' (port %s)" % (" ".join(self.wrap_cmd), self.target_port)
elif self.unix_target:
dst_string = self.unix_target
else:
dst_string = "%s:%s" % (self.target_host, self.target_port)
if self.listen_fd != None:
src_string = "inetd"
else:
src_string = "%s:%s" % (self.listen_host, self.listen_port)
if self.token_plugin:
msg = " - proxying from %s to targets generated by %s" % (
src_string, type(self.token_plugin).__name__)
else:
msg = " - proxying from %s to %s" % (
src_string, dst_string)
if self.ssl_target:
msg += " (using SSL)"
self.msg("%s", msg)
if self.wrap_cmd:
self.run_wrap_cmd()
def poll(self):
        # If we are wrapping a command, check its status
if self.wrap_cmd and self.cmd:
ret = self.cmd.poll()
if ret != None:
self.vmsg("Wrapped command exited (or daemon). Returned %s" % ret)
self.cmd = None
if self.wrap_cmd and self.cmd == None:
# Response to wrapped command being gone
if self.wrap_mode == "ignore":
pass
elif self.wrap_mode == "exit":
sys.exit(ret)
elif self.wrap_mode == "respawn":
now = time.time()
avg = sum(self.wrap_times)/len(self.wrap_times)
if (now - avg) < 10:
# 3 times in the last 10 seconds
if self.spawn_message:
self.warn("Command respawning too fast")
self.spawn_message = False
else:
self.run_wrap_cmd()
def _subprocess_setup():
# Python installs a SIGPIPE handler by default. This is usually not what
    # non-Python subprocesses expect.
signal.signal(signal.SIGPIPE, signal.SIG_DFL)
SSL_OPTIONS = {
'default': ssl.OP_ALL,
'tlsv1_1': ssl.PROTOCOL_SSLv23 | ssl.OP_NO_SSLv2 | ssl.OP_NO_SSLv3 |
ssl.OP_NO_TLSv1,
'tlsv1_2': ssl.PROTOCOL_SSLv23 | ssl.OP_NO_SSLv2 | ssl.OP_NO_SSLv3 |
ssl.OP_NO_TLSv1 | ssl.OP_NO_TLSv1_1,
'tlsv1_3': ssl.PROTOCOL_SSLv23 | ssl.OP_NO_SSLv2 | ssl.OP_NO_SSLv3 |
ssl.OP_NO_TLSv1 | ssl.OP_NO_TLSv1_1 | ssl.OP_NO_TLSv1_2,
}
def select_ssl_version(version):
"""Returns SSL options for the most secure TSL version available on this
Python version"""
if version in SSL_OPTIONS:
return SSL_OPTIONS[version]
else:
# It so happens that version names sorted lexicographically form a list
# from the least to the most secure
keys = list(SSL_OPTIONS.keys())
keys.sort()
fallback = keys[-1]
logger = logging.getLogger(WebSocketProxy.log_prefix)
logger.warn("TLS version %s unsupported. Falling back to %s",
version, fallback)
return SSL_OPTIONS[fallback]
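# Illustrative sketch, not part of the module: how the SSL_OPTIONS table above
# is meant to be consumed. select_ssl_version() falls back to the most
# restrictive entry for unknown names, so its result can always be OR-ed into
# a context's options. The function name is hypothetical.
def _example_build_tls_context(version='tlsv1_2'):
    context = ssl.create_default_context(ssl.Purpose.CLIENT_AUTH)
    context.options |= select_ssl_version(version)
    return context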
def websockify_init():
# Setup basic logging to stderr.
stderr_handler = logging.StreamHandler()
stderr_handler.setLevel(logging.DEBUG)
log_formatter = logging.Formatter("%(message)s")
stderr_handler.setFormatter(log_formatter)
root = logging.getLogger()
root.addHandler(stderr_handler)
root.setLevel(logging.INFO)
# Setup optparse.
usage = "\n %prog [options]"
usage += " [source_addr:]source_port target_addr:target_port"
usage += "\n %prog [options]"
usage += " --token-plugin=CLASS [source_addr:]source_port"
usage += "\n %prog [options]"
usage += " --unix-target=FILE [source_addr:]source_port"
usage += "\n %prog [options]"
usage += " [source_addr:]source_port -- WRAP_COMMAND_LINE"
parser = optparse.OptionParser(usage=usage)
parser.add_option("--verbose", "-v", action="store_true",
help="verbose messages")
parser.add_option("--traffic", action="store_true",
help="per frame traffic")
parser.add_option("--record",
help="record sessions to FILE.[session_number]", metavar="FILE")
parser.add_option("--daemon", "-D",
dest="daemon", action="store_true",
help="become a daemon (background process)")
parser.add_option("--run-once", action="store_true",
help="handle a single WebSocket connection and exit")
parser.add_option("--timeout", type=int, default=0,
help="after TIMEOUT seconds exit when not connected")
parser.add_option("--idle-timeout", type=int, default=0,
help="server exits after TIMEOUT seconds if there are no "
"active connections")
parser.add_option("--cert", default="self.pem",
help="SSL certificate file")
parser.add_option("--key", default=None,
help="SSL key file (if separate from cert)")
parser.add_option("--key-password", default=None,
help="SSL key password")
parser.add_option("--ssl-only", action="store_true",
help="disallow non-encrypted client connections")
parser.add_option("--ssl-target", action="store_true",
help="connect to SSL target as SSL client")
parser.add_option("--verify-client", action="store_true",
help="require encrypted client to present a valid certificate "
"(needs Python 2.7.9 or newer or Python 3.4 or newer)")
parser.add_option("--cafile", metavar="FILE",
help="file of concatenated certificates of authorities trusted "
"for validating clients (only effective with --verify-client). "
"If omitted, system default list of CAs is used.")
parser.add_option("--ssl-version", type="choice", default="default",
choices=["default", "tlsv1_1", "tlsv1_2", "tlsv1_3"], action="store",
help="minimum TLS version to use (default, tlsv1_1, tlsv1_2, tlsv1_3)")
parser.add_option("--ssl-ciphers", action="store",
help="list of ciphers allowed for connection. For a list of "
"supported ciphers run `openssl ciphers`")
parser.add_option("--unix-listen",
help="listen to unix socket", metavar="FILE", default=None)
parser.add_option("--unix-listen-mode", default=None,
help="specify mode for unix socket (defaults to 0600)")
parser.add_option("--unix-target",
help="connect to unix socket target", metavar="FILE")
parser.add_option("--inetd",
help="inetd mode, receive listening socket from stdin", action="store_true")
parser.add_option("--web", default=None, metavar="DIR",
help="run webserver on same port. Serve files from DIR.")
parser.add_option("--web-auth", action="store_true",
help="require authentication to access webserver.")
parser.add_option("--wrap-mode", default="exit", metavar="MODE",
choices=["exit", "ignore", "respawn"],
help="action to take when the wrapped program exits "
"or daemonizes: exit (default), ignore, respawn")
parser.add_option("--prefer-ipv6", "-6",
action="store_true", dest="source_is_ipv6",
help="prefer IPv6 when resolving source_addr")
parser.add_option("--libserver", action="store_true",
help="use Python library SocketServer engine")
parser.add_option("--target-config", metavar="FILE",
dest="target_cfg",
help="Configuration file containing valid targets "
"in the form 'token: host:port' or, alternatively, a "
"directory containing configuration files of this form "
"(DEPRECATED: use `--token-plugin TokenFile --token-source "
" path/to/token/file` instead)")
parser.add_option("--token-plugin", default=None, metavar="CLASS",
help="use a Python class, usually one from websockify.token_plugins, "
"such as TokenFile, to process tokens into host:port pairs")
parser.add_option("--token-source", default=None, metavar="ARG",
help="an argument to be passed to the token plugin "
"on instantiation")
parser.add_option("--host-token", action="store_true",
help="use the host HTTP header as token instead of the "
"token URL query parameter")
parser.add_option("--auth-plugin", default=None, metavar="CLASS",
help="use a Python class, usually one from websockify.auth_plugins, "
"such as BasicHTTPAuth, to determine if a connection is allowed")
parser.add_option("--auth-source", default=None, metavar="ARG",
help="an argument to be passed to the auth plugin "
"on instantiation")
parser.add_option("--heartbeat", type=int, default=0, metavar="INTERVAL",
help="send a ping to the client every INTERVAL seconds")
parser.add_option("--log-file", metavar="FILE",
dest="log_file",
help="File where logs will be saved")
parser.add_option("--syslog", default=None, metavar="SERVER",
help="Log to syslog server. SERVER can be local socket, "
"such as /dev/log, or a UDP host:port pair.")
parser.add_option("--legacy-syslog", action="store_true",
help="Use the old syslog protocol instead of RFC 5424. "
"Use this if the messages produced by websockify seem abnormal.")
parser.add_option("--file-only", action="store_true",
help="use this to disable directory listings in web server.")
(opts, args) = parser.parse_args()
# Validate options.
if opts.token_source and not opts.token_plugin:
parser.error("You must use --token-plugin to use --token-source")
if opts.host_token and not opts.token_plugin:
parser.error("You must use --token-plugin to use --host-token")
if opts.auth_source and not opts.auth_plugin:
parser.error("You must use --auth-plugin to use --auth-source")
if opts.web_auth and not opts.auth_plugin:
parser.error("You must use --auth-plugin to use --web-auth")
if opts.web_auth and not opts.web:
parser.error("You must use --web to use --web-auth")
if opts.legacy_syslog and not opts.syslog:
parser.error("You must use --syslog to use --legacy-syslog")
opts.ssl_options = select_ssl_version(opts.ssl_version)
del opts.ssl_version
if opts.log_file:
# Setup logging to user-specified file.
opts.log_file = os.path.abspath(opts.log_file)
log_file_handler = logging.FileHandler(opts.log_file)
log_file_handler.setLevel(logging.DEBUG)
log_file_handler.setFormatter(log_formatter)
root = logging.getLogger()
root.addHandler(log_file_handler)
del opts.log_file
if opts.syslog:
# Determine how to connect to syslog...
if opts.syslog.count(':'):
# User supplied a host:port pair.
syslog_host, syslog_port = opts.syslog.rsplit(':', 1)
try:
syslog_port = int(syslog_port)
except ValueError:
parser.error("Error parsing syslog port")
syslog_dest = (syslog_host, syslog_port)
else:
# User supplied a local socket file.
syslog_dest = os.path.abspath(opts.syslog)
from websockify.sysloghandler import WebsockifySysLogHandler
# Determine syslog facility.
if opts.daemon:
syslog_facility = WebsockifySysLogHandler.LOG_DAEMON
else:
syslog_facility = WebsockifySysLogHandler.LOG_USER
# Start logging to syslog.
syslog_handler = WebsockifySysLogHandler(address=syslog_dest,
facility=syslog_facility,
ident='websockify',
legacy=opts.legacy_syslog)
syslog_handler.setLevel(logging.DEBUG)
syslog_handler.setFormatter(log_formatter)
root = logging.getLogger()
root.addHandler(syslog_handler)
del opts.syslog
del opts.legacy_syslog
if opts.verbose:
root = logging.getLogger()
root.setLevel(logging.DEBUG)
# Transform to absolute path as daemon may chdir
if opts.target_cfg:
opts.target_cfg = os.path.abspath(opts.target_cfg)
if opts.target_cfg:
opts.token_plugin = 'TokenFile'
opts.token_source = opts.target_cfg
del opts.target_cfg
if sys.argv.count('--'):
opts.wrap_cmd = args[1:]
else:
opts.wrap_cmd = None
if not websockifyserver.ssl and opts.ssl_target:
parser.error("SSL target requested and Python SSL module not loaded.");
if opts.ssl_only and not os.path.exists(opts.cert):
parser.error("SSL only and %s not found" % opts.cert)
if opts.inetd:
opts.listen_fd = sys.stdin.fileno()
elif opts.unix_listen:
if opts.unix_listen_mode:
try:
# Parse octal notation (like 750)
opts.unix_listen_mode = int(opts.unix_listen_mode, 8)
except ValueError:
parser.error("Error parsing listen unix socket mode")
else:
# Default to 0600 (Owner Read/Write)
opts.unix_listen_mode = stat.S_IREAD | stat.S_IWRITE
else:
if len(args) < 1:
parser.error("Too few arguments")
arg = args.pop(0)
# Parse host:port and convert ports to numbers
if arg.count(':') > 0:
opts.listen_host, opts.listen_port = arg.rsplit(':', 1)
opts.listen_host = opts.listen_host.strip('[]')
else:
opts.listen_host, opts.listen_port = '', arg
try:
opts.listen_port = int(opts.listen_port)
except ValueError:
parser.error("Error parsing listen port")
del opts.inetd
if opts.wrap_cmd or opts.unix_target or opts.token_plugin:
opts.target_host = None
opts.target_port = None
else:
if len(args) < 1:
parser.error("Too few arguments")
arg = args.pop(0)
if arg.count(':') > 0:
opts.target_host, opts.target_port = arg.rsplit(':', 1)
opts.target_host = opts.target_host.strip('[]')
else:
parser.error("Error parsing target")
try:
opts.target_port = int(opts.target_port)
except ValueError:
parser.error("Error parsing target port")
    if len(args) > 0 and opts.wrap_cmd is None:
parser.error("Too many arguments")
if opts.token_plugin is not None:
if '.' not in opts.token_plugin:
opts.token_plugin = (
'websockify.token_plugins.%s' % opts.token_plugin)
token_plugin_module, token_plugin_cls = opts.token_plugin.rsplit('.', 1)
__import__(token_plugin_module)
token_plugin_cls = getattr(sys.modules[token_plugin_module], token_plugin_cls)
opts.token_plugin = token_plugin_cls(opts.token_source)
del opts.token_source
if opts.auth_plugin is not None:
if '.' not in opts.auth_plugin:
opts.auth_plugin = 'websockify.auth_plugins.%s' % opts.auth_plugin
auth_plugin_module, auth_plugin_cls = opts.auth_plugin.rsplit('.', 1)
__import__(auth_plugin_module)
auth_plugin_cls = getattr(sys.modules[auth_plugin_module], auth_plugin_cls)
opts.auth_plugin = auth_plugin_cls(opts.auth_source)
del opts.auth_source
# Create and start the WebSockets proxy
libserver = opts.libserver
del opts.libserver
if libserver:
# Use standard Python SocketServer framework
server = LibProxyServer(**opts.__dict__)
server.serve_forever()
else:
# Use internal service framework
server = WebSocketProxy(**opts.__dict__)
server.start_server()
class LibProxyServer(ThreadingMixIn, HTTPServer):
"""
Just like WebSocketProxy, but uses standard Python SocketServer
framework.
"""
def __init__(self, RequestHandlerClass=ProxyRequestHandler, **kwargs):
# Save off proxy specific options
self.target_host = kwargs.pop('target_host', None)
self.target_port = kwargs.pop('target_port', None)
self.wrap_cmd = kwargs.pop('wrap_cmd', None)
self.wrap_mode = kwargs.pop('wrap_mode', None)
self.unix_target = kwargs.pop('unix_target', None)
self.ssl_target = kwargs.pop('ssl_target', None)
self.token_plugin = kwargs.pop('token_plugin', None)
self.auth_plugin = kwargs.pop('auth_plugin', None)
self.heartbeat = kwargs.pop('heartbeat', None)
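        # Note: the plugin options are popped above only to consume them;
        # LibProxyServer does not support token/auth plugins, so both are
        # forced to None below.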
self.token_plugin = None
self.auth_plugin = None
self.daemon = False
# Server configuration
listen_host = kwargs.pop('listen_host', '')
listen_port = kwargs.pop('listen_port', None)
web = kwargs.pop('web', '')
# Configuration affecting base request handler
self.only_upgrade = not web
self.verbose = kwargs.pop('verbose', False)
record = kwargs.pop('record', '')
if record:
self.record = os.path.abspath(record)
self.run_once = kwargs.pop('run_once', False)
self.handler_id = 0
for arg in kwargs.keys():
print("warning: option %s ignored when using --libserver" % arg)
if web:
os.chdir(web)
super().__init__((listen_host, listen_port), RequestHandlerClass)
def process_request(self, request, client_address):
"""Override process_request to implement a counter"""
self.handler_id += 1
super().process_request(request, client_address)
if __name__ == '__main__':
websockify_init()
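# Illustrative invocations assembled from the options above (addresses,
# ports and paths are examples only):
#   websockify 6080 192.168.1.10:5900
#   websockify --cert self.pem --ssl-only 443 localhost:5900
#   websockify --token-plugin TokenFile --token-source /etc/websockify/targets.cfg 6080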
| 31,002 | Python | .py | 660 | 35.237879 | 117 | 0.581882 | novnc/websockify | 3,872 | 766 | 31 | LGPL-3.0 | 9/5/2024, 5:10:09 PM (Europe/Amsterdam) |
| 6,655 | token_plugins.py | novnc_websockify/websockify/token_plugins.py |
import logging
import os
import sys
import time
import re
import json
logger = logging.getLogger(__name__)
_SOURCE_SPLIT_REGEX = re.compile(
r'(?<=^)"([^"]+)"(?=:|$)'
r'|(?<=:)"([^"]+)"(?=:|$)'
r'|(?<=^)([^:]*)(?=:|$)'
r'|(?<=:)([^:]*)(?=:|$)',
)
def parse_source_args(src):
"""It works like src.split(":") but with the ability to use a colon
if you wrap the word in quotation marks.
a:b:c:d -> ['a', 'b', 'c', 'd'
a:"b:c":c -> ['a', 'b:c', 'd']
"""
matches = _SOURCE_SPLIT_REGEX.findall(src)
return [m[0] or m[1] or m[2] or m[3] for m in matches]
class BasePlugin():
def __init__(self, src):
self.source = src
def lookup(self, token):
return None
class ReadOnlyTokenFile(BasePlugin):
# source is a token file with lines like
# token: host:port
# or a directory of such files
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self._targets = None
def _load_targets(self):
if os.path.isdir(self.source):
cfg_files = [os.path.join(self.source, f) for
f in os.listdir(self.source)]
else:
cfg_files = [self.source]
        self._targets = {}
        for f in cfg_files:
            index = 1
            with open(f) as cfg:
                for line in [l.strip() for l in cfg.readlines()]:
                    if line and not line.startswith('#'):
                        try:
                            tok, target = re.split(r':\s', line)
                            self._targets[tok] = target.strip().rsplit(':', 1)
                        except ValueError:
                            logger.error("Syntax error in %s on line %d" % (f, index))
                    index += 1
def lookup(self, token):
if self._targets is None:
self._load_targets()
if token in self._targets:
return self._targets[token]
else:
return None
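# Illustrative use (path and contents are examples only): given a file
# /etc/websockify/targets.cfg containing the line "vm01: 10.0.0.5:5901",
#   ReadOnlyTokenFile('/etc/websockify/targets.cfg').lookup('vm01')
# returns ['10.0.0.5', '5901'].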
# the above one is probably more efficient, but this one is
# more backwards compatible (although in most cases
# ReadOnlyTokenFile should suffice)
class TokenFile(ReadOnlyTokenFile):
# source is a token file with lines like
# token: host:port
# or a directory of such files
def lookup(self, token):
self._load_targets()
return super().lookup(token)
class TokenFileName(BasePlugin):
# source is a directory
# token is filename
# contents of file is host:port
def __init__(self, src):
super().__init__(src)
if not os.path.isdir(src):
raise Exception("TokenFileName plugin requires a directory")
def lookup(self, token):
token = os.path.basename(token)
path = os.path.join(self.source, token)
if os.path.exists(path):
return open(path).read().strip().split(':')
else:
return None
class BaseTokenAPI(BasePlugin):
# source is a url with a '%s' in it where the token
# should go
# we import things on demand so that other plugins
# in this file can be used w/o unnecessary dependencies
def process_result(self, resp):
host, port = resp.text.split(':')
port = port.encode('ascii','ignore')
return [ host, port ]
def lookup(self, token):
import requests
resp = requests.get(self.source % token)
if resp.ok:
return self.process_result(resp)
else:
return None
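# Illustrative source (URL is an example only): with
# source = "https://tokens.example/%s", lookup("abc") issues a GET to
# https://tokens.example/abc and expects a plain "host:port" reply.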
class JSONTokenApi(BaseTokenAPI):
# source is a url with a '%s' in it where the token
# should go
def process_result(self, resp):
resp_json = resp.json()
return (resp_json['host'], resp_json['port'])
class JWTTokenApi(BasePlugin):
    # source is a JWT token with hostname and port included
    # Both JWS and JWE tokens are accepted. For JWE tokens, the same key is
    # used for both validation and decryption.
def lookup(self, token):
try:
from jwcrypto import jwt, jwk
import json
key = jwk.JWK()
try:
with open(self.source, 'rb') as key_file:
key_data = key_file.read()
except Exception as e:
logger.error("Error loading key file: %s" % str(e))
return None
try:
key.import_from_pem(key_data)
except:
try:
key.import_key(k=key_data.decode('utf-8'),kty='oct')
except:
logger.error('Failed to correctly parse key data!')
return None
try:
token = jwt.JWT(key=key, jwt=token)
parsed_header = json.loads(token.header)
if 'enc' in parsed_header:
# Token is encrypted, so we need to decrypt by passing the claims to a new instance
token = jwt.JWT(key=key, jwt=token.claims)
parsed = json.loads(token.claims)
if 'nbf' in parsed:
# Not Before is present, so we need to check it
if time.time() < parsed['nbf']:
logger.warning('Token can not be used yet!')
return None
if 'exp' in parsed:
# Expiration time is present, so we need to check it
if time.time() > parsed['exp']:
logger.warning('Token has expired!')
return None
return (parsed['host'], parsed['port'])
except Exception as e:
logger.error("Failed to parse token: %s" % str(e))
return None
except ImportError:
logger.error("package jwcrypto not found, are you sure you've installed it correctly?")
return None
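# Illustrative claims (values are examples only): a token whose decoded
# payload is {"host": "10.0.0.5", "port": "5901", "nbf": ..., "exp": ...}
# resolves to ("10.0.0.5", "5901") while the current time lies within
# the [nbf, exp] window.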
class TokenRedis(BasePlugin):
"""Token plugin based on the Redis in-memory data store.
The token source is in the format:
host[:port[:db[:password[:namespace]]]]
    where port, db, password and namespace are optional. If port or db are
    left empty they take their default values, i.e. 6379 and 0 respectively.
If your redis server is using the default port (6379) then you can use:
my-redis-host
In case you need to authenticate with the redis server and you are using
the default database and port you can use:
my-redis-host:::verysecretpass
You can also specify a namespace. In this case, the tokens
will be stored in the format '{namespace}:{token}'
my-redis-host::::my-app-namespace
Or if your namespace is nested, you can wrap it in quotes:
my-redis-host::::"first-ns:second-ns"
In the more general case you will use:
my-redis-host:6380:1:verysecretpass:my-app-namespace
The TokenRedis plugin expects the format of the target in one of these two
formats:
- JSON
{"host": "target-host:target-port"}
- Plain text
target-host:target-port
Prepare data with:
redis-cli set my-token '{"host": "127.0.0.1:5000"}'
Verify with:
redis-cli --raw get my-token
Spawn a test "server" using netcat
nc -l 5000 -v
Note: This Token Plugin depends on the 'redis' module, so you have
to install it before using this plugin:
pip install redis
"""
def __init__(self, src):
try:
import redis
except ImportError:
logger.error("Unable to load redis module")
sys.exit()
        # Source format: host[:port[:db[:password[:namespace]]]]; empty
        # optional fields fall back to their defaults.
        try:
            fields = parse_source_args(src)
            if not 1 <= len(fields) <= 5:
                raise ValueError
            fields += [''] * (5 - len(fields))
            server, port, db, password, namespace = fields
            self._server = server
            self._port = int(port) if port else 6379
            self._db = int(db) if db else 0
            self._password = password if password else None
            self._namespace = namespace + ":" if namespace else ""
            logger.info("TokenRedis backend initialized (%s:%s)" %
                        (self._server, self._port))
        except ValueError:
            logger.error("The provided --token-source='%s' is not in the "
                         "expected format <host>[:<port>[:<db>[:<password>[:<namespace>]]]]" %
                         src)
            sys.exit()
def lookup(self, token):
try:
import redis
except ImportError:
logger.error("package redis not found, are you sure you've installed them correctly?")
sys.exit()
logger.info("resolving token '%s'" % token)
client = redis.Redis(host=self._server, port=self._port,
db=self._db, password=self._password)
stuff = client.get(self._namespace + token)
if stuff is None:
return None
else:
responseStr = stuff.decode("utf-8").strip()
logger.debug("response from redis : %s" % responseStr)
if responseStr.startswith("{"):
try:
combo = json.loads(responseStr)
host, port = combo["host"].split(":")
except ValueError:
logger.error("Unable to decode JSON token: %s" %
responseStr)
return None
except KeyError:
logger.error("Unable to find 'host' key in JSON token: %s" %
responseStr)
return None
elif re.match(r'\S+:\S+', responseStr):
host, port = responseStr.split(":")
else:
logger.error("Unable to parse token: %s" % responseStr)
return None
logger.debug("host: %s, port: %s" % (host, port))
return [host, port]
class UnixDomainSocketDirectory(BasePlugin):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self._dir_path = os.path.abspath(self.source)
def lookup(self, token):
try:
import stat
if not os.path.isdir(self._dir_path):
return None
uds_path = os.path.abspath(os.path.join(self._dir_path, token))
if not uds_path.startswith(self._dir_path):
return None
if not os.path.exists(uds_path):
return None
if not stat.S_ISSOCK(os.stat(uds_path).st_mode):
return None
return [ 'unix_socket', uds_path ]
except Exception as e:
logger.error("Error finding unix domain socket: %s" % str(e))
return None
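# Illustrative use (paths are examples only): with source "/run/vnc" and a
# socket at /run/vnc/vm01, lookup("vm01") returns
# ['unix_socket', '/run/vnc/vm01']; tokens that escape the directory,
# missing paths and non-sockets all return None.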
| 11,883 | Python | .py | 294 | 28.622449 | 125 | 0.538635 | novnc/websockify | 3,872 | 766 | 31 | LGPL-3.0 | 9/5/2024, 5:10:09 PM (Europe/Amsterdam) |
| 6,656 | websocket.py | novnc_websockify/websockify/websocket.py |
#!/usr/bin/env python
'''
Python WebSocket library
Copyright 2011 Joel Martin
Copyright 2016 Pierre Ossman
Licensed under LGPL version 3 (see docs/LICENSE.LGPL-3)
Supports following protocol versions:
- http://tools.ietf.org/html/draft-ietf-hybi-thewebsocketprotocol-07
- http://tools.ietf.org/html/draft-ietf-hybi-thewebsocketprotocol-10
- http://tools.ietf.org/html/rfc6455
'''
import sys
import array
import email
import errno
import random
import socket
import ssl
import struct
from base64 import b64encode
from hashlib import sha1
from urllib.parse import urlparse
try:
import numpy
except ImportError:
import warnings
warnings.warn("no 'numpy' module, HyBi protocol will be slower")
numpy = None
class WebSocketWantReadError(ssl.SSLWantReadError):
pass
class WebSocketWantWriteError(ssl.SSLWantWriteError):
pass
class WebSocket:
"""WebSocket protocol socket like class.
This provides access to the WebSocket protocol by behaving much
like a real socket would. It shares many similarities with
ssl.SSLSocket.
    The WebSocket protocol requires extra data to be sent and received
compared to the application level data. This means that a socket
that is ready to be read may not hold enough data to decode any
application data, and a socket that is ready to be written to may
not have enough space for an entire WebSocket frame. This is
handled by the exceptions WebSocketWantReadError and
WebSocketWantWriteError. When these are raised the caller must wait
for the socket to become ready again and call the relevant function
again.
A connection is established by using either connect() or accept(),
    depending on whether a client or server session is desired. See the
respective functions for details.
The following methods are passed on to the underlying socket:
- fileno
- getpeername, getsockname
- getsockopt, setsockopt
- gettimeout, settimeout
- setblocking
"""
GUID = "258EAFA5-E914-47DA-95CA-C5AB0DC85B11"
def __init__(self):
"""Creates an unconnected WebSocket"""
self._state = "new"
self._partial_msg = b''
self._recv_buffer = b''
self._recv_queue = []
self._send_buffer = b''
self._previous_sendmsg = None
self._sent_close = False
self._received_close = False
self.close_code = None
self.close_reason = None
self.socket = None
def __getattr__(self, name):
# These methods are just redirected to the underlying socket
if name in ["fileno",
"getpeername", "getsockname",
"getsockopt", "setsockopt",
"gettimeout", "settimeout",
"setblocking"]:
assert self.socket is not None
return getattr(self.socket, name)
else:
raise AttributeError("%s instance has no attribute '%s'" %
(self.__class__.__name__, name))
def connect(self, uri, origin=None, protocols=[]):
"""Establishes a new connection to a WebSocket server.
This method connects to the host specified by uri and
negotiates a WebSocket connection. origin should be specified
in accordance with RFC 6454 if known. A list of valid
sub-protocols can be specified in the protocols argument.
The data will be sent in the clear if the "ws" scheme is used,
and encrypted if the "wss" scheme is used.
Both WebSocketWantReadError and WebSocketWantWriteError can be
raised whilst negotiating the connection. Repeated calls to
connect() must retain the same arguments.
"""
        self.client = True
uri = urlparse(uri)
port = uri.port
if uri.scheme in ("ws", "http"):
if not port:
port = 80
elif uri.scheme in ("wss", "https"):
if not port:
port = 443
else:
raise Exception("Unknown scheme '%s'" % uri.scheme)
# This is a state machine in order to handle
# WantRead/WantWrite events
if self._state == "new":
self.socket = socket.create_connection((uri.hostname, port))
if uri.scheme in ("wss", "https"):
context = ssl.create_default_context()
self.socket = context.wrap_socket(self.socket,
server_hostname=uri.hostname)
self._state = "ssl_handshake"
else:
self._state = "headers"
if self._state == "ssl_handshake":
self.socket.do_handshake()
self._state = "headers"
if self._state == "headers":
self._key = ''
for i in range(16):
self._key += chr(random.randrange(256))
self._key = b64encode(self._key.encode("latin-1")).decode("ascii")
path = uri.path
if not path:
path = "/"
self.send_request("GET", path)
self.send_header("Host", uri.hostname)
self.send_header("Upgrade", "websocket")
self.send_header("Connection", "upgrade")
self.send_header("Sec-WebSocket-Key", self._key)
self.send_header("Sec-WebSocket-Version", 13)
if origin is not None:
self.send_header("Origin", origin)
if len(protocols) > 0:
self.send_header("Sec-WebSocket-Protocol", ", ".join(protocols))
self.end_headers()
self._state = "send_headers"
if self._state == "send_headers":
self._flush()
self._state = "response"
if self._state == "response":
if not self._recv():
raise Exception("Socket closed unexpectedly")
if self._recv_buffer.find(b'\r\n\r\n') == -1:
raise WebSocketWantReadError
(request, self._recv_buffer) = self._recv_buffer.split(b'\r\n', 1)
request = request.decode("latin-1")
words = request.split()
if (len(words) < 2) or (words[0] != "HTTP/1.1"):
raise Exception("Invalid response")
if words[1] != "101":
raise Exception("WebSocket request denied: %s" % " ".join(words[1:]))
(headers, self._recv_buffer) = self._recv_buffer.split(b'\r\n\r\n', 1)
headers = headers.decode('latin-1') + '\r\n'
headers = email.message_from_string(headers)
if headers.get("Upgrade", "").lower() != "websocket":
print(type(headers))
raise Exception("Missing or incorrect upgrade header")
accept = headers.get('Sec-WebSocket-Accept')
if accept is None:
raise Exception("Missing Sec-WebSocket-Accept header");
expected = sha1((self._key + self.GUID).encode("ascii")).digest()
expected = b64encode(expected).decode("ascii")
del self._key
if accept != expected:
raise Exception("Invalid Sec-WebSocket-Accept header");
self.protocol = headers.get('Sec-WebSocket-Protocol')
if len(protocols) == 0:
if self.protocol is not None:
raise Exception("Unexpected Sec-WebSocket-Protocol header")
else:
if self.protocol not in protocols:
raise Exception("Invalid protocol chosen by server")
self._state = "done"
return
raise Exception("WebSocket is in an invalid state")
def accept(self, socket, headers):
"""Establishes a new WebSocket session with a client.
This method negotiates a WebSocket connection with an incoming
client. The caller must provide the client socket and the
headers from the HTTP request.
A server can identify that a client is requesting a WebSocket
connection by looking at the "Upgrade" header. It will include
the value "websocket" in such cases.
WebSocketWantWriteError can be raised if the response cannot be
sent right away. accept() must be called again once more space
is available using the same arguments.
"""
# This is a state machine in order to handle
# WantRead/WantWrite events
if self._state == "new":
self.client = False
self.socket = socket
if headers.get("upgrade", "").lower() != "websocket":
raise Exception("Missing or incorrect upgrade header")
ver = headers.get('Sec-WebSocket-Version')
if ver is None:
raise Exception("Missing Sec-WebSocket-Version header");
# HyBi-07 report version 7
# HyBi-08 - HyBi-12 report version 8
# HyBi-13 reports version 13
if ver in ['7', '8', '13']:
self.version = "hybi-%02d" % int(ver)
else:
raise Exception("Unsupported protocol version %s" % ver)
key = headers.get('Sec-WebSocket-Key')
if key is None:
raise Exception("Missing Sec-WebSocket-Key header");
# Generate the hash value for the accept header
accept = sha1((key + self.GUID).encode("ascii")).digest()
accept = b64encode(accept).decode("ascii")
self.protocol = ''
protocols = headers.get('Sec-WebSocket-Protocol', '').split(',')
if protocols:
self.protocol = self.select_subprotocol(protocols)
# We are required to choose one of the protocols
# presented by the client
if self.protocol not in protocols:
raise Exception('Invalid protocol selected')
self.send_response(101, "Switching Protocols")
self.send_header("Upgrade", "websocket")
self.send_header("Connection", "Upgrade")
self.send_header("Sec-WebSocket-Accept", accept)
if self.protocol:
self.send_header("Sec-WebSocket-Protocol", self.protocol)
self.end_headers()
self._state = "flush"
if self._state == "flush":
self._flush()
self._state = "done"
return
raise Exception("WebSocket is in an invalid state")
def select_subprotocol(self, protocols):
"""Returns which sub-protocol should be used.
This method does not select any sub-protocol by default and is
meant to be overridden by an implementation that wishes to make
use of sub-protocols. It will be called during handling of
accept().
"""
return ""
def handle_ping(self, data):
"""Called when a WebSocket ping message is received.
This will be called whilst processing recv()/recvmsg(). The
default implementation sends a pong reply back."""
self.pong(data)
def handle_pong(self, data):
"""Called when a WebSocket pong message is received.
This will be called whilst processing recv()/recvmsg(). The
default implementation does nothing."""
pass
def recv(self):
"""Read data from the WebSocket.
This will return any available data on the socket (which may
be the empty string if the peer sent an empty message or
messages). If the socket is closed then None will be
returned. The reason for the close is found in the
'close_code' and 'close_reason' properties.
Unlike recvmsg() this method may return data from more than one
WebSocket message. It is however not guaranteed to return all
buffered data. Callers should continue calling recv() whilst
pending() returns True.
Both WebSocketWantReadError and WebSocketWantWriteError can be
raised when calling recv().
"""
return self.recvmsg()
def recvmsg(self):
"""Read a single message from the WebSocket.
This will return a single WebSocket message from the socket
(which will be the empty string if the peer sent an empty
message). If the socket is closed then None will be
returned. The reason for the close is found in the
'close_code' and 'close_reason' properties.
Unlike recv() this method will not return data from more than
one WebSocket message. Callers should continue calling
recvmsg() whilst pending() returns True.
Both WebSocketWantReadError and WebSocketWantWriteError can be
raised when calling recvmsg().
"""
# May have been called to flush out a close
if self._received_close:
self._flush()
return None
# Anything already queued?
if self.pending():
return self._recvmsg()
# Note: If self._recvmsg() raised WebSocketWantReadError,
# we cannot proceed to self._recv() here as we may
# have already called it once as part of the caller's
# "while websock.pending():" loop
# Nope, let's try to read a bit
if not self._recv_frames():
return None
# Anything queued now?
return self._recvmsg()
def pending(self):
"""Check if any WebSocket data is pending.
This method will return True as long as there are WebSocket
        frames that have not yet been processed. A single recv() from the
underlying socket may return multiple WebSocket frames and it
is therefore important that a caller continues calling recv()
or recvmsg() as long as pending() returns True.
Note that this function merely tells if there are raw WebSocket
frames pending. Those frames may not contain any application
data.
"""
return len(self._recv_queue) > 0
def send(self, bytes):
"""Write data to the WebSocket
This will queue the given data and attempt to send it to the
peer. Unlike sendmsg() this method might coalesce the data with
data from other calls, or split it over multiple messages.
WebSocketWantWriteError can be raised if there is insufficient
space in the underlying socket. send() must be called again
once more space is available using the same arguments.
"""
if len(bytes) == 0:
return 0
return self.sendmsg(bytes)
def sendmsg(self, msg):
"""Write a single message to the WebSocket
This will queue the given message and attempt to send it to the
peer. Unlike send() this method will preserve the data as a
single WebSocket message.
WebSocketWantWriteError can be raised if there is insufficient
space in the underlying socket. sendmsg() must be called again
once more space is available using the same arguments.
"""
if not isinstance(msg, bytes):
raise TypeError
if self._sent_close:
return 0
if self._previous_sendmsg is not None:
if self._previous_sendmsg != msg:
raise ValueError
self._flush()
self._previous_sendmsg = None
return len(msg)
try:
self._sendmsg(0x2, msg)
except WebSocketWantWriteError:
self._previous_sendmsg = msg
raise
return len(msg)
def send_response(self, code, message):
self._queue_str("HTTP/1.1 %d %s\r\n" % (code, message))
def send_header(self, keyword, value):
self._queue_str("%s: %s\r\n" % (keyword, value))
def end_headers(self):
self._queue_str("\r\n")
def send_request(self, type, path):
self._queue_str("%s %s HTTP/1.1\r\n" % (type.upper(), path))
def ping(self, data=b''):
"""Write a ping message to the WebSocket
WebSocketWantWriteError can be raised if there is insufficient
space in the underlying socket. ping() must be called again once
more space is available using the same arguments.
"""
if not isinstance(data, bytes):
raise TypeError
if self._previous_sendmsg is not None:
if self._previous_sendmsg != data:
raise ValueError
self._flush()
self._previous_sendmsg = None
return
try:
self._sendmsg(0x9, data)
except WebSocketWantWriteError:
self._previous_sendmsg = data
raise
def pong(self, data=b''):
"""Write a pong message to the WebSocket
WebSocketWantWriteError can be raised if there is insufficient
space in the underlying socket. pong() must be called again once
more space is available using the same arguments.
"""
if not isinstance(data, bytes):
raise TypeError
if self._previous_sendmsg is not None:
if self._previous_sendmsg != data:
raise ValueError
self._flush()
self._previous_sendmsg = None
return
try:
self._sendmsg(0xA, data)
except WebSocketWantWriteError:
self._previous_sendmsg = data
raise
def shutdown(self, how, code=1000, reason=None):
"""Gracefully terminate the WebSocket connection.
This will start the process to terminate the WebSocket
        connection. The caller must continue calling recv() or
recvmsg() after this function in order to wait for the peer to
acknowledge the close. Calls to send() and sendmsg() will be
ignored.
WebSocketWantWriteError can be raised if there is insufficient
space in the underlying socket for the close message. shutdown()
must be called again once more space is available using the same
arguments.
The how argument is currently ignored.
"""
# Already closing?
if self._sent_close:
self._flush()
return
# Special code to indicate that we closed the connection
if not self._received_close:
self.close_code = 1000
self.close_reason = "Locally initiated close"
self._sent_close = True
msg = b''
if code is not None:
msg += struct.pack(">H", code)
if reason is not None:
msg += reason.encode("UTF-8")
self._sendmsg(0x8, msg)
def close(self, code=1000, reason=None):
"""Terminate the WebSocket connection immediately.
This will close the WebSocket connection directly after sending
a close message to the peer.
WebSocketWantWriteError can be raised if there is insufficient
space in the underlying socket for the close message. close()
must be called again once more space is available using the same
arguments.
"""
self.shutdown(socket.SHUT_RDWR, code, reason)
self._close()
def _recv(self):
# Fetches more data from the socket to the buffer
assert self.socket is not None
while True:
try:
data = self.socket.recv(4096)
except OSError as exc:
if exc.errno == errno.EWOULDBLOCK:
raise WebSocketWantReadError
raise
if len(data) == 0:
return False
self._recv_buffer += data
# Support for SSLSocket like objects
if hasattr(self.socket, "pending"):
if not self.socket.pending():
break
else:
break
return True
def _recv_frames(self):
# Fetches more data and decodes the frames
if not self._recv():
if self.close_code is None:
self.close_code = 1006
self.close_reason = "Connection closed abnormally"
self._sent_close = self._received_close = True
self._close()
return False
while True:
frame = self._decode_hybi(self._recv_buffer)
if frame is None:
break
self._recv_buffer = self._recv_buffer[frame['length']:]
self._recv_queue.append(frame)
return True
def _recvmsg(self):
# Process pending frames and returns any application data
while self._recv_queue:
frame = self._recv_queue.pop(0)
if not self.client and not frame['masked']:
self.shutdown(socket.SHUT_RDWR, 1002, "Procotol error: Frame not masked")
continue
if self.client and frame['masked']:
self.shutdown(socket.SHUT_RDWR, 1002, "Procotol error: Frame masked")
continue
if frame["opcode"] == 0x0:
if not self._partial_msg:
self.shutdown(socket.SHUT_RDWR, 1002, "Procotol error: Unexpected continuation frame")
continue
self._partial_msg += frame["payload"]
if frame["fin"]:
msg = self._partial_msg
self._partial_msg = b''
return msg
elif frame["opcode"] == 0x1:
self.shutdown(socket.SHUT_RDWR, 1003, "Unsupported: Text frames are not supported")
elif frame["opcode"] == 0x2:
if self._partial_msg:
self.shutdown(socket.SHUT_RDWR, 1002, "Procotol error: Unexpected new frame")
continue
if frame["fin"]:
return frame["payload"]
else:
self._partial_msg = frame["payload"]
elif frame["opcode"] == 0x8:
if self._received_close:
continue
self._received_close = True
if self._sent_close:
self._close()
return None
if not frame["fin"]:
self.shutdown(socket.SHUT_RDWR, 1003, "Unsupported: Fragmented close")
continue
code = None
reason = None
if len(frame["payload"]) >= 2:
code = struct.unpack(">H", frame["payload"][:2])[0]
if len(frame["payload"]) > 2:
reason = frame["payload"][2:]
try:
reason = reason.decode("UTF-8")
except UnicodeDecodeError:
self.shutdown(socket.SHUT_RDWR, 1002, "Procotol error: Invalid UTF-8 in close")
continue
if code is None:
self.close_code = 1005
self.close_reason = "No close status code specified by peer"
else:
self.close_code = code
if reason is not None:
self.close_reason = reason
self.shutdown(None, code, reason)
return None
elif frame["opcode"] == 0x9:
if not frame["fin"]:
self.shutdown(socket.SHUT_RDWR, 1003, "Unsupported: Fragmented ping")
continue
self.handle_ping(frame["payload"])
elif frame["opcode"] == 0xA:
if not frame["fin"]:
self.shutdown(socket.SHUT_RDWR, 1003, "Unsupported: Fragmented pong")
continue
self.handle_pong(frame["payload"])
else:
self.shutdown(socket.SHUT_RDWR, 1003, "Unsupported: Unknown opcode 0x%02x" % frame["opcode"])
raise WebSocketWantReadError
def _flush(self):
# Writes pending data to the socket
if not self._send_buffer:
return
assert self.socket is not None
try:
sent = self.socket.send(self._send_buffer)
except OSError as exc:
if exc.errno == errno.EWOULDBLOCK:
raise WebSocketWantWriteError
raise
self._send_buffer = self._send_buffer[sent:]
if self._send_buffer:
raise WebSocketWantWriteError
# We had a pending close and we've flushed the buffer,
# time to end things
if self._received_close and self._sent_close:
self._close()
def _send(self, data):
# Queues data and attempts to send it
self._send_buffer += data
self._flush()
def _queue_str(self, string):
# Queue some data to be sent later.
# Only used by the connecting methods.
self._send_buffer += string.encode("latin-1")
def _sendmsg(self, opcode, msg):
# Sends a standard data message
if self.client:
mask = b''
for i in range(4):
                # int.to_bytes needs explicit length/byteorder before Python 3.11
                mask += random.randrange(256).to_bytes(1, 'big')
frame = self._encode_hybi(opcode, msg, mask)
else:
frame = self._encode_hybi(opcode, msg)
return self._send(frame)
def _close(self):
# Close the underlying socket
self.socket.close()
self.socket = None
def _mask(self, buf, mask):
# Mask a frame
return self._unmask(buf, mask)
def _unmask(self, buf, mask):
# Unmask a frame
if numpy:
plen = len(buf)
pstart = 0
pend = plen
b = c = b''
if plen >= 4:
dtype=numpy.dtype('<u4')
if sys.byteorder == 'big':
dtype = dtype.newbyteorder('>')
mask = numpy.frombuffer(mask, dtype, count=1)
data = numpy.frombuffer(buf, dtype, count=int(plen / 4))
#b = numpy.bitwise_xor(data, mask).data
b = numpy.bitwise_xor(data, mask).tobytes()
if plen % 4:
dtype=numpy.dtype('B')
if sys.byteorder == 'big':
dtype = dtype.newbyteorder('>')
mask = numpy.frombuffer(mask, dtype, count=(plen % 4))
data = numpy.frombuffer(buf, dtype,
offset=plen - (plen % 4), count=(plen % 4))
c = numpy.bitwise_xor(data, mask).tobytes()
return b + c
else:
# Slower fallback
data = array.array('B')
data.frombytes(buf)
for i in range(len(data)):
data[i] ^= mask[i % 4]
return data.tobytes()
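    # Masking sketch (illustrative): every payload byte is XORed with the
    # 4-byte mask, repeating each 4 bytes, e.g.
    #   _unmask(b'\x00\x01\x02\x03', b'\xff\xff\xff\xff') == b'\xff\xfe\xfd\xfc'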
def _encode_hybi(self, opcode, buf, mask_key=None, fin=True):
""" Encode a HyBi style WebSocket frame.
Optional opcode:
0x0 - continuation
0x1 - text frame
0x2 - binary frame
0x8 - connection close
0x9 - ping
0xA - pong
"""
b1 = opcode & 0x0f
if fin:
b1 |= 0x80
mask_bit = 0
if mask_key is not None:
mask_bit = 0x80
buf = self._mask(buf, mask_key)
payload_len = len(buf)
if payload_len <= 125:
header = struct.pack('>BB', b1, payload_len | mask_bit)
elif payload_len > 125 and payload_len < 65536:
header = struct.pack('>BBH', b1, 126 | mask_bit, payload_len)
elif payload_len >= 65536:
header = struct.pack('>BBQ', b1, 127 | mask_bit, payload_len)
if mask_key is not None:
return header + mask_key + buf
else:
return header + buf
def _decode_hybi(self, buf):
""" Decode HyBi style WebSocket packets.
Returns:
{'fin' : boolean,
'opcode' : number,
'masked' : boolean,
'length' : encoded_length,
'payload' : decoded_buffer}
"""
f = {'fin' : 0,
'opcode' : 0,
'masked' : False,
'length' : 0,
'payload' : None}
blen = len(buf)
hlen = 2
if blen < hlen:
return None
b1, b2 = struct.unpack(">BB", buf[:2])
f['opcode'] = b1 & 0x0f
f['fin'] = not not (b1 & 0x80)
f['masked'] = not not (b2 & 0x80)
if f['masked']:
hlen += 4
if blen < hlen:
return None
length = b2 & 0x7f
if length == 126:
hlen += 2
if blen < hlen:
return None
length, = struct.unpack('>H', buf[2:4])
elif length == 127:
hlen += 8
if blen < hlen:
return None
length, = struct.unpack('>Q', buf[2:10])
f['length'] = hlen + length
if blen < f['length']:
return None
if f['masked']:
# unmask payload
mask_key = buf[hlen-4:hlen]
f['payload'] = self._unmask(buf[hlen:(hlen+length)], mask_key)
else:
f['payload'] = buf[hlen:(hlen+length)]
return f
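# Frame round-trip sketch (illustrative): for an unmasked binary frame,
#   ws = WebSocket()
#   frame = ws._encode_hybi(0x2, b'hello')
#   ws._decode_hybi(frame)['payload']  # == b'hello'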
| 29,428 | Python | .py | 687 | 31.069869 | 109 | 0.572639 | novnc/websockify | 3,872 | 766 | 31 | LGPL-3.0 | 9/5/2024, 5:10:09 PM (Europe/Amsterdam) |
| 6,657 | auth_plugins.py | novnc_websockify/websockify/auth_plugins.py |
class BasePlugin():
def __init__(self, src=None):
self.source = src
def authenticate(self, headers, target_host, target_port):
pass
class AuthenticationError(Exception):
def __init__(self, log_msg=None, response_code=403, response_headers={}, response_msg=None):
self.code = response_code
self.headers = response_headers
self.msg = response_msg
if log_msg is None:
log_msg = response_msg
super().__init__('%s %s' % (self.code, log_msg))
class InvalidOriginError(AuthenticationError):
def __init__(self, expected, actual):
self.expected_origin = expected
self.actual_origin = actual
super().__init__(
response_msg='Invalid Origin',
log_msg="Invalid Origin Header: Expected one of "
"%s, got '%s'" % (expected, actual))
class BasicHTTPAuth():
"""Verifies Basic Auth headers. Specify src as username:password"""
def __init__(self, src=None):
self.src = src
def authenticate(self, headers, target_host, target_port):
import base64
auth_header = headers.get('Authorization')
if auth_header:
if not auth_header.startswith('Basic '):
self.auth_error()
try:
user_pass_raw = base64.b64decode(auth_header[6:])
except TypeError:
self.auth_error()
try:
# http://stackoverflow.com/questions/7242316/what-encoding-should-i-use-for-http-basic-authentication
user_pass_as_text = user_pass_raw.decode('ISO-8859-1')
except UnicodeDecodeError:
self.auth_error()
user_pass = user_pass_as_text.split(':', 1)
if len(user_pass) != 2:
self.auth_error()
if not self.validate_creds(*user_pass):
self.demand_auth()
else:
self.demand_auth()
def validate_creds(self, username, password):
if '%s:%s' % (username, password) == self.src:
return True
else:
return False
def auth_error(self):
raise AuthenticationError(response_code=403)
def demand_auth(self):
raise AuthenticationError(response_code=401,
response_headers={'WWW-Authenticate': 'Basic realm="Websockify"'})
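# Illustrative check (credentials are examples only):
#   auth = BasicHTTPAuth('user:secret')
#   auth.authenticate({'Authorization': 'Basic dXNlcjpzZWNyZXQ='}, 'host', 5900)
# succeeds, since base64-decoding the header yields "user:secret"; a wrong
# password raises AuthenticationError with a 401 demanding Basic auth.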
class ExpectOrigin():
def __init__(self, src=None):
if src is None:
self.source = []
else:
self.source = src.split()
def authenticate(self, headers, target_host, target_port):
origin = headers.get('Origin', None)
if origin is None or origin not in self.source:
raise InvalidOriginError(expected=self.source, actual=origin)
class ClientCertCNAuth():
"""Verifies client by SSL certificate. Specify src as whitespace separated list of common names."""
def __init__(self, src=None):
if src is None:
self.source = []
else:
self.source = src.split()
def authenticate(self, headers, target_host, target_port):
if headers.get('SSL_CLIENT_S_DN_CN', None) not in self.source:
raise AuthenticationError(response_code=403)
| 3,265 | Python | .py | 77 | 32.116883 | 117 | 0.59785 | novnc/websockify | 3,872 | 766 | 31 | LGPL-3.0 | 9/5/2024, 5:10:09 PM (Europe/Amsterdam) |
| 6,658 | setup.py | aaronsw_html2text/setup.py |
import sys
from setuptools import setup, find_packages
setup(
name = "html2text",
version = "3.200.3",
description = "Turn HTML into equivalent Markdown-structured text.",
author = "Aaron Swartz",
author_email = "me@aaronsw.com",
url='http://www.aaronsw.com/2002/html2text/',
classifiers=[
'Development Status :: 5 - Production/Stable',
'Intended Audience :: Developers',
'License :: OSI Approved :: GNU General Public License (GPL)',
'Programming Language :: Python',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.3',
'Programming Language :: Python :: 2.4',
'Programming Language :: Python :: 2.5',
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.0',
'Programming Language :: Python :: 3.1',
'Programming Language :: Python :: 3.2'
],
entry_points="""
[console_scripts]
html2text=html2text:main
""",
license='GNU GPL 3',
packages=find_packages(),
py_modules=['html2text'],
include_package_data=True,
zip_safe=False,
)
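# The console_scripts entry point above installs an `html2text` command that
# dispatches to html2text.main() once the package is installed (for example
# via `pip install .`).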
| 1,224 | Python | .py | 35 | 29.285714 | 71 | 0.627104 | aaronsw/html2text | 2,607 | 412 | 67 | GPL-3.0 | 9/5/2024, 5:10:09 PM (Europe/Amsterdam) |
| 6,659 | html2text.py | aaronsw_html2text/html2text.py |
#!/usr/bin/env python
"""html2text: Turn HTML into equivalent Markdown-structured text."""
__version__ = "3.200.3"
__author__ = "Aaron Swartz (me@aaronsw.com)"
__copyright__ = "(C) 2004-2008 Aaron Swartz. GNU GPL 3."
__contributors__ = ["Martin 'Joey' Schulze", "Ricardo Reyes", "Kevin Jay North"]
# TODO:
# Support decoded entities with unifiable.
try:
True
except NameError:
setattr(__builtins__, 'True', 1)
setattr(__builtins__, 'False', 0)
def has_key(x, y):
if hasattr(x, 'has_key'): return x.has_key(y)
else: return y in x
try:
import htmlentitydefs
import urlparse
import HTMLParser
except ImportError: #Python3
import html.entities as htmlentitydefs
import urllib.parse as urlparse
import html.parser as HTMLParser
try: #Python3
import urllib.request as urllib
except ImportError:
import urllib
import optparse, re, sys, codecs, types
try: from textwrap import wrap
except ImportError: pass
# Use Unicode characters instead of their ascii pseudo-replacements
UNICODE_SNOB = 0
# Escape all special characters. Output is less readable, but avoids corner case formatting issues.
ESCAPE_SNOB = 0
# Put the links after each paragraph instead of at the end.
LINKS_EACH_PARAGRAPH = 0
# Wrap long lines at position. 0 for no wrapping. (Requires Python 2.3.)
BODY_WIDTH = 78
# Don't show internal links (href="#local-anchor") -- corresponding link targets
# won't be visible in the plain text file anyway.
SKIP_INTERNAL_LINKS = True
# Use inline, rather than reference, formatting for images and links
INLINE_LINKS = True
# Number of pixels Google indents nested lists
GOOGLE_LIST_INDENT = 36
IGNORE_ANCHORS = False
IGNORE_IMAGES = False
IGNORE_EMPHASIS = False
### Entity Nonsense ###
def name2cp(k):
if k == 'apos': return ord("'")
if hasattr(htmlentitydefs, "name2codepoint"): # requires Python 2.3
return htmlentitydefs.name2codepoint[k]
else:
k = htmlentitydefs.entitydefs[k]
if k.startswith("&#") and k.endswith(";"): return int(k[2:-1]) # not in latin-1
return ord(codecs.latin_1_decode(k)[0])
unifiable = {'rsquo':"'", 'lsquo':"'", 'rdquo':'"', 'ldquo':'"',
'copy':'(C)', 'mdash':'--', 'nbsp':' ', 'rarr':'->', 'larr':'<-', 'middot':'*',
'ndash':'-', 'oelig':'oe', 'aelig':'ae',
'agrave':'a', 'aacute':'a', 'acirc':'a', 'atilde':'a', 'auml':'a', 'aring':'a',
'egrave':'e', 'eacute':'e', 'ecirc':'e', 'euml':'e',
'igrave':'i', 'iacute':'i', 'icirc':'i', 'iuml':'i',
'ograve':'o', 'oacute':'o', 'ocirc':'o', 'otilde':'o', 'ouml':'o',
'ugrave':'u', 'uacute':'u', 'ucirc':'u', 'uuml':'u',
'lrm':'', 'rlm':''}
unifiable_n = {}
for k in unifiable.keys():
unifiable_n[name2cp(k)] = unifiable[k]
### End Entity Nonsense ###
def onlywhite(line):
    """Return true if the line consists only of whitespace characters."""
    for c in line:
        if c != ' ' and c != '\t':
            return False
    return line
def hn(tag):
if tag[0] == 'h' and len(tag) == 2:
try:
n = int(tag[1])
if n in range(1, 10): return n
except ValueError: return 0
def dumb_property_dict(style):
"""returns a hash of css attributes"""
    return dict([(x.strip(), y.strip()) for x, y in [z.split(':', 1) for z in style.split(';') if ':' in z]])
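# e.g. dumb_property_dict('color: red; font-weight: bold') returns
#   {'color': 'red', 'font-weight': 'bold'}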
def dumb_css_parser(data):
"""returns a hash of css selectors, each of which contains a hash of css attributes"""
# remove @import sentences
data += ';'
importIndex = data.find('@import')
while importIndex != -1:
data = data[0:importIndex] + data[data.find(';', importIndex) + 1:]
importIndex = data.find('@import')
    # parse the css. reverted from dictionary comprehension in order to support older pythons
elements = [x.split('{') for x in data.split('}') if '{' in x.strip()]
try:
elements = dict([(a.strip(), dumb_property_dict(b)) for a, b in elements])
except ValueError:
elements = {} # not that important
return elements
def element_style(attrs, style_def, parent_style):
"""returns a hash of the 'final' style attributes of the element"""
style = parent_style.copy()
if 'class' in attrs:
for css_class in attrs['class'].split():
css_style = style_def['.' + css_class]
style.update(css_style)
if 'style' in attrs:
immediate_style = dumb_property_dict(attrs['style'])
style.update(immediate_style)
return style
def google_list_style(style):
"""finds out whether this is an ordered or unordered list"""
if 'list-style-type' in style:
list_style = style['list-style-type']
if list_style in ['disc', 'circle', 'square', 'none']:
return 'ul'
return 'ol'
def google_has_height(style):
"""check if the style of the element has the 'height' attribute explicitly defined"""
if 'height' in style:
return True
return False
def google_text_emphasis(style):
"""return a list of all emphasis modifiers of the element"""
emphasis = []
if 'text-decoration' in style:
emphasis.append(style['text-decoration'])
if 'font-style' in style:
emphasis.append(style['font-style'])
if 'font-weight' in style:
emphasis.append(style['font-weight'])
return emphasis
def google_fixed_width_font(style):
"""check if the css of the current element defines a fixed width font"""
font_family = ''
if 'font-family' in style:
font_family = style['font-family']
if 'Courier New' == font_family or 'Consolas' == font_family:
return True
return False
def list_numbering_start(attrs):
"""extract numbering from list element attributes"""
if 'start' in attrs:
return int(attrs['start']) - 1
else:
return 0
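# e.g. for <ol start="3">, list_numbering_start({'start': '3'}) returns 2;
# handle_tag stores it as the list's 'num' and each <li> increments it before
# printing, so the first item renders as "3.".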
class HTML2Text(HTMLParser.HTMLParser):
def __init__(self, out=None, baseurl=''):
HTMLParser.HTMLParser.__init__(self)
# Config options
self.unicode_snob = UNICODE_SNOB
self.escape_snob = ESCAPE_SNOB
self.links_each_paragraph = LINKS_EACH_PARAGRAPH
self.body_width = BODY_WIDTH
self.skip_internal_links = SKIP_INTERNAL_LINKS
self.inline_links = INLINE_LINKS
self.google_list_indent = GOOGLE_LIST_INDENT
self.ignore_links = IGNORE_ANCHORS
self.ignore_images = IGNORE_IMAGES
self.ignore_emphasis = IGNORE_EMPHASIS
self.google_doc = False
self.ul_item_mark = '*'
self.emphasis_mark = '_'
self.strong_mark = '**'
if out is None:
self.out = self.outtextf
else:
self.out = out
self.outtextlist = [] # empty list to store output characters before they are "joined"
try:
self.outtext = unicode()
except NameError: # Python3
self.outtext = str()
self.quiet = 0
        self.p_p = 0 # number of newline characters to print before next output
self.outcount = 0
self.start = 1
self.space = 0
self.a = []
self.astack = []
self.maybe_automatic_link = None
self.absolute_url_matcher = re.compile(r'^[a-zA-Z+]+://')
self.acount = 0
self.list = []
self.blockquote = 0
self.pre = 0
self.startpre = 0
self.code = False
self.br_toggle = ''
self.lastWasNL = 0
self.lastWasList = False
self.style = 0
self.style_def = {}
self.tag_stack = []
self.emphasis = 0
self.drop_white_space = 0
self.inheader = False
self.abbr_title = None # current abbreviation definition
self.abbr_data = None # last inner HTML (for abbr being defined)
self.abbr_list = {} # stack of abbreviations to write later
self.baseurl = baseurl
try: del unifiable_n[name2cp('nbsp')]
except KeyError: pass
unifiable['nbsp'] = ' _place_holder;'
def feed(self, data):
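        # Replace the literal fragment "</' + 'script>" (as produced by
        # scripts that assemble their closing tag from pieces) with a dummy
        # tag so the underlying HTMLParser does not treat it as real markup.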
data = data.replace("</' + 'script>", "</ignore>")
HTMLParser.HTMLParser.feed(self, data)
def handle(self, data):
self.feed(data)
self.feed("")
return self.optwrap(self.close())
def outtextf(self, s):
self.outtextlist.append(s)
if s: self.lastWasNL = s[-1] == '\n'
def close(self):
HTMLParser.HTMLParser.close(self)
self.pbr()
self.o('', 0, 'end')
self.outtext = self.outtext.join(self.outtextlist)
if self.unicode_snob:
nbsp = unichr(name2cp('nbsp'))
else:
nbsp = u' '
self.outtext = self.outtext.replace(u' _place_holder;', nbsp)
return self.outtext
def handle_charref(self, c):
self.o(self.charref(c), 1)
def handle_entityref(self, c):
self.o(self.entityref(c), 1)
def handle_starttag(self, tag, attrs):
self.handle_tag(tag, attrs, 1)
def handle_endtag(self, tag):
self.handle_tag(tag, None, 0)
def previousIndex(self, attrs):
""" returns the index of certain set of attributes (of a link) in the
self.a list
If the set of attributes is not found, returns None
"""
if not has_key(attrs, 'href'): return None
i = -1
for a in self.a:
i += 1
match = 0
if has_key(a, 'href') and a['href'] == attrs['href']:
if has_key(a, 'title') or has_key(attrs, 'title'):
if (has_key(a, 'title') and has_key(attrs, 'title') and
a['title'] == attrs['title']):
match = True
else:
match = True
if match: return i
def drop_last(self, nLetters):
if not self.quiet:
self.outtext = self.outtext[:-nLetters]
def handle_emphasis(self, start, tag_style, parent_style):
"""handles various text emphases"""
tag_emphasis = google_text_emphasis(tag_style)
parent_emphasis = google_text_emphasis(parent_style)
# handle Google's text emphasis
strikethrough = 'line-through' in tag_emphasis and self.hide_strikethrough
bold = 'bold' in tag_emphasis and not 'bold' in parent_emphasis
italic = 'italic' in tag_emphasis and not 'italic' in parent_emphasis
fixed = google_fixed_width_font(tag_style) and not \
google_fixed_width_font(parent_style) and not self.pre
if start:
# crossed-out text must be handled before other attributes
# in order not to output qualifiers unnecessarily
if bold or italic or fixed:
self.emphasis += 1
if strikethrough:
self.quiet += 1
if italic:
self.o(self.emphasis_mark)
self.drop_white_space += 1
if bold:
self.o(self.strong_mark)
self.drop_white_space += 1
if fixed:
self.o('`')
self.drop_white_space += 1
self.code = True
else:
if bold or italic or fixed:
# there must not be whitespace before closing emphasis mark
self.emphasis -= 1
self.space = 0
self.outtext = self.outtext.rstrip()
if fixed:
if self.drop_white_space:
# empty emphasis, drop it
self.drop_last(1)
self.drop_white_space -= 1
else:
self.o('`')
self.code = False
if bold:
if self.drop_white_space:
# empty emphasis, drop it
self.drop_last(2)
self.drop_white_space -= 1
else:
self.o(self.strong_mark)
if italic:
if self.drop_white_space:
# empty emphasis, drop it
self.drop_last(1)
self.drop_white_space -= 1
else:
self.o(self.emphasis_mark)
# space is only allowed after *all* emphasis marks
if (bold or italic) and not self.emphasis:
self.o(" ")
if strikethrough:
self.quiet -= 1
def handle_tag(self, tag, attrs, start):
#attrs = fixattrs(attrs)
if attrs is None:
attrs = {}
else:
attrs = dict(attrs)
if self.google_doc:
# the attrs parameter is empty for a closing tag. in addition, we
# need the attributes of the parent nodes in order to get a
# complete style description for the current element. we assume
# that google docs export well formed html.
parent_style = {}
if start:
if self.tag_stack:
parent_style = self.tag_stack[-1][2]
tag_style = element_style(attrs, self.style_def, parent_style)
self.tag_stack.append((tag, attrs, tag_style))
else:
dummy, attrs, tag_style = self.tag_stack.pop()
if self.tag_stack:
parent_style = self.tag_stack[-1][2]
if hn(tag):
self.p()
if start:
self.inheader = True
self.o(hn(tag)*"#" + ' ')
else:
self.inheader = False
return # prevent redundant emphasis marks on headers
if tag in ['p', 'div']:
if self.google_doc:
if start and google_has_height(tag_style):
self.p()
else:
self.soft_br()
else:
self.p()
if tag == "br" and start: self.o(" \n")
if tag == "hr" and start:
self.p()
self.o("* * *")
self.p()
if tag in ["head", "style", 'script']:
if start: self.quiet += 1
else: self.quiet -= 1
if tag == "style":
if start: self.style += 1
else: self.style -= 1
if tag in ["body"]:
self.quiet = 0 # sites like 9rules.com never close <head>
if tag == "blockquote":
if start:
self.p(); self.o('> ', 0, 1); self.start = 1
self.blockquote += 1
else:
self.blockquote -= 1
self.p()
if tag in ['em', 'i', 'u'] and not self.ignore_emphasis: self.o(self.emphasis_mark)
if tag in ['strong', 'b'] and not self.ignore_emphasis: self.o(self.strong_mark)
if tag in ['del', 'strike', 's']:
if start:
self.o("<"+tag+">")
else:
self.o("</"+tag+">")
if self.google_doc:
if not self.inheader:
# handle some font attributes, but leave headers clean
self.handle_emphasis(start, tag_style, parent_style)
if tag in ["code", "tt"] and not self.pre: self.o('`') #TODO: `` `this` ``
if tag == "abbr":
if start:
self.abbr_title = None
self.abbr_data = ''
if has_key(attrs, 'title'):
self.abbr_title = attrs['title']
else:
if self.abbr_title != None:
self.abbr_list[self.abbr_data] = self.abbr_title
self.abbr_title = None
self.abbr_data = ''
if tag == "a" and not self.ignore_links:
if start:
if has_key(attrs, 'href') and not (self.skip_internal_links and attrs['href'].startswith('#')):
self.astack.append(attrs)
self.maybe_automatic_link = attrs['href']
else:
self.astack.append(None)
else:
if self.astack:
a = self.astack.pop()
if self.maybe_automatic_link:
self.maybe_automatic_link = None
elif a:
if self.inline_links:
self.o("](" + escape_md(a['href']) + ")")
else:
i = self.previousIndex(a)
if i is not None:
a = self.a[i]
else:
self.acount += 1
a['count'] = self.acount
a['outcount'] = self.outcount
self.a.append(a)
self.o("][" + str(a['count']) + "]")
if tag == "img" and start and not self.ignore_images:
if has_key(attrs, 'src'):
attrs['href'] = attrs['src']
alt = attrs.get('alt', '')
self.o("![" + escape_md(alt) + "]")
if self.inline_links:
self.o("(" + escape_md(attrs['href']) + ")")
else:
i = self.previousIndex(attrs)
if i is not None:
attrs = self.a[i]
else:
self.acount += 1
attrs['count'] = self.acount
attrs['outcount'] = self.outcount
self.a.append(attrs)
self.o("[" + str(attrs['count']) + "]")
if tag == 'dl' and start: self.p()
if tag == 'dt' and not start: self.pbr()
        if tag == 'dd' and start: self.o('    ')
if tag == 'dd' and not start: self.pbr()
if tag in ["ol", "ul"]:
# Google Docs create sub lists as top level lists
if (not self.list) and (not self.lastWasList):
self.p()
if start:
if self.google_doc:
list_style = google_list_style(tag_style)
else:
list_style = tag
numbering_start = list_numbering_start(attrs)
self.list.append({'name':list_style, 'num':numbering_start})
else:
if self.list: self.list.pop()
self.lastWasList = True
else:
self.lastWasList = False
if tag == 'li':
self.pbr()
if start:
if self.list: li = self.list[-1]
else: li = {'name':'ul', 'num':0}
if self.google_doc:
nest_count = self.google_nest_count(tag_style)
else:
nest_count = len(self.list)
self.o(" " * nest_count) #TODO: line up <ol><li>s > 9 correctly.
if li['name'] == "ul": self.o(self.ul_item_mark + " ")
elif li['name'] == "ol":
li['num'] += 1
self.o(str(li['num'])+". ")
self.start = 1
if tag in ["table", "tr"] and start: self.p()
if tag == 'td': self.pbr()
if tag == "pre":
if start:
self.startpre = 1
self.pre = 1
else:
self.pre = 0
self.p()
def pbr(self):
if self.p_p == 0:
self.p_p = 1
def p(self):
self.p_p = 2
def soft_br(self):
self.pbr()
        self.br_toggle = '  '
def o(self, data, puredata=0, force=0):
if self.abbr_data is not None:
self.abbr_data += data
if not self.quiet:
if self.google_doc:
# prevent white space immediately after 'begin emphasis' marks ('**' and '_')
lstripped_data = data.lstrip()
if self.drop_white_space and not (self.pre or self.code):
data = lstripped_data
if lstripped_data != '':
self.drop_white_space = 0
if puredata and not self.pre:
                data = re.sub(r'\s+', ' ', data)
if data and data[0] == ' ':
self.space = 1
data = data[1:]
if not data and not force: return
if self.startpre:
#self.out(" :") #TODO: not output when already one there
if not data.startswith("\n"): # <pre>stuff...
data = "\n" + data
bq = (">" * self.blockquote)
if not (force and data and data[0] == ">") and self.blockquote: bq += " "
if self.pre:
            if not self.list:
                bq += "    "
            #else: list content is already partially indented
            for i in xrange(len(self.list)):
                bq += "    "
data = data.replace("\n", "\n"+bq)
if self.startpre:
self.startpre = 0
if self.list:
data = data.lstrip("\n") # use existing initial indentation
if self.start:
self.space = 0
self.p_p = 0
self.start = 0
if force == 'end':
# It's the end.
self.p_p = 0
self.out("\n")
self.space = 0
if self.p_p:
self.out((self.br_toggle+'\n'+bq)*self.p_p)
self.space = 0
self.br_toggle = ''
if self.space:
if not self.lastWasNL: self.out(' ')
self.space = 0
if self.a and ((self.p_p == 2 and self.links_each_paragraph) or force == "end"):
if force == "end": self.out("\n")
newa = []
for link in self.a:
if self.outcount > link['outcount']:
self.out(" ["+ str(link['count']) +"]: " + urlparse.urljoin(self.baseurl, link['href']))
if has_key(link, 'title'): self.out(" ("+link['title']+")")
self.out("\n")
else:
newa.append(link)
if self.a != newa: self.out("\n") # Don't need an extra line when nothing was done.
self.a = newa
if self.abbr_list and force == "end":
for abbr, definition in self.abbr_list.items():
self.out(" *[" + abbr + "]: " + definition + "\n")
self.p_p = 0
self.out(data)
self.outcount += 1
def handle_data(self, data):
if r'\/script>' in data: self.quiet -= 1
if self.style:
self.style_def.update(dumb_css_parser(data))
        if self.maybe_automatic_link is not None:
href = self.maybe_automatic_link
if href == data and self.absolute_url_matcher.match(href):
self.o("<" + data + ">")
return
else:
self.o("[")
self.maybe_automatic_link = None
if not self.code and not self.pre:
data = escape_md_section(data, snob=self.escape_snob)
self.o(data, 1)
def unknown_decl(self, data): pass
def charref(self, name):
if name[0] in ['x','X']:
c = int(name[1:], 16)
else:
c = int(name)
if not self.unicode_snob and c in unifiable_n.keys():
return unifiable_n[c]
else:
try:
return unichr(c)
except NameError: #Python3
return chr(c)
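    # e.g. charref("x41"), charref("X41") and charref("65") all return "A";
    # codepoints listed in unifiable_n come back as their ASCII stand-ins
    # unless unicode_snob is set.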
def entityref(self, c):
if not self.unicode_snob and c in unifiable.keys():
return unifiable[c]
else:
try: name2cp(c)
except KeyError: return "&" + c + ';'
else:
try:
return unichr(name2cp(c))
except NameError: #Python3
return chr(name2cp(c))
def replaceEntities(self, s):
s = s.group(1)
if s[0] == "#":
return self.charref(s[1:])
else: return self.entityref(s)
r_unescape = re.compile(r"&(#?[xX]?(?:[0-9a-fA-F]+|\w{1,8}));")
def unescape(self, s):
return self.r_unescape.sub(self.replaceEntities, s)
def google_nest_count(self, style):
"""calculate the nesting count of google doc lists"""
nest_count = 0
if 'margin-left' in style:
nest_count = int(style['margin-left'][:-2]) / self.google_list_indent
return nest_count
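    # e.g. with google_list_indent at its module default of 36 (pixels), an
    # element exported with style "margin-left:72px" yields a nest count of 2.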
def optwrap(self, text):
"""Wrap all paragraphs in the provided text."""
if not self.body_width:
return text
assert wrap, "Requires Python 2.3."
result = ''
newlines = 0
for para in text.split("\n"):
if len(para) > 0:
if not skipwrap(para):
result += "\n".join(wrap(para, self.body_width))
                    if para.endswith('  '):
                        result += "  \n"
newlines = 1
else:
result += "\n\n"
newlines = 2
else:
if not onlywhite(para):
result += para + "\n"
newlines = 1
else:
if newlines < 2:
result += "\n"
newlines += 1
return result
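    # e.g. with the default body_width of 78 each prose paragraph is
    # re-flowed to 78 columns; body_width=0 (the -b 0 command-line option)
    # disables wrapping entirely, as the Google Docs test setup does.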
ordered_list_matcher = re.compile(r'\d+\.\s')
unordered_list_matcher = re.compile(r'[-\*\+]\s')
md_chars_matcher = re.compile(r"([\\\[\]\(\)])")
md_chars_matcher_all = re.compile(r"([`\*_{}\[\]\(\)#!])")
md_dot_matcher = re.compile(r"""
^ # start of line
(\s*\d+) # optional whitespace and a number
(\.) # dot
(?=\s) # lookahead assert whitespace
""", re.MULTILINE | re.VERBOSE)
md_plus_matcher = re.compile(r"""
^
(\s*)
(\+)
(?=\s)
""", flags=re.MULTILINE | re.VERBOSE)
md_dash_matcher = re.compile(r"""
^
(\s*)
(-)
(?=\s|\-) # followed by whitespace (bullet list, or spaced out hr)
# or another dash (header or hr)
""", flags=re.MULTILINE | re.VERBOSE)
slash_chars = r'\`*_{}[]()#+-.!'
md_backslash_matcher = re.compile(r'''
(\\) # match one slash
(?=[%s]) # followed by a char that requires escaping
''' % re.escape(slash_chars),
flags=re.VERBOSE)
def skipwrap(para):
# If the text begins with four spaces or one tab, it's a code block; don't wrap
    if para[0:4] == '    ' or para[0] == '\t':
return True
# If the text begins with only two "--", possibly preceded by whitespace, that's
# an emdash; so wrap.
stripped = para.lstrip()
if stripped[0:2] == "--" and len(stripped) > 2 and stripped[2] != "-":
return False
# I'm not sure what this is for; I thought it was to detect lists, but there's
# a <br>-inside-<span> case in one of the tests that also depends upon it.
if stripped[0:1] == '-' or stripped[0:1] == '*':
return True
# If the text begins with a single -, *, or +, followed by a space, or an integer,
    # followed by a ., followed by a space (in either case optionally preceded by
# whitespace), it's a list; don't wrap.
if ordered_list_matcher.match(stripped) or unordered_list_matcher.match(stripped):
return True
return False
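# Behaviour sketch for skipwrap():
#   skipwrap('    x = 1')    -> True   (four-space indent: code block)
#   skipwrap('3. list item') -> True   (ordered list)
#   skipwrap('Plain prose.') -> False  (gets wrapped by optwrap)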
def wrapwrite(text):
text = text.encode('utf-8')
try: #Python3
sys.stdout.buffer.write(text)
except AttributeError:
sys.stdout.write(text)
def html2text(html, baseurl=''):
h = HTML2Text(baseurl=baseurl)
return h.handle(html)
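# Minimal usage sketch of the convenience wrapper above:
#   md = html2text("<h1>Hello</h1><p>Some <b>bold</b> text.</p>")
# yields "# Hello" followed by "Some **bold** text." (plus trailing newlines).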
def unescape(s, unicode_snob=False):
h = HTML2Text()
h.unicode_snob = unicode_snob
return h.unescape(s)
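# e.g. unescape("&copy; 2004") returns "(C) 2004" with the default
# unicode_snob=False, and the literal copyright sign when unicode_snob=True.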
def escape_md(text):
"""Escapes markdown-sensitive characters within other markdown constructs."""
return md_chars_matcher.sub(r"\\\1", text)
def escape_md_section(text, snob=False):
"""Escapes markdown-sensitive characters across whole document sections."""
text = md_backslash_matcher.sub(r"\\\1", text)
if snob:
text = md_chars_matcher_all.sub(r"\\\1", text)
text = md_dot_matcher.sub(r"\1\\\2", text)
text = md_plus_matcher.sub(r"\1\\\2", text)
text = md_dash_matcher.sub(r"\1\\\2", text)
return text
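# e.g. escape_md_section("1. not a list") -> "1\\. not a list" and
# escape_md_section("- not a bullet") -> "\\- not a bullet", so plain text
# that merely looks like Markdown list syntax survives a round trip.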
def main():
baseurl = ''
p = optparse.OptionParser('%prog [(filename|url) [encoding]]',
version='%prog ' + __version__)
p.add_option("--ignore-emphasis", dest="ignore_emphasis", action="store_true",
default=IGNORE_EMPHASIS, help="don't include any formatting for emphasis")
p.add_option("--ignore-links", dest="ignore_links", action="store_true",
default=IGNORE_ANCHORS, help="don't include any formatting for links")
p.add_option("--ignore-images", dest="ignore_images", action="store_true",
default=IGNORE_IMAGES, help="don't include any formatting for images")
p.add_option("-g", "--google-doc", action="store_true", dest="google_doc",
default=False, help="convert an html-exported Google Document")
p.add_option("-d", "--dash-unordered-list", action="store_true", dest="ul_style_dash",
default=False, help="use a dash rather than a star for unordered list items")
p.add_option("-e", "--asterisk-emphasis", action="store_true", dest="em_style_asterisk",
default=False, help="use an asterisk rather than an underscore for emphasized text")
p.add_option("-b", "--body-width", dest="body_width", action="store", type="int",
default=BODY_WIDTH, help="number of characters per output line, 0 for no wrap")
p.add_option("-i", "--google-list-indent", dest="list_indent", action="store", type="int",
default=GOOGLE_LIST_INDENT, help="number of pixels Google indents nested lists")
p.add_option("-s", "--hide-strikethrough", action="store_true", dest="hide_strikethrough",
default=False, help="hide strike-through text. only relevant when -g is specified as well")
p.add_option("--escape-all", action="store_true", dest="escape_snob",
default=False, help="Escape all special characters. Output is less readable, but avoids corner case formatting issues.")
(options, args) = p.parse_args()
# process input
encoding = "utf-8"
if len(args) > 0:
file_ = args[0]
if len(args) == 2:
encoding = args[1]
if len(args) > 2:
p.error('Too many arguments')
if file_.startswith('http://') or file_.startswith('https://'):
baseurl = file_
j = urllib.urlopen(baseurl)
data = j.read()
if encoding is None:
try:
from feedparser import _getCharacterEncoding as enc
except ImportError:
enc = lambda x, y: ('utf-8', 1)
encoding = enc(j.headers, data)[0]
if encoding == 'us-ascii':
encoding = 'utf-8'
else:
data = open(file_, 'rb').read()
if encoding is None:
try:
from chardet import detect
except ImportError:
detect = lambda x: {'encoding': 'utf-8'}
encoding = detect(data)['encoding']
else:
data = sys.stdin.read()
data = data.decode(encoding)
h = HTML2Text(baseurl=baseurl)
# handle options
if options.ul_style_dash: h.ul_item_mark = '-'
if options.em_style_asterisk:
h.emphasis_mark = '*'
h.strong_mark = '__'
h.body_width = options.body_width
h.list_indent = options.list_indent
h.ignore_emphasis = options.ignore_emphasis
h.ignore_links = options.ignore_links
h.ignore_images = options.ignore_images
h.google_doc = options.google_doc
h.hide_strikethrough = options.hide_strikethrough
h.escape_snob = options.escape_snob
wrapwrite(h.handle(data))
if __name__ == "__main__":
main()
| 32,121
|
Python
|
.py
| 783
| 29.90166
| 129
| 0.53222
|
aaronsw/html2text
| 2,607
| 412
| 67
|
GPL-3.0
|
9/5/2024, 5:10:09 PM (Europe/Amsterdam)
|
6,660
|
run_tests.py
|
aaronsw_html2text/test/run_tests.py
|
import codecs
import glob
import os
import re
import subprocess
import sys
sys.path.insert(0, '..')
import html2text
def test_module(fn, google_doc=False, **kwargs):
print_conditions('module', google_doc=google_doc, **kwargs)
h = html2text.HTML2Text()
if google_doc:
h.google_doc = True
h.ul_item_mark = '-'
h.body_width = 0
h.hide_strikethrough = True
for k, v in kwargs.iteritems():
setattr(h, k, v)
result = get_baseline(fn)
actual = h.handle(file(fn).read())
return print_result(fn, 'module', result, actual)
def test_command(fn, *args):
print_conditions('command', *args)
args = list(args)
cmd = [sys.executable or 'python', '../html2text.py']
if '--googledoc' in args:
args.remove('--googledoc')
cmd += ['-g', '-d', '-b', '0', '-s']
if args:
cmd.extend(args)
cmd += [fn]
result = get_baseline(fn)
actual = subprocess.Popen(cmd, stdout=subprocess.PIPE).stdout.read()
if os.name == 'nt':
# Fix the unwanted CR to CRCRLF replacement
# during text pipelining on Windows/cygwin
actual = re.sub(r'\r+', '\r', actual)
actual = actual.replace('\r\n', '\n')
return print_result(fn, 'command', result, actual)
def print_conditions(mode, *args, **kwargs):
format = " * %s %s, %s: "
sys.stdout.write(format % (mode, args, kwargs))
def print_result(fn, mode, result, actual):
if result == actual:
print('PASS')
return True
else:
print('FAIL')
if mode == 'command':
print(len(result), len(actual))
dump_name = get_dump_name(fn, mode)
f = codecs.open(dump_name, encoding='utf-8', mode='w+')
f.write(actual)
print(" Use: diff -u %s %s" % (get_baseline_name(fn), dump_name))
return False
def get_dump_name(fn, suffix):
return '%s-%s_output.md' % (os.path.splitext(fn)[0], suffix)
def get_baseline_name(fn):
return os.path.splitext(fn)[0] + '.md'
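# Naming sketch: for a hypothetical input page "sample.html" the baseline is
# "sample.md", and a failing module run dumps its output to
# "sample-module_output.md" for manual diffing against the baseline.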
def get_baseline(fn):
name = get_baseline_name(fn)
f = codecs.open(name, mode='r', encoding='utf8')
return f.read()
def run_all_tests():
html_files = glob.glob("*.html")
passing = True
for fn in html_files:
module_args = {}
cmdline_args = []
if fn.lower().startswith('google'):
module_args['google_doc'] = True
cmdline_args.append('--googledoc')
if fn.lower().find('unicode') >= 0:
module_args['unicode_snob'] = True
if fn.lower().find('flip_emphasis') >= 0:
module_args['emphasis_mark'] = '*'
module_args['strong_mark'] = '__'
cmdline_args.append('-e')
if fn.lower().find('escape_snob') >= 0:
module_args['escape_snob'] = True
cmdline_args.append('--escape-all')
print('\n' + fn + ':')
passing = passing and test_module(fn, **module_args)
        if 'unicode_snob' not in module_args: # Because there is no command-line option to control unicode_snob
passing = passing and test_command(fn, *cmdline_args)
if passing:
print("ALL TESTS PASSED")
else:
print("Fail.")
sys.exit(1)
if __name__ == "__main__":
run_all_tests()
| 3,292
|
Python
|
.py
| 92
| 28.869565
| 111
| 0.589015
|
aaronsw/html2text
| 2,607
| 412
| 67
|
GPL-3.0
|
9/5/2024, 5:10:09 PM (Europe/Amsterdam)
|
6,661
|
pyglossary.pyw
|
ilius_pyglossary/pyglossary.pyw
|
#!/usr/bin/env python3
import sys
from os.path import dirname
sys.path.insert(0, dirname(__file__))
from pyglossary.ui.main import main
main()
| 147
|
Python
|
.py
| 6
| 22.833333
| 37
| 0.781022
|
ilius/pyglossary
| 2,176
| 238
| 22
|
GPL-3.0
|
9/5/2024, 5:10:09 PM (Europe/Amsterdam)
|
6,662
|
setup.py
|
ilius_pyglossary/setup.py
|
#!/usr/bin/env python3
import glob
import logging
import os
import re
from os.path import dirname, exists, isdir, join
from setuptools import setup
from setuptools.command.install import install
from pyglossary.ui.version import getVersion
log = logging.getLogger("root")
relRootDir = "share/pyglossary"
class my_install(install):
def run(self):
install.run(self)
if os.sep == "/":
binPath = join(self.install_scripts, "pyglossary")
log.info(f"creating script file {binPath!r}")
if not exists(self.install_scripts):
os.makedirs(self.install_scripts)
# let it fail on wrong permissions.
else:
if not isdir(self.install_scripts):
raise OSError(
"installation path already exists "
f"but is not a directory: {self.install_scripts}",
)
open(binPath, "w").write("""#!/usr/bin/env python3
import sys
from os.path import dirname
sys.path.insert(0, dirname(__file__))
from pyglossary.ui.main import main
main()""")
os.chmod(binPath, 0o755)
root_data_file_names = [
"about",
"LICENSE",
"_license-dialog",
"Dockerfile",
"pyglossary.pyw",
"pyproject.toml",
"help",
"AUTHORS",
"config.json",
]
sep = "\\\\" if os.sep == "\\" else os.sep
package_data = {
"": root_data_file_names,
"plugins-meta": [
"index.json",
"tools/*",
],
"pyglossary": [
"*.py",
"xdxf.xsl",
"res/*",
"plugins/*",
"langs/*",
"plugin_lib/*.py",
"plugin_lib/py*/*.py",
"sort_modules/*.py",
"ui/*.py",
"ui/progressbar/*.py",
"ui/gtk3_utils/*.py",
"ui/gtk4_utils/*.py",
"ui/tools/*.py",
"ui/wcwidth/*.py",
"xdxf/xdxf.xsl",
"xdxf/*.py",
] + [
# safest way found so far to include every resource of plugins
# producing plugins/pkg/*, plugins/pkg/sub1/*, ... except .pyc/.pyo
re.sub(
fr"^.*?pyglossary{sep}(?=plugins)",
"",
join(dirpath, fname),
)
for top in glob.glob(
join(dirname(__file__), "pyglossary", "plugins"),
)
for dirpath, _, files in os.walk(top)
for fname in files
if not fname.endswith((".pyc", ".pyo"))
],
}
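# For example, a hypothetical resource at pyglossary/plugins/foo/res/icon.png
# ends up in package_data as "plugins/foo/res/icon.png": the re.sub() above
# strips everything up to and including the "pyglossary/" path prefix.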
with open("README.md", encoding="utf-8") as fh:
long_description = fh.read()
setup(
name="pyglossary",
version=getVersion(),
python_requires=">=3.10.0",
cmdclass={
"install": my_install,
},
description="A tool for converting dictionary files aka glossaries.",
long_description_content_type="text/markdown",
long_description=long_description,
author="Saeed Rasooli",
author_email="saeed.gnu@gmail.com",
license="GPLv3+",
url="https://github.com/ilius/pyglossary",
packages=[
"pyglossary",
],
entry_points={
"console_scripts": [
"pyglossary = pyglossary.ui.main:main",
],
},
package_data=package_data,
# data_files is deprecated, but without it
# `pip install --user` does not work, tested with pip 22.0.2
data_files=[
(relRootDir, root_data_file_names),
(f"{relRootDir}/plugins-meta", ["plugins-meta/index.json"]),
(f"{relRootDir}/res", glob.glob("res/*")),
],
extras_require={
"full": [
"lxml",
"beautifulsoup4",
"PyICU",
"PyYAML",
"marisa-trie",
"libzim",
"python-lzo",
"html5lib",
],
},
)
| 3,087
|
Python
|
.py
| 129
| 21.217054
| 70
| 0.672554
|
ilius/pyglossary
| 2,176
| 238
| 22
|
GPL-3.0
|
9/5/2024, 5:10:09 PM (Europe/Amsterdam)
|
6,663
|
whitelist.py
|
ilius_pyglossary/whitelist.py
|
AnyStr # unused import (pyglossary/json_utils.py:6)
AnyStr # unused import (pyglossary/text_utils.py:25)
EntryListType # unused import (pyglossary/glossary_v2.py:65)
exc_tb # unused variable (pyglossary/os_utils.py:54)
exc_tb # unused variable (pyglossary/slob.py:1569)
exc_tb # unused variable (pyglossary/slob.py:280)
exc_tb # unused variable (pyglossary/slob.py:739)
exc_type # unused variable (pyglossary/os_utils.py:52)
exc_type # unused variable (pyglossary/slob.py:1567)
exc_type # unused variable (pyglossary/slob.py:278)
exc_type # unused variable (pyglossary/slob.py:737)
IOBase # unused import (pyglossary/plugins/freedict.py:4)
IOBase # unused import (pyglossary/plugins/wiktextract.py:3)
IOBase # unused import (pyglossary/slob.py:34)
Iterable # unused import (pyglossary/iter_utils.py:25)
Iterable # unused import (pyglossary/plugin_lib/dictdlib.py:29)
Iterable # unused import (pyglossary/plugins/csv_plugin.py:22)
Iterable # unused import (pyglossary/sq_entry_list.py:27)
lxml # unused import (pyglossary/plugins/cc_kedict.py:10)
Mapping # unused import (pyglossary/slob.py:31)
RawEntryType # unused import (pyglossary/entry_list.py:28)
RawEntryType # unused import (pyglossary/entry.py:25)
RawEntryType # unused import (pyglossary/glossary_v2.py:65)
RawEntryType # unused import (pyglossary/sq_entry_list.py:30)
SortKeyType # unused import (pyglossary/sort_keys.py:27)
SortKeyType # unused import (pyglossary/sort_modules/dicformids.py:5)
SortKeyType # unused import (pyglossary/sort_modules/ebook_length3.py:6)
SortKeyType # unused import (pyglossary/sort_modules/ebook.py:4)
SortKeyType # unused import (pyglossary/sort_modules/headword_bytes_lower.py:4)
SortKeyType # unused import (pyglossary/sort_modules/headword_lower.py:7)
SortKeyType # unused import (pyglossary/sort_modules/headword.py:7)
SortKeyType # unused import (pyglossary/sort_modules/random.py:7)
SortKeyType # unused import (pyglossary/sort_modules/stardict.py:4)
SQLiteSortKeyType # unused import (pyglossary/sort_keys.py:27)
SQLiteSortKeyType # unused import (pyglossary/sort_modules/dicformids.py:5)
SQLiteSortKeyType # unused import (pyglossary/sort_modules/ebook_length3.py:6)
SQLiteSortKeyType # unused import (pyglossary/sort_modules/ebook.py:4)
SQLiteSortKeyType # unused import (pyglossary/sort_modules/headword_bytes_lower.py:4)
SQLiteSortKeyType # unused import (pyglossary/sort_modules/headword_lower.py:7)
SQLiteSortKeyType # unused import (pyglossary/sort_modules/headword.py:7)
SQLiteSortKeyType # unused import (pyglossary/sort_modules/random.py:7)
SQLiteSortKeyType # unused import (pyglossary/sort_modules/stardict.py:4)
T_Collator # unused import (pyglossary/slob.py:54)
T_Collator # unused import (pyglossary/sort_keys.py:26)
T_Collator # unused import (pyglossary/sort_modules/headword_lower.py:5)
T_Collator # unused import (pyglossary/sort_modules/headword.py:5)
T_Collator # unused import (pyglossary/sort_modules/random.py:5)
T_htmlfile # unused import (pyglossary/plugins/cc_cedict/conv.py:13)
T_htmlfile # unused import (pyglossary/plugins/dict_cc.py:12)
T_htmlfile # unused import (pyglossary/plugins/freedict.py:12)
T_htmlfile # unused import (pyglossary/plugins/jmdict.py:13)
T_htmlfile # unused import (pyglossary/plugins/jmnedict.py:11)
T_htmlfile # unused import (pyglossary/plugins/wiktextract.py:11)
T_htmlfile # unused import (pyglossary/xdxf/transform.py:6)
T_Locale # unused import (pyglossary/sort_keys.py:26)
UIType # unused import (pyglossary/glossary_progress.py:6)
UIType # unused import (pyglossary/glossary_v2.py:87)
_.add_alias # unused method (pyglossary/slob.py:1252)
_.addAlt # unused method (pyglossary/entry.py:123)
_.addAlt # unused method (pyglossary/entry.py:308)
_.addEntryObj # unused method (pyglossary/glossary.py:96)
_.bglHeader # unused attribute (pyglossary/plugins/babylon_bgl/bgl_reader_debug.py:412)
_.bglHeader # unused attribute (pyglossary/plugins/babylon_bgl/bgl_reader_debug.py:44)
_.closed # unused property (pyglossary/plugins/babylon_bgl/bgl_gzip.py:287)
_.closed # unused property (pyglossary/slob.py:290)
_.cls_get_prefix # unused method (pyglossary/plugins/ebook_epub2.py:231)
DebugBglReader # unused class (pyglossary/plugins/babylon_bgl/bgl_reader_debug.py:167)
debugReadOptions # unused variable (pyglossary/plugins/babylon_bgl/bgl_reader.py:72)
_.defiAsciiCount # unused attribute (pyglossary/plugins/babylon_bgl/bgl_reader_debug.py:469)
_.defiAsciiCount # unused attribute (pyglossary/plugins/babylon_bgl/bgl_reader_debug.py:81)
_.defiProcessedCount # unused attribute (pyglossary/plugins/babylon_bgl/bgl_reader_debug.py:467)
_.defiProcessedCount # unused attribute (pyglossary/plugins/babylon_bgl/bgl_reader_debug.py:79)
_.defiUtf8Count # unused attribute (pyglossary/plugins/babylon_bgl/bgl_reader_debug.py:475)
_.defiUtf8Count # unused attribute (pyglossary/plugins/babylon_bgl/bgl_reader_debug.py:80)
_.deleteAll # unused method (pyglossary/sq_entry_list.py:208)
_.detach # unused method (pyglossary/io_utils.py:119)
_.detach # unused method (pyglossary/io_utils.py:47)
_.directRead # unused method (pyglossary/glossary_v2.py:677)
_.dumpBlocks # unused method (pyglossary/plugins/babylon_bgl/bgl_reader_debug.py:357)
_.dumpMetadata2 # unused method (pyglossary/plugins/babylon_bgl/bgl_reader_debug.py:419)
escapeNewlines # unused function (pyglossary/plugins/babylon_bgl/bgl_text.py:158)
ExcInfoType # unused variable (pyglossary/core.py:25)
formatName # unused variable (pyglossary/plugin_manager.py:44)
_.getConfig # unused method (pyglossary/glossary_v2.py:540)
_.getDefaultDefiFormat # unused method (pyglossary/glossary_v2.py:437)
_.getReadExtraOptions # unused method (pyglossary/plugin_prop.py:497)
_.getTitleTag # unused method (pyglossary/plugins/freedict.py:187)
_.getTitleTag # unused method (pyglossary/plugins/wiktextract.py:656)
_.getWriteExtraOptions # unused method (pyglossary/plugin_prop.py:503)
_.groupValues # unused method (pyglossary/option.py:133)
_.groupValues # unused method (pyglossary/option.py:204)
_.groupValues # unused method (pyglossary/option.py:442)
_.gzipEndOffset # unused attribute (pyglossary/plugins/babylon_bgl/bgl_reader_debug.py:409)
_.gzipEndOffset # unused attribute (pyglossary/plugins/babylon_bgl/bgl_reader_debug.py:42)
_.gzipStartOffset # unused attribute (pyglossary/plugins/babylon_bgl/bgl_reader_debug.py:363)
_.gzipStartOffset # unused attribute (pyglossary/plugins/babylon_bgl/bgl_reader_debug.py:41)
_.innerXML # unused method (pyglossary/plugins/iupac_goldbook.py:160)
_.isatty # unused method (pyglossary/io_utils.py:26)
_.isatty # unused method (pyglossary/io_utils.py:98)
_.isatty # unused method (pyglossary/slob.py:294)
_.keyScoreList # unused method (scripts/wiktextract/extract-schema.py:18)
levelNamesCap # unused variable (pyglossary/core.py:103)
_.lex_filenum # unused property (pyglossary/plugins/wordnet.py:89)
_.longComment # unused property (pyglossary/option.py:76)
_.numFiles # unused attribute (pyglossary/plugins/babylon_bgl/bgl_reader_debug.py:362)
_.numFiles # unused attribute (pyglossary/plugins/babylon_bgl/bgl_reader_debug.py:382)
_.numFiles # unused attribute (pyglossary/plugins/babylon_bgl/bgl_reader_debug.py:40)
_.readable # unused method (pyglossary/io_utils.py:101)
_.readable # unused method (pyglossary/io_utils.py:29)
_.readable # unused method (pyglossary/plugins/babylon_bgl/bgl_gzip.py:333)
_.readable # unused method (pyglossary/slob.py:297)
_.readinto1 # unused method (pyglossary/io_utils.py:131)
_.readinto1 # unused method (pyglossary/io_utils.py:59)
_.readinto # unused method (pyglossary/io_utils.py:128)
_.readinto # unused method (pyglossary/io_utils.py:56)
_.readlines # unused method (pyglossary/io_utils.py:147)
_.readlines # unused method (pyglossary/io_utils.py:75)
_.rewind # unused method (pyglossary/plugins/babylon_bgl/bgl_gzip.py:325)
_.setTimeEnable # unused method (pyglossary/core.py:141)
_.setVerbosity # unused method (pyglossary/core.py:118)
_.size_content_types # unused method (pyglossary/slob.py:1529)
_.size_header # unused method (pyglossary/slob.py:1502)
_.size_tags # unused method (pyglossary/slob.py:1522)
_.sortWords # unused method (pyglossary/glossary.py:102)
_.specialCharPattern # unused attribute (pyglossary/plugins/babylon_bgl/bgl_reader.py:370)
_.sub_title_line # unused method (pyglossary/plugins/dsl/__init__.py:273)
TextListSymbolCleanup # unused class (pyglossary/entry_filters.py:329)
_.titleElement # unused method (pyglossary/glossary.py:39)
T_SdList # unused class (pyglossary/plugins/stardict.py:183)
_.updateIter # unused method (pyglossary/glossary.py:99)
_.validateRaw # unused method (pyglossary/option.py:124)
_.writable # unused method (pyglossary/io_utils.py:116)
_.writable # unused method (pyglossary/io_utils.py:44)
_.writable # unused method (pyglossary/plugins/babylon_bgl/bgl_gzip.py:336)
_.writable # unused method (pyglossary/slob.py:321)
_.writelines # unused method (pyglossary/io_utils.py:150)
_.writelines # unused method (pyglossary/io_utils.py:78)
XdxfTransformerType # unused class (pyglossary/plugins/stardict.py:165)
| 9,194
|
Python
|
.py
| 138
| 65.623188
| 97
| 0.786109
|
ilius/pyglossary
| 2,176
| 238
| 22
|
GPL-3.0
|
9/5/2024, 5:10:09 PM (Europe/Amsterdam)
|
6,664
|
main.py
|
ilius_pyglossary/main.py
|
#!/usr/bin/env python3
import sys
from os.path import dirname
sys.path.insert(0, dirname(__file__))
from pyglossary.ui.main import main
main()
| 147
|
Python
|
.py
| 6
| 22.833333
| 37
| 0.781022
|
ilius/pyglossary
| 2,176
| 238
| 22
|
GPL-3.0
|
9/5/2024, 5:10:09 PM (Europe/Amsterdam)
|
6,665
|
g_json_test.py
|
ilius_pyglossary/tests/g_json_test.py
|
import unittest
from glossary_v2_test import TestGlossaryBase
class TestGlossaryJSON(TestGlossaryBase):
def __init__(self, *args, **kwargs):
TestGlossaryBase.__init__(self, *args, **kwargs)
self.dataFileCRC32.update(
{
"004-bar.json": "7e4b2663",
"100-en-de-v4.json": "6a20c6f6",
"100-en-fa.json": "8d29c1be",
"100-ja-en.json": "fab2c106",
},
)
def convert_txt_json(self, fname):
self.convert(
f"{fname}.txt",
f"{fname}-2.json",
compareText=f"{fname}.json",
)
def test_convert_txt_json_0(self):
self.convert_txt_json("004-bar")
def test_convert_txt_json_1(self):
self.convert_txt_json("100-en-fa")
def test_convert_txt_json_2(self):
self.convert_txt_json("100-en-de-v4")
def test_convert_txt_json_3(self):
self.convert_txt_json("100-ja-en")
if __name__ == "__main__":
unittest.main()
| 851
|
Python
|
.py
| 29
| 26.034483
| 50
| 0.679408
|
ilius/pyglossary
| 2,176
| 238
| 22
|
GPL-3.0
|
9/5/2024, 5:10:09 PM (Europe/Amsterdam)
|
6,666
|
g_aard2_slob_test.py
|
ilius_pyglossary/tests/g_aard2_slob_test.py
|
import os
import unittest
from glossary_v2_test import TestGlossaryBase
skip_module = False
class TestGlossarySlob(TestGlossaryBase):
def __init__(self, *args, **kwargs):
TestGlossaryBase.__init__(self, *args, **kwargs)
self.dataFileCRC32.update(
{
"100-en-fa-res.slob": "0216d006",
"100-en-fa-res-slob.txt": "c73100b3",
"100-en-fa-res-slob-sort.txt": "8253fe96",
"300-ru-en.txt": "77cfee2f",
},
)
def setUp(self):
if skip_module:
self.skipTest("module is skipped")
TestGlossaryBase.setUp(self)
def test_convert_txt_slob_1(self):
fname = "100-en-fa"
os.environ["SLOB_TIMESTAMP"] = "2023-01-01T12:00:00.000000+00:00"
self.convert(
f"{fname}.txt",
f"{fname}.slob",
# sha1sum="",
# compareBinary="",
# slob file is different each time (and so its sha1sum and md5sum)
# even with same exact tags!
# writeOptions={"compression": ""},
)
def test_convert_txt_slob_2_file_size_approx(self):
fname = "300-ru-en"
file_size_approx = 25000
files = [
(35852, self.newTempFilePath("300-ru-en.slob")),
(35687, self.newTempFilePath("300-ru-en.1.slob")),
(33856, self.newTempFilePath("300-ru-en.2.slob")),
(29413, self.newTempFilePath("300-ru-en.3.slob")),
]
self.convert(
f"{fname}.txt",
f"{fname}.slob",
writeOptions={
"file_size_approx": file_size_approx,
"file_size_approx_check_num_entries": 1,
},
compareBinary="",
# slob file is different each time (and so its sha1sum and md5sum)
)
for size, fpath in files:
with open(fpath, mode="rb") as _file:
actualSize = len(_file.read())
delta = actualSize - size
self.assertLess(
delta,
100,
msg=f"size expected={size} actual={actualSize}, file {fpath}",
)
def convert_slob_txt(self, fname, fname2, resFiles, **convertArgs):
resFilesPath = {
resFileName: self.newTempFilePath(f"{fname}-2.txt_res/{resFileName}")
for resFileName in resFiles
}
self.convert(
f"{fname}.slob",
f"{fname}-2.txt",
compareText=f"{fname2}.txt",
**convertArgs,
)
for resFileName in resFiles:
fpath1 = self.downloadFile(f"res/{resFileName}")
fpath2 = resFilesPath[resFileName]
self.compareBinaryFiles(fpath1, fpath2)
def test_convert_slob_txt_1(self):
self.convert_slob_txt(
"100-en-fa-res",
"100-en-fa-res-slob",
resFiles=[
"stardict.png",
"test.json",
],
)
def test_convert_slob_txt_2(self):
self.convert_slob_txt(
"100-en-fa-res",
"100-en-fa-res-slob",
resFiles=[
"stardict.png",
"test.json",
],
direct=False,
)
def test_convert_slob_txt_3(self):
self.convert_slob_txt(
"100-en-fa-res",
"100-en-fa-res-slob",
resFiles=[
"stardict.png",
"test.json",
],
sqlite=True,
)
def test_convert_slob_txt_4(self):
self.convert_slob_txt(
"100-en-fa-res",
"100-en-fa-res-slob-sort",
resFiles=[
"stardict.png",
"test.json",
],
sort=True,
)
if __name__ == "__main__":
unittest.main()
| 2,988
|
Python
|
.py
| 115
| 22.26087
| 72
| 0.661415
|
ilius/pyglossary
| 2,176
| 238
| 22
|
GPL-3.0
|
9/5/2024, 5:10:09 PM (Europe/Amsterdam)
|
6,667
|
g_stardict_textual_test.py
|
ilius_pyglossary/tests/g_stardict_textual_test.py
|
import unittest
from glossary_v2_test import TestGlossaryBase
class TestGlossaryStarDictTextual(TestGlossaryBase):
def __init__(self, *args, **kwargs):
TestGlossaryBase.__init__(self, *args, **kwargs)
self.dataFileCRC32.update(
{
"100-en-fa-sdt.xml": "48cb3336",
"100-en-fa-sdt.xml.txt": "0c9b4025",
"stardict-xdxf-2.xml": "b3285d5c",
"stardict-xdxf-2.xml-h.txt": "97b3a22b",
"stardict-xdxf-2.xml-x.txt": "de63f937",
"stardict-mixed-types-2.xml": "51d9ceb2",
"stardict-mixed-types-2.xml.txt": "c896cf68",
},
)
def convert_txt_sdxml(self, fname, fname2, **convertArgs):
self.convert(
f"{fname}.txt",
f"{fname}-2.xml",
compareText=f"{fname2}.xml",
outputFormat="StardictTextual",
**convertArgs,
)
def convert_sdxml_txt(self, fname, fname2, **convertArgs):
self.convert(
f"{fname}.xml",
f"{fname}-2.txt",
compareText=f"{fname2}.txt",
inputFormat="StardictTextual",
**convertArgs,
)
def test_convert_txt_sdxml_1(self):
self.convert_txt_sdxml(
"100-en-fa",
"100-en-fa-sdt",
)
def test_convert_sdxml_txt_1(self):
self.convert_sdxml_txt(
"100-en-fa-sdt",
"100-en-fa-sdt.xml",
)
def test_convert_sdxml_txt_2(self):
self.convert_sdxml_txt(
"stardict-mixed-types-2",
"stardict-mixed-types-2.xml",
)
def test_convert_sdxml_txt_3(self):
self.convert_sdxml_txt(
"stardict-xdxf-2",
"stardict-xdxf-2.xml-h",
readOptions={"xdxf_to_html": True},
)
def test_convert_sdxml_txt_4(self):
self.convert_sdxml_txt(
"stardict-xdxf-2",
"stardict-xdxf-2.xml-x",
readOptions={"xdxf_to_html": False},
)
if __name__ == "__main__":
unittest.main()
| 1,673
|
Python
|
.py
| 61
| 23.868852
| 59
| 0.673546
|
ilius/pyglossary
| 2,176
| 238
| 22
|
GPL-3.0
|
9/5/2024, 5:10:09 PM (Europe/Amsterdam)
|
6,668
|
glossary_security_test.py
|
ilius_pyglossary/tests/glossary_security_test.py
|
import logging
import os
import sys
import unittest
from os.path import abspath, dirname
rootDir = dirname(dirname(abspath(__file__)))
sys.path.insert(0, rootDir)
from glossary_errors_test import TestGlossaryErrors
from glossary_v2_test import testCacheDir
from pyglossary.glossary import Glossary
class TestGlossarySecurity(TestGlossaryErrors):
def __init__(self, *args, **kwargs):
TestGlossaryErrors.__init__(self, *args, **kwargs)
self.mockLog.setLevel(logging.INFO)
def test_convert_1(self):
glos = Glossary()
res = glos.convert(
inputFilename="os.system('abcd')",
outputFilename="os.system('abcd -l')",
)
self.assertIsNone(res)
self.assertLogCritical("Unable to detect output format!")
self.assertLogCritical(
"Writing file \"os.system('abcd -l')\" failed.",
)
def test_convert_2(self):
glos = Glossary()
res = glos.convert(
inputFilename="os.system('abcd');test.txt",
outputFilename="os.system('abcd -l')",
)
self.assertIsNone(res)
self.assertLogCritical("Unable to detect output format!")
self.assertLogCritical(
"Writing file \"os.system('abcd -l')\" failed.",
)
def test_convert_3(self):
glos = Glossary()
res = glos.convert(
inputFilename="os.system('abcd');test.txt",
outputFilename="os.system('abcd -l');test.csv",
)
self.assertIsNone(res)
errMsg = (
"[Errno 2] No such file or directory: "
f"\"{testCacheDir}{os.sep}os.system('abcd');test.txt\""
)
errMsg = errMsg.replace("\\", "\\\\")
self.assertLogCritical(errMsg)
self.assertLogCritical(
"Reading file \"os.system('abcd');test.txt\" failed.",
)
def test_convert_4(self):
glos = Glossary()
res = glos.convert(
inputFilename="test.txt\nos.system('abcd')",
outputFilename="test.csv\nos.system('abcd -l')",
)
self.assertIsNone(res)
self.assertLogCritical("Unable to detect output format!")
self.assertLogCritical(
"Writing file \"test.csv\\nos.system('abcd -l')\" failed.",
)
if __name__ == "__main__":
unittest.main()
| 2,006
|
Python
|
.py
| 65
| 27.938462
| 62
| 0.711399
|
ilius/pyglossary
| 2,176
| 238
| 22
|
GPL-3.0
|
9/5/2024, 5:10:09 PM (Europe/Amsterdam)
|
6,669
|
gregorian_test.py
|
ilius_pyglossary/tests/gregorian_test.py
|
import sys
import unittest
from os.path import abspath, dirname
rootDir = dirname(dirname(abspath(__file__)))
sys.path.insert(0, rootDir)
from pyglossary import gregorian
def getMonthLen(y: int, m: int) -> int:
if m == 12:
return gregorian.to_jd(y + 1, 1, 1) - gregorian.to_jd(y, 12, 1)
return gregorian.to_jd(y, m + 1, 1) - gregorian.to_jd(y, m, 1)
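# Sanity sketch, relying only on values the tables below also verify:
# January 2016 has 31 days and February 2016 (a leap year) has 29.
assert getMonthLen(2016, 1) == 31
assert getMonthLen(2016, 2) == 29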
class Testgregorian(unittest.TestCase):
def notest_isLeap_negativeYear(self):
print()
isLeapFunc = gregorian.isLeap
for year in range(10, -101, -1):
isLeap = isLeapFunc(year)
# print(f"{str(year).center(10)} {'L' if isLeap1 else ' '}")
print(f"{year}: \"{'L' if isLeap else ' '}\",")
# year -> f"{'L' if isLeap33 else ' '}{'L' if isLeap2820 else ' '}"
isLeapDict = {
-50: " ",
-49: " ",
-48: "L",
-47: " ",
-46: " ",
-45: " ",
-44: "L",
-43: " ",
-42: " ",
-41: " ",
-40: "L",
-39: " ",
-38: " ",
-37: " ",
-36: "L",
-35: " ",
-34: " ",
-33: " ",
-32: "L",
-31: " ",
-30: " ",
-29: " ",
-28: "L",
-27: " ",
-26: " ",
-25: " ",
-24: "L",
-23: " ",
-22: " ",
-21: " ",
-20: "L",
-19: " ",
-18: " ",
-17: " ",
-16: "L",
-15: " ",
-14: " ",
-13: " ",
-12: "L",
-11: " ",
-10: " ",
-9: " ",
-8: "L",
-7: " ",
-6: " ",
-5: " ",
-4: "L",
-3: " ",
-2: " ",
-1: " ",
0: "L",
1: " ",
2: " ",
3: " ",
4: "L",
5: " ",
6: " ",
7: " ",
8: "L",
9: " ",
10: " ",
11: " ",
12: "L",
13: " ",
14: " ",
15: " ",
16: "L",
17: " ",
18: " ",
19: " ",
20: "L",
21: " ",
22: " ",
23: " ",
24: "L",
25: " ",
26: " ",
27: " ",
28: "L",
29: " ",
30: " ",
31: " ",
32: "L",
33: " ",
34: " ",
35: " ",
36: "L",
37: " ",
38: " ",
39: " ",
40: "L",
41: " ",
42: " ",
43: " ",
44: "L",
45: " ",
46: " ",
47: " ",
48: "L",
49: " ",
50: " ",
1990: " ",
1991: " ",
1992: "L",
1993: " ",
1994: " ",
1995: " ",
1996: "L",
1997: " ",
1998: " ",
1999: " ",
2000: "L",
2001: " ",
2002: " ",
2003: " ",
2004: "L",
2005: " ",
2006: " ",
2007: " ",
2008: "L",
2009: " ",
2010: " ",
2011: " ",
2012: "L",
2013: " ",
2014: " ",
2015: " ",
2016: "L",
2017: " ",
2018: " ",
2019: " ",
2020: "L",
2021: " ",
2022: " ",
2023: " ",
2024: "L",
2025: " ",
2026: " ",
2027: " ",
2028: "L",
2029: " ",
}
dateToJdDict = {
(-50, 1, 1): 1702798,
(-49, 1, 1): 1703163,
(-48, 1, 1): 1703528,
(-47, 1, 1): 1703894,
(-46, 1, 1): 1704259,
(-45, 1, 1): 1704624,
(-44, 1, 1): 1704989,
(-43, 1, 1): 1705355,
(-42, 1, 1): 1705720,
(-41, 1, 1): 1706085,
(-40, 1, 1): 1706450,
(-39, 1, 1): 1706816,
(-38, 1, 1): 1707181,
(-37, 1, 1): 1707546,
(-36, 1, 1): 1707911,
(-35, 1, 1): 1708277,
(-34, 1, 1): 1708642,
(-33, 1, 1): 1709007,
(-32, 1, 1): 1709372,
(-31, 1, 1): 1709738,
(-30, 1, 1): 1710103,
(-29, 1, 1): 1710468,
(-28, 1, 1): 1710833,
(-27, 1, 1): 1711199,
(-26, 1, 1): 1711564,
(-25, 1, 1): 1711929,
(-24, 1, 1): 1712294,
(-23, 1, 1): 1712660,
(-22, 1, 1): 1713025,
(-21, 1, 1): 1713390,
(-20, 1, 1): 1713755,
(-19, 1, 1): 1714121,
(-18, 1, 1): 1714486,
(-17, 1, 1): 1714851,
(-16, 1, 1): 1715216,
(-15, 1, 1): 1715582,
(-14, 1, 1): 1715947,
(-13, 1, 1): 1716312,
(-12, 1, 1): 1716677,
(-11, 1, 1): 1717043,
(-10, 1, 1): 1717408,
(-9, 1, 1): 1717773,
(-8, 1, 1): 1718138,
(-7, 1, 1): 1718504,
(-6, 1, 1): 1718869,
(-5, 1, 1): 1719234,
(-4, 1, 1): 1719599,
(-3, 1, 1): 1719965,
(-2, 1, 1): 1720330,
(-1, 1, 1): 1720695,
(0, 1, 1): 1721060,
(1, 1, 1): 1721426,
(2, 1, 1): 1721791,
(3, 1, 1): 1722156,
(4, 1, 1): 1722521,
(5, 1, 1): 1722887,
(6, 1, 1): 1723252,
(7, 1, 1): 1723617,
(8, 1, 1): 1723982,
(9, 1, 1): 1724348,
(10, 1, 1): 1724713,
(11, 1, 1): 1725078,
(12, 1, 1): 1725443,
(13, 1, 1): 1725809,
(14, 1, 1): 1726174,
(15, 1, 1): 1726539,
(16, 1, 1): 1726904,
(17, 1, 1): 1727270,
(18, 1, 1): 1727635,
(19, 1, 1): 1728000,
(20, 1, 1): 1728365,
(21, 1, 1): 1728731,
(22, 1, 1): 1729096,
(23, 1, 1): 1729461,
(24, 1, 1): 1729826,
(25, 1, 1): 1730192,
(26, 1, 1): 1730557,
(27, 1, 1): 1730922,
(28, 1, 1): 1731287,
(29, 1, 1): 1731653,
(30, 1, 1): 1732018,
(31, 1, 1): 1732383,
(32, 1, 1): 1732748,
(33, 1, 1): 1733114,
(34, 1, 1): 1733479,
(35, 1, 1): 1733844,
(36, 1, 1): 1734209,
(37, 1, 1): 1734575,
(38, 1, 1): 1734940,
(39, 1, 1): 1735305,
(40, 1, 1): 1735670,
(41, 1, 1): 1736036,
(42, 1, 1): 1736401,
(43, 1, 1): 1736766,
(44, 1, 1): 1737131,
(45, 1, 1): 1737497,
(46, 1, 1): 1737862,
(47, 1, 1): 1738227,
(48, 1, 1): 1738592,
(49, 1, 1): 1738958,
(50, 1, 1): 1739323,
(2015, 1, 1): 2457024,
(2015, 2, 1): 2457055,
(2015, 3, 1): 2457083,
(2015, 4, 1): 2457114,
(2015, 5, 1): 2457144,
(2015, 6, 1): 2457175,
(2015, 7, 1): 2457205,
(2015, 8, 1): 2457236,
(2015, 9, 1): 2457267,
(2015, 10, 1): 2457297,
(2015, 11, 1): 2457328,
(2015, 12, 1): 2457358,
(2016, 1, 1): 2457389,
(2016, 2, 1): 2457420,
(2016, 3, 1): 2457449,
(2016, 4, 1): 2457480,
(2016, 5, 1): 2457510,
(2016, 6, 1): 2457541,
(2016, 7, 1): 2457571,
(2016, 8, 1): 2457602,
(2016, 9, 1): 2457633,
(2016, 10, 1): 2457663,
(2016, 11, 1): 2457694,
(2016, 12, 1): 2457724,
(2017, 1, 1): 2457755,
(2017, 2, 1): 2457786,
(2017, 3, 1): 2457814,
(2017, 4, 1): 2457845,
(2017, 5, 1): 2457875,
(2017, 6, 1): 2457906,
(2017, 7, 1): 2457936,
(2017, 8, 1): 2457967,
(2017, 9, 1): 2457998,
(2017, 10, 1): 2458028,
(2017, 11, 1): 2458059,
(2017, 12, 1): 2458089,
}
def test_isLeap(self):
for year, isLeapStr in self.isLeapDict.items():
isLeap = isLeapStr == "L"
isLeapActual = gregorian.isLeap(year)
self.assertEqual(
isLeapActual,
isLeap,
f"{year=}, {isLeap=}, {isLeapActual=}",
)
def test_to_jd(self):
for date, jd in self.dateToJdDict.items():
jdActual = gregorian.to_jd(*date)
self.assertEqual(
jdActual,
jd,
f"{date=}, {jd=}, {jdActual=}",
)
def test_convert(self):
startYear = 1950
endYear = 2050
for year in range(startYear, endYear):
for month in range(1, 13):
monthLen = getMonthLen(year, month)
for day in range(1, monthLen + 1):
date = (year, month, day)
jd = gregorian.to_jd(*date)
ndate = gregorian.jd_to(jd)
self.assertEqual(
ndate,
date,
f"{jd=}, {date=}, {ndate=}",
)
if __name__ == "__main__":
unittest.main()
| 6,577
|
Python
|
.py
| 335
| 16.525373
| 68
| 0.476083
|
ilius/pyglossary
| 2,176
| 238
| 22
|
GPL-3.0
|
9/5/2024, 5:10:09 PM (Europe/Amsterdam)
|
6,670
|
g_csv_test.py
|
ilius_pyglossary/tests/g_csv_test.py
|
import sys
import unittest
from os.path import abspath, dirname
rootDir = dirname(dirname(abspath(__file__)))
sys.path.insert(0, rootDir)
from glossary_v2_test import TestGlossaryBase
from pyglossary.glossary import Glossary as GlossaryLegacy
class TestGlossaryCSV(TestGlossaryBase):
def __init__(self, *args, **kwargs):
TestGlossaryBase.__init__(self, *args, **kwargs)
self.dataFileCRC32.update(
{
"100-en-de-v4.csv": "2890fb3e",
"100-en-fa.csv": "eb8b0474",
"100-en-fa-semicolon.csv": "b3f04599",
"100-ja-en.csv": "7af18cf3",
},
)
def convert_txt_csv(self, fname, fname2, **convertArgs):
self.convert(
f"{fname}.txt",
f"{fname}-2.csv",
compareText=f"{fname2}.csv",
**convertArgs,
)
def convert_csv_txt_rw(self, fname, fname2, infoOverride=None):
inputFilename = self.downloadFile(f"{fname}.csv")
outputFilename = self.newTempFilePath(f"{fname}-2.txt")
expectedFilename = self.downloadFile(f"{fname2}.txt")
glos = self.glos = GlossaryLegacy()
# using glos.convert will add "input_file_size" info key
# perhaps add another optional argument to glos.convert named infoOverride
rRes = glos.read(inputFilename, direct=True)
self.assertTrue(rRes)
if infoOverride:
for key, value in infoOverride.items():
glos.setInfo(key, value)
wRes = glos.write(outputFilename, format="Tabfile")
self.assertEqual(outputFilename, wRes)
self.compareTextFiles(outputFilename, expectedFilename)
glos.cleanup()
def convert_csv_txt(self, fname, fname2, **convertArgs):
self.convert(
f"{fname}.csv",
f"{fname}-2.txt",
compareText=f"{fname2}.txt",
**convertArgs,
)
def test_convert_txt_csv_1(self):
self.convert_txt_csv("100-en-fa", "100-en-fa")
def test_convert_txt_csv_2(self):
self.convert_txt_csv("100-en-de-v4", "100-en-de-v4")
def test_convert_txt_csv_3(self):
self.convert_txt_csv("100-ja-en", "100-ja-en")
def test_convert_txt_csv_4(self):
self.convert_txt_csv(
"100-en-fa",
"100-en-fa-semicolon",
writeOptions={"delimiter": ";"},
)
def test_convert_csv_txt_1(self):
self.convert_csv_txt(
"100-en-fa",
"100-en-fa",
infoOverride={"input_file_size": None},
)
def test_convert_csv_txt_2(self):
self.convert_csv_txt(
"100-en-de-v4",
"100-en-de-v4",
)
def test_convert_csv_txt_3(self):
self.convert_csv_txt(
"100-ja-en",
"100-ja-en",
infoOverride={"input_file_size": None},
)
def test_convert_csv_txt_4(self):
self.convert_csv_txt_rw(
"100-en-fa",
"100-en-fa",
infoOverride={"input_file_size": None},
)
def test_convert_txt_csv_5(self):
self.convert_csv_txt(
"100-en-fa-semicolon",
"100-en-fa",
readOptions={"delimiter": ";"},
infoOverride={"input_file_size": None},
)
if __name__ == "__main__":
unittest.main()
| 2,803
|
Python
|
.py
| 92
| 27.130435
| 76
| 0.695199
|
ilius/pyglossary
| 2,176
| 238
| 22
|
GPL-3.0
|
9/5/2024, 5:10:09 PM (Europe/Amsterdam)
|
6,671
|
g_xdxf_test.py
|
ilius_pyglossary/tests/g_xdxf_test.py
|
import unittest
from glossary_v2_test import TestGlossaryBase
class TestGlossaryXDXF(TestGlossaryBase):
def __init__(self, *args, **kwargs):
TestGlossaryBase.__init__(self, *args, **kwargs)
self.dataFileCRC32.update(
{
"100-cyber_lexicon_en-es.xdxf": "8d9ba394",
"100-cyber_lexicon_en-es-v3.txt": "4aa05086",
},
)
def convert_xdxf_txt(self, fname, fname2, **convertArgs):
self.convert(
f"{fname}.xdxf",
f"{fname}-tmp.txt",
compareText=f"{fname2}.txt",
**convertArgs,
)
def test_convert_xdxf_txt_1(self):
self.convert_xdxf_txt(
"100-cyber_lexicon_en-es",
"100-cyber_lexicon_en-es-v3",
)
if __name__ == "__main__":
unittest.main()
| 687
|
Python
|
.py
| 25
| 24.16
| 58
| 0.681957
|
ilius/pyglossary
| 2,176
| 238
| 22
|
GPL-3.0
|
9/5/2024, 5:10:09 PM (Europe/Amsterdam)
|
6,672
|
xml_utils_test.py
|
ilius_pyglossary/tests/xml_utils_test.py
|
import sys
import unittest
from os.path import abspath, dirname
rootDir = dirname(dirname(abspath(__file__)))
sys.path.insert(0, rootDir)
from pyglossary.xml_utils import xml_escape
class Test_xml_escape(unittest.TestCase):
def test(self):
f = xml_escape
self.assertEqual(f(""), "")
self.assertEqual(f("abc"), "abc")
self.assertEqual(f('"a"'), ""a"")
self.assertEqual(f("'a'"), "'a'")
self.assertEqual(f('"a"', quotation=False), '"a"')
self.assertEqual(f("'a'", quotation=False), "'a'")
self.assertEqual(f("R&D"), "R&D")
self.assertEqual(f("<-->"), "<-->")
if __name__ == "__main__":
unittest.main()
| 656
|
Python
|
.py
| 19
| 32.157895
| 52
| 0.660856
|
ilius/pyglossary
| 2,176
| 238
| 22
|
GPL-3.0
|
9/5/2024, 5:10:09 PM (Europe/Amsterdam)
|
6,673
|
g_dictunformat_test.py
|
ilius_pyglossary/tests/g_dictunformat_test.py
|
import unittest
from glossary_v2_test import TestGlossaryBase
class TestGlossaryDictunformat(TestGlossaryBase):
def __init__(self, *args, **kwargs):
TestGlossaryBase.__init__(self, *args, **kwargs)
self.dataFileCRC32.update(
{
"100-en-fa-2.dictunformat": "03a13c1a",
"100-en-fa-2.dictunformat.txt": "c88207ec",
},
)
def convert_dictunformat_txt(self, fname, fname2, **convertArgs):
self.convert(
f"{fname}.dictunformat",
f"{fname}-tmp.txt",
compareText=f"{fname2}.txt",
**convertArgs,
)
def test_convert_dictunformat_txt_1(self):
self.convert_dictunformat_txt(
"100-en-fa-2",
"100-en-fa-2.dictunformat",
)
if __name__ == "__main__":
unittest.main()
| 707
|
Python
|
.py
| 25
| 24.96
| 66
| 0.697329
|
ilius/pyglossary
| 2,176
| 238
| 22
|
GPL-3.0
|
9/5/2024, 5:10:09 PM (Europe/Amsterdam)
|
6,674
|
g_dict_org_source_test.py
|
ilius_pyglossary/tests/g_dict_org_source_test.py
|
import unittest
from glossary_v2_test import TestGlossaryBase
class TestGlossaryDictOrg(TestGlossaryBase):
def __init__(self, *args, **kwargs):
TestGlossaryBase.__init__(self, *args, **kwargs)
self.dataFileCRC32.update(
{
"100-en-fa.dtxt": "05d6e939",
},
)
def convert_txt_dict_org_source(self, fname, fname2, **convertArgs):
self.convert(
f"{fname}.txt",
f"{fname}-2.dtxt",
compareText=f"{fname2}.dtxt",
**convertArgs,
)
def test_convert_txt_dict_org_source_1(self):
self.convert_txt_dict_org_source(
"100-en-fa",
"100-en-fa",
)
if __name__ == "__main__":
unittest.main()
| 627
|
Python
|
.py
| 24
| 22.875
| 69
| 0.680672
|
ilius/pyglossary
| 2,176
| 238
| 22
|
GPL-3.0
|
9/5/2024, 5:10:09 PM (Europe/Amsterdam)
|
6,675
|
stardict_test.py
|
ilius_pyglossary/tests/stardict_test.py
|
import locale
import random
import unittest
from functools import cmp_to_key
def toBytes(s):
return bytes(s, "utf-8") if isinstance(s, str) else bytes(s)
def sortKeyBytes(ba: bytes):
assert isinstance(ba, bytes)
# ba.lower() + ba is wrong
return (
ba.lower(),
ba,
)
def stardictStrCmp(s1, s2):
"""
	Use this function to sort index items in a StarDict dictionary.
	s1 and s2 must be utf-8 encoded strings.
"""
s1 = toBytes(s1)
s2 = toBytes(s2)
a = asciiStrCaseCmp(s1, s2)
if a == 0:
return strCmp(s1, s2)
return a
# the slow way in Python 3 (where there is no cmp arg in list.sort)
sortKeyOld = cmp_to_key(stardictStrCmp) # TOO SLOW
def asciiStrCaseCmp(ba1, ba2):
"""
	ba1 and ba2 are instances of bytes.
	Imitates the g_ascii_strcasecmp function of the glib library (gstrfuncs.c).
"""
commonLen = min(len(ba1), len(ba2))
for i in range(commonLen):
c1 = asciiLower(ba1[i])
c2 = asciiLower(ba2[i])
if c1 != c2:
return c1 - c2
return len(ba1) - len(ba2)
def strCmp(ba1, ba2):
"""
	ba1 and ba2 are instances of bytes.
	Imitates strcmp of the standard C library.
	Attention! You may be tempted to replace this function with the
	built-in cmp() function. Hold on! Most probably these two functions behave
	identically now, but cmp does not document how it compares strings.
	There is no guarantee it will not be changed in the future.
	Since we need a predictable sorting order in StarDict dictionaries, we need
	to preserve this function despite the fact that there are other ways to
	implement it.
"""
commonLen = min(len(ba1), len(ba2))
for i in range(commonLen):
c1 = ba1[i]
c2 = ba2[i]
if c1 != c2:
return c1 - c2
return len(ba1) - len(ba2)
def isAsciiAlpha(c):
"""C is int."""
return ord("A") <= c <= ord("Z") or ord("a") <= c <= ord("z")
def isAsciiLower(c):
return ord("a") <= c <= ord("z")
def isAsciiUpper(c):
"""
c is int
imitate ISUPPER macro of glib library gstrfuncs.c file.
"""
return ord("A") <= c <= ord("Z")
def asciiLower(c):
"""
	c is an int (a single byte value);
	returns an int (ascii character code).
	Imitates the TOLOWER macro of the glib library (gstrfuncs.c).
	This function converts upper case Latin letters to the corresponding
	lower case letters; other chars are not changed.
	Since c is a single byte value, you may apply this function to the
	individual bytes of a non-Unicode string.
	The following encodings are allowed: single byte encodings like koi8-r,
	cp1250, cp1251, cp1252, etc, and the utf-8 encoding.
	Attention! The Python Standard Library provides the str.lower() method.
	It is not a correct replacement for this function.
	For a non-unicode string, str.lower() is locale dependent: it not only
	converts Latin letters to lower case, but locale specific letters
	will be converted as well.
"""
return c - ord("A") + ord("a") if isAsciiUpper(c) else c
def getRandomBytes(avgLen, sigma):
length = round(random.gauss(avgLen, sigma))
return bytes([random.choice(range(256)) for _ in range(length)])
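# Ordering sketch: ASCII-case-insensitive comparison decides first and raw
# byte order breaks ties, matching how StarDict orders its index entries.
assert stardictStrCmp("apple", "Banana") < 0  # case-insensitive: a < b
assert stardictStrCmp("Apple", "apple") < 0   # tie-break: b"A" (0x41) < b"a" (0x61)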
class AsciiLowerUpperTest(unittest.TestCase):
def set_locale_iter(self):
for localeName in locale.locale_alias.values():
try:
locale.setlocale(locale.LC_ALL, localeName)
except Exception as e:
if "unsupported locale setting" not in str(e):
print(e)
continue
yield localeName
def test_isalpha(self):
for _ in self.set_locale_iter():
for code in range(256):
self.assertEqual(
isAsciiAlpha(code),
bytes([code]).isalpha(),
)
def test_islower(self):
for _ in self.set_locale_iter():
for code in range(256):
self.assertEqual(
isAsciiLower(code),
bytes([code]).islower(),
)
def test_isupper(self):
for _ in self.set_locale_iter():
for code in range(256):
self.assertEqual(
isAsciiUpper(code),
bytes([code]).isupper(),
)
def test_lower(self):
for _ in self.set_locale_iter():
for code in range(256):
self.assertEqual(
asciiLower(code),
ord(bytes([code]).lower()),
)
class SortRandomTest(unittest.TestCase):
def set_locale_iter(self):
for localeName in locale.locale_alias.values():
try:
locale.setlocale(locale.LC_ALL, localeName)
except Exception as e:
if "unsupported locale setting" not in str(e):
raise e
continue
# print(localeName)
yield localeName
def test_sort_1(self):
bsList = [getRandomBytes(30, 10) for _ in range(100)]
for _ in self.set_locale_iter():
self.assertEqual(
sorted(
bsList,
key=sortKeyOld,
),
sorted(
bsList,
key=sortKeyBytes,
),
)
if __name__ == "__main__":
unittest.main()
| 4,549
|
Python
|
.py
| 153
| 26.581699
| 74
| 0.710877
|
ilius/pyglossary
| 2,176
| 238
| 22
|
GPL-3.0
|
9/5/2024, 5:10:09 PM (Europe/Amsterdam)
|
6,676
|
dictzip_test.py
|
ilius_pyglossary/tests/dictzip_test.py
|
import gzip
import logging
import unittest
from pathlib import Path
from glossary_v2_errors_test import TestGlossaryErrorsBase
from pyglossary.os_utils import runDictzip
TEXT = """
Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod tempor
incididunt ut labore et dolore magna aliqua. Ut enim ad minim veniam, quis
nostrud exercitation ullamco laboris nisi ut aliquip ex ea commodo consequat.
Duis aute irure dolor in reprehenderit in voluptate velit esse cillum dolore eu
fugiat nulla pariatur. Excepteur sint occaecat cupidatat non proident, sunt in
culpa qui officia deserunt mollit anim id est laborum.
"""
MISSING_DEP_MARK = "Dictzip compression requires idzip module or dictzip utility,"
class TestDictzip(TestGlossaryErrorsBase):
def setUp(self) -> None:
super().setUp()
self.test_file_path = Path(self.tempDir) / "test_file.txt"
filename = self.test_file_path.name + ".dz"
self.result_file_path = self.test_file_path.parent / filename
with open(self.test_file_path, "a", encoding="utf-8") as tmp_file:
tmp_file.write(TEXT)
def skip_on_dep(self, method: str) -> None:
warn = self.mockLog.popLog(logging.WARNING, MISSING_DEP_MARK, partial=True)
if warn:
self.skipTest(f"Missing {method} dependency")
def test_idzip_compressed_exists(self) -> None:
method = "idzip"
runDictzip(self.test_file_path, method)
self.skip_on_dep(method)
self.assertTrue(self.result_file_path.exists())
self.assertTrue(self.result_file_path.is_file())
def test_idzip_compressed_matches(self) -> None:
method = "idzip"
runDictzip(self.test_file_path, method)
self.skip_on_dep(method)
with gzip.open(self.result_file_path, "r") as file:
result = file.read().decode()
self.assertEqual(result, TEXT)
def test_dictzip_compressed_exists(self) -> None:
method = "dictzip"
runDictzip(self.test_file_path, method)
self.skip_on_dep(method)
self.assertTrue(self.result_file_path.exists())
self.assertTrue(self.result_file_path.is_file())
def test_dictzip_compressed_matches(self) -> None:
method = "dictzip"
runDictzip(self.test_file_path, method)
self.skip_on_dep(method)
with gzip.open(self.result_file_path, "r") as file:
result = file.read().decode()
self.assertEqual(result, TEXT)
	def test_idzip_missing_target(self) -> None:
method = "idzip"
filename = "/NOT_EXISTED_PATH/file.txt"
expected = f"No such file or directory: '{filename}'"
runDictzip(filename, method)
self.skip_on_dep(method)
err = self.mockLog.popLog(logging.ERROR, expected, partial=True)
self.assertIsNotNone(err)
	def test_dictzip_missing_target(self) -> None:
method = "dictzip"
filename = "/NOT_EXISTED_PATH/boilerplate.txt"
expected = f'Cannot open "{filename}"'
runDictzip(filename, method)
self.skip_on_dep(method)
err = self.mockLog.popLog(logging.ERROR, expected, partial=True)
self.assertIsNotNone(err)
if __name__ == "__main__":
unittest.main()
| 2,934
|
Python
|
.py
| 71
| 38.676056
| 82
| 0.757459
|
ilius/pyglossary
| 2,176
| 238
| 22
|
GPL-3.0
|
9/5/2024, 5:10:09 PM (Europe/Amsterdam)
|
6,677
|
g_dicformids_test.py
|
ilius_pyglossary/tests/g_dicformids_test.py
|
import unittest
from glossary_v2_test import TestGlossaryBase
class TestGlossaryDictionaryForMIDs(TestGlossaryBase):
def __init__(self, *args, **kwargs):
TestGlossaryBase.__init__(self, *args, **kwargs)
self.dataFileCRC32.update(
{
"100-en-fa.mids.zip": "32d1185f",
},
)
def convert_txt_mids(self, fname, sha1sumDict, **convertArgs):
outputFname = f"{fname}-2.mids.zip"
outputFpath = self.newTempFilePath(outputFname)
# expectedFpath = self.downloadFile(f"{fname}.mids.zip")
self.convert(
f"{fname}.txt",
outputFname,
**convertArgs,
)
self.checkZipFileSha1sum(outputFpath, sha1sumDict)
def test_convert_txt_mids_1(self):
sha1sumDict = {
"DictionaryForMIDs.properties": "4260a87d6cdd55622dcfe395880bc913f96102b8",
"directory1.csv": "1f1ab12b107608a1513254fff3c323bbcdfbd5cf",
"index1.csv": "494268da410c520e56142b47610f6bbcfd53c79f",
"searchlist.csv": "4f4513d1550436e867e1a79dbd073a7e5bb38e32",
}
self.convert_txt_mids("100-en-fa", sha1sumDict)
if __name__ == "__main__":
unittest.main()
| 1,054
|
Python
|
.py
| 30
| 31.933333
| 78
| 0.751229
|
ilius/pyglossary
| 2,176
| 238
| 22
|
GPL-3.0
|
9/5/2024, 5:10:09 PM (Europe/Amsterdam)
|
6,678
|
slob_test.py
|
ilius_pyglossary/tests/slob_test.py
|
import io
import logging
import os
import random
import sys
import tempfile
import unicodedata
import unittest
from os.path import abspath, dirname
from typing import cast
rootDir = dirname(dirname(abspath(__file__)))
sys.path.insert(0, rootDir)
import icu
from pyglossary import slob
from pyglossary.core_test import MockLogHandler
mockLog = MockLogHandler()
log = logging.getLogger("pyglossary")
log.addHandler(mockLog)
class StructReaderWriter(slob.StructWriter):
def __init__(
self,
_file: "io.BufferedWriter",
reader: "slob.StructReader",
encoding: "str | None" = None,
) -> None:
super().__init__(
_file=_file,
encoding=encoding,
)
self._reader = reader
def tell(self) -> int:
return self._file.tell()
def write(self, data: bytes) -> int:
return self._file.write(data)
def read_byte(self) -> int:
return self._reader.read_byte()
def read_tiny_text(self) -> str:
return self._reader.read_tiny_text()
class TagNotFound(Exception):
pass
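# Test helper: rewrite the value of an existing tag in a finalized slob file,
# in place; raises TagNotFound if no tag with that name exists.
# Usage sketch: set_tag_value("test.slob", "a", "xyz")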
def set_tag_value(filename: str, name: str, value: str) -> None:
with slob.fopen(filename, "rb+") as _file:
_file.seek(len(slob.MAGIC) + 16)
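# skip the magic bytes plus 16 more (presumably the file UUID) to land on
# the encoding field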
encoding = slob.read_byte_string(_file, slob.U_CHAR).decode(slob.UTF8)
if slob.encodings.search_function(encoding) is None:
raise slob.UnknownEncoding(encoding)
reader = StructReaderWriter(
_file=_file,
reader=slob.StructReader(_file, encoding=encoding),
encoding=encoding,
)
reader.read_tiny_text()
tag_count = reader.read_byte()
for _ in range(tag_count):
key = reader.read_tiny_text()
if key == name:
reader.write_tiny_text(value, editable=True)
return
reader.read_tiny_text()
raise TagNotFound(name)
class BaseTest(unittest.TestCase):
def setUp(self):
# if skip_module:
# self.skipTest("module is skipped")
self.tmpdir = tempfile.TemporaryDirectory(prefix="test")
self._writers = []
def tearDown(self):
for w in self._writers:
w.close()
self.tmpdir.cleanup()
def _observer(self, event: "slob.WriterEvent"):
log.info(f"slob: {event.name}{': ' + event.data if event.data else ''}")
def create(self, *args, observer=None, **kwargs):
if observer is None:
observer = self._observer
w = slob.Writer(*args, observer=observer, **kwargs)
self._writers.append(w)
return w
class TestReadWrite(BaseTest):
def setUp(self):
BaseTest.setUp(self)
self.path = os.path.join(self.tmpdir.name, "test.slob")
writer = self.create(self.path)
self.tags = {
"a": "abc",
"bb": "xyz123",
"ccc": "lkjlk",
}
for name, value in self.tags.items():
writer.tag(name, value)
self.tag2 = "bb", "xyz123"
self.blob_encoding = "ascii"
self.data = [
(("c", "cc", "ccc"), slob.MIME_TEXT, "Hello C 1"),
("a", slob.MIME_TEXT, "Hello A 12"),
("z", slob.MIME_TEXT, "Hello Z 123"),
("b", slob.MIME_TEXT, "Hello B 1234"),
("d", slob.MIME_TEXT, "Hello D 12345"),
("uuu", slob.MIME_HTML, "<html><body>Hello U!</body></html>"),
((("yy", "frag1"),), slob.MIME_HTML, '<h1 name="frag1">Section 1</h1>'),
]
self.all_keys = []
self.data_as_dict = {}
for k, t, v in self.data:
if isinstance(k, str):
k = (k,) # noqa: PLW2901
for key in k:
if isinstance(key, tuple):
key, fragment = key # noqa: PLW2901
else:
fragment = ""
self.all_keys.append(key)
self.data_as_dict[key] = (t, v, fragment)
writer.add(v.encode(self.blob_encoding), *k, content_type=t)
self.all_keys.sort()
writer.finalize()
self.w = writer
def test_header(self):
with slob.MultiFileReader(self.path) as f:
header = slob.read_header(f)
for key, value in self.tags.items():
self.assertEqual(header.tags[key], value)
self.assertEqual(self.w.encoding, slob.UTF8)
self.assertEqual(header.encoding, self.w.encoding)
self.assertEqual(header.compression, self.w.compression)
for i, content_type in enumerate(header.content_types):
self.assertEqual(self.w.content_types[content_type], i)
self.assertEqual(header.blob_count, len(self.data))
def test_content(self):
with slob.open(self.path) as r:
self.assertEqual(len(r), len(self.all_keys))
self.assertRaises(IndexError, r.__getitem__, len(self.all_keys))
for i, item in enumerate(r):
self.assertEqual(item.key, self.all_keys[i])
content_type, value, fragment = self.data_as_dict[item.key]
self.assertEqual(item.content_type, content_type)
self.assertEqual(item.content.decode(self.blob_encoding), value)
self.assertEqual(item.fragment, fragment)
class TestSort(BaseTest):
def setUp(self):
BaseTest.setUp(self)
self.path = os.path.join(self.tmpdir.name, "test.slob")
writer = self.create(self.path)
data = [
"Ф, ф",
"Ф ф",
"Ф",
"Э",
"Е е",
"г",
"н",
"ф",
"а",
"Ф, Ф",
"е",
"Е",
"Ее",
"ё",
"Ё",
"Её",
"Е ё",
"А",
"э",
"ы",
]
self.data_sorted = sorted(data, key=slob.sortkey(slob.IDENTICAL))
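# slob.sortkey builds an ICU collation key; IDENTICAL is the strongest
# collation strength, so even case and accent differences affect ordering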
for k in data:
v = ";".join(unicodedata.name(c) for c in k)
writer.add(v.encode("ascii"), k)
writer.finalize()
self.r = slob.open(self.path)
def test_sort_order(self):
for i in range(len(self.r)):
self.assertEqual(self.r[i].key, self.data_sorted[i])
def tearDown(self):
self.r.close()
BaseTest.tearDown(self)
class TestSortKey(BaseTest):
def setUp(self):
BaseTest.setUp(self)
self.data = [
"Ф, ф",
"Ф ф",
"Ф",
"Э",
"Е е",
"г",
"н",
"ф",
"а",
"Ф, Ф",
"е",
"Е",
"Ее",
"ё",
"Ё",
"Её",
"Е ё",
"А",
"э",
"ы",
]
self.data_sorted = [
"а",
"А",
"г",
"е",
"Е",
"ё",
"Ё",
"Е е",
"Ее",
"Е ё",
"Её",
"н",
"ф",
"Ф",
"Ф ф",
"Ф, ф",
"Ф, Ф",
"ы",
"э",
"Э",
]
def test_sort_order(self):
for locName in (
# en_US_POSIX on Mac OS X
# https://github.com/ilius/pyglossary/issues/458
"en_US_POSIX",
"en_US",
"en_CA",
"fa_IR.UTF-8",
):
icu.Locale.setDefault(icu.Locale(locName))
slob.sortkey.cache_clear()
data_sorted = sorted(self.data, key=slob.sortkey(slob.IDENTICAL))
self.assertEqual(self.data_sorted, data_sorted)
class TestFind(BaseTest):
def setUp(self):
BaseTest.setUp(self)
self.path = os.path.join(self.tmpdir.name, "test.slob")
writer = self.create(self.path)
data = [
"Cc",
"aA",
"aa",
"Aa",
"Bb",
"cc",
"Äā",
"ăÀ",
"a\u00a0a",
"a-a",
"a\u2019a",
"a\u2032a",
"a,a",
"a a",
]
for k in data:
v = ";".join(unicodedata.name(c) for c in k)
writer.add(v.encode("ascii"), k)
writer.finalize()
self.r = slob.open(self.path)
def get(self, d, key):
return [item.content.decode("ascii") for item in d[key]]
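# the lookups below use progressively weaker ICU collation strengths:
# IDENTICAL matches exactly, QUATERNARY also ignores the kind of space,
# TERTIARY additionally ignores punctuation, SECONDARY ignores case,
# and PRIMARY ignores accents as well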
def test_find_identical(self):
d = self.r.as_dict(slob.IDENTICAL)
self.assertEqual(
self.get(d, "aa"),
["LATIN SMALL LETTER A;LATIN SMALL LETTER A"],
)
self.assertEqual(
self.get(d, "a-a"),
["LATIN SMALL LETTER A;HYPHEN-MINUS;LATIN SMALL LETTER A"],
)
self.assertEqual(
self.get(d, "aA"),
["LATIN SMALL LETTER A;LATIN CAPITAL LETTER A"],
)
self.assertEqual(
self.get(d, "Äā"),
[
"LATIN CAPITAL LETTER A WITH DIAERESIS;"
"LATIN SMALL LETTER A WITH MACRON",
],
)
self.assertEqual(
self.get(d, "a a"),
["LATIN SMALL LETTER A;SPACE;LATIN SMALL LETTER A"],
)
def test_find_quaternary(self):
d = self.r.as_dict(slob.QUATERNARY)
self.assertEqual(
self.get(d, "a\u2032a"),
["LATIN SMALL LETTER A;PRIME;LATIN SMALL LETTER A"],
)
self.assertEqual(
self.get(d, "a a"),
[
"LATIN SMALL LETTER A;SPACE;LATIN SMALL LETTER A",
"LATIN SMALL LETTER A;NO-BREAK SPACE;LATIN SMALL LETTER A",
],
)
def test_find_tertiary(self):
d = self.r.as_dict(slob.TERTIARY)
self.assertEqual(
self.get(d, "aa"),
[
"LATIN SMALL LETTER A;SPACE;LATIN SMALL LETTER A",
"LATIN SMALL LETTER A;NO-BREAK SPACE;LATIN SMALL LETTER A",
"LATIN SMALL LETTER A;HYPHEN-MINUS;LATIN SMALL LETTER A",
"LATIN SMALL LETTER A;COMMA;LATIN SMALL LETTER A",
"LATIN SMALL LETTER A;RIGHT SINGLE QUOTATION MARK;LATIN SMALL LETTER A",
"LATIN SMALL LETTER A;PRIME;LATIN SMALL LETTER A",
"LATIN SMALL LETTER A;LATIN SMALL LETTER A",
],
)
def test_find_secondary(self):
d = self.r.as_dict(slob.SECONDARY)
self.assertEqual(
self.get(d, "aa"),
[
"LATIN SMALL LETTER A;SPACE;LATIN SMALL LETTER A",
"LATIN SMALL LETTER A;NO-BREAK SPACE;LATIN SMALL LETTER A",
"LATIN SMALL LETTER A;HYPHEN-MINUS;LATIN SMALL LETTER A",
"LATIN SMALL LETTER A;COMMA;LATIN SMALL LETTER A",
"LATIN SMALL LETTER A;RIGHT SINGLE QUOTATION MARK;LATIN SMALL LETTER A",
"LATIN SMALL LETTER A;PRIME;LATIN SMALL LETTER A",
"LATIN SMALL LETTER A;LATIN SMALL LETTER A",
"LATIN SMALL LETTER A;LATIN CAPITAL LETTER A",
"LATIN CAPITAL LETTER A;LATIN SMALL LETTER A",
],
)
def test_find_primary(self):
d = self.r.as_dict(slob.PRIMARY)
expected = [
"LATIN SMALL LETTER A;SPACE;LATIN SMALL LETTER A",
"LATIN SMALL LETTER A;NO-BREAK SPACE;LATIN SMALL LETTER A",
"LATIN SMALL LETTER A;HYPHEN-MINUS;LATIN SMALL LETTER A",
"LATIN SMALL LETTER A;COMMA;LATIN SMALL LETTER A",
"LATIN SMALL LETTER A;RIGHT SINGLE QUOTATION MARK;LATIN SMALL LETTER A",
"LATIN SMALL LETTER A;PRIME;LATIN SMALL LETTER A",
"LATIN SMALL LETTER A;LATIN SMALL LETTER A",
"LATIN SMALL LETTER A;LATIN CAPITAL LETTER A",
"LATIN CAPITAL LETTER A;LATIN SMALL LETTER A",
"LATIN SMALL LETTER A WITH BREVE;LATIN CAPITAL LETTER A WITH GRAVE",
"LATIN CAPITAL LETTER A WITH DIAERESIS;LATIN SMALL LETTER A WITH MACRON",
]
self.assertEqual(
self.get(d, "aa"),
expected,
)
def tearDown(self):
self.r.close()
BaseTest.tearDown(self)
class TestPrefixFind(BaseTest):
def setUp(self):
BaseTest.setUp(self)
self.path = os.path.join(self.tmpdir.name, "test.slob")
self.data = ["a", "ab", "abc", "abcd", "abcde"]
writer = self.create(self.path)
for k in self.data:
writer.add(k.encode("ascii"), k)
writer.finalize()
def test(self):
with slob.open(self.path) as r:
for i, k in enumerate(self.data):
d = r.as_dict(slob.IDENTICAL, len(k))
self.assertEqual(
[cast(slob.Blob, v).content.decode("ascii") for v in d[k]],
self.data[i:],
)
class TestAlias(BaseTest):
def setUp(self):
BaseTest.setUp(self)
self.path = os.path.join(self.tmpdir.name, "test.slob")
def test_alias(self):
too_many_redirects = []
target_not_found = []
def observer(event):
if event.name == "too_many_redirects":
too_many_redirects.append(event.data)
elif event.name == "alias_target_not_found":
target_not_found.append(event.data)
w = self.create(self.path, observer=observer)
data = ["z", "b", "q", "a", "u", "g", "p", "n"]
for k in data:
v = ";".join(unicodedata.name(c) for c in k)
w.add(v.encode("ascii"), k)
w.add_alias("w", "u")
w.add_alias("small u", "u")
w.add_alias("y1", "y2")
w.add_alias("y2", "y3")
w.add_alias("y3", "z")
w.add_alias("ZZZ", "YYY")
w.add_alias("l3", "l1")
w.add_alias("l1", "l2")
w.add_alias("l2", "l3")
w.add_alias("a1", ("a", "a-frag1"))
w.add_alias("a2", "a1")
w.add_alias("a3", ("a2", "a-frag2"))
w.add_alias("g1", "g")
w.add_alias("g2", ("g1", "g-frag1"))
w.add_alias("n or p", "n")
w.add_alias("n or p", "p")
w.finalize()
self.assertEqual(too_many_redirects, ["l1", "l2", "l3"])
self.assertEqual(target_not_found, ["l2", "l3", "l1", "YYY"])
with slob.open(self.path) as r:
d = r.as_dict()
def get(key):
return [item.content.decode("ascii") for item in d[key]]
self.assertEqual(get("w"), ["LATIN SMALL LETTER U"])
self.assertEqual(get("small u"), ["LATIN SMALL LETTER U"])
self.assertEqual(get("y1"), ["LATIN SMALL LETTER Z"])
self.assertEqual(get("y2"), ["LATIN SMALL LETTER Z"])
self.assertEqual(get("y3"), ["LATIN SMALL LETTER Z"])
self.assertEqual(get("ZZZ"), [])
self.assertEqual(get("l1"), [])
self.assertEqual(get("l2"), [])
self.assertEqual(get("l3"), [])
self.assertEqual(len(list(d["n or p"])), 2)
item_a1 = cast(slob.Blob, next(d["a1"]))
self.assertEqual(item_a1.content, b"LATIN SMALL LETTER A")
self.assertEqual(item_a1.fragment, "a-frag1")
item_a2 = cast(slob.Blob, next(d["a2"]))
self.assertEqual(item_a2.content, b"LATIN SMALL LETTER A")
self.assertEqual(item_a2.fragment, "a-frag1")
item_a3 = cast(slob.Blob, next(d["a3"]))
self.assertEqual(item_a3.content, b"LATIN SMALL LETTER A")
self.assertEqual(item_a3.fragment, "a-frag1")
item_g1 = cast(slob.Blob, next(d["g1"]))
self.assertEqual(item_g1.content, b"LATIN SMALL LETTER G")
self.assertEqual(item_g1.fragment, "")
item_g2 = cast(slob.Blob, next(d["g2"]))
self.assertEqual(item_g2.content, b"LATIN SMALL LETTER G")
self.assertEqual(item_g2.fragment, "g-frag1")
class TestBlobId(BaseTest):
def test(self):
max_i = 2**32 - 1
max_j = 2**16 - 1
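# meld_ints packs two integers (up to 32 and 16 bits, per the bounds above)
# into a single value; verify that unmeld_ints inverts it at the boundaries
# and for random values in between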
i_values = [0, max_i] + [random.randint(1, max_i - 1) for _ in range(100)]
j_values = [0, max_j] + [random.randint(1, max_j - 1) for _ in range(100)]
for i in i_values:
for j in j_values:
self.assertEqual(slob.unmeld_ints(slob.meld_ints(i, j)), (i, j))
class TestMultiFileReader(BaseTest):
def test_read_all(self):
fnames = []
for name in "abcdef":
path = os.path.join(self.tmpdir.name, name)
fnames.append(path)
with slob.fopen(path, "wb") as f:
f.write(name.encode(slob.UTF8))
with slob.MultiFileReader(*fnames) as m:
self.assertEqual(m.read().decode(slob.UTF8), "abcdef")
def test_seek_and_read(self):
def mkfile(basename, content):
part = os.path.join(self.tmpdir.name, basename)
with slob.fopen(part, "wb") as f:
f.write(content)
return part
content = b"abc\nd\nefgh\nij"
part1 = mkfile("1", content[:4])
part2 = mkfile("2", content[4:5])
part3 = mkfile("3", content[5:])
with slob.MultiFileReader(part1, part2, part3) as m:
self.assertEqual(m.size, len(content))
m.seek(2)
self.assertEqual(m.read(2), content[2:4])
m.seek(1)
self.assertEqual(m.read(len(content) - 2), content[1:-1])
m.seek(-1, whence=io.SEEK_END)
self.assertEqual(m.read(10), content[-1:])
m.seek(4)
m.seek(-2, whence=io.SEEK_CUR)
self.assertEqual(m.read(3), content[2:5])
class TestFormatErrors(BaseTest):
def test_wrong_file_type(self):
name = os.path.join(self.tmpdir.name, "1")
with slob.fopen(name, "wb") as f:
f.write(b"123")
self.assertRaises(slob.UnknownFileFormat, slob.open, name)
def test_truncated_file(self):
name = os.path.join(self.tmpdir.name, "1")
writer = self.create(name)
writer.add(b"123", "a")
writer.add(b"234", "b")
writer.finalize()
with slob.fopen(name, "rb") as f:
all_bytes = f.read()
with slob.fopen(name, "wb") as f:
f.write(all_bytes[:-1])
self.assertRaises(slob.IncorrectFileSize, slob.open, name)
with slob.fopen(name, "wb") as f:
f.write(all_bytes)
f.write(b"\n")
self.assertRaises(slob.IncorrectFileSize, slob.open, name)
class TestTooLongText(BaseTest):
def setUp(self):
BaseTest.setUp(self)
self.path = os.path.join(self.tmpdir.name, "test.slob")
def test_too_long(self):
rejected_keys = []
rejected_aliases = []
rejected_alias_targets = []
rejected_tags = []
rejected_content_types = []
def observer(event):
if event.name == "key_too_long":
rejected_keys.append(event.data)
elif event.name == "alias_too_long":
rejected_aliases.append(event.data)
elif event.name == "alias_target_too_long":
rejected_alias_targets.append(event.data)
elif event.name == "tag_name_too_long":
rejected_tags.append(event.data)
elif event.name == "content_type_too_long":
rejected_content_types.append(event.data)
long_tag_name = "t" * (slob.MAX_TINY_TEXT_LEN + 1)
long_tag_value = "v" * (slob.MAX_TINY_TEXT_LEN + 1)
long_content_type = "T" * (slob.MAX_TEXT_LEN + 1)
long_key = "c" * (slob.MAX_TEXT_LEN + 1)
long_frag = "d" * (slob.MAX_TINY_TEXT_LEN + 1)
key_with_long_frag = ("d", long_frag)
tag_with_long_name = (long_tag_name, "t3 value")
tag_with_long_value = ("t1", long_tag_value)
long_alias = "f" * (slob.MAX_TEXT_LEN + 1)
alias_with_long_frag = ("i", long_frag)
long_alias_target = long_key
long_alias_target_frag = key_with_long_frag
w = self.create(self.path, observer=observer)
w.tag(*tag_with_long_value)
w.tag("t2", "t2 value")
w.tag(*tag_with_long_name)
data = ["a", "b", long_key, key_with_long_frag]
for k in data:
v = k.encode("ascii") if isinstance(k, str) else "#".join(k).encode("ascii")
w.add(v, k)
w.add_alias("e", "a")
w.add_alias(long_alias, "a")
w.add_alias(alias_with_long_frag, "a")
w.add_alias("g", long_alias_target)
w.add_alias("h", long_alias_target_frag)
w.add(b"Hello", "hello", content_type=long_content_type)
w.finalize()
self.assertEqual(
rejected_keys,
[long_key, key_with_long_frag],
)
self.assertEqual(
rejected_aliases,
[long_alias, alias_with_long_frag],
)
self.assertEqual(
rejected_alias_targets,
[long_alias_target, long_alias_target_frag],
)
self.assertEqual(
rejected_tags,
[tag_with_long_name],
)
self.assertEqual(
rejected_content_types,
[long_content_type],
)
with slob.open(self.path) as r:
self.assertEqual(r.tags["t2"], "t2 value")
self.assertNotIn(tag_with_long_name[0], r.tags)
self.assertIn(tag_with_long_value[0], r.tags)
self.assertEqual(r.tags[tag_with_long_value[0]], "")
d = r.as_dict()
self.assertIn("a", d)
self.assertIn("b", d)
self.assertNotIn(long_key, d)
self.assertNotIn(key_with_long_frag[0], d)
self.assertIn("e", d)
self.assertNotIn(long_alias, d)
self.assertNotIn("g", d)
self.assertRaises(
ValueError,
set_tag_value,
self.path,
"t1",
"ы" * 128,
)
class TestEditTag(BaseTest):
def setUp(self):
BaseTest.setUp(self)
self.path = os.path.join(self.tmpdir.name, "test.slob")
writer = self.create(self.path)
writer.tag("a", "123456")
writer.tag("b", "654321")
writer.finalize()
def test_edit_existing_tag(self):
with slob.open(self.path) as f:
self.assertEqual(f.tags["a"], "123456")
self.assertEqual(f.tags["b"], "654321")
set_tag_value(self.path, "b", "efg")
set_tag_value(self.path, "a", "xyz")
with slob.open(self.path) as f:
self.assertEqual(f.tags["a"], "xyz")
self.assertEqual(f.tags["b"], "efg")
def test_edit_nonexisting_tag(self):
self.assertRaises(TagNotFound, set_tag_value, self.path, "z", "abc")
class TestBinItemNumberLimit(BaseTest):
def setUp(self):
BaseTest.setUp(self)
self.path = os.path.join(self.tmpdir.name, "test.slob")
def test_writing_more_than_max_number_of_bin_items(self):
writer = self.create(self.path)
for _ in range(slob.MAX_BIN_ITEM_COUNT + 2):
writer.add(b"a", "a")
self.assertEqual(writer.bin_count, 2)
writer.finalize()
if __name__ == "__main__":
unittest.main()
| 19,014 | Python | .py | 617 | 27.072934 | 79 | 0.664007 | ilius/pyglossary | 2,176 | 238 | 22 | GPL-3.0 | 9/5/2024, 5:10:09 PM (Europe/Amsterdam) |
| 6,679 | glossary_errors_test.py | ilius_pyglossary/tests/glossary_errors_test.py |
import logging
import os
import sys
import unittest
from os.path import abspath, dirname, isfile, join, relpath
rootDir = dirname(dirname(abspath(__file__)))
sys.path.insert(0, rootDir)
from glossary_test import TestGlossaryBase, appTmpDir
from pyglossary.core_test import getMockLogger
from pyglossary.glossary import Glossary
from pyglossary.os_utils import rmtree
__all__ = ["TestGlossaryErrors", "TestGlossaryErrorsBase"]
Glossary.init()
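# str subclass used to check that arguments must be exactly str, not merely
# str-like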
class MyStr(str):
__slots__ = []
class TestGlossaryErrorsBase(TestGlossaryBase):
def __init__(self, *args, **kwargs):
TestGlossaryBase.__init__(self, *args, **kwargs)
self.mockLog = getMockLogger()
def setUp(self):
TestGlossaryBase.setUp(self)
self.mockLog.clear()
def tearDown(self):
TestGlossaryBase.tearDown(self)
method = self._testMethodName
self.assertEqual(0, self.mockLog.printRemainingErrors(method))
warnCount = self.mockLog.printRemainingwWarnings(method)
if warnCount > 0:
print(
f"Got {warnCount} unhandled warnings "
f"from {self.__class__.__name__}: {self._testMethodName}\n",
)
def assertLogCritical(self, errorMsg):
self.assertIsNotNone(
self.mockLog.popLog(
logging.CRITICAL,
errorMsg,
),
msg=f"did not find critical log {errorMsg!r}",
)
def assertLogError(self, errorMsg):
self.assertIsNotNone(
self.mockLog.popLog(
logging.ERROR,
errorMsg,
),
msg=f"did not find error log {errorMsg!r}",
)
def assertLogWarning(self, errorMsg):
self.assertIsNotNone(
self.mockLog.popLog(
logging.WARNING,
errorMsg,
),
msg=f"did not find warning log {errorMsg!r}",
)
def osRoot():
if os.sep == "\\":
return "C:\\"
return "/"
if os.sep == "\\":
osNoSuchFileOrDir = "[WinError 3] The system cannot find the path specified:"
else:
osNoSuchFileOrDir = "[Errno 2] No such file or directory:"
class TestGlossaryErrors(TestGlossaryErrorsBase):
def test_loadPlugins_invalidDir(self):
path = join(osRoot(), "abc", "def", "ghe")
Glossary.loadPlugins(path)
self.assertLogCritical(f"Invalid plugin directory: {path!r}")
def test_detectInputFormat_err1(self):
res = Glossary.detectInputFormat(
filename="",
format="",
)
self.assertIsNone(res)
self.assertLogCritical("Unable to detect input format!")
def test_detectInputFormat_err2(self):
res = Glossary.detectInputFormat(
filename="test.abcd",
format="",
)
self.assertIsNone(res)
self.assertLogCritical("Unable to detect input format!")
def test_detectInputFormat_err3(self):
res = Glossary.detectInputFormat(
filename="test.sql",
format="",
)
self.assertIsNone(res)
self.assertLogCritical("plugin Sql does not support reading")
def test_detectInputFormat_err4(self):
res = Glossary.detectInputFormat(
filename="test",
format="FooBar",
)
self.assertIsNone(res)
self.assertLogCritical("Invalid format 'FooBar'")
def test_detectInputFormat_ok1(self):
res = Glossary.detectInputFormat(
filename="test1.txt.gz",
format="",
)
self.assertEqual(res, ("test1.txt.gz", "Tabfile", ""))
def test_detectInputFormat_ok2(self):
res = Glossary.detectInputFormat(
filename="test2.txt.zip",
format="",
)
self.assertEqual(res, ("test2.txt", "Tabfile", "zip"))
def test_detectOutputFormat_err1(self):
res = Glossary.detectOutputFormat(
filename="",
format="",
inputFilename="",
)
self.assertIsNone(res)
self.assertLogCritical("Invalid filename ''")
def test_detectOutputFormat_err2(self):
res = Glossary.detectOutputFormat(
filename="test",
format="FooBar",
inputFilename="",
)
self.assertIsNone(res)
self.assertLogCritical("Invalid format FooBar")
def test_detectOutputFormat_err3(self):
res = Glossary.detectOutputFormat(
filename="",
format="",
inputFilename="test",
)
self.assertIsNone(res)
self.assertLogCritical("No filename nor format is given for output file")
def test_detectOutputFormat_err4_1(self):
res = Glossary.detectOutputFormat(
filename="",
format="BabylonBgl",
inputFilename="test3.txt",
)
self.assertIsNone(res)
self.assertLogCritical("plugin BabylonBgl does not support writing")
def test_detectOutputFormat_err4_2(self):
res = Glossary.detectOutputFormat(
filename="test.bgl",
format="",
inputFilename="",
)
self.assertIsNone(res)
self.assertLogCritical("plugin BabylonBgl does not support writing")
def test_detectOutputFormat_err5(self):
res = Glossary.detectOutputFormat(
filename="test",
format="",
inputFilename="",
)
self.assertIsNone(res)
self.assertLogCritical("Unable to detect output format!")
def test_detectOutputFormat_err6(self):
res = Glossary.detectOutputFormat(
filename="test",
format="Tabfile",
inputFilename="",
addExt=True,
)
self.assertEqual(res, ("test", "Tabfile", ""))
self.assertLogError("inputFilename is empty")
def test_init_infoBadType(self):
try:
Glossary(info=["a"])
except Exception as e:
self.assertEqual(str(type(e)), "<class 'TypeError'>")
self.assertEqual(
str(e),
"Glossary: `info` has invalid type, dict or OrderedDict expected",
)
else:
self.fail("did not raise an exception")
def test_cleanup_removed(self):
glos = Glossary()
tmpFname = "test_cleanup_removed"
entry = glos.newDataEntry(tmpFname, b"test")
tmpFpath = entry._tmpPath
self.assertTrue(bool(tmpFpath), msg="entry tmpPath is empty")
self.assertTrue(isfile(tmpFpath), msg=f"tmp file does not exist: {tmpFpath}")
rmtree(appTmpDir)
glos.cleanup()
self.assertLogError(f"no such file or directory: {appTmpDir}")
def test_lang_err_get_source(self):
glos = Glossary()
glos.setInfo("sourcelang", "test")
self.assertEqual(glos.sourceLangName, "")
self.assertLogError("unknown language 'test'")
def test_lang_err_get_target(self):
glos = Glossary()
glos.setInfo("targetlang", "test")
self.assertEqual(glos.targetLangName, "")
self.assertLogError("unknown language 'test'")
def test_lang_err_set_source(self):
glos = Glossary()
glos.sourceLangName = "foobar"
self.assertLogError("unknown language 'foobar'")
self.assertEqual(glos.sourceLangName, "")
def test_lang_err_set_target(self):
glos = Glossary()
glos.targetLangName = "foobar"
self.assertLogError("unknown language 'foobar'")
self.assertEqual(glos.targetLangName, "")
def test_lang_err_setObj_source(self):
glos = Glossary()
try:
glos.sourceLang = "foobar"
except TypeError as e:
self.assertEqual(str(e), "invalid lang='foobar', must be a Lang object")
else:
self.fail("must raise a TypeError")
def test_lang_err_setObj_target(self):
glos = Glossary()
try:
glos.targetLang = "foobar"
except TypeError as e:
self.assertEqual(str(e), "invalid lang='foobar', must be a Lang object")
else:
self.fail("must raise a TypeError")
def test_config_attr_set_twice(self):
glos = Glossary()
glos.config = {"lower": True}
self.assertEqual(glos.getConfig("lower", False), True)
glos.config = {"lower": False}
self.assertLogError("glos.config is set more than once")
self.assertEqual(glos.getConfig("lower", False), True)
def test_iter_empty(self):
glos = Glossary()
self.assertEqual(list(glos), [])
def test_convert_typeErr_1(self):
glos = Glossary()
try:
glos.convert(
inputFilename=MyStr(""),
)
except TypeError as e:
self.assertEqual(str(e), "inputFilename must be str")
else:
self.fail("must raise TypeError")
def test_convert_typeErr_2(self):
glos = Glossary()
try:
glos.convert(
inputFilename="",
outputFilename=MyStr(""),
)
except TypeError as e:
self.assertEqual(str(e), "outputFilename must be str")
else:
self.fail("must raise TypeError")
def test_convert_typeErr_3(self):
glos = Glossary()
try:
glos.convert(
inputFilename="",
outputFilename="",
inputFormat=MyStr(""),
)
except TypeError as e:
self.assertEqual(str(e), "inputFormat must be str")
else:
self.fail("must raise TypeError")
def test_convert_typeErr_4(self):
glos = Glossary()
try:
glos.convert(
inputFilename="",
outputFilename="",
inputFormat="",
outputFormat=MyStr(""),
)
except TypeError as e:
self.assertEqual(str(e), "outputFormat must be str")
else:
self.fail("must raise TypeError")
def test_read_typeErr_1(self):
glos = Glossary()
try:
glos.read(
filename=MyStr(""),
)
except TypeError as e:
self.assertEqual(str(e), "filename must be str")
else:
self.fail("must raise TypeError")
def test_read_typeErr_2(self):
glos = Glossary()
try:
glos.read(
filename="",
format=MyStr(""),
)
except TypeError as e:
self.assertEqual(str(e), "format must be str")
else:
self.fail("must raise TypeError")
def test_write_typeErr_1(self):
glos = Glossary()
try:
glos.write(
filename=MyStr(""),
format="",
)
except TypeError as e:
self.assertEqual(str(e), "filename must be str")
else:
self.fail("must raise TypeError")
def test_write_typeErr_2(self):
glos = Glossary()
try:
glos.write(
filename="",
format=MyStr(""),
)
except TypeError as e:
self.assertEqual(str(e), "format must be str")
else:
self.fail("must raise TypeError")
def test_convert_sameFilename(self):
glos = Glossary()
res = glos.convert(
inputFilename="test4.txt",
outputFilename="test4.txt",
)
self.assertIsNone(res)
self.assertLogCritical("Input and output files are the same")
def test_convert_dirExists(self):
glos = Glossary()
tempFilePath = self.newTempFilePath("test_convert_dirExists")
with open(tempFilePath, mode="w", encoding="utf-8") as _file:
_file.write("")
res = glos.convert(
inputFilename="test5.txt",
outputFilename=self.tempDir,
outputFormat="Stardict",
)
self.assertIsNone(res)
self.assertLogCritical(
f"Directory already exists and not empty: {relpath(self.tempDir)}",
)
def test_convert_fileNotFound(self):
glos = Glossary()
inputFilename = join(osRoot(), "abc", "def", "test6.txt")
res = glos.convert(
inputFilename=inputFilename,
outputFilename="test2.txt",
)
self.assertIsNone(res)
self.assertLogCritical(
f"[Errno 2] No such file or directory: {inputFilename!r}",
)
self.assertLogCritical(f"Reading file {relpath(inputFilename)!r} failed.")
def test_convert_unableDetectOutputFormat(self):
glos = Glossary()
res = glos.convert(
inputFilename="test7.txt",
outputFilename="test",
outputFormat="",
)
self.assertIsNone(res)
self.assertLogCritical("Unable to detect output format!")
self.assertLogCritical(f"Writing file {relpath('test')!r} failed.")
def test_convert_writeFileNotFound_txt(self):
outputFilename = join(
appTmpDir,
"test",
"7de8cf6f17bc4c9abb439e71adbec95d.txt",
)
glos = Glossary()
res = glos.convert(
inputFilename=self.downloadFile("100-en-fa.txt"),
outputFilename=outputFilename,
)
self.assertIsNone(res)
self.assertLogCritical(
f"[Errno 2] No such file or directory: {outputFilename!r}",
)
self.assertLogCritical(f"Writing file {relpath(outputFilename)!r} failed.")
def test_convert_writeFileNotFound_hdir(self):
outputFilename = join(osRoot(), "test", "40e20107f5b04087bfc0ec0d61510017.hdir")
glos = Glossary()
res = glos.convert(
inputFilename=self.downloadFile("100-en-fa.txt"),
outputFilename=outputFilename,
)
self.assertIsNone(res)
self.assertLogCritical(
f"{osNoSuchFileOrDir} {outputFilename!r}",
)
self.assertLogCritical(f"Writing file {relpath(outputFilename)!r} failed.")
def test_convert_invalidSortKeyName(self):
glos = self.glos = Glossary()
outputFilename = self.newTempFilePath("none.txt")
res = glos.convert(
inputFilename=self.downloadFile("100-en-fa.txt"),
outputFilename=outputFilename,
sort=True,
sortKeyName="blah",
)
self.assertIsNone(res)
self.assertLogCritical("invalid sortKeyName = 'blah'")
def test_collectDefiFormat_direct(self):
fname = "100-en-fa.txt"
glos = self.glos = Glossary()
glos.read(self.downloadFile(fname), direct=True)
res = glos.collectDefiFormat(10)
self.assertIsNone(res)
self.assertLogError("collectDefiFormat: not supported in direct mode")
def test_sortWords_invalidSortKeyName(self):
glos = self.glos = Glossary()
glos.sortWords(
sortKeyName="blah",
)
self.assertLogCritical("invalid sortKeyName = 'blah'")
if __name__ == "__main__":
unittest.main()
| 12,469 | Python | .py | 420 | 26.352381 | 82 | 0.726135 | ilius/pyglossary | 2,176 | 238 | 22 | GPL-3.0 | 9/5/2024, 5:10:09 PM (Europe/Amsterdam) |
| 6,680 | g_kobo_test.py | ilius_pyglossary/tests/g_kobo_test.py |
import gzip
import unittest
import marisa_trie # noqa: F401, to ensure it's installed
from glossary_v2_test import TestGlossaryBase
class TestGlossaryKobo(TestGlossaryBase):
def __init__(self, *args, **kwargs):
TestGlossaryBase.__init__(self, *args, **kwargs)
# self.dataFileCRC32.update({})
def convert_txt_kobo(self, fname, sha1sumDict, **convertArgs):
outputFname = f"{fname}-2.kobo.zip"
outputFpath = self.newTempFilePath(outputFname)
# expectedFpath = self.downloadFile(f"{fname}.kobo.zip")
self.convert(
f"{fname}.txt",
outputFname,
**convertArgs,
)
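# every member of the produced zip except the "words" index is gzipped,
# so decompress those members before comparing sha1 sums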
dataReplaceFuncs = {
_zfname: gzip.decompress for _zfname in sha1sumDict if _zfname != "words"
}
self.checkZipFileSha1sum(
outputFpath,
sha1sumDict=sha1sumDict,
dataReplaceFuncs=dataReplaceFuncs,
)
def test_convert_txt_kobo_1(self):
sha1sumDict = {
"11.html": "39f0f46560da7398ab0d3b19cc1c2387ecd201dd",
"aa.html": "df9460450e8b46e913c57bf39dcc799ffdc2fb33",
"ab.html": "be4271a8508dbb499bafd439810af621a7b3474f",
"words": "d0f74e854f090fbaa8211bcfd162ad99ec4da0a3",
}
self.convert_txt_kobo("100-en-fa", sha1sumDict)
if __name__ == "__main__":
unittest.main()
| 1,189 | Python | .py | 35 | 30.914286 | 76 | 0.748038 | ilius/pyglossary | 2,176 | 238 | 22 | GPL-3.0 | 9/5/2024, 5:10:09 PM (Europe/Amsterdam) |
| 6,681 | g_stardict_sort_test.py | ilius_pyglossary/tests/g_stardict_sort_test.py |
import unittest
from g_stardict_test import TestGlossaryStarDictBase
from glossary_errors_test import TestGlossaryErrorsBase
class TestGlossaryStarDictSortCustom(TestGlossaryStarDictBase):
def __init__(self, *args, **kwargs):
TestGlossaryErrorsBase.__init__(self, *args, **kwargs)
self.dataFileCRC32.update(
{
"100-en-fa.sd/100-en-fa.dict": "223a0d1d",
"100-en-fa.sd/100-en-fa.idx": "6df43378",
"100-en-fa.sd/100-en-fa.ifo": "3f2086cd",
"100-en-fa.sd/100-en-fa.syn": "1160fa0b",
"100-en-fa-sd.txt": "85f9d3fc",
},
)
def convert_txt_stardict_enfa(
self,
fname,
**convertArgs,
):
self.convert_txt_stardict(
fname,
config={"enable_alts": True},
info={
"sourceLang": "English",
"targetLang": "Persian",
},
**convertArgs,
)
def test_convert_txt_stardict_enfa_1(self):
sortKeyName = "headword"
self.convert_txt_stardict_enfa(
"100-en-fa",
sortKeyName=sortKeyName,
sqlite=True,
)
self.assertLogWarning(
f"Ignoring user-defined sort order {sortKeyName!r}"
", and using sortKey function from Stardict plugin",
)
def test_convert_txt_stardict_enfa_2(self):
sortKeyName = "ebook"
self.convert_txt_stardict_enfa(
"100-en-fa",
sortKeyName=sortKeyName,
sqlite=False,
)
self.assertLogWarning(
f"Ignoring user-defined sort order {sortKeyName!r}"
", and using sortKey function from Stardict plugin",
)
def test_convert_txt_stardict_enfa_3(self):
sortKeyName = "stardict:en_US.UTF-8"
self.convert_txt_stardict_enfa(
"100-en-fa",
sortKeyName=sortKeyName,
sqlite=True,
)
self.assertLogWarning(
f"Ignoring user-defined sort order {sortKeyName!r}"
", and using sortKey function from Stardict plugin",
)
def test_convert_txt_stardict_enfa_4(self):
sortKeyName = "stardict:fa_IR.UTF-8"
self.convert_txt_stardict_enfa(
"100-en-fa",
sortKeyName=sortKeyName,
sqlite=False,
)
self.assertLogWarning(
f"Ignoring user-defined sort order {sortKeyName!r}"
", and using sortKey function from Stardict plugin",
)
if __name__ == "__main__":
unittest.main()
| 2,099 | Python | .py | 75 | 24.546667 | 63 | 0.713505 | ilius/pyglossary | 2,176 | 238 | 22 | GPL-3.0 | 9/5/2024, 5:10:09 PM (Europe/Amsterdam) |
| 6,682 | g_jmdict_test.py | ilius_pyglossary/tests/g_jmdict_test.py |
import unittest
from glossary_v2_test import TestGlossaryBase
class TestGlossaryJMdict(TestGlossaryBase):
def __init__(self, *args, **kwargs):
TestGlossaryBase.__init__(self, *args, **kwargs)
self.dataFileCRC32.update(
{
"050-JMdict-English": "aec9ad8c",
"050-JMdict-English-v3.txt": "6068b9a7",
},
)
def convert_jmdict_txt(self, fname, fname2, **convertArgs):
self.convert(
fname,
f"{fname}-2.txt",
compareText=f"{fname2}.txt",
inputFormat="JMDict",
**convertArgs,
)
# with lxml==5.3.0, for "bword://{word}", `word` is not unicode-escaped by lxml
# while lxml < 5.3.0 does escape these unicode characters
# that's why 050-JMdict-English-v2 was updated to 050-JMdict-English-v3
def test_convert_jmdict_txt_1(self):
self.convert_jmdict_txt(
"050-JMdict-English",
"050-JMdict-English-v3",
)
if __name__ == "__main__":
unittest.main()
| 896 | Python | .py | 29 | 27.689655 | 80 | 0.698487 | ilius/pyglossary | 2,176 | 238 | 22 | GPL-3.0 | 9/5/2024, 5:10:09 PM (Europe/Amsterdam) |
| 6,683 | g_stardict_test.py | ilius_pyglossary/tests/g_stardict_test.py |
import sys
import unittest
from os.path import abspath, dirname, relpath
rootDir = dirname(dirname(abspath(__file__)))
sys.path.insert(0, rootDir)
from glossary_errors_test import TestGlossaryErrorsBase
from pyglossary.glossary import Glossary
__all__ = ["TestGlossaryStarDictBase"]
class TestGlossaryStarDictBase(TestGlossaryErrorsBase):
def convert_txt_stardict( # noqa: PLR0913
self,
fname,
fname2="",
syn=True,
dictzip=False,
config=None,
rawEntryCompress=None,
writeOptions=None,
info=None,
**convertArgs,
):
if not fname2:
fname2 = fname
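# a StarDict glossary consists of a textual .ifo plus binary .idx and .dict
# files, and a .syn file when synonyms/alternates are written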
binExtList = ["idx", "dict"]
if syn:
binExtList.append("syn")
inputFilename = self.downloadFile(f"{fname}.txt")
outputFilename = self.newTempFilePath(f"{fname}.ifo")
otherFiles = {ext: self.newTempFilePath(f"{fname}.{ext}") for ext in binExtList}
glos = self.glos = Glossary(info=info)
if config is not None:
glos.config = config
if rawEntryCompress is not None:
glos.setRawEntryCompress(rawEntryCompress)
if writeOptions is None:
writeOptions = {}
writeOptions["dictzip"] = dictzip
result = glos.convert(
inputFilename=inputFilename,
outputFilename=outputFilename,
writeOptions=writeOptions,
**convertArgs,
)
self.assertEqual(outputFilename, result)
self.compareTextFiles(
outputFilename,
self.downloadFile(f"{fname2}.sd/{fname2}.ifo"),
)
for ext in binExtList:
self.compareBinaryFiles(
otherFiles[ext],
self.downloadFile(f"{fname2}.sd/{fname2}.{ext}"),
)
def convert_txt_stardict_zip( # noqa: PLR0913
self,
fname,
sha1sumDict,
dictzip=False,
config=None,
rawEntryCompress=None,
**convertArgs,
):
inputFilename = self.downloadFile(f"{fname}.txt")
outputFilename = self.newTempFilePath(f"{fname}.zip")
glos = self.glos = Glossary()
if config is not None:
glos.config = config
if rawEntryCompress is not None:
glos.setRawEntryCompress(rawEntryCompress)
result = glos.convert(
inputFilename=inputFilename,
outputFilename=outputFilename,
outputFormat="Stardict",
writeOptions={
"dictzip": dictzip,
},
**convertArgs,
)
self.assertEqual(outputFilename, result)
self.checkZipFileSha1sum(
outputFilename,
sha1sumDict=sha1sumDict,
)
def convert_stardict_txt(
self,
inputFname: str,
outputFname: str,
testId: str,
syn=True,
**convertArgs,
):
binExtList = ["idx", "dict"]
if syn:
binExtList.append("syn")
for ext in binExtList:
self.downloadFile(f"{inputFname}.sd/{inputFname}.{ext}")
inputFilename = self.downloadFile(f"{inputFname}.sd/{inputFname}.ifo")
outputFilename = self.newTempFilePath(
f"{inputFname}-{testId}.txt",
)
expectedFilename = self.downloadFile(f"{outputFname}.txt")
glos = self.glos = Glossary()
result = glos.convert(
inputFilename=inputFilename,
outputFilename=outputFilename,
**convertArgs,
)
self.assertEqual(outputFilename, result)
self.compareTextFiles(outputFilename, expectedFilename)
class TestGlossaryStarDict(TestGlossaryStarDictBase):
def __init__(self, *args, **kwargs):
TestGlossaryErrorsBase.__init__(self, *args, **kwargs)
self.dataFileCRC32.update(
{
"004-bar.sd/004-bar.dict": "9ea397f8",
"004-bar.sd/004-bar.idx": "cf9440cf",
"004-bar.sd/004-bar.ifo": "ada870e4",
"004-bar.sd/004-bar.syn": "286b17bf",
"100-en-de-v4.sd/100-en-de-v4.dict": "5a97476f",
"100-en-de-v4.sd/100-en-de-v4.idx": "a99f29d2",
"100-en-de-v4.sd/100-en-de-v4.ifo": "6529871f",
"100-en-fa.sd/100-en-fa.dict": "223a0d1d",
"100-en-fa.sd/100-en-fa.idx": "6df43378",
"100-en-fa.sd/100-en-fa.ifo": "3f2086cd",
"100-en-fa.sd/100-en-fa.syn": "1160fa0b",
"100-en-fa-sd.txt": "85f9d3fc",
# FIXME: remove empty description line from 100-en-fa.ifo
# stardict-mixed-types-1.ifo, stardict-mixed-types-2.ifo
"100-en-fa-merge-syns.sd/100-en-fa-merge-syns.dict": "223a0d1d",
"100-en-fa-merge-syns.sd/100-en-fa-merge-syns.idx": "13f1c7af",
"100-en-fa-merge-syns.sd/100-en-fa-merge-syns.ifo": "07338eed",
"100-ja-en.sd/100-ja-en.dict": "39715f01",
"100-ja-en.sd/100-ja-en.idx": "adf0e552",
"100-ja-en.sd/100-ja-en.ifo": "b01e368c",
"100-ja-en.sd/100-ja-en.syn": "76e6df95",
"300-ru-en.txt": "77cfee2f",
"300-ru-en.sd/300-ru-en.dict": "8be7fa4c",
"300-ru-en.sd/300-ru-en.idx": "1cd30f1a",
"300-ru-en.sd/300-ru-en.ifo": "0b135812",
"300-ru-en.sd/300-ru-en.syn": "87ee3372",
"stardict-mixed-types-2.sd/stardict-mixed-types-2.dict": "2e43237a",
"stardict-mixed-types-2.sd/stardict-mixed-types-2.idx": "65a1f9fc",
"stardict-mixed-types-2.sd/stardict-mixed-types-2.ifo": "e1063b84",
"stardict-mixed-types-2.sd.txt": "94de4bc6",
"002-plain-html.txt": "75484314",
"002-plain-html.sd/002-plain-html.dict": "2e9d20d8",
"002-plain-html.sd/002-plain-html.idx": "3956ad72",
"002-plain-html.sd/002-plain-html.ifo": "1991f125",
"004-plain-html-alts.txt": "505d4675",
"004-plain-html-alts.sd/004-plain-html-alts.dict": "889f11f8",
"004-plain-html-alts.sd/004-plain-html-alts.idx": "edbe368d",
"004-plain-html-alts.sd/004-plain-html-alts.ifo": "b9b92fa3",
"004-plain-html-alts.sd/004-plain-html-alts.syn": "c07f7111",
"004-plain-html-alts-merge-syns.sd/"
"004-plain-html-alts-merge-syns.dict": "889f11f8",
"004-plain-html-alts-merge-syns.sd/"
"004-plain-html-alts-merge-syns.idx": "092ba555",
"004-plain-html-alts-merge-syns.sd/"
"004-plain-html-alts-merge-syns.ifo": "628abe99",
},
)
def test_convert_txt_stardict_0(self):
self.convert_txt_stardict(
"100-en-fa",
config={"auto_sqlite": True},
direct=True,
)
def test_convert_txt_stardict_1(self):
for sqlite in (None, False, True):
for rawEntryCompress in (None, True, False):
self.convert_txt_stardict(
"100-en-fa",
rawEntryCompress=rawEntryCompress,
sqlite=sqlite,
)
def test_convert_txt_stardict_1_merge_syns(self):
self.convert_txt_stardict(
"100-en-fa",
fname2="100-en-fa-merge-syns",
syn=False,
writeOptions={"merge_syns": True},
)
def test_convert_txt_stardict_1_zip(self):
sha1sumDict = {
"100-en-fa.dict": "1e462e829f9e2bf854ceac2ef8bc55911460c79e",
"100-en-fa.idx": "943005945b35abf3a3e7b80375c76daa87e810f0",
"100-en-fa.ifo": "3e982a76f83eef66a8d4915e7a0018746f4180bc",
"100-en-fa.syn": "fcefc76628fed18b84b9aa83cd7139721b488545",
}
for sqlite in (None, False, True):
self.convert_txt_stardict_zip(
"100-en-fa",
sha1sumDict=sha1sumDict,
sqlite=sqlite,
)
def test_convert_txt_stardict_2(self):
for sqlite in (None, False, True):
for rawEntryCompress in (None, True, False):
self.convert_txt_stardict(
"004-bar",
rawEntryCompress=rawEntryCompress,
sqlite=sqlite,
)
def test_convert_txt_stardict_3(self):
for sqlite in (None, False, True):
self.convert_txt_stardict(
"100-en-de-v4",
syn=False,
sqlite=sqlite,
)
def test_convert_txt_stardict_3_merge_syns(self):
self.convert_txt_stardict(
"100-en-de-v4",
syn=False,
writeOptions={"merge_syns": True},
)
def test_convert_txt_stardict_4(self):
for sqlite in (None, False, True):
self.convert_txt_stardict(
"100-ja-en",
syn=True,
sqlite=sqlite,
)
def test_convert_txt_stardict_5(self):
for sqlite in (None, False, True):
self.convert_txt_stardict(
"300-ru-en",
syn=True,
sqlite=sqlite,
)
def test_convert_txt_stardict_sqlite_no_alts(self):
self.convert_txt_stardict(
"100-en-fa",
config={"enable_alts": False},
sqlite=True,
)
self.assertLogWarning(
"SQLite mode only works with enable_alts=True, force-enabling it.",
)
def test_convert_stardict_txt_1(self):
self.convert_stardict_txt(
"100-en-fa",
"100-en-fa-sd",
"1",
)
def test_convert_stardict_txt_mixed_types_1(self):
self.convert_stardict_txt(
"stardict-mixed-types-2",
"stardict-mixed-types-2.sd",
"mixed-types-1",
syn=False,
)
def test_convert_stardict_txt_mixed_types_2(self):
self.convert_stardict_txt(
"stardict-mixed-types-2",
"stardict-mixed-types-2.sd",
"mixed-types-1",
syn=False,
readOptions={"xdxf_to_html": False},
)
def test_convert_txt_stardict_general_1(self):
self.convert_txt_stardict(
"002-plain-html",
syn=False,
)
def test_convert_txt_stardict_general_1_merge_syns(self):
self.convert_txt_stardict(
"002-plain-html",
syn=False,
writeOptions={"merge_syns": True},
)
def test_convert_txt_stardict_general_2(self):
self.convert_txt_stardict(
"004-plain-html-alts",
syn=True,
)
def test_convert_txt_stardict_general_2_merge_syns(self):
self.convert_txt_stardict(
"004-plain-html-alts",
fname2="004-plain-html-alts-merge-syns",
syn=False,
writeOptions={"merge_syns": True},
)
class TestGlossaryErrorsStarDict(TestGlossaryErrorsBase):
def __init__(self, *args, **kwargs):
TestGlossaryErrorsBase.__init__(self, *args, **kwargs)
def test_convert_from_stardict_invalid_sametypesequence(self):
fname = "foobar"
inputFilename = self.newTempFilePath(f"{fname}.ifo")
outputFilename = self.newTempFilePath(f"{fname}.txt")
with open(inputFilename, mode="w", encoding="utf-8") as _file:
_file.write(
"""StarDict's dict ifo file
version=3.0.0
bookname=Test
wordcount=123
idxfilesize=1234
sametypesequence=abcd
""",
)
glos = self.glos = Glossary()
result = glos.convert(
inputFilename=inputFilename,
outputFilename=outputFilename,
)
self.assertIsNone(result)
self.assertLogCritical("Invalid sametypesequence = 'abcd'")
self.assertLogCritical(f"Reading file {relpath(inputFilename)!r} failed.")
if __name__ == "__main__":
unittest.main()
| 9,753 | Python | .py | 310 | 27.73871 | 82 | 0.706816 | ilius/pyglossary | 2,176 | 238 | 22 | GPL-3.0 | 9/5/2024, 5:10:09 PM (Europe/Amsterdam) |
| 6,684 | glossary_v2_errors_test.py | ilius_pyglossary/tests/glossary_v2_errors_test.py |
import logging
import os
import sys
import unittest
from os.path import abspath, dirname, isfile, join, relpath
rootDir = dirname(dirname(abspath(__file__)))
sys.path.insert(0, rootDir)
from glossary_v2_test import TestGlossaryBase, appTmpDir
from pyglossary.core_test import getMockLogger
from pyglossary.glossary_v2 import ConvertArgs, Error, Glossary
from pyglossary.os_utils import rmtree
__all__ = ["TestGlossaryErrorsBase"]
Glossary.init()
class MyStr(str):
__slots__ = []
class TestGlossaryErrorsBase(TestGlossaryBase):
def __init__(self, *args, **kwargs):
TestGlossaryBase.__init__(self, *args, **kwargs)
self.mockLog = getMockLogger()
def setUp(self):
TestGlossaryBase.setUp(self)
self.mockLog.clear()
def tearDown(self):
TestGlossaryBase.tearDown(self)
method = self._testMethodName
self.assertEqual(0, self.mockLog.printRemainingErrors(method))
warnCount = self.mockLog.printRemainingwWarnings(method)
if warnCount > 0:
print(
f"Got {warnCount} unhandled warnings "
f"from {self.__class__.__name__}: {self._testMethodName}\n",
)
def assertLogCritical(self, errorMsg):
self.assertIsNotNone(
self.mockLog.popLog(
logging.CRITICAL,
errorMsg,
),
msg=f"did not find critical log {errorMsg!r}",
)
def assertLogError(self, errorMsg):
self.assertIsNotNone(
self.mockLog.popLog(
logging.ERROR,
errorMsg,
),
msg=f"did not find error log {errorMsg!r}",
)
def assertLogWarning(self, errorMsg):
self.assertIsNotNone(
self.mockLog.popLog(
logging.WARNING,
errorMsg,
),
msg=f"did not find warning log {errorMsg!r}",
)
def osRoot():
if os.sep == "\\":
return "C:\\"
return "/"
if os.sep == "\\":
osNoSuchFileOrDir = "[WinError 3] The system cannot find the path specified:"
else:
osNoSuchFileOrDir = "[Errno 2] No such file or directory:"
class TestGlossaryErrors(TestGlossaryErrorsBase):
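# unlike the legacy API exercised in glossary_errors_test.py, glossary_v2
# reports most failures by raising Error rather than logging critical
# messages, so these tests assert on the exception text instead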
def test_loadPlugins_invalidDir(self):
path = join(osRoot(), "abc", "def", "ghe")
Glossary.loadPlugins(path)
self.assertLogCritical(f"Invalid plugin directory: {path!r}")
def test_detectInputFormat_err1(self):
err = None
try:
Glossary.detectInputFormat(
filename="",
format="",
)
except Error as e:
err = str(e)
self.assertEqual(err, "Unable to detect input format!")
def test_detectInputFormat_err2(self):
err = None
try:
Glossary.detectInputFormat(
filename="test.abcd",
format="",
)
except Error as e:
err = str(e)
self.assertEqual(err, "Unable to detect input format!")
def test_detectInputFormat_err3(self):
err = None
try:
Glossary.detectInputFormat(
filename="test.sql",
format="",
)
except Error as e:
err = str(e)
self.assertEqual(err, "plugin Sql does not support reading")
def test_detectInputFormat_err4(self):
err = None
try:
Glossary.detectInputFormat(
filename="test",
format="FooBar",
)
except Error as e:
err = str(e)
self.assertEqual(err, "Invalid format 'FooBar'")
def test_detectInputFormat_ok1(self):
res = Glossary.detectInputFormat(
filename="test1.txt.gz",
format="",
)
self.assertEqual(res, ("test1.txt.gz", "Tabfile", ""))
def test_detectInputFormat_ok2(self):
res = Glossary.detectInputFormat(
filename="test2.txt.zip",
format="",
)
self.assertEqual(res, ("test2.txt", "Tabfile", "zip"))
def test_detectOutputFormat_err1(self):
err = None
try:
Glossary.detectOutputFormat(
filename="",
format="",
inputFilename="",
)
except Error as e:
err = str(e)
self.assertEqual(err, "Invalid filename ''")
def test_detectOutputFormat_err2(self):
try:
Glossary.detectOutputFormat(
filename="test",
format="FooBar",
inputFilename="",
)
except Error as e:
err = str(e)
self.assertEqual(err, "Invalid format FooBar")
def test_detectOutputFormat_err3(self):
err = None
try:
Glossary.detectOutputFormat(
filename="",
format="",
inputFilename="test",
)
except Error as e:
err = str(e)
self.assertEqual(err, "No filename nor format is given for output file")
def test_detectOutputFormat_err4_1(self):
err = None
try:
Glossary.detectOutputFormat(
filename="",
format="BabylonBgl",
inputFilename="test3.txt",
)
except Error as e:
err = str(e)
self.assertEqual(err, "plugin BabylonBgl does not support writing")
def test_detectOutputFormat_err4_2(self):
err = None
try:
Glossary.detectOutputFormat(
filename="test.bgl",
format="",
inputFilename="",
)
except Error as e:
err = str(e)
self.assertEqual(err, "plugin BabylonBgl does not support writing")
def test_detectOutputFormat_err5(self):
err = None
try:
Glossary.detectOutputFormat(
filename="test",
format="",
inputFilename="",
)
except Error as e:
err = str(e)
self.assertEqual(err, "Unable to detect output format!")
def test_detectOutputFormat_err6(self):
res = Glossary.detectOutputFormat(
filename="test",
format="Tabfile",
inputFilename="",
addExt=True,
)
self.assertEqual(res, ("test", "Tabfile", ""))
self.assertLogError("inputFilename is empty")
def test_init_infoBadType(self):
try:
Glossary(info=["a"])
except Exception as e:
self.assertEqual(str(type(e)), "<class 'TypeError'>")
self.assertEqual(
str(e),
"Glossary: `info` has invalid type, dict or OrderedDict expected",
)
else:
self.fail("did not raise an exception")
def test_cleanup_removed(self):
glos = Glossary()
tmpFname = "test_cleanup_removed"
entry = glos.newDataEntry(tmpFname, b"test")
tmpFpath = entry._tmpPath
self.assertTrue(bool(tmpFpath), msg="entry tmpPath is empty")
self.assertTrue(isfile(tmpFpath), msg=f"tmp file does not exist: {tmpFpath}")
rmtree(appTmpDir)
glos.cleanup()
self.assertLogError(f"no such file or directory: {appTmpDir}")
def test_lang_err_get_source(self):
glos = Glossary()
glos.setInfo("sourcelang", "test")
self.assertEqual(glos.sourceLangName, "")
self.assertLogError("unknown language 'test'")
def test_lang_err_get_target(self):
glos = Glossary()
glos.setInfo("targetlang", "test")
self.assertEqual(glos.targetLangName, "")
self.assertLogError("unknown language 'test'")
def test_lang_err_set_source(self):
glos = Glossary()
glos.sourceLangName = "foobar"
self.assertLogError("unknown language 'foobar'")
self.assertEqual(glos.sourceLangName, "")
def test_lang_err_set_target(self):
glos = Glossary()
glos.targetLangName = "foobar"
self.assertLogError("unknown language 'foobar'")
self.assertEqual(glos.targetLangName, "")
def test_lang_err_setObj_source(self):
glos = Glossary()
try:
glos.sourceLang = "foobar"
except TypeError as e:
self.assertEqual(str(e), "invalid lang='foobar', must be a Lang object")
else:
self.fail("must raise a TypeError")
def test_lang_err_setObj_target(self):
glos = Glossary()
try:
glos.targetLang = "foobar"
except TypeError as e:
self.assertEqual(str(e), "invalid lang='foobar', must be a Lang object")
else:
self.fail("must raise a TypeError")
def test_config_attr_set_twice(self):
glos = Glossary()
glos.config = {"lower": True}
self.assertEqual(glos.getConfig("lower", False), True)
glos.config = {"lower": False}
self.assertLogError("glos.config is set more than once")
self.assertEqual(glos.getConfig("lower", False), True)
def test_iter_empty(self):
glos = Glossary()
self.assertEqual(list(glos), [])
def test_convert_typeErr_1(self):
glos = Glossary()
try:
glos.convert(
ConvertArgs(
inputFilename=MyStr(""),
),
)
except TypeError as e:
self.assertEqual(str(e), "inputFilename must be str")
else:
self.fail("must raise TypeError")
def test_convert_typeErr_2(self):
glos = Glossary()
try:
glos.convert(
ConvertArgs(
inputFilename="",
outputFilename=MyStr(""),
),
)
except TypeError as e:
self.assertEqual(str(e), "outputFilename must be str")
else:
self.fail("must raise TypeError")
def test_convert_typeErr_3(self):
glos = Glossary()
try:
glos.convert(
ConvertArgs(
inputFilename="",
outputFilename="",
inputFormat=MyStr(""),
),
)
except TypeError as e:
self.assertEqual(str(e), "inputFormat must be str")
else:
self.fail("must raise TypeError")
def test_convert_typeErr_4(self):
glos = Glossary()
try:
glos.convert(
ConvertArgs(
inputFilename="",
outputFilename="",
inputFormat="",
outputFormat=MyStr(""),
),
)
except TypeError as e:
self.assertEqual(str(e), "outputFormat must be str")
else:
self.fail("must raise TypeError")
def test_write_typeErr_1(self):
glos = Glossary()
try:
glos.write(
filename=MyStr(""),
format="",
)
except TypeError as e:
self.assertEqual(str(e), "filename must be str")
else:
self.fail("must raise TypeError")
def test_write_typeErr_2(self):
glos = Glossary()
try:
glos.write(
filename="",
format=MyStr(""),
)
except TypeError as e:
self.assertEqual(str(e), "format must be str")
else:
self.fail("must raise TypeError")
def test_convert_sameFilename(self):
glos = Glossary()
err = None
try:
glos.convert(
ConvertArgs(
inputFilename="test4.txt",
outputFilename="test4.txt",
),
)
except Error as e:
err = str(e)
self.assertEqual(err, "Input and output files are the same")
def test_convert_dirExists(self):
glos = Glossary()
tempFilePath = self.newTempFilePath("test_convert_dirExists")
with open(tempFilePath, mode="w", encoding="utf-8") as _file:
_file.write("")
err = None
try:
glos.convert(
ConvertArgs(
inputFilename="test5.txt",
outputFilename=self.tempDir,
outputFormat="Stardict",
),
)
except Error as e:
err = str(e)
self.assertEqual(
err,
f"Directory already exists and not empty: {relpath(self.tempDir)}",
)
def test_convert_fileNotFound(self):
glos = Glossary()
inputFilename = join(osRoot(), "abc", "def", "test6.txt")
err = None
try:
glos.convert(
ConvertArgs(
inputFilename=inputFilename,
outputFilename="test2.txt",
),
)
except Error as e:
err = str(e)
self.assertEqual(
err,
f"[Errno 2] No such file or directory: {inputFilename!r}",
)
# self.assertLogCritical(f"Reading file {relpath(inputFilename)!r} failed.")
def test_convert_unableDetectOutputFormat(self):
glos = Glossary()
err = None
try:
glos.convert(
ConvertArgs(
inputFilename="test7.txt",
outputFilename="test",
outputFormat="",
),
)
except Error as e:
err = str(e)
self.assertEqual(err, "Unable to detect output format!")
# self.assertLogCritical(f"Writing file {relpath('test')!r} failed.")
def test_convert_writeFileNotFound_txt(self):
outputFilename = join(
appTmpDir,
"test",
"7de8cf6f17bc4c9abb439e71adbec95d.txt",
)
glos = Glossary()
err = None
try:
glos.convert(
ConvertArgs(
inputFilename=self.downloadFile("100-en-fa.txt"),
outputFilename=outputFilename,
),
)
except Error as e:
err = str(e)
self.assertEqual(
err,
f"[Errno 2] No such file or directory: {outputFilename!r}",
)
# self.assertLogCritical(f"Writing file {relpath(outputFilename)!r} failed.")
def test_convert_writeFileNotFound_hdir(self):
outputFilename = join(osRoot(), "test", "40e20107f5b04087bfc0ec0d61510017.hdir")
glos = Glossary()
err = None
try:
glos.convert(
ConvertArgs(
inputFilename=self.downloadFile("100-en-fa.txt"),
outputFilename=outputFilename,
),
)
except Error as e:
err = str(e)
self.assertEqual(
err,
f"{osNoSuchFileOrDir} {outputFilename!r}",
)
# self.assertLogCritical(f"Writing file {relpath(outputFilename)!r} failed.")
def test_convert_invalidSortKeyName(self):
glos = self.glos = Glossary()
outputFilename = self.newTempFilePath("none.txt")
err = None
try:
glos.convert(
ConvertArgs(
inputFilename=self.downloadFile("100-en-fa.txt"),
outputFilename=outputFilename,
sort=True,
sortKeyName="blah",
),
)
except Error as e:
err = str(e)
self.assertEqual(err, "invalid sortKeyName = 'blah'")
# def test_collectDefiFormat_direct(self):
# from pyglossary.glossary import Glossary as GlossaryLegacy
# fname = "100-en-fa.txt"
# glos = self.glos = GlossaryLegacy()
# glos.read(self.downloadFile(fname), direct=True)
# res = glos.collectDefiFormat(10)
# self.assertIsNone(res)
# self.assertLogError("collectDefiFormat: not supported in direct mode")
if __name__ == "__main__":
unittest.main()
| 12,720 | Python | .py | 470 | 23.434043 | 82 | 0.702594 | ilius/pyglossary | 2,176 | 238 | 22 | GPL-3.0 | 9/5/2024, 5:10:09 PM (Europe/Amsterdam) |
| 6,685 | ebook_kobo_test.py | ilius_pyglossary/tests/ebook_kobo_test.py |
# -*- coding: utf-8 -*-
import sys
import unittest
from os.path import abspath, dirname
rootDir = dirname(dirname(abspath(__file__)))
sys.path.insert(0, rootDir)
from pyglossary.glossary_v2 import Glossary
from pyglossary.plugins.ebook_kobo import (
Writer,
)
class GetPrefixTest(unittest.TestCase):
def case(self, word, prefix):
glos = Glossary()
w = Writer(glos)
self.assertEqual(
w.get_prefix(word),
prefix,
)
def test_examples(self):
# examples from https://pgaskin.net/dictutil/dicthtml/prefixes.html
self.case("test", "te")
self.case("a", "aa")
self.case("Èe", "èe")
self.case("multiple words", "mu")
self.case("àççèñts", "àç")
self.case("à", "àa")
self.case("ç", "ça")
self.case("", "11")
self.case(" ", "11")
self.case(" x", "xa")
self.case(" 123", "11")
self.case("x 23", "xa")
self.case("д ", "д")
self.case("дaд", "дa")
self.case("未未", "未未")
self.case("未", "未a")
self.case(" 未", "11")
self.case(" 未", "未a")
# the rest of test cases are from
# https://github.com/pgaskin/dictutil/blob/master/kobodict/util_test.go
def test_dicthtml_en(self):
self.case("a-", "11")
self.case("-an", "11")
self.case("GB", "gb")
def test_dicthtml_fr(self):
self.case("ébahir", "éb")
self.case("a1", "11")
self.case("ô", "ôa")
self.case("kébab", "ké")
self.case("aérer", "aé")
self.case("living-room", "li")
# dicthtml-ja
# Note, Kanji not currently implemented, so not testing (note, the logic
# is in a separate function, anyways).
# self.case("あ", "あ")
# self.case("アークとう", "アー")
def test_dictword_spaces(self):
# generated by dictword-test: spaces
self.case(" ", "11")
self.case(" ", "11")
self.case("\t\t", "11")
self.case("\t\f\t", "11")
self.case("x ", "xa")
self.case(" xx", "xa")
# generated by dictword-test: spaces where trim/prefix order matters
self.case(" x", "11")
self.case(" xy", "11")
self.case(" xyz", "11")
self.case("x z", "xa")
def test_dictword_cyrillic(self):
# generated by dictword-test: cyrillic
self.case(" д", "д")
self.case(" дд", "д")
self.case("д", "д")
self.case("aд", "aд")
self.case("дa", "дa")
self.case("aдa", "aд")
def test_dictword_uppercase_accented(self):
# generated by dictword-test: uppercase accented letters
self.case("Ȅe", "ȅe")
self.case("eȄ", "eȅ")
self.case("Ȅ", "ȅa")
self.case("Ȅ!", "11")
def test_dictword_cjk(self):
# generated by dictword-test: cjk
self.case("x未", "x未")
self.case("未x", "未x")
self.case("xy未", "xy")
self.case("还没", "还没")
def test_dictword_misc(self):
# generated by dictword-test: misc
self.case("!", "11")
self.case("!!", "11")
self.case("!!!", "11")
self.case("x!", "11")
self.case("x!!", "11")
self.case("xx!", "xx")
self.case("xxx!", "xx")
self.case(" !", "11")
self.case(" !!", "11")
self.case(" !!!", "11")
self.case(" !", "11")
self.case(" !!", "11")
self.case(" !!!", "11")
self.case(" x!", "xa")
self.case(" x!!", "xa")
self.case(" xx!", "xa")
self.case(" xxx!", "xa")
def test_synthetic(self):
self.case("x\x00y", "xa")
self.case("\x00xy", "11")
if __name__ == "__main__":
unittest.main()
| 3,278 | Python | .py | 113 | 25.292035 | 73 | 0.610181 | ilius/pyglossary | 2,176 | 238 | 22 | GPL-3.0 | 9/5/2024, 5:10:09 PM (Europe/Amsterdam) |
| 6,686 | apple_utils_test.py | ilius_pyglossary/tests/apple_utils_test.py |
import sys
import unittest
from os.path import abspath, dirname
rootDir = dirname(dirname(abspath(__file__)))
sys.path.insert(0, rootDir)
from pyglossary.apple_utils import substituteAppleCSS
class Test_substituteAppleCSS(unittest.TestCase):
def test_remove(self):
css = b""".test { -webkit-text-combine: horizontal; color: black }
.test2 {
-apple-color-filter: none;
}"""
fixed_expected = b""".test {color: black }
.test2 {
}"""
fixed_actual = substituteAppleCSS(css)
self.assertEqual(fixed_actual, fixed_expected)
def test_1(self):
css = b"""html.apple_display-separateview
{
-webkit-column-width: 25em;
-webkit-column-rule-color: LightGrey;
-webkit-column-rule-style: solid;
-webkit-column-rule-width: 1px;
}
span.sn
{
-webkit-text-combine: horizontal;
vertical-align: -6%;
}
"""
fixed_expected = b"""html.apple_display-separateview
{
column-width: 25em;
column-rule-color: LightGrey;
column-rule-style: solid;
column-rule-width: 1px;
}
span.sn
{
vertical-align: -6%;
}
"""
fixed_actual = substituteAppleCSS(css)
self.assertEqual(fixed_actual, fixed_expected)
if __name__ == "__main__":
unittest.main()
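# A minimal standalone sketch of what the assertions above establish:
# substituteAppleCSS() strips Apple/WebKit-only properties such as
# -webkit-text-combine and -apple-color-filter, and rewrites
# -webkit-column-* properties to their standard column-* forms.
from pyglossary.apple_utils import substituteAppleCSS

css = b"""span.sn
{
	-webkit-text-combine: horizontal;
	vertical-align: -6%;
}
"""
print(substituteAppleCSS(css).decode())  # only vertical-align remains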
| 1,145 | Python | .py | 47 | 22.531915 | 68 | 0.745638 | ilius/pyglossary | 2,176 | 238 | 22 | GPL-3.0 | 9/5/2024, 5:10:09 PM (Europe/Amsterdam) |

| 6,687 | g_lingoes_ldf_test.py | ilius_pyglossary/tests/g_lingoes_ldf_test.py |
import unittest
from glossary_v2_test import TestGlossaryBase
class TestGlossaryLingoesLDF(TestGlossaryBase):
def __init__(self, *args, **kwargs):
TestGlossaryBase.__init__(self, *args, **kwargs)
self.dataFileCRC32.update(
{
"004-bar.ldf": "b1aa776d",
"100-en-fa.ldf": "503d1a9b",
},
)
def convert_txt_ldf(self, fname, fname2, **convertArgs):
self.convert(
f"{fname}.txt",
f"{fname}-2.ldf",
compareText=f"{fname2}.ldf",
**convertArgs,
)
def convert_ldf_txt(self, fname, fname2, **convertArgs):
self.convert(
f"{fname}.ldf",
f"{fname}-2.txt",
compareText=f"{fname2}.txt",
**convertArgs,
)
def test_convert_txt_ldf_1(self):
self.convert_txt_ldf(
"004-bar",
"004-bar",
)
def test_convert_txt_ldf_2(self):
self.convert_txt_ldf(
"100-en-fa",
"100-en-fa",
)
def test_convert_ldf_txt_1(self):
self.convert_ldf_txt(
"004-bar",
"004-bar",
infoOverride={
"name": None,
"input_file_size": None,
},
)
if __name__ == "__main__":
unittest.main()
| 1,046 | Python | .py | 46 | 19.26087 | 57 | 0.648129 | ilius/pyglossary | 2,176 | 238 | 22 | GPL-3.0 | 9/5/2024, 5:10:09 PM (Europe/Amsterdam) |

| 6,688 | g_appledict_bin_test.py | ilius_pyglossary/tests/g_appledict_bin_test.py |
import sys
import unittest
from os.path import abspath, dirname, join
rootDir = dirname(dirname(abspath(__file__)))
sys.path.insert(0, rootDir)
from glossary_v2_test import TestGlossaryBase
from pyglossary.glossary import Glossary
class TestGlossaryAppleDictBin(TestGlossaryBase):
def __init__(self, *args, **kwargs):
TestGlossaryBase.__init__(self, *args, **kwargs)
hashDict = {
"appledict-bin/002-simple.txt": "32a1dbc4",
"appledict-bin/002-simple.txt_res/style.css": "a83210cb",
"appledict-bin/006-en-oxfjord_v10.11_c2_t3.txt": "2d3844bf",
"appledict-bin/006-en-oxfjord_v10.11_c2_t3.txt_res/style.css": "14c3cf2c",
}
self.dataFileCRC32.update(hashDict)
self.addDirCRC32(
"appledict-bin/002-simple.dictionary",
{
"Contents/Info.plist": "fa73dd65",
"Contents/Body.data": "3c073986",
"Contents/DefaultStyle.css": "a83210cb",
"Contents/EntryID.data": "37305249",
"Contents/EntryID.index": "8c30a3fa",
"Contents/Images/_internal_dictionary.png": "da4d4eb1",
"Contents/KeyText.data": "aefe15e0",
"Contents/KeyText.index": "b723c5b2",
"Contents/MyDictionary.xsl": "023de1ea",
"Contents/MyDictionary_prefs.html": "09a9f6e9",
},
)
self.addDirCRC32(
"appledict-bin/006-en-oxfjord_v10.11_c2_t3.dictionary",
{
"Contents/Info.plist": "328abb6f",
"Contents/Resources/Body.data": "03fe72e8",
"Contents/Resources/DefaultStyle.css": "c243b56a",
"Contents/Resources/EntryID.data": "d31adec1",
"Contents/Resources/EntryID.index": "6eea272c",
"Contents/Resources/KeyText.data": "d4417c62",
"Contents/Resources/KeyText.index": "59f9ab67",
},
)
def convert_appledict_binary_to_txt(
self,
baseName: str,
files: list[str],
html_full: bool = False,
resFiles: "dict[str, str] | None" = None,
):
if resFiles is None:
resFiles = {}
self.glos = Glossary()
inputDirPath = self.downloadDir(
f"appledict-bin/{baseName}.dictionary",
files,
)
outputFilePath = self.newTempFilePath(f"{baseName}.txt")
expectedOutputFilePath = self.downloadFile(
f"appledict-bin/{baseName}.txt",
)
expectedStylePath = self.downloadFile(
f"appledict-bin/{baseName}.txt_res/style.css",
)
result = self.glos.convert(
inputFilename=inputDirPath,
outputFilename=outputFilePath,
inputFormat="AppleDictBin",
outputFormat="Tabfile",
readOptions={
"html_full": html_full,
},
)
self.assertIsNotNone(result)
self.assertEqual(result, outputFilePath)
self.compareTextFiles(
outputFilePath,
expectedOutputFilePath,
)
self.compareTextFiles(
join(outputFilePath + "_res", "style.css"),
expectedStylePath,
)
for relPath, inputRelPath in resFiles.items():
self.compareBinaryFiles(
join(outputFilePath + "_res", relPath),
join(inputDirPath, inputRelPath),
)
def test_appledict_binary_to_txt_0(self):
baseName = "002-simple"
files = [
"Contents/Body.data",
"Contents/DefaultStyle.css",
"Contents/EntryID.data",
"Contents/EntryID.index",
"Contents/Images/_internal_dictionary.png",
"Contents/Info.plist",
"Contents/KeyText.data",
"Contents/KeyText.index",
"Contents/MyDictionary.xsl",
"Contents/MyDictionary_prefs.html",
]
_internal = "Images/_internal_dictionary.png"
resFiles = {
_internal: f"Contents/{_internal}",
}
self.convert_appledict_binary_to_txt(baseName, files, resFiles=resFiles)
def test_appledict_binary_to_txt_1(self):
baseName = "006-en-oxfjord_v10.11_c2_t3"
files = [
"Contents/Info.plist",
"Contents/Resources/Body.data",
"Contents/Resources/DefaultStyle.css",
"Contents/Resources/EntryID.data",
"Contents/Resources/EntryID.index",
"Contents/Resources/KeyText.data",
"Contents/Resources/KeyText.index",
]
self.convert_appledict_binary_to_txt(baseName, files)
if __name__ == "__main__":
unittest.main()
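# A minimal sketch of the conversion that convert_appledict_binary_to_txt()
# wraps; the paths here are hypothetical placeholders for a local copy of
# the test data.
from pyglossary.glossary import Glossary

glos = Glossary()
glos.convert(
	inputFilename="002-simple.dictionary",  # hypothetical local path
	outputFilename="002-simple.txt",
	inputFormat="AppleDictBin",
	outputFormat="Tabfile",
	readOptions={"html_full": False},
)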
| 3,869 | Python | .py | 122 | 28.098361 | 77 | 0.720332 | ilius/pyglossary | 2,176 | 238 | 22 | GPL-3.0 | 9/5/2024, 5:10:09 PM (Europe/Amsterdam) |

| 6,689 | option_test.py | ilius_pyglossary/tests/option_test.py |
import sys
import unittest
from os.path import abspath, dirname
rootDir = dirname(dirname(abspath(__file__)))
sys.path.insert(0, rootDir)
from pyglossary.option import (
BoolOption,
DictOption,
FileSizeOption,
FloatOption,
IntOption,
ListOption,
StrOption,
)
class TestOptionValidateBoolNumber(unittest.TestCase):
def caseOK(self, cls, raw: str, value: bool | int | float | None):
opt = cls()
valueActual, ok = opt.evaluate(raw)
self.assertTrue(ok, "evaluate failed")
self.assertEqual(valueActual, value)
ok2 = opt.validate(valueActual)
self.assertEqual(ok2, True, "validate failed")
def caseFailed(self, cls, raw: str, value: bool | int | float | None):
opt = cls()
valueActual, ok = opt.evaluate(raw)
self.assertFalse(ok)
self.assertEqual(valueActual, value)
def test_bool_ok(self):
self.caseOK(BoolOption, "True", True)
self.caseOK(BoolOption, "False", False)
self.caseOK(BoolOption, "true", True)
self.caseOK(BoolOption, "false", False)
self.caseOK(BoolOption, "TRUE", True)
self.caseOK(BoolOption, "FALSE", False)
self.caseOK(BoolOption, "1", True)
self.caseOK(BoolOption, "0", False)
self.caseOK(BoolOption, "yes", True)
self.caseOK(BoolOption, "no", False)
self.caseOK(BoolOption, "YES", True)
self.caseOK(BoolOption, "NO", False)
def test_bool_failed(self):
self.caseFailed(BoolOption, "Y", None)
self.caseFailed(BoolOption, "N", None)
self.caseFailed(BoolOption, "YESS", None)
self.caseFailed(BoolOption, "123", None)
self.caseFailed(BoolOption, "a", None)
def test_int_ok(self):
self.caseOK(IntOption, "0", 0)
self.caseOK(IntOption, "1", 1)
self.caseOK(IntOption, "-1", -1)
self.caseOK(IntOption, "1234", 1234)
def test_int_failed(self):
self.caseFailed(IntOption, "abc", None)
self.caseFailed(IntOption, "12f", None)
self.caseFailed(IntOption, "fff", None)
def test_file_size_ok(self):
self.caseOK(FileSizeOption, "0", 0)
self.caseOK(FileSizeOption, "1", 1)
self.caseOK(FileSizeOption, "1234", 1234)
self.caseOK(FileSizeOption, "123k", 123000)
self.caseOK(FileSizeOption, "123m", 123000000)
self.caseOK(FileSizeOption, "1.7g", 1700000000)
self.caseOK(FileSizeOption, "123kib", 123 * 1024)
self.caseOK(FileSizeOption, "123KiB", 123 * 1024)
self.caseOK(FileSizeOption, "123ki", 123 * 1024)
self.caseOK(FileSizeOption, "123Ki", 123 * 1024)
self.caseOK(FileSizeOption, "123mib", 123 * 1024**2)
self.caseOK(FileSizeOption, "123MiB", 123 * 1024**2)
self.caseOK(FileSizeOption, "123mi", 123 * 1024**2)
self.caseOK(FileSizeOption, "123Mi", 123 * 1024**2)
self.caseOK(FileSizeOption, "1.7gib", int(1.7 * 1024**3))
self.caseOK(FileSizeOption, "1.7GiB", int(1.7 * 1024**3))
self.caseOK(FileSizeOption, "1.7gi", int(1.7 * 1024**3))
self.caseOK(FileSizeOption, "1.7Gi", int(1.7 * 1024**3))
def test_file_size_failed(self):
self.caseFailed(FileSizeOption, "-1", None)
self.caseFailed(FileSizeOption, "123kg", None)
self.caseFailed(FileSizeOption, "123k.1", None)
def test_float_ok(self):
self.caseOK(FloatOption, "0", 0.0)
self.caseOK(FloatOption, "1", 1.0)
self.caseOK(FloatOption, "-1", -1.0)
self.caseOK(FloatOption, "1234", 1234.0)
self.caseOK(FloatOption, "1.5", 1.5)
self.caseOK(FloatOption, "-7.9", -7.9)
def test_float_failed(self):
self.caseFailed(FloatOption, "abc", None)
self.caseFailed(FloatOption, "12f", None)
self.caseFailed(FloatOption, "fff", None)
class TestOptionValidateStr(unittest.TestCase):
def newTester(self, customValue: bool, values: list[str]):
def test(raw: str, valid: bool):
opt = StrOption(customValue=customValue, values=values)
valueActual, evalOkActual = opt.evaluate(raw)
self.assertEqual(evalOkActual, True, "evaluate failed")
self.assertEqual(valueActual, raw)
validActual = opt.validate(valueActual)
self.assertEqual(validActual, valid, "validate failed")
return test
def test_1(self):
test = self.newTester(False, ["a", "b", "c"])
test("a", True)
test("b", True)
test("c", True)
test("d", False)
test("123", False)
def test_2(self):
test = self.newTester(True, ["a", "b", "3"])
test("a", True)
test("b", True)
test("c", True)
test("d", True)
test("123", True)
class TestOptionValidateDict(unittest.TestCase):
def caseOK(self, raw: str, value: dict | None):
opt = DictOption()
valueActual, ok = opt.evaluate(raw)
self.assertTrue(ok, "evaluate failed")
self.assertEqual(valueActual, value)
ok2 = opt.validate(valueActual)
self.assertEqual(ok2, True, "validate failed")
def caseEvalFail(self, raw: str):
opt = DictOption()
valueActual, ok = opt.evaluate(raw)
self.assertFalse(ok)
self.assertEqual(valueActual, None)
def test_dict_ok(self):
self.caseOK("", None)
self.caseOK("{}", {})
self.caseOK('{"a": 1}', {"a": 1})
self.caseOK('{"a": "b", "123":456}', {"a": "b", "123": 456})
def test_dict_syntaxErr(self):
self.caseEvalFail("123abc")
self.caseEvalFail("{")
self.caseEvalFail("(")
self.caseEvalFail('{"a": 1')
self.caseEvalFail('{"a": 1]')
self.caseEvalFail("][")
def test_dict_notDict(self):
self.caseEvalFail("123")
self.caseEvalFail("[]")
self.caseEvalFail("[1, 2, 3]")
self.caseEvalFail('["a", 2, 3.5]')
self.caseEvalFail("{10, 20, 30}")
class TestOptionValidateList(unittest.TestCase):
def caseOK(self, raw: str, value: list | None):
opt = ListOption()
valueActual, ok = opt.evaluate(raw)
self.assertTrue(ok, "evaluate failed")
self.assertEqual(valueActual, value)
ok2 = opt.validate(valueActual)
self.assertEqual(ok2, True, "validate failed")
def caseEvalFail(self, raw: str):
opt = ListOption()
valueActual, ok = opt.evaluate(raw)
self.assertFalse(ok, f"evaluale did not fail, {valueActual=}")
self.assertEqual(valueActual, None)
def test_list_ok(self):
self.caseOK("", None)
self.caseOK("[]", [])
self.caseOK('["a", "b"]', ["a", "b"])
self.caseOK("[1, 2, 3]", [1, 2, 3])
self.caseOK('["a", 2, 3.5]', ["a", 2, 3.5])
def test_list_syntaxErr(self):
self.caseEvalFail("123abc")
self.caseEvalFail("{")
self.caseEvalFail("(")
self.caseEvalFail('{"a": 1')
self.caseEvalFail('{"a": 1]')
self.caseEvalFail("][")
def test_list_notList(self):
self.caseEvalFail("123")
self.caseEvalFail("{10, 20, 30}")
self.caseEvalFail('{"a": 1}')
self.caseEvalFail('{"a": "b", "123":456}')
if __name__ == "__main__":
unittest.main()
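# A minimal standalone sketch of the two-step contract exercised above:
# evaluate() parses a raw string into (value, ok), then validate() checks
# the parsed value. The expected values come from the cases above.
from pyglossary.option import BoolOption, FileSizeOption

opt = BoolOption()
value, ok = opt.evaluate("yes")  # -> (True, True)
assert ok and opt.validate(value)

sizeOpt = FileSizeOption()
size, ok = sizeOpt.evaluate("123KiB")  # "i" suffixes are binary (1024-based)
assert ok and size == 123 * 1024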
| 6,371 | Python | .py | 177 | 33.039548 | 64 | 0.700309 | ilius/pyglossary | 2,176 | 238 | 22 | GPL-3.0 | 9/5/2024, 5:10:09 PM (Europe/Amsterdam) |

| 6,690 | glossary_test.py | ilius_pyglossary/tests/glossary_test.py |
import hashlib
import json
import logging
import os
import random
import sys
import tempfile
import tracemalloc
import unittest
import zipfile
from collections.abc import Callable
from os.path import abspath, dirname, isdir, isfile, join
from urllib.request import urlopen
rootDir = dirname(dirname(abspath(__file__)))
sys.path.insert(0, rootDir)
from pyglossary.core import cacheDir, log, tmpDir
from pyglossary.glossary import Glossary
from pyglossary.os_utils import rmtree
from pyglossary.text_utils import crc32hex
__all__ = ["TestGlossaryBase", "appTmpDir"]
tracemalloc.start()
Glossary.init()
repo = os.getenv(
"PYGLOSSARY_TEST_REPO",
"ilius/pyglossary-test/main",
)
dataURL = f"https://raw.githubusercontent.com/{repo}/{{filename}}"
testCacheDir = join(cacheDir, "test")
appTmpDir = join(cacheDir, "tmp")
os.makedirs(testCacheDir, exist_ok=True)
os.chdir(testCacheDir)
os.makedirs(join(tmpDir, "pyglossary"), exist_ok=True)
class TestGlossaryBase(unittest.TestCase):
def __init__(self, *args, **kwargs):
unittest.TestCase.__init__(self, *args, **kwargs)
self.maxDiff = None
self.dataFileCRC32 = {
"004-bar.txt": "6775e590",
"004-bar-sort.txt": "fe861123",
"006-empty.txt": "07ff224b",
"006-empty-filtered.txt": "2b3c1c0f",
"100-en-de-v4.txt": "d420a669",
"100-en-fa.txt": "f5c53133",
"100-ja-en.txt": "93542e89",
"100-en-de-v4-remove_font_b.txt": "a3144e2f",
"100-en-de-v4.info": "f2cfb284",
"100-en-fa.info": "9bddb7bb",
"100-en-fa-v2.info": "7c0f646b",
"100-ja-en.info": "8cf5403c",
"300-rand-en-fa.txt": "586617c8",
"res/stardict.png": "7e1447fa",
"res/test.json": "41f8cf31",
}
def addDirCRC32(self, dirPath: str, files: "dict[str, str]") -> None:
for fpath, _hash in files.items():
self.dataFileCRC32[f"{dirPath}/{fpath}"] = _hash
# The setUp() and tearDown() methods allow you to define instructions that
# will be executed before and after each test method.
def setUp(self):
self.glos = None
self.tempDir = tempfile.mkdtemp(dir=join(tmpDir, "pyglossary"))
def tearDown(self):
if self.glos is not None:
self.glos.cleanup()
self.glos.clear()
if os.getenv("NO_CLEANUP"):
return
for direc in (
self.tempDir,
appTmpDir,
):
if isdir(direc):
rmtree(direc)
def fixDownloadFilename(self, filename):
return filename.replace("/", "__").replace("\\", "__")
def downloadFile(self, filename):
unixFilename = filename.replace("\\", "/")
_crc32 = self.dataFileCRC32[unixFilename]
fpath = join(testCacheDir, self.fixDownloadFilename(filename))
if isfile(fpath):
with open(fpath, mode="rb") as _file:
data = _file.read()
if crc32hex(data) != _crc32:
raise RuntimeError(f"CRC32 check failed for existing file: {fpath}")
return fpath
try:
with urlopen(dataURL.format(filename=unixFilename)) as res:
data = res.read()
except Exception as e:
print(f"{filename=}")
raise e from None
actual_crc32 = crc32hex(data)
if actual_crc32 != _crc32:
raise RuntimeError(
"CRC32 check failed for downloaded file: "
f"{filename}: {actual_crc32}",
)
with open(fpath, mode="wb") as _file:
_file.write(data)
return fpath
def downloadDir(self, dirName: str, files: list[str]) -> str:
dirPath = join(testCacheDir, self.fixDownloadFilename(dirName))
for fileRelPath in files:
newFilePath = join(dirPath, fileRelPath)
if isfile(newFilePath):
# TODO: check crc-32
continue
filePath = self.downloadFile(join(dirName, fileRelPath))
os.makedirs(dirname(newFilePath), exist_ok=True)
os.rename(filePath, newFilePath)
return dirPath
def newTempFilePath(self, filename):
fpath = join(self.tempDir, filename)
if isfile(fpath):
os.remove(fpath)
return fpath
def showGlossaryDiff(self, fpath1, fpath2) -> None:
from pyglossary.ui.tools.diff_glossary import diffGlossary
diffGlossary(fpath1, fpath2)
def compareTextFiles(self, fpath1, fpath2, showDiff=False):
self.assertTrue(isfile(fpath1), f"{fpath1 = }")
self.assertTrue(isfile(fpath2), f"{fpath2 = }")
with open(fpath1, encoding="utf-8") as file1:
text1 = file1.read().rstrip("\n")
with open(fpath2, encoding="utf-8") as file2:
text2 = file2.read().rstrip("\n")
try:
self.assertEqual(
len(text1),
len(text2),
msg=f"{fpath1} differs from {fpath2} in file size",
)
self.assertEqual(
text1,
text2,
msg=f"{fpath1} differs from {fpath2}",
)
except AssertionError as e:
if showDiff:
self.showGlossaryDiff(fpath1, fpath2)
raise e from None
def compareBinaryFiles(self, fpath1, fpath2):
self.assertTrue(isfile(fpath1), f"File {fpath1} does not exist")
self.assertTrue(isfile(fpath2), f"File {fpath2} does not exist")
with open(fpath1, mode="rb") as file1:
data1 = file1.read()
with open(fpath2, mode="rb") as file2:
data2 = file2.read()
self.assertEqual(len(data1), len(data2), msg=f"{fpath1}")
self.assertTrue(
data1 == data2,
msg=f"{fpath1} differs from {fpath2}",
)
def compareZipFiles(
self,
fpath1,
fpath2,
dataReplaceFuncs: "dict[str, Callable]",
):
zf1 = zipfile.ZipFile(fpath1)
zf2 = zipfile.ZipFile(fpath2)
pathList1 = zf1.namelist()
pathList2 = zf2.namelist()
# assertEqual raises on mismatch and returns None, so there is nothing
# truthy to branch on; assert and fall through to compare the contents.
self.assertEqual(pathList1, pathList2)
for zfpath in pathList1:
data1 = zf1.read(zfpath)
data2 = zf2.read(zfpath)
func = dataReplaceFuncs.get(zfpath)
if func is not None:
data1 = func(data1)
data2 = func(data2)
self.assertEqual(len(data1), len(data2), msg=f"{zfpath=}")
self.assertTrue(
data1 == data2,
msg=f"{zfpath=}",
)
def checkZipFileSha1sum(
self,
fpath,
sha1sumDict: "dict[str, str]",
dataReplaceFuncs: "dict[str, Callable] | None" = None,
):
if dataReplaceFuncs is None:
dataReplaceFuncs = {}
zf = zipfile.ZipFile(fpath)
# pathList = zf.namelist()
for zfpath, expectedSha1 in sha1sumDict.items():
data = zf.read(zfpath)
func = dataReplaceFuncs.get(zfpath)
if func is not None:
data = func(data)
actualSha1 = hashlib.sha1(data).hexdigest()
self.assertEqual(actualSha1, expectedSha1, msg=f"file: {zfpath}")
def convert( # noqa: PLR0913
self,
fname, # input file with extension
fname2, # output file with extension
testId="tmp", # noqa: ARG002
compareText="",
compareBinary="",
sha1sum=None,
md5sum=None,
config=None,
showDiff=False,
**convertArgs,
):
inputFilename = self.downloadFile(fname)
outputFilename = self.newTempFilePath(fname2)
glos = self.glos = Glossary()
if config is not None:
glos.config = config
res = glos.convert(
inputFilename=inputFilename,
outputFilename=outputFilename,
**convertArgs,
)
self.assertEqual(outputFilename, res)
if compareText:
self.compareTextFiles(
outputFilename,
self.downloadFile(compareText),
showDiff=showDiff,
)
elif compareBinary:
self.compareBinaryFiles(outputFilename, self.downloadFile(compareBinary))
elif sha1sum:
with open(outputFilename, mode="rb") as _file:
actualSha1 = hashlib.sha1(_file.read()).hexdigest()
self.assertEqual(actualSha1, sha1sum)
elif md5sum:
with open(outputFilename, mode="rb") as _file:
actualMd5 = hashlib.md5(_file.read()).hexdigest()
self.assertEqual(actualMd5, md5sum)
def convert_sqlite_both(self, *args, **kwargs):
for sqlite in (None, True, False):
self.convert(*args, sqlite=sqlite, **kwargs)
class TestGlossary(TestGlossaryBase):
def __init__(self, *args, **kwargs):
TestGlossaryBase.__init__(self, *args, **kwargs)
self.dataFileCRC32.update(
{
"100-en-fa-sort.txt": "d7a82dc8",
"100-en-fa-sort-headword.txt": "4067a29f",
"100-en-fa-sort-ebook.txt": "aa620d07",
"100-en-fa-sort-ebook3.txt": "5a20f140",
"100-en-fa-lower.txt": "62178940",
"100-en-fa-remove_html_all-v3.txt": "d611c978",
"100-en-fa-rtl.txt": "25ede1e8",
"300-rand-en-fa-sort-headword-w1256.txt": "06d83bac",
"300-rand-en-fa-sort-headword.txt": "df0f8020",
"300-rand-en-fa-sort-w1256.txt": "9594aab3",
"sort-locale/092-en-fa-alphabet-sample.txt": "b4856532",
"sort-locale/092-en-fa-alphabet-sample-sorted-default.txt": "e7b70589",
"sort-locale/092-en-fa-alphabet-sample-sorted-en.txt": "3d2bdf73",
"sort-locale/092-en-fa-alphabet-sample-sorted-fa.txt": "245419db",
"sort-locale/092-en-fa-alphabet-sample-sorted-latin-fa.txt": "261c03c0",
},
)
def setUp(self):
TestGlossaryBase.setUp(self)
self.prevLogLevel = log.level
log.setLevel(logging.ERROR)
def tearDown(self):
TestGlossaryBase.tearDown(self)
log.setLevel(self.prevLogLevel)
def test__str__1(self):
glos = self.glos = Glossary()
self.assertEqual(str(glos), "Glossary{filename: '', name: None}")
def test__str__2(self):
glos = self.glos = Glossary()
glos._filename = "test.txt"
self.assertEqual(str(glos), "Glossary{filename: 'test.txt', name: None}")
def test__str__3(self):
glos = self.glos = Glossary()
glos.setInfo("title", "Test Title")
self.assertEqual(
str(glos),
"Glossary{filename: '', name: 'Test Title'}",
)
def test__str__4(self):
glos = self.glos = Glossary()
glos._filename = "test.txt"
glos.setInfo("title", "Test Title")
self.assertEqual(
str(glos),
"Glossary{filename: 'test.txt', name: 'Test Title'}",
)
def test_info_1(self):
glos = self.glos = Glossary()
glos.setInfo("test", "ABC")
self.assertEqual(glos.getInfo("test"), "ABC")
def test_info_2(self):
glos = self.glos = Glossary()
glos.setInfo("bookname", "Test Glossary")
self.assertEqual(glos.getInfo("title"), "Test Glossary")
def test_info_3(self):
glos = self.glos = Glossary()
glos.setInfo("bookname", "Test Glossary")
glos.setInfo("title", "Test 2")
self.assertEqual(glos.getInfo("name"), "Test 2")
self.assertEqual(glos.getInfo("bookname"), "Test 2")
self.assertEqual(glos.getInfo("title"), "Test 2")
def test_info_4(self):
glos = self.glos = Glossary()
glos.setInfo("test", 123)
self.assertEqual(glos.getInfo("test"), "123")
def test_info_del_1(self):
glos = self.glos = Glossary()
glos.setInfo("test", "abc")
self.assertEqual(glos.getInfo("test"), "abc")
glos.setInfo("test", None)
self.assertEqual(glos.getInfo("test"), "")
def test_info_del_2(self):
glos = self.glos = Glossary()
glos.setInfo("test", None)
self.assertEqual(glos.getInfo("test"), "")
def test_setInfo_err1(self):
glos = self.glos = Glossary()
try:
glos.setInfo(1, "a")
except TypeError as e:
self.assertEqual(str(e), "invalid key=1, must be str")
else:
self.fail("must raise a TypeError")
def test_getInfo_err1(self):
glos = self.glos = Glossary()
try:
glos.getInfo(1)
except TypeError as e:
self.assertEqual(str(e), "invalid key=1, must be str")
else:
self.fail("must raise a TypeError")
def test_getExtraInfos_1(self):
glos = self.glos = Glossary()
glos.setInfo("a", "test 1")
glos.setInfo("b", "test 2")
glos.setInfo("c", "test 3")
glos.setInfo("d", "test 4")
glos.setInfo("name", "my name")
self.assertEqual(
glos.getExtraInfos(["b", "c", "title"]),
{"a": "test 1", "d": "test 4"},
)
def test_infoKeys_1(self):
glos = self.glos = Glossary()
glos.setInfo("a", "test 1")
glos.setInfo("b", "test 2")
glos.setInfo("name", "test name")
glos.setInfo("title", "test title")
self.assertEqual(
glos.infoKeys(),
["a", "b", "name"],
)
def test_config_attr_get(self):
glos = self.glos = Glossary()
try:
glos.config # noqa: B018
except NotImplementedError:
pass
else:
self.fail("must raise NotImplementedError")
def test_config_attr_set(self):
glos = self.glos = Glossary()
glos.config = {"lower": True}
self.assertEqual(glos.getConfig("lower", False), True)
def test_read_txt_1(self):
inputFilename = self.downloadFile("100-en-fa.txt")
glos = self.glos = Glossary()
res = glos.read(filename=inputFilename)
self.assertTrue(res)
self.assertEqual(glos.sourceLangName, "English")
self.assertEqual(glos.targetLangName, "Persian")
self.assertIn("Sample: ", glos.getInfo("name"))
self.assertEqual(len(glos), 100)
def test_read_txt_direct_1(self):
inputFilename = self.downloadFile("100-en-fa.txt")
glos = self.glos = Glossary()
res = glos.read(filename=inputFilename, direct=True)
self.assertTrue(res)
self.assertEqual(glos.sourceLangName, "English")
self.assertEqual(glos.targetLangName, "Persian")
self.assertIn("Sample: ", glos.getInfo("name"))
self.assertEqual(len(glos), 0)
def test_init_infoDict(self):
glos = self.glos = Glossary(info={"a": "b"})
self.assertEqual(list(glos.iterInfo()), [("a", "b")])
def test_init_infoOrderedDict(self):
from collections import OrderedDict
glos = self.glos = Glossary(
info=OrderedDict(
[
("y", "z"),
("a", "b"),
("1", "2"),
],
),
)
self.assertEqual(list(glos.iterInfo()), [("y", "z"), ("a", "b"), ("1", "2")])
def test_lang_1(self):
glos = self.glos = Glossary()
self.assertEqual(glos.sourceLangName, "")
self.assertEqual(glos.targetLangName, "")
glos.sourceLangName = "ru"
glos.targetLangName = "de"
self.assertEqual(glos.sourceLangName, "Russian")
self.assertEqual(glos.targetLangName, "German")
def test_lang_get_source(self):
glos = self.glos = Glossary()
glos.setInfo("sourcelang", "farsi")
self.assertEqual(glos.sourceLangName, "Persian")
def test_lang_get_target(self):
glos = self.glos = Glossary()
glos.setInfo("targetlang", "malay")
self.assertEqual(glos.targetLangName, "Malay")
def test_lang_set_source(self):
glos = self.glos = Glossary()
glos.sourceLangName = "en"
self.assertEqual(glos.sourceLangName, "English")
def test_lang_set_source_empty(self):
glos = self.glos = Glossary()
glos.sourceLangName = ""
self.assertEqual(glos.sourceLangName, "")
def test_lang_set_target(self):
glos = self.glos = Glossary()
glos.targetLangName = "fa"
self.assertEqual(glos.targetLangName, "Persian")
def test_lang_set_target_empty(self):
glos = self.glos = Glossary()
glos.targetLangName = ""
self.assertEqual(glos.targetLangName, "")
def test_lang_getObj_source(self):
glos = self.glos = Glossary()
glos.setInfo("sourcelang", "farsi")
self.assertEqual(glos.sourceLang.name, "Persian")
def test_lang_getObj_target(self):
glos = self.glos = Glossary()
glos.setInfo("targetlang", "malay")
self.assertEqual(glos.targetLang.name, "Malay")
def test_lang_detect_1(self):
glos = self.glos = Glossary()
glos.setInfo("name", "en-fa")
glos.detectLangsFromName()
self.assertEqual(
(glos.sourceLangName, glos.targetLangName),
("English", "Persian"),
)
def test_lang_detect_2(self):
glos = self.glos = Glossary()
glos.setInfo("name", "test-en-fa")
glos.detectLangsFromName()
self.assertEqual(
(glos.sourceLangName, glos.targetLangName),
("English", "Persian"),
)
def test_lang_detect_3(self):
glos = self.glos = Glossary()
glos.setInfo("name", "eng to per")
glos.detectLangsFromName()
self.assertEqual(
(glos.sourceLangName, glos.targetLangName),
("English", "Persian"),
)
def test_lang_detect_4(self):
glos = self.glos = Glossary()
glos.setInfo("name", "Test english to farsi")
glos.detectLangsFromName()
self.assertEqual(
(glos.sourceLangName, glos.targetLangName),
("English", "Persian"),
)
def test_lang_detect_5(self):
glos = self.glos = Glossary()
glos.setInfo("name", "freedict-eng-deu.index")
glos.detectLangsFromName()
self.assertEqual(
(glos.sourceLangName, glos.targetLangName),
("English", "German"),
)
def convert_txt_txt(
self,
fname, # input txt file without extension
fname2, # expected output txt file without extension
testId="tmp",
config=None,
**convertArgs,
):
self.convert(
f"{fname}.txt",
f"{fname2}-{testId}.txt",
compareText=f"{fname2}.txt",
testId=testId,
config=config,
**convertArgs,
)
def convert_to_txtZip(
self,
fname, # input file with extension
fname2, # expected output file without extensions
testId="tmp",
config=None,
**convertArgs,
):
inputFilename = self.downloadFile(fname)
outputTxtName = f"{fname2}-{testId}.txt"
outputFilename = self.newTempFilePath(f"{outputTxtName}.zip")
expectedFilename = self.downloadFile(f"{fname2}.txt")
glos = self.glos = Glossary()
if config is not None:
glos.config = config
res = glos.convert(
inputFilename=inputFilename,
outputFilename=outputFilename,
**convertArgs,
)
self.assertEqual(outputFilename, res)
zf = zipfile.ZipFile(outputFilename)
self.assertTrue(
outputTxtName in zf.namelist(),
msg=f"{outputTxtName} not in {zf.namelist()}",
)
with open(expectedFilename, encoding="utf-8") as expectedFile:
expectedText = expectedFile.read()
actualText = zf.read(outputTxtName).decode("utf-8")
self.assertEqual(len(actualText), len(expectedText))
self.assertEqual(actualText, expectedText)
def test_txt_txtZip_1(self):
self.convert_to_txtZip(
"100-en-fa.txt",
"100-en-fa",
testId="txt_txtZip_1",
infoOverride={"input_file_size": None},
)
def test_sort_1(self):
self.convert_txt_txt(
"100-en-fa",
"100-en-fa-sort",
testId="sort_1",
sort=True,
)
def test_sort_2(self):
self.convert_txt_txt(
"100-en-fa",
"100-en-fa-sort",
testId="sort_2",
sort=True,
sortKeyName="headword_lower",
)
def test_sort_3(self):
self.convert_txt_txt(
"100-en-fa",
"100-en-fa-sort-headword",
testId="sort_3",
sort=True,
sortKeyName="headword",
)
def test_sort_4(self):
self.convert_txt_txt(
"300-rand-en-fa",
"300-rand-en-fa-sort-headword",
testId="sort_4",
sort=True,
sortKeyName="headword",
)
def test_sort_5(self):
self.convert_txt_txt(
"300-rand-en-fa",
"300-rand-en-fa-sort-headword-w1256",
testId="sort_5",
sort=True,
sortKeyName="headword",
sortEncoding="windows-1256",
)
def test_sort_6(self):
self.convert_txt_txt(
"300-rand-en-fa",
"300-rand-en-fa-sort-w1256",
testId="sort_6",
sort=True,
sortKeyName="headword_lower",
sortEncoding="windows-1256",
)
def test_sort_7(self):
self.convert_txt_txt(
"100-en-fa",
"100-en-fa-sort-ebook",
testId="sort_7",
sort=True,
sortKeyName="ebook",
)
def test_sort_8(self):
self.convert_txt_txt(
"100-en-fa",
"100-en-fa-sort-ebook3",
testId="sort_8",
sort=True,
sortKeyName="ebook_length3",
)
def test_lower_1(self):
self.convert_txt_txt(
"100-en-fa",
"100-en-fa-lower",
testId="lower_1",
config={"lower": True},
)
def test_rtl_1(self):
self.convert_txt_txt(
"100-en-fa",
"100-en-fa-rtl",
testId="rtl_1",
config={"rtl": True},
)
def test_remove_html_all_1(self):
self.convert_txt_txt(
"100-en-fa",
"100-en-fa-remove_html_all-v3",
testId="remove_html_all_1",
config={"remove_html_all": True},
)
def test_remove_html_1(self):
self.convert_txt_txt(
"100-en-de-v4",
"100-en-de-v4-remove_font_b",
testId="remove_html_1",
config={"remove_html": "font,b"},
)
def test_save_info_json(self):
fname = "100-en-fa"
testId = "save_info_json"
infoPath = self.newTempFilePath(f"{fname}-{testId}.info")
self.convert_txt_txt(
fname,
fname,
testId=testId,
config={"save_info_json": True},
infoOverride={"input_file_size": None},
)
with open(infoPath, encoding="utf8") as _file:
infoDict = json.load(_file)
with open(self.downloadFile(f"{fname}-v2.info"), encoding="utf8") as _file:
infoDictExpected = json.load(_file)
for key, value in infoDictExpected.items():
self.assertIn(key, infoDict)
self.assertEqual(value, infoDict.get(key))
def test_convert_sqlite_direct_error(self):
glos = self.glos = Glossary()
try:
glos.convert(
inputFilename="foo.txt",
outputFilename="bar.txt",
direct=True,
sqlite=True,
)
except ValueError as e:
self.assertEqual(str(e), "Conflictng arguments: direct=True, sqlite=True")
else:
self.fail("must raise a ValueError")
def test_txt_txt_bar(self):
for direct in (None, False, True):
self.convert_txt_txt(
"004-bar",
"004-bar",
testId="bar",
direct=direct,
infoOverride={
"name": None,
"input_file_size": None,
},
)
def test_txt_txt_bar_sort(self):
for sqlite in (None, False, True):
self.convert_txt_txt(
"004-bar",
"004-bar-sort",
testId="bar_sort",
sort=True,
sqlite=sqlite,
)
def test_txt_txt_empty_filtered(self):
for direct in (None, False, True):
self.convert_txt_txt(
"006-empty",
"006-empty-filtered",
testId="empty_filtered",
direct=direct,
)
def test_txt_txt_empty_filtered_sqlite(self):
for sqlite in (None, False, True):
self.convert_txt_txt(
"006-empty",
"006-empty-filtered",
testId="empty_filtered_sqlite",
sqlite=sqlite,
)
def test_dataEntry_save(self):
glos = self.glos = Glossary()
tmpFname = "test_dataEntry_save"
entry = glos.newDataEntry(tmpFname, b"test")
saveFpath = entry.save(self.tempDir)
self.assertTrue(
isfile(saveFpath),
msg=f"saved file does not exist: {saveFpath}",
)
def test_dataEntry_getFileName(self):
glos = self.glos = Glossary()
tmpFname = "test_dataEntry_getFileName"
entry = glos.newDataEntry(tmpFname, b"test")
self.assertEqual(entry.getFileName(), tmpFname)
def test_cleanup_noFile(self):
glos = self.glos = Glossary()
glos.cleanup()
def test_cleanup_cleanup(self):
glos = self.glos = Glossary()
tmpFname = "test_cleanup_cleanup"
entry = glos.newDataEntry(tmpFname, b"test")
tmpFpath = entry._tmpPath
self.assertTrue(bool(tmpFpath), msg="entry tmpPath is empty")
self.assertTrue(
isfile(tmpFpath),
msg=f"tmp file does not exist: {tmpFpath}",
)
glos.cleanup()
self.assertTrue(
not isfile(tmpFpath),
msg=f"tmp file still exists: {tmpFpath}",
)
def test_cleanup_noCleanup(self):
glos = self.glos = Glossary()
tmpFname = "test_cleanup_noCleanup"
entry = glos.newDataEntry(tmpFname, b"test")
tmpFpath = entry._tmpPath
self.assertTrue(bool(tmpFpath), msg="entry tmpPath is empty")
self.assertTrue(isfile(tmpFpath), msg=f"tmp file does not exist: {tmpFpath}")
glos.config = {"cleanup": False}
glos.cleanup()
self.assertTrue(isfile(tmpFpath), msg=f"tmp file does not exist: {tmpFpath}")
def test_rawEntryCompress(self):
glos = self.glos = Glossary()
glos.setRawEntryCompress(True)
self.assertTrue(glos.rawEntryCompress)
glos.setRawEntryCompress(False)
self.assertFalse(glos.rawEntryCompress)
def addWordsList(self, glos, words, newDefiFunc=str, defiFormat=""):
wordsList = []
for index, line in enumerate(words):
words = line.rstrip().split("|")
wordsList.append(words)
glos.addEntryObj(
glos.newEntry(
words,
newDefiFunc(index),
defiFormat=defiFormat,
),
)
glos.updateIter()
return wordsList
def addWords(self, glos, wordsStr, **kwargs):
return self.addWordsList(glos, wordsStr.split("\n"), **kwargs)
tenWordsStr = """comedic
tubenose
organosol
adipocere
gid
next friend
bitter apple
caca|ca-ca
darkling beetle
japonica"""
tenWordsStr2 = """comedic
Tubenose
organosol
Adipocere
gid
Next friend
bitter apple
Caca|ca-ca
darkling beetle
Japonica"""
tenWordsStrFa = (
"بیمارانه\nگالوانومتر\nنقاهت\nرشک"
"مندی\nناکاستنی\nشگفتآفرینی\nچندپاری\nنامبارکی\nآماسش\nانگیزنده"
)
def test_addEntries_1(self):
glos = self.glos = Glossary()
wordsList = self.addWords(
glos,
self.tenWordsStr,
newDefiFunc=lambda _i: str(random.randint(0, 10000)),
)
self.assertEqual(wordsList, [entry.l_word for entry in glos])
def test_addEntries_2(self):
# entry filters don't apply to loaded entries (added with addEntryObj)
glos = self.glos = Glossary()
glos.addEntryObj(glos.newEntry(["a"], "test 1"))
glos.addEntryObj(glos.newEntry([""], "test 2"))
glos.addEntryObj(glos.newEntry(["b"], "test 3"))
glos.addEntryObj(glos.newEntry([], "test 4"))
glos.updateEntryFilters()
glos.updateIter()
self.assertEqual(
[["a"], [""], ["b"], []],
[entry.l_word for entry in glos],
)
def test_addEntries_3(self):
glos = self.glos = Glossary()
glos.addEntryObj(glos.newEntry(["a"], "test 1"))
glos.addEntryObj(glos.newEntry(["b"], "test 3"))
glos.addEntryObj(
glos.newDataEntry(
"file.bin",
b"hello\x00world",
),
)
glos.updateEntryFilters()
glos.updateIter()
wordListList = []
dataEntries = []
for entry in glos:
wordListList.append(entry.l_word)
if entry.isData():
dataEntries.append(entry)
self.assertEqual(
wordListList,
[["a"], ["b"], ["file.bin"]],
)
self.assertEqual(len(dataEntries), 1)
self.assertEqual(dataEntries[0].getFileName(), "file.bin")
self.assertEqual(dataEntries[0].data, b"hello\x00world")
def test_sortWords_1(self):
glos = self.glos = Glossary()
wordsList = self.addWords(
glos,
self.tenWordsStr,
newDefiFunc=lambda _i: str(random.randint(0, 10000)),
)
self.assertEqual(wordsList, [entry.l_word for entry in glos])
glos.sortWords()
self.assertEqual(sorted(wordsList), [entry.l_word for entry in glos])
def test_sortWords_2(self):
glos = self.glos = Glossary()
wordsList = self.addWords(
glos,
self.tenWordsStr2,
newDefiFunc=lambda _i: str(random.randint(0, 10000)),
)
self.assertEqual(wordsList, [entry.l_word for entry in glos])
glos.sortWords(sortKeyName="headword")
self.assertEqual(
[entry.l_word for entry in glos],
[
["Adipocere"],
["Caca", "ca-ca"],
["Japonica"],
["Next friend"],
["Tubenose"],
["bitter apple"],
["comedic"],
["darkling beetle"],
["gid"],
["organosol"],
],
)
def test_sortWords_3(self):
glos = self.glos = Glossary()
wordsList = self.addWords(
glos,
self.tenWordsStrFa,
newDefiFunc=lambda _i: str(random.randint(0, 10000)),
)
self.assertEqual(wordsList, [entry.l_word for entry in glos])
glos.sortWords(sortKeyName="headword")
ls1 = ["آماسش", "انگیزنده", "بیمارانه", "رشکمندی", "شگفتآفرینی"]
ls2 = ["نامبارکی", "ناکاستنی", "نقاهت", "چندپاری", "گالوانومتر"]
self.assertEqual(
[entry.s_word for entry in glos],
ls1 + ls2,
)
def test_sortWords_4(self):
glos = self.glos = Glossary()
wordsList = self.addWords(
glos,
self.tenWordsStrFa,
newDefiFunc=lambda _i: str(random.randint(0, 10000)),
)
self.assertEqual(wordsList, [entry.l_word for entry in glos])
glos.sortWords(
sortKeyName="headword",
sortEncoding="windows-1256",
)
ls1 = ["چندپاری", "گالوانومتر", "آماسش", "انگیزنده", "بیمارانه"]
ls2 = ["رشکمندی", "شگفتآفرینی", "ناکاستنی", "نامبارکی", "نقاهت"]
self.assertEqual(
[entry.s_word for entry in glos],
ls1 + ls2,
)
def test_sortWords_5(self):
glos = self.glos = Glossary()
alphabetW1256 = "ءآأئابتثجحخدذرزسشصضطظعغـفقكلمنهوىي"
alphabetW1256_shuf = "مفزنصـذرخوآظسقلدغطيعحءأتىئاجهضثشكب"
wordsList = self.addWordsList(
glos,
list(alphabetW1256_shuf),
newDefiFunc=lambda _i: str(random.randint(0, 10000)),
)
self.assertEqual(wordsList, [entry.l_word for entry in glos])
glos.sortWords(
sortKeyName="headword",
sortEncoding="windows-1256",
)
self.assertEqual(
[entry.s_word for entry in glos],
list(alphabetW1256),
)
def test_sortWords_exc_1(self):
fname = "100-en-fa.txt"
glos = self.glos = Glossary()
glos.read(self.downloadFile(fname), direct=True)
try:
glos.sortWords()
except NotImplementedError as e:
self.assertEqual(str(e), "can not use sortWords in direct mode")
else:
self.fail("must raise NotImplementedError")
def test_read_filename(self):
glos = self.glos = Glossary()
glos.read(self.downloadFile("004-bar.txt"))
self.assertEqual(glos.filename, join(testCacheDir, "004-bar"))
def test_wordTitleStr_em1(self):
glos = self.glos = Glossary()
self.assertEqual(glos.wordTitleStr(""), "")
def test_wordTitleStr_em2(self):
glos = self.glos = Glossary()
glos._defiHasWordTitle = True
self.assertEqual(glos.wordTitleStr("test1"), "")
def test_wordTitleStr_b1(self):
glos = self.glos = Glossary()
self.assertEqual(glos.wordTitleStr("test1"), "<b>test1</b><br>")
def test_wordTitleStr_b2(self):
glos = self.glos = Glossary()
self.assertEqual(
glos.wordTitleStr("test1", _class="headword"),
'<b class="headword">test1</b><br>',
)
def test_wordTitleStr_cjk1(self):
glos = self.glos = Glossary()
self.assertEqual(
glos.wordTitleStr("test1", sample="くりかえし"),
"<big>test1</big><br>",
)
def test_wordTitleStr_cjk2(self):
glos = self.glos = Glossary()
self.assertEqual(
glos.wordTitleStr("くりかえし"),
"<big>くりかえし</big><br>",
)
def test_convert_sortLocale_default_1(self):
name = "092-en-fa-alphabet-sample"
self.convert_sqlite_both(
f"sort-locale/{name}.txt",
f"{name}-sorted-default.txt",
compareText=f"sort-locale/{name}-sorted-default.txt",
testId="sorted-default",
sort=True,
sortKeyName="headword_lower",
)
def test_convert_sortLocale_en_1(self):
name = "092-en-fa-alphabet-sample"
self.convert_sqlite_both(
f"sort-locale/{name}.txt",
f"{name}-sorted-en.txt",
compareText=f"sort-locale/{name}-sorted-en.txt",
testId="sorted-en",
sort=True,
sortKeyName="headword_lower:en_US.UTF-8",
)
def test_convert_sortLocale_fa_1(self):
name = "092-en-fa-alphabet-sample"
self.convert_sqlite_both(
f"sort-locale/{name}.txt",
f"{name}-sorted-fa.txt",
compareText=f"sort-locale/{name}-sorted-fa.txt",
testId="sorted-fa",
sort=True,
sortKeyName="headword_lower:fa_IR.UTF-8",
)
def test_convert_sortLocale_fa_2(self):
name = "092-en-fa-alphabet-sample"
self.convert_sqlite_both(
f"sort-locale/{name}.txt",
f"{name}-sorted-latin-fa.txt",
compareText=f"sort-locale/{name}-sorted-latin-fa.txt",
testId="sorted-latin-fa",
sort=True,
sortKeyName="headword_lower:fa-u-kr-latn-arab",
)
if __name__ == "__main__":
unittest.main()
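# A minimal sketch of the Glossary.convert() call that the convert*()
# helpers above wrap; the file names are placeholders. Glossary.init()
# must run once per process, as done near the top of this module.
from pyglossary.glossary import Glossary

Glossary.init()
glos = Glossary()
glos.convert(
	inputFilename="100-en-fa.txt",
	outputFilename="100-en-fa-sorted.txt",
	sort=True,
	sortKeyName="headword_lower",
)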
| 30,310 | Python | .py | 997 | 26.685055 | 79 | 0.698291 | ilius/pyglossary | 2,176 | 238 | 22 | GPL-3.0 | 9/5/2024, 5:10:09 PM (Europe/Amsterdam) |

| 6,691 | g_freedict_test.py | ilius_pyglossary/tests/g_freedict_test.py |
import unittest
from glossary_v2_test import TestGlossaryBase
class TestGlossaryFreeDict(TestGlossaryBase):
def __init__(self, *args, **kwargs):
TestGlossaryBase.__init__(self, *args, **kwargs)
self.dataFileCRC32.update(
{
"100-en-de.tei": "542c210e",
"100-en-de-v4.txt": "d420a669",
},
)
def convert_tei_txt(self, fname, fname2, **convertArgs):
self.convert(
f"{fname}.tei",
f"{fname}-2.txt",
compareText=f"{fname2}.txt",
**convertArgs,
)
def test_convert_tei_txt_1(self):
self.convert_tei_txt(
"100-en-de",
"100-en-de-v4",
readOptions={"auto_comma": False},
)
self.convert_tei_txt(
"100-en-de",
"100-en-de-v4",
readOptions={"auto_comma": True},
)
if __name__ == "__main__":
unittest.main()
| 766 | Python | .py | 31 | 21.322581 | 57 | 0.656121 | ilius/pyglossary | 2,176 | 238 | 22 | GPL-3.0 | 9/5/2024, 5:10:09 PM (Europe/Amsterdam) |

| 6,692 | text_utils_test.py | ilius_pyglossary/tests/text_utils_test.py |
import os
import struct
import sys
import unittest
from os.path import abspath, dirname
rootDir = dirname(dirname(abspath(__file__)))
sys.path.insert(0, rootDir)
from pyglossary.text_utils import (
crc32hex,
escapeNTB,
fixUtf8,
joinByBar,
replacePostSpaceChar,
splitByBar,
splitByBarUnescapeNTB,
uint32FromBytes,
uint32ToBytes,
uintFromBytes,
unescapeBar,
unescapeNTB,
urlToPath,
)
class TestTextUtils(unittest.TestCase):
def test_fixUtf8(self):
f = fixUtf8
# Since entries already keep words and defi as str, fixUtf8 does not
# do much: it just removes zero bytes between valid characters
# (but not within characters).
# If the input file had encoding errors, the Reader class would
# most likely fail to read it and raise an exception.
# This feature was useful in Python 2.x, but not much anymore!
self.assertEqual(f("\x00س\x00لام"), "سلام")
def test_unescapeNTB(self):
self.assertEqual("a", unescapeNTB("a", bar=False))
self.assertEqual("a\t", unescapeNTB("a\\t", bar=False))
self.assertEqual("a\n", unescapeNTB("a\\n", bar=False))
self.assertEqual("\ta", unescapeNTB("\\ta", bar=False))
self.assertEqual("\na", unescapeNTB("\\na", bar=False))
self.assertEqual("a\tb\n", unescapeNTB("a\\tb\\n", bar=False))
self.assertEqual("a\\b", unescapeNTB("a\\\\b", bar=False))
self.assertEqual("a\\\tb", unescapeNTB("a\\\\\\tb", bar=False))
self.assertEqual("a|b\tc", unescapeNTB("a|b\\tc", bar=False))
self.assertEqual("a\\|b\tc", unescapeNTB("a\\|b\\tc", bar=False))
self.assertEqual("a\\|b\tc", unescapeNTB("a\\\\|b\\tc", bar=False))
self.assertEqual("|", unescapeNTB("\\|", bar=True))
self.assertEqual("a|b", unescapeNTB("a\\|b", bar=True))
self.assertEqual("a|b\tc", unescapeNTB("a\\|b\\tc", bar=True))
def test_escapeNTB(self):
self.assertEqual(escapeNTB("a", bar=False), "a")
self.assertEqual(escapeNTB("a\t", bar=False), "a\\t")
self.assertEqual(escapeNTB("a\n", bar=False), "a\\n")
self.assertEqual(escapeNTB("\ta", bar=False), "\\ta")
self.assertEqual(escapeNTB("\na", bar=False), "\\na")
self.assertEqual(escapeNTB("a\tb\n", bar=False), "a\\tb\\n")
self.assertEqual(escapeNTB("a\\b", bar=False), "a\\\\b")
self.assertEqual(escapeNTB("a\\\tb", bar=False), "a\\\\\\tb")
self.assertEqual(escapeNTB("a|b\tc", bar=False), "a|b\\tc")
self.assertEqual(escapeNTB("a\\|b\tc", bar=False), "a\\\\|b\\tc")
self.assertEqual(escapeNTB("|", bar=True), "\\|")
self.assertEqual(escapeNTB("a|b", bar=True), "a\\|b")
self.assertEqual(escapeNTB("a|b\tc", bar=True), "a\\|b\\tc")
def test_splitByBarUnescapeNTB(self):
f = splitByBarUnescapeNTB
self.assertEqual(f(""), [""])
self.assertEqual(f("|"), ["", ""])
self.assertEqual(f("a"), ["a"])
self.assertEqual(f("a|"), ["a", ""])
self.assertEqual(f("|a"), ["", "a"])
self.assertEqual(f("a|b"), ["a", "b"])
self.assertEqual(f("a\\|b|c"), ["a|b", "c"])
self.assertEqual(f("a\\\\1|b|c"), ["a\\1", "b", "c"])
# self.assertEqual(f("a\\\\|b|c"), ["a\\", "b", "c"]) # FIXME
self.assertEqual(f("a\\\\1|b\\n|c\\t"), ["a\\1", "b\n", "c\t"])
def test_unescapeBar(self):
f = unescapeBar
self.assertEqual("", f(""))
self.assertEqual("|", f("\\|"))
self.assertEqual("a|b", f("a\\|b"))
self.assertEqual("a|b\tc", f("a\\|b\tc"))
self.assertEqual("a|b\\t\\nc", f("a\\|b\\t\\nc"))
self.assertEqual("\\", f("\\\\"))
self.assertEqual("\\|", f("\\\\\\|"))
def test_splitByBar(self):
f = splitByBar
self.assertEqual(f(""), [""])
self.assertEqual(f("|"), ["", ""])
self.assertEqual(f("a"), ["a"])
self.assertEqual(f("a|"), ["a", ""])
self.assertEqual(f("|a"), ["", "a"])
self.assertEqual(f("a|b"), ["a", "b"])
self.assertEqual(f("a\\|b"), ["a|b"])
self.assertEqual(f("a\\|b|c"), ["a|b", "c"])
self.assertEqual(f("a\\\\1|b|c"), ["a\\1", "b", "c"])
# self.assertEqual(f("a\\\\|b|c"), ["a\\", "b", "c"]) # FIXME
def test_joinByBar(self):
f = joinByBar
self.assertEqual("", f([""]))
self.assertEqual("|", f(["", ""]))
self.assertEqual("a", f(["a"]))
self.assertEqual("a|", f(["a", ""]))
self.assertEqual("|a", f(["", "a"]))
self.assertEqual("a|b", f(["a", "b"]))
self.assertEqual("a\\|b", f(["a|b"]))
self.assertEqual("a\\|b|c", f(["a|b", "c"]))
self.assertEqual("a\\\\1|b|c", f(["a\\1", "b", "c"]))
def test_uint32ToBytes(self):
f = uint32ToBytes
outOfRangeError = "'I' format requires 0 <= number <= 4294967295"
if os.sep == "\\":
outOfRangeError = "argument out of range"
self.assertEqual(f(0), bytes([0, 0, 0, 0]))
self.assertEqual(f(0x3E8), bytes([0, 0, 0x03, 0xE8]))
self.assertEqual(f(0x186A0), bytes([0, 1, 0x86, 0xA0]))
self.assertEqual(f(0x3B9ACA00), bytes([0x3B, 0x9A, 0xCA, 0x00]))
self.assertEqual(f(0xFFFFFFFF), bytes([0xFF, 0xFF, 0xFF, 0xFF]))
with self.assertRaises(struct.error) as ctx:
f(0xFFFFFFFF + 1)
self.assertEqual(
str(ctx.exception),
outOfRangeError,
)
with self.assertRaises(struct.error) as ctx:
f(10000000000)
self.assertEqual(
str(ctx.exception),
outOfRangeError,
)
with self.assertRaises(struct.error) as ctx:
f(-1)
if sys.version_info >= (3, 12):
self.assertEqual(
str(ctx.exception),
"'I' format requires 0 <= number <= 4294967295",
)
else:
self.assertEqual(str(ctx.exception), "argument out of range")
def test_uint32FromBytes(self):
f = uint32FromBytes
self.assertEqual(0, f(bytes([0, 0, 0, 0])))
self.assertEqual(0x3E8, f(bytes([0, 0, 0x03, 0xE8])))
self.assertEqual(0x186A0, f(bytes([0, 1, 0x86, 0xA0])))
self.assertEqual(0x3B9ACA00, f(bytes([0x3B, 0x9A, 0xCA, 0x00])))
self.assertEqual(0xFFFFFFFF, f(bytes([0xFF, 0xFF, 0xFF, 0xFF])))
with self.assertRaises(struct.error) as ctx:
f(bytes([0x01, 0xFF, 0xFF, 0xFF, 0xFF]))
self.assertEqual(str(ctx.exception), "unpack requires a buffer of 4 bytes")
def test_uintFromBytes(self):
f = uintFromBytes
self.assertEqual(0, f(bytes([0, 0, 0, 0])))
self.assertEqual(0x3E8, f(bytes([0, 0, 0x03, 0xE8])))
self.assertEqual(0x186A0, f(bytes([0, 1, 0x86, 0xA0])))
self.assertEqual(0x3B9ACA00, f(bytes([0x3B, 0x9A, 0xCA, 0x00])))
self.assertEqual(0xFFFFFFFF, f(bytes([0xFF, 0xFF, 0xFF, 0xFF])))
self.assertEqual(
0xFFABCDEF5542,
f(bytes([0xFF, 0xAB, 0xCD, 0xEF, 0x55, 0x42])),
)
def test_crc32hex(self):
f = crc32hex
self.assertEqual(f(b""), "00000000")
self.assertEqual(f(b"\x00"), "d202ef8d")
self.assertEqual(f(b"\x00\x00"), "41d912ff")
self.assertEqual(
f(bytes.fromhex("73c3bbc38b7459360ac3a9c2b3c2a2")),
"bbfb1610",
)
def test_urlToPath(self):
f = urlToPath
self.assertEqual(
f("https://github.com/ilius/pyglossary"),
"https://github.com/ilius/pyglossary",
)
self.assertEqual(
f("file:///home/test/abc.txt"),
"/home/test/abc.txt",
)
self.assertEqual(
f("file:///home/test/%D8%AA%D8%B3%D8%AA.txt"),
"/home/test/تست.txt",
)
def test_replacePostSpaceChar(self):
f = replacePostSpaceChar
self.assertEqual(
f("First sentence .Second sentence.", "."),
"First sentence. Second sentence.",
)
self.assertEqual(
f("First ,second.", ","),
"First, second.",
)
if __name__ == "__main__":
unittest.main()
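# A minimal standalone sketch: escapeNTB/unescapeNTB and joinByBar/splitByBar
# are inverse pairs, as the assertions above establish case by case.
from pyglossary.text_utils import escapeNTB, joinByBar, splitByBar, unescapeNTB

s = "a|b\tc"
assert escapeNTB(s, bar=True) == "a\\|b\\tc"
assert unescapeNTB(escapeNTB(s, bar=True), bar=True) == s
assert splitByBar(joinByBar(["a|b", "c"])) == ["a|b", "c"]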
| 7,173 | Python | .py | 192 | 34.276042 | 77 | 0.645695 | ilius/pyglossary | 2,176 | 238 | 22 | GPL-3.0 | 9/5/2024, 5:10:09 PM (Europe/Amsterdam) |

| 6,693 | g_sql_test.py | ilius_pyglossary/tests/g_sql_test.py |
import unittest
from glossary_v2_test import TestGlossaryBase
class TestGlossarySQL(TestGlossaryBase):
def __init__(self, *args, **kwargs):
TestGlossaryBase.__init__(self, *args, **kwargs)
self.dataFileCRC32.update(
{
"100-en-fa.txt": "f5c53133",
"100-en-fa.txt-v2.sql": "70cd0514",
},
)
def convert_txt_sql(self, fname, fname2, **convertArgs):
self.convert(
f"{fname}.txt",
f"{fname}-2.sql",
compareText=f"{fname2}.sql",
**convertArgs,
)
def test_convert_txt_sql_1(self):
self.convert_txt_sql(
"100-en-fa",
"100-en-fa.txt-v2",
)
if __name__ == "__main__":
unittest.main()
| 631 | Python | .py | 25 | 21.92 | 57 | 0.662207 | ilius/pyglossary | 2,176 | 238 | 22 | GPL-3.0 | 9/5/2024, 5:10:09 PM (Europe/Amsterdam) |

| 6,694 | g_dict_org_test.py | ilius_pyglossary/tests/g_dict_org_test.py |
import unittest
from glossary_v2_test import TestGlossaryBase
class TestGlossaryDictOrg(TestGlossaryBase):
def __init__(self, *args, **kwargs):
TestGlossaryBase.__init__(self, *args, **kwargs)
self.dataFileCRC32.update(
{
"100-en-fa.txt.dict": "02abe5dc",
"100-en-fa.txt.index": "b10efcb4",
"100-en-fa.txt.index.txt": "6c9d527c",
},
)
def convert_txt_dict_org(self, fname, fname2, **convertArgs):
self.convert(
f"{fname}.txt",
f"{fname}-2.index",
compareText=f"{fname2}.index",
**convertArgs,
)
def convert_dict_org_txt(self, fname, fname2, **convertArgs):
self.downloadFile(f"{fname}.dict")
self.convert(
f"{fname}.index",
f"{fname}-2.txt",
compareText=f"{fname2}.txt",
**convertArgs,
)
def test_convert_txt_dict_org_1(self):
self.convert_txt_dict_org(
"100-en-fa",
"100-en-fa.txt",
writeOptions={"install": False},
)
def test_convert_dict_org_txt_1(self):
self.convert_dict_org_txt(
"100-en-fa.txt",
"100-en-fa.txt.index",
infoOverride={"input_file_size": None},
)
if __name__ == "__main__":
unittest.main()
| 1,110 | Python | .py | 41 | 23.634146 | 62 | 0.668555 | ilius/pyglossary | 2,176 | 238 | 22 | GPL-3.0 | 9/5/2024, 5:10:09 PM (Europe/Amsterdam) |

| 6,695 | g_appledict_test.py | ilius_pyglossary/tests/g_appledict_test.py |
import plistlib
import sys
import unittest
from os.path import abspath, dirname, join
rootDir = dirname(dirname(abspath(__file__)))
sys.path.insert(0, rootDir)
from glossary_v2_test import TestGlossaryBase
from pyglossary.glossary import Glossary
class TestGlossaryAppleDict(TestGlossaryBase):
def __init__(self, *args, **kwargs):
TestGlossaryBase.__init__(self, *args, **kwargs)
hashDict = {
"appledict-src/002-no-morphology-v3.txt": "d8086fe8",
"appledict-src/002-no-morphology-v3a/002-no-morphology-v3a.css": "6818c1e5",
"appledict-src/002-no-morphology-v3a/002-no-morphology-v3a.plist": "706d1d9c",
"appledict-src/002-no-morphology-v3a/002-no-morphology-v3a.xml": "707994d6",
"appledict-src/002-no-morphology-v3a/Makefile": "ecd42350",
}
self.dataFileCRC32.update(hashDict)
def comparePlist(self, fpath1, fpath2):
with open(fpath1, "rb") as _file:
data1 = plistlib.loads(_file.read())
with open(fpath2, "rb") as _file:
data2 = plistlib.loads(_file.read())
self.assertEqual(data1, data2)
def test_tabfile_without_morpho_to_appledict_source(self):
self.glos = Glossary()
inputName = "002-no-morphology-v3"
outputName = "002-no-morphology-v3a"
inputFilepath = self.downloadFile(f"appledict-src/{inputName}.txt")
outputDirPath = self.newTempFilePath(f"{outputName}")
expectedFiles = {
name: self.downloadFile(f"appledict-src/{outputName}/{name}")
for name in [
f"{outputName}.xml",
f"{outputName}.css",
"Makefile",
]
}
result = self.glos.convert(
inputFilename=inputFilepath,
outputFilename=outputDirPath,
inputFormat="Tabfile",
outputFormat="AppleDict",
)
self.assertIsNotNone(result)
self.assertEqual(result, outputDirPath)
for fname, fpath in expectedFiles.items():
self.compareTextFiles(
join(outputDirPath, fname),
fpath,
)
self.comparePlist(
join(outputDirPath, f"{outputName}.plist"),
self.downloadFile(f"appledict-src/{outputName}/{outputName}.plist"),
)
if __name__ == "__main__":
unittest.main()
| 2,041 | Python | .py | 58 | 31.862069 | 81 | 0.736789 | ilius/pyglossary | 2,176 | 238 | 22 | GPL-3.0 | 9/5/2024, 5:10:09 PM (Europe/Amsterdam) |

| 6,696 | html_utils_test.py | ilius_pyglossary/tests/html_utils_test.py |
# -*- coding: utf-8 -*-
import sys
import unittest
from os.path import abspath, dirname
rootDir = dirname(dirname(abspath(__file__)))
sys.path.insert(0, rootDir)
from pyglossary.html_utils import unescape_unicode
class UnescapeUnicodeTest(unittest.TestCase):
def case(self, text, expected):
actual = unescape_unicode(text)
self.assertEqual(actual, expected)
def test(self):
self.case("<", "<")
self.case(">", ">")
self.case("&", "&")
self.case(""", """)
self.case("'", "'")
self.case(" ", " ")
self.case(" ", " ")
self.case("<á>", "<á>")
self.case("/wəːkiŋtiːm/", "/wəːkiŋtiːm/")
# Babylon dictionaries contain a lot of non-standard entity
# references, for example: csdot, fllig, nsm, cancer, thlig,
# tsdot, upslur...
self.case("<&etilde;", "<ẽ")
self.case("<⅓", "<⅓")
self.case("<⅔", "<⅔")
self.case("<ĩ", "<ĩ")
self.case("<&ldash;", "<–")
self.case("<ů", "<ů")
self.case("<ũ", "<ũ")
self.case("<&wring;", "<ẘ")
self.case("<&xfrac13;", "<⅓")
self.case("<ŷ", "<ŷ")
self.case("<&ygrave;", "<ỳ")
self.case("<&yring;", "<ẙ")
self.case("<&ytilde;", "<ỹ")
def benchmark_main():
import timeit
from random import choice
from english_words import english_words_set
english_words_list = list(english_words_set)
textList = []
for _ in range(20):
text = ""
for _ in range(10):
text += choice(english_words_list) + " "
textList.append(text)
print("avg length:", sum(len(text) for text in textList) / len(textList))
def run_benchmark1():
for text in textList:
unescape_unicode(text)
print("benchmark 1:", timeit.timeit("run_benchmark1()", globals=locals()))
if __name__ == "__main__":
if "-b" in sys.argv:
benchmark_main()
else:
unittest.main()
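# A minimal standalone sketch of the behavior the cases above establish:
# unescape_unicode() resolves entity references that map to non-ASCII
# characters (including Babylon's non-standard names such as &etilde;)
# while leaving HTML-significant references like &lt; untouched.
from pyglossary.html_utils import unescape_unicode

assert unescape_unicode("&lt;&etilde;") == "&lt;ẽ"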
| 1,982 | Python | .py | 58 | 30.913793 | 75 | 0.63951 | ilius/pyglossary | 2,176 | 238 | 22 | GPL-3.0 | 9/5/2024, 5:10:09 PM (Europe/Amsterdam) |

| 6,697 | g_ebook_epub2_test.py | ilius_pyglossary/tests/g_ebook_epub2_test.py |
import re
import sys
import unittest
from os.path import abspath, dirname
rootDir = dirname(dirname(abspath(__file__)))
sys.path.insert(0, rootDir)
from glossary_v2_test import TestGlossaryBase
from pyglossary.glossary import Glossary
skip_module = False
class TestGlossaryEPUB2(TestGlossaryBase):
def __init__(self, *args, **kwargs):
TestGlossaryBase.__init__(self, *args, **kwargs)
self.dataFileCRC32.update(
{
"100-en-fa-res.slob": "0216d006",
"100-en-fa-res-slob-v2.epub": "304d174d",
"100-en-fa-prefix3-v2.epub": "1b7244ca",
"300-rand-en-fa-prefix3-v2.epub": "b5dd9ec6",
},
)
def setUp(self):
if skip_module:
self.skipTest("module is skipped")
TestGlossaryBase.setUp(self)
def remove_toc_uid(self, data):
return re.sub(
b'<meta name="dtb:uid" content="[0-9a-f]{32}" />',
b'<meta name="dtb:uid" content="" />',
data,
)
def remove_content_extra(self, data):
data = re.sub(
b'<dc:identifier id="uid" opf:scheme="uuid">[0-9a-f]{32}</dc:identifier>',
b'<dc:identifier id="uid" opf:scheme="uuid"></dc:identifier>',
data,
)
return re.sub(
b'<dc:date opf:event="creation">[0-9-]{10}</dc:date>',
b'<dc:date opf:event="creation"></dc:date>',
data,
)
	def convert_to_epub(
		self,
		inputFname,
		outputFname,
		testId,
		**convertArgs,
	):
		inputFilename = self.downloadFile(inputFname)
		outputFilename = self.newTempFilePath(
			f"{inputFname.replace('.', '_')}-{testId}.epub",
		)
		expectedFilename = self.downloadFile(f"{outputFname}.epub")
		glos = self.glos = Glossary()
		res = glos.convert(
			inputFilename=inputFilename,
			outputFilename=outputFilename,
			**convertArgs,
		)
		self.assertEqual(outputFilename, res)
		self.compareZipFiles(
			outputFilename,
			expectedFilename,
			{
				"OEBPS/toc.ncx": self.remove_toc_uid,
				"OEBPS/content.opf": self.remove_content_extra,
			},
		)

	def test_convert_to_epub_1(self):
		self.convert_to_epub(
			"100-en-fa-res.slob",
			"100-en-fa-res-slob-v2",
			"1",
		)

	def test_convert_to_epub_2(self):
		for sort in (True, False):
			self.convert_to_epub(
				"100-en-fa-res.slob",
				"100-en-fa-res-slob-v2",
				"2",
				sort=sort,
			)

	def test_convert_to_epub_3(self):
		for sqlite in (True, False):
			self.convert_to_epub(
				"100-en-fa-res.slob",
				"100-en-fa-res-slob-v2",
				"3",
				sqlite=sqlite,
			)

	def test_convert_to_epub_4(self):
		for direct in (True, False):
			self.convert_to_epub(
				"100-en-fa-res.slob",
				"100-en-fa-res-slob-v2",
				"4",
				direct=direct,
			)

	def test_convert_to_epub_5(self):
		for sqlite in (True, False):
			self.convert_to_epub(
				"100-en-fa.txt",
				"100-en-fa-prefix3-v2",
				"5",
				sqlite=sqlite,
				writeOptions={"group_by_prefix_length": 3},
			)

	def test_convert_to_epub_6(self):
		self.convert_to_epub(
			"300-rand-en-fa.txt",
			"300-rand-en-fa-prefix3-v2",
			"6",
			sqlite=True,
			writeOptions={"group_by_prefix_length": 3},
		)

	def test_convert_to_epub_7(self):
		self.convert_to_epub(
			"300-rand-en-fa.txt",
			"300-rand-en-fa-prefix3-v2",
			"7",
			sqlite=False,
			writeOptions={"group_by_prefix_length": 3},
		)


if __name__ == "__main__":
	unittest.main()
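
compareZipFiles comes from TestGlossaryBase and is not shown in this file. A minimal sketch of what a member-by-member ZIP comparison with per-member normalizers might look like; the function name and exact behavior here are assumptions, not pyglossary's actual implementation:

import zipfile


def compare_zip_files_sketch(path1, path2, normalizers=None):
	# Compare two ZIP archives member by member. `normalizers` maps a
	# member name to a bytes -> bytes function applied to both sides
	# before comparison, e.g. to blank out per-build UUIDs and dates.
	normalizers = normalizers or {}
	with zipfile.ZipFile(path1) as z1, zipfile.ZipFile(path2) as z2:
		names1, names2 = sorted(z1.namelist()), sorted(z2.namelist())
		assert names1 == names2, f"member lists differ: {names1} != {names2}"
		for name in names1:
			data1, data2 = z1.read(name), z2.read(name)
			if func := normalizers.get(name):
				data1, data2 = func(data1), func(data2)
			assert data1 == data2, f"member {name!r} differs"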
size: 3,205 | language: Python | extension: .py | total_lines: 125 | avg_line_length: 22.056 | max_line_length: 77 | alphanum_fraction: 0.664814 | repo_name: ilius/pyglossary | repo_stars: 2,176 | repo_forks: 238 | repo_open_issues: 22 | repo_license: GPL-3.0 | repo_extraction_date: 9/5/2024, 5:10:09 PM (Europe/Amsterdam)

id: 6,698 | file_name: g_xdxf_lax_test.py | file_path: ilius_pyglossary/tests/g_xdxf_lax_test.py

import unittest

from glossary_v2_test import TestGlossaryBase


class TestGlossaryXDXFLax(TestGlossaryBase):
	def __init__(self, *args, **kwargs):
		TestGlossaryBase.__init__(self, *args, **kwargs)
		self.dataFileCRC32.update(
			{
				"100-cyber_lexicon_en-es.xdxf": "8d9ba394",
				"100-cyber_lexicon_en-es-v3.txt": "4aa05086",
			},
		)

	def convert_xdxf_txt(self, fname, fname2, **convertArgs):
		self.convert(
			f"{fname}.xdxf",
			f"{fname}-tmp.txt",
			compareText=f"{fname2}.txt",
			inputFormat="XdxfLax",
			**convertArgs,
		)

	def test_convert_xdxf_txt_1(self):
		self.convert_xdxf_txt(
			"100-cyber_lexicon_en-es",
			"100-cyber_lexicon_en-es-v3",
		)


if __name__ == "__main__":
	unittest.main()
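
The 8-hex-digit strings in dataFileCRC32 above are CRC32 checksums of the fixture files. Assuming a plain zlib.crc32 over the raw bytes (which matches the format, though the exact helper in TestGlossaryBase is not shown), a sketch of computing one for a new fixture; crc32hex is a hypothetical name:

import zlib


def crc32hex(path: str) -> str:
	# Stream the file and fold it into a running CRC32, returned as
	# 8 lowercase hex digits -- the format used in dataFileCRC32.
	crc = 0
	with open(path, "rb") as f:
		while chunk := f.read(1 << 16):
			crc = zlib.crc32(chunk, crc)
	return f"{crc & 0xFFFFFFFF:08x}"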
size: 716 | language: Python | extension: .py | total_lines: 26 | avg_line_length: 24.192308 | max_line_length: 58 | alphanum_fraction: 0.684751 | repo_name: ilius/pyglossary | repo_stars: 2,176 | repo_forks: 238 | repo_open_issues: 22 | repo_license: GPL-3.0 | repo_extraction_date: 9/5/2024, 5:10:09 PM (Europe/Amsterdam)

id: 6,699 | file_name: entry_test.py | file_path: ilius_pyglossary/tests/entry_test.py

import sys
import unittest
from os.path import abspath, dirname

rootDir = dirname(dirname(abspath(__file__)))
sys.path.insert(0, rootDir)

from pyglossary.entry import Entry


class TestEntryBasic(unittest.TestCase):
	def test_exc_1(self):
		try:
			Entry(b"word", "defi")
		except TypeError as e:
			self.assertEqual(str(e), "invalid word type <class 'bytes'>")
		else:
			self.fail("must raise TypeError")

	def test_exc_2(self):
		Entry(("word",), "defi")

	def test_exc_3(self):
		try:
			Entry("word", b"defi")
		except TypeError as e:
			self.assertEqual(str(e), "invalid defi type <class 'bytes'>")
		else:
			self.fail("must raise TypeError")

	def test_exc_4(self):
		try:
			Entry("word", ("defi",))
		except TypeError as e:
			self.assertEqual(str(e), "invalid defi type <class 'tuple'>")
		else:
			self.fail("must raise TypeError")

	def test_exc_5(self):
		try:
			Entry("word", "defi", "b")
		except ValueError as e:
			self.assertEqual(str(e), "invalid defiFormat 'b'")
		else:
			self.fail("must raise ValueError")

	def test_1(self):
		entry = Entry("test1", "something")
		self.assertEqual(entry.l_word, ["test1"])
		self.assertEqual(entry.defi, "something")

	def test_2(self):
		entry = Entry(["test1"], "something")
		self.assertEqual(entry.l_word, ["test1"])
		self.assertEqual(entry.defi, "something")

	def test_3(self):
		entry = Entry("test1", ["something"])
		self.assertEqual(entry.l_word, ["test1"])
		self.assertEqual(entry.defi, "something")

	def test_repr_1(self):
		entry = Entry("test1", "something")
		self.assertEqual(
			repr(entry),
			"Entry('test1', 'something', defiFormat='m')",
		)

	def test_repr_2(self):
		entry = Entry("test1", "something", defiFormat="h")
		self.assertEqual(
			repr(entry),
			"Entry('test1', 'something', defiFormat='h')",
		)

	def test_defiFormat_1(self):
		entry = Entry("test1", "something")
		self.assertEqual(entry.defiFormat, "m")

	def test_defiFormat_2(self):
		entry = Entry("test1", "something", defiFormat="h")
		self.assertEqual(entry.defiFormat, "h")

	def test_defiFormat_3(self):
		entry = Entry("test1", "something", defiFormat="h")
		entry.defiFormat = "x"
		self.assertEqual(entry.defiFormat, "x")

	def test_addAlt_1(self):
		entry = Entry("test1", "something")
		self.assertEqual(entry.l_word, ["test1"])
		entry.addAlt("test 1")
		self.assertEqual(entry.l_word, ["test1", "test 1"])


class TestEntryDetectDefiFormat(unittest.TestCase):
	def test_1(self):
		entry = Entry("test1", "something")
		entry.detectDefiFormat()
		self.assertEqual(entry.defiFormat, "m")

	def test_2(self):
		entry = Entry("test1", "something", defiFormat="h")
		entry.detectDefiFormat()
		self.assertEqual(entry.defiFormat, "h")

	def test_3(self):
		entry = Entry("test1", "something", defiFormat="x")
		entry.detectDefiFormat()
		self.assertEqual(entry.defiFormat, "x")

	def test_4(self):
		entry = Entry("test1", "<b>something</b>")
		entry.detectDefiFormat()
		self.assertEqual(entry.defiFormat, "h")

	def test_5(self):
		entry = Entry("test1", "<k>title</k>something")
		entry.detectDefiFormat()
		self.assertEqual(entry.defiFormat, "x")
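

# Editor's note: the actual detection logic lives in pyglossary.entry;
# the simplified heuristic below merely illustrates the behavior pinned
# down by the tests above (plain text -> "m", HTML tags -> "h", XDXF
# tags such as <k> -> "x") and is not the real implementation.
def detect_defi_format_sketch(defi: str) -> str:
	if "<k>" in defi:
		return "x"  # XDXF markup
	if "<" in defi and ">" in defi:
		return "h"  # generic HTML markup
	return "m"  # plain text
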
class TestEntryStripFullHtml(unittest.TestCase):
	def __init__(self, *args, **kwargs):
		unittest.TestCase.__init__(self, *args, **kwargs)

	def setUp(self):
		pass

	def tearDown(self):
		pass

	def case(
		self,
		word: str,
		origDefi: str,
		fixedDefi: str,
		error: "str | None" = None,
	):
		entry = Entry(word, origDefi)
		actualError = entry.stripFullHtml()
		self.assertEqual(entry.defi, fixedDefi)
		self.assertEqual(actualError, error)

	def test_1(self):
		self.case(
			word="test1",
			origDefi="plain text",
			fixedDefi="plain text",
			error=None,
		)

	def test_2(self):
		self.case(
			word="test2",
			origDefi="<p>simple <i>html</i> text</p>",
			fixedDefi="<p>simple <i>html</i> text</p>",
			error=None,
		)

	def test_3(self):
		self.case(
			word="test3",
			origDefi=(
				"<!DOCTYPE html><html><head></head><body>simple "
				"<i>html</i></body></html>"
			),
			fixedDefi="simple <i>html</i>",
			error=None,
		)

	def test_4(self):
		self.case(
			word="test4",
			origDefi="<html><head></head><body>simple <i>html</i></body></html>",
			fixedDefi="simple <i>html</i>",
			error=None,
		)

	def test_5(self):
		self.case(
			word="test5",
			origDefi="<!DOCTYPE html><html><head></head>simple <i>html</i></html>",
			fixedDefi="<!DOCTYPE html><html><head></head>simple <i>html</i></html>",
			error="<body not found",
		)

	def test_6(self):
		self.case(
			word="test6",
			origDefi="<html><head></head>no <body",
			fixedDefi="<html><head></head>no <body",
			error="'>' after <body not found",
		)

	def test_7(self):
		self.case(
			word="test7",
			origDefi="<html><head></head><body>",
			fixedDefi="<html><head></head><body>",
			error="</body close not found",
		)


if __name__ == "__main__":
	unittest.main()
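
A side note: the try/except/else pattern in the exception tests above can also be written with unittest's assertRaises context manager, which fails the test automatically when no exception is raised; a minimal equivalent sketch:

import unittest

from pyglossary.entry import Entry


class TestEntryExcAlt(unittest.TestCase):
	def test_exc_1_alt(self):
		# assertRaises replaces the explicit else: self.fail(...) branch.
		with self.assertRaises(TypeError) as ctx:
			Entry(b"word", "defi")
		self.assertEqual(str(ctx.exception), "invalid word type <class 'bytes'>")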
size: 4,862 | language: Python | extension: .py | total_lines: 169 | avg_line_length: 25.568047 | max_line_length: 75 | alphanum_fraction: 0.675902 | repo_name: ilius/pyglossary | repo_stars: 2,176 | repo_forks: 238 | repo_open_issues: 22 | repo_license: GPL-3.0 | repo_extraction_date: 9/5/2024, 5:10:09 PM (Europe/Amsterdam)