| source | python |
|---|---|
locusts.py
|
import io
import multiprocessing
import os
import sys
from httprunner.logger import color_print
from httprunner.testcase import load_test_file
from locust.main import main
def parse_locustfile(file_path):
""" parse testcase file and return locustfile path.
if file_path is a Python file, assume it is a locustfile
if file_path is a YAML/JSON file, convert it to locustfile
"""
if not os.path.isfile(file_path):
color_print("file path invalid, exit.", "RED")
sys.exit(1)
file_suffix = os.path.splitext(file_path)[1]
if file_suffix == ".py":
locustfile_path = file_path
elif file_suffix in ['.yaml', '.yml', '.json']:
locustfile_path = gen_locustfile(file_path)
else:
# '' or other suffix
color_print("file type should be YAML/JSON/Python, exit.", "RED")
sys.exit(1)
return locustfile_path
def gen_locustfile(testcase_file_path):
""" generate locustfile from template.
"""
locustfile_path = 'locustfile.py'
template_path = os.path.join(
os.path.dirname(os.path.realpath(__file__)),
"templates",
"locustfile_template"
)
testset = load_test_file(testcase_file_path)
host = testset.get("config", {}).get("request", {}).get("base_url", "")
with io.open(template_path, encoding='utf-8') as template:
with io.open(locustfile_path, 'w', encoding='utf-8') as locustfile:
template_content = template.read()
template_content = template_content.replace("$HOST", host)
template_content = template_content.replace("$TESTCASE_FILE", testcase_file_path)
locustfile.write(template_content)
return locustfile_path
def start_master(sys_argv):
sys_argv.append("--master")
sys.argv = sys_argv
main()
def start_slave(sys_argv):
if "--slave" not in sys_argv:
sys_argv.extend(["--slave"])
sys.argv = sys_argv
main()
def run_locusts_on_cpu_cores(sys_argv, cpu_cores_num_value):
processes = []
manager = multiprocessing.Manager()
for _ in range(cpu_cores_num_value):
p_slave = multiprocessing.Process(target=start_slave, args=(sys_argv,))
p_slave.daemon = True
p_slave.start()
processes.append(p_slave)
try:
if "--slave" in sys_argv:
for process in processes:
process.join()
else:
start_master(sys_argv)
except KeyboardInterrupt:
manager.shutdown()
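# Usage sketch (an assumption -- HttpRunner's CLI normally drives this module;
# the testcase path below is illustrative):
if __name__ == "__main__":
    locustfile = parse_locustfile("testcases/demo.yml")
    run_locusts_on_cpu_cores(["locust", "-f", locustfile], multiprocessing.cpu_count())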
|
test_user_secrets.py
|
import json
import os
import subprocess
import threading
import unittest
from http.server import BaseHTTPRequestHandler, HTTPServer
from test.support import EnvironmentVarGuard
from urllib.parse import urlparse
from datetime import datetime, timedelta
import mock
from google.auth.exceptions import DefaultCredentialsError
from google.cloud import bigquery
from kaggle_secrets import (GcpTarget, UserSecretsClient,
NotFoundError, ValidationError)
from kaggle_web_client import (_KAGGLE_URL_BASE_ENV_VAR_NAME,
_KAGGLE_USER_SECRETS_TOKEN_ENV_VAR_NAME,
CredentialError, BackendError)
_TEST_JWT = 'test-secrets-key'
class UserSecretsHTTPHandler(BaseHTTPRequestHandler):
def set_request(self):
raise NotImplementedError()
def get_response(self):
raise NotImplementedError()
def do_HEAD(s):
s.send_response(200)
def do_POST(s):
s.set_request()
s.send_response(200)
s.send_header("Content-type", "application/json")
s.end_headers()
s.wfile.write(json.dumps(s.get_response()).encode("utf-8"))
class TestUserSecrets(unittest.TestCase):
SERVER_ADDRESS = urlparse(os.getenv(_KAGGLE_URL_BASE_ENV_VAR_NAME, default="http://127.0.0.1:8001"))
def _test_client(self, client_func, expected_path, expected_body, secret=None, success=True):
_request = {}
class AccessTokenHandler(UserSecretsHTTPHandler):
def set_request(self):
_request['path'] = self.path
content_len = int(self.headers.get('Content-Length'))
_request['body'] = json.loads(self.rfile.read(content_len))
_request['headers'] = self.headers
def get_response(self):
if success:
return {'result': {'secret': secret, 'secretType': 'refreshToken', 'secretProvider': 'google', 'expiresInSeconds': 3600}, 'wasSuccessful': "true"}
else:
return {'wasSuccessful': "false", 'errors': ['No user secrets exist for kernel']}
env = EnvironmentVarGuard()
env.set(_KAGGLE_USER_SECRETS_TOKEN_ENV_VAR_NAME, _TEST_JWT)
with env:
with HTTPServer((self.SERVER_ADDRESS.hostname, self.SERVER_ADDRESS.port), AccessTokenHandler) as httpd:
threading.Thread(target=httpd.serve_forever).start()
try:
client_func()
finally:
httpd.shutdown()
path, headers, body = _request['path'], _request['headers'], _request['body']
self.assertEqual(
path,
expected_path,
msg="Fake server did not receive the right request from the UserSecrets client.")
self.assertEqual(
body,
expected_body,
msg="Fake server did not receive the right body from the UserSecrets client.")
def test_no_token_fails(self):
env = EnvironmentVarGuard()
env.unset(_KAGGLE_USER_SECRETS_TOKEN_ENV_VAR_NAME)
with env:
with self.assertRaises(CredentialError):
client = UserSecretsClient()
def test_get_secret_succeeds(self):
secret = '12345'
def call_get_secret():
client = UserSecretsClient()
secret_response = client.get_secret("secret_label")
self.assertEqual(secret_response, secret)
self._test_client(call_get_secret,
'/requests/GetUserSecretByLabelRequest', {'Label': "secret_label"},
secret=secret)
def test_get_secret_handles_unsuccessful(self):
def call_get_secret():
client = UserSecretsClient()
with self.assertRaises(BackendError):
secret_response = client.get_secret("secret_label")
self._test_client(call_get_secret,
'/requests/GetUserSecretByLabelRequest', {'Label': "secret_label"},
success=False)
def test_get_secret_validates_label(self):
env = EnvironmentVarGuard()
env.set(_KAGGLE_USER_SECRETS_TOKEN_ENV_VAR_NAME, _TEST_JWT)
with env:
client = UserSecretsClient()
with self.assertRaises(ValidationError):
secret_response = client.get_secret("")
def test_get_gcloud_secret_succeeds(self):
secret = '{"client_id":"gcloud","type":"authorized_user"}'
def call_get_secret():
client = UserSecretsClient()
secret_response = client.get_gcloud_credential()
self.assertEqual(secret_response, secret)
self._test_client(call_get_secret,
'/requests/GetUserSecretByLabelRequest', {'Label': "__gcloud_sdk_auth__"},
secret=secret)
def test_get_gcloud_secret_handles_unsuccessful(self):
def call_get_secret():
client = UserSecretsClient()
with self.assertRaises(NotFoundError):
secret_response = client.get_gcloud_credential()
self._test_client(call_get_secret,
'/requests/GetUserSecretByLabelRequest', {'Label': "__gcloud_sdk_auth__"},
success=False)
def test_set_gcloud_credentials_succeeds(self):
secret = '{"client_id":"gcloud","type":"authorized_user","refresh_token":"refresh_token"}'
project = 'foo'
account = 'bar'
def get_gcloud_config_value(field):
result = subprocess.run(['gcloud', 'config', 'get-value', field], capture_output=True)
result.check_returncode()
return result.stdout.strip().decode('ascii')
def test_fn():
client = UserSecretsClient()
client.set_gcloud_credentials(project=project, account=account)
self.assertEqual(project, os.environ['GOOGLE_CLOUD_PROJECT'])
self.assertEqual(project, get_gcloud_config_value('project'))
self.assertEqual(account, os.environ['GOOGLE_ACCOUNT'])
self.assertEqual(account, get_gcloud_config_value('account'))
expected_creds_file = '/tmp/gcloud_credential.json'
self.assertEqual(expected_creds_file, os.environ['GOOGLE_APPLICATION_CREDENTIALS'])
self.assertEqual(expected_creds_file, get_gcloud_config_value('auth/credential_file_override'))
with open(expected_creds_file, 'r') as f:
self.assertEqual(secret, '\n'.join(f.readlines()))
self._test_client(test_fn, '/requests/GetUserSecretByLabelRequest', {'Label': "__gcloud_sdk_auth__"}, secret=secret)
@mock.patch('kaggle_secrets.datetime')
def test_get_access_token_succeeds(self, mock_dt):
secret = '12345'
now = datetime(1993, 4, 24)
mock_dt.utcnow = mock.Mock(return_value=now)
def call_get_bigquery_access_token():
client = UserSecretsClient()
secret_response = client.get_bigquery_access_token()
self.assertEqual(secret_response, (secret, now + timedelta(seconds=3600)))
def call_get_gcs_access_token():
client = UserSecretsClient()
secret_response = client._get_gcs_access_token()
self.assertEqual(secret_response, (secret, now + timedelta(seconds=3600)))
def call_get_cloudai_access_token():
client = UserSecretsClient()
secret_response = client._get_cloudai_access_token()
self.assertEqual(secret_response, (secret, now + timedelta(seconds=3600)))
def call_get_translation_access_token():
client = UserSecretsClient()
secret_response = client._get_translation_access_token()
self.assertEqual(secret_response, (secret, now + timedelta(seconds=3600)))
def call_get_natural_lang_access_token():
client = UserSecretsClient()
secret_response = client._get_natural_language_access_token()
self.assertEqual(secret_response, (secret, now + timedelta(seconds=3600)))
def call_get_video_intell_access_token():
client = UserSecretsClient()
secret_response = client._get_video_intelligence_access_token()
self.assertEqual(secret_response, (secret, now + timedelta(seconds=3600)))
def call_get_vision_access_token():
client = UserSecretsClient()
secret_response = client._get_vision_access_token()
self.assertEqual(secret_response, (secret, now + timedelta(seconds=3600)))
self._test_client(call_get_bigquery_access_token,
'/requests/GetUserSecretRequest', {'Target': GcpTarget.BIGQUERY.target},
secret=secret)
self._test_client(call_get_gcs_access_token,
'/requests/GetUserSecretRequest', {'Target': GcpTarget.GCS.target},
secret=secret)
self._test_client(call_get_cloudai_access_token,
'/requests/GetUserSecretRequest', {'Target': GcpTarget.CLOUDAI.target},
secret=secret)
def test_get_access_token_handles_unsuccessful(self):
def call_get_access_token():
client = UserSecretsClient()
with self.assertRaises(BackendError):
client.get_bigquery_access_token()
self._test_client(call_get_access_token,
'/requests/GetUserSecretRequest', {'Target': GcpTarget.BIGQUERY.target}, success=False)
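# Direct-run entry point (an assumption -- the suite may instead be driven by
# an external runner such as pytest):
if __name__ == '__main__':
    unittest.main()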
|
test_basic.py
|
import pytest
import time
import threading
WINDOWS_DATA = {
"signal": None,
"stacktraces": [
{
"registers": {"eip": "0x0000000001509530"},
"frames": [{"instruction_addr": "0x749e8630"}],
}
],
"modules": [
{
"type": "pe",
"debug_id": "ff9f9f78-41db-88f0-cded-a9e1e9bff3b5-1",
"code_file": "C:\\Windows\\System32\\kernel32.dll",
"debug_file": "C:\\Windows\\System32\\wkernel32.pdb",
"image_addr": "0x749d0000",
"image_size": 851_968,
}
],
}
SUCCESS_WINDOWS = {
"stacktraces": [
{
"registers": {"eip": "0x1509530"},
"frames": [
{
"status": "symbolicated",
"original_index": 0,
"instruction_addr": "0x749e8630",
"lineno": 0,
"package": "C:\\Windows\\System32\\kernel32.dll",
"function": "@BaseThreadInitThunk@12",
"symbol": "@BaseThreadInitThunk@12",
"sym_addr": "0x749e8630",
}
],
}
],
"modules": [
{
"type": "pe",
"debug_id": "ff9f9f78-41db-88f0-cded-a9e1e9bff3b5-1",
"code_file": "C:\\Windows\\System32\\kernel32.dll",
"debug_file": "C:\\Windows\\System32\\wkernel32.pdb",
"debug_status": "found",
"features": {
"has_debug_info": True,
"has_sources": False,
"has_symbols": True,
"has_unwind_info": True,
},
"arch": "x86",
"image_addr": "0x749d0000",
"image_size": 851_968,
}
],
"status": "completed",
}
def _make_unsuccessful_result(status):
return {
"stacktraces": [
{
"registers": {"eip": "0x1509530"},
"frames": [
{
"status": status,
"original_index": 0,
"package": "C:\\Windows\\System32\\kernel32.dll",
"instruction_addr": "0x749e8630",
}
],
}
],
"modules": [
{
"type": "pe",
"debug_id": "ff9f9f78-41db-88f0-cded-a9e1e9bff3b5-1",
"code_file": "C:\\Windows\\System32\\kernel32.dll",
"debug_file": "C:\\Windows\\System32\\wkernel32.pdb",
"debug_status": status,
"features": {
"has_debug_info": False,
"has_sources": False,
"has_symbols": False,
"has_unwind_info": False,
},
"arch": "unknown",
"image_addr": "0x749d0000",
"image_size": 851_968,
}
],
"status": "completed",
}
MISSING_FILE = _make_unsuccessful_result("missing")
MALFORMED_FILE = _make_unsuccessful_result("malformed")
@pytest.fixture(params=[True, False])
def cache_dir_param(tmpdir, request):
if request.param:
return tmpdir.mkdir("caches")
@pytest.mark.parametrize(
"is_public", [True, False], ids=["global_cache", "local_cache"]
)
def test_basic_windows(symbolicator, cache_dir_param, is_public, hitcounter):
scope = "myscope"
input = dict(
**WINDOWS_DATA,
sources=[
{
"type": "http",
"id": "microsoft",
"layout": {"type": "symstore"},
"filters": {"filetypes": ["pdb", "pe"]},
"url": f"{hitcounter.url}/msdl/",
"is_public": is_public,
}
],
)
# i = 0: Cache miss
# i = 1: Cache hit
# i = 2: Assert that touching the file during cache hit did not destroy the cache
for i in range(3):
service = symbolicator(cache_dir=cache_dir_param)
service.wait_healthcheck()
response = service.post(f"/symbolicate?scope={scope}", json=input)
response.raise_for_status()
assert response.json() == SUCCESS_WINDOWS
if cache_dir_param:
stored_in_scope = "global" if is_public else scope
assert {
o.basename: o.size()
for o in cache_dir_param.join("objects").join(stored_in_scope).listdir()
} == {
"microsoft_wkernel32_pdb_FF9F9F7841DB88F0CDEDA9E1E9BFF3B51_wkernel32_pd_": 0,
"microsoft_wkernel32_pdb_FF9F9F7841DB88F0CDEDA9E1E9BFF3B51_wkernel32_pdb": 846_848,
}
(symcache,) = (
cache_dir_param.join("symcaches").join(stored_in_scope).listdir()
)
assert (
symcache.basename
== "microsoft_wkernel32_pdb_FF9F9F7841DB88F0CDEDA9E1E9BFF3B51_wkernel32_pdb"
)
assert symcache.size() > 0
if cache_dir_param:
hit_count = miss_count = 1
else:
miss_count = i + 1
# XXX(markus): Symbolicator opens a cachefile twice if it maps
# successfully. With caches this doesn't matter, but without caches
# enabled Symbolicator effectively downloads every item twice
hit_count = 2 * (i + 1)
assert hitcounter.hits == {
"/msdl/wkernel32.pdb/FF9F9F7841DB88F0CDEDA9E1E9BFF3B51/wkernel32.pd_": miss_count,
"/msdl/wkernel32.pdb/FF9F9F7841DB88F0CDEDA9E1E9BFF3B51/wkernel32.pdb": hit_count,
}
def test_no_sources(symbolicator, cache_dir_param):
input = dict(**WINDOWS_DATA, sources=[])
service = symbolicator(cache_dir=cache_dir_param)
service.wait_healthcheck()
response = service.post("/symbolicate", json=input)
response.raise_for_status()
assert response.json() == MISSING_FILE
if cache_dir_param:
assert not cache_dir_param.join("objects/global").exists()
assert not cache_dir_param.join("symcaches/global").exists()
@pytest.mark.parametrize("is_public", [True, False])
def test_lookup_deduplication(symbolicator, hitcounter, is_public):
input = dict(
**WINDOWS_DATA,
sources=[
{
"type": "http",
"id": "microsoft",
"filters": {"filetypes": ["pdb", "pe"]},
"layout": {"type": "symstore"},
"url": f"{hitcounter.url}/msdl/",
"is_public": is_public,
}
],
)
service = symbolicator(cache_dir=None)
service.wait_healthcheck()
responses = []
def f():
response = service.post("/symbolicate", json=input)
response.raise_for_status()
responses.append(response.json())
ts = []
for _ in range(20):
t = threading.Thread(target=f)
t.start()
ts.append(t)
for t in ts:
t.join()
assert responses == [SUCCESS_WINDOWS] * 20
assert set(hitcounter.hits) == {
"/msdl/wkernel32.pdb/FF9F9F7841DB88F0CDEDA9E1E9BFF3B51/wkernel32.pd_",
"/msdl/wkernel32.pdb/FF9F9F7841DB88F0CDEDA9E1E9BFF3B51/wkernel32.pdb",
}
for key, count in hitcounter.hits.items():
assert count < 20, (key, count)
def test_sources_filetypes(symbolicator, hitcounter):
input = dict(
sources=[
{
"type": "http",
"id": "microsoft",
"filters": {"filetypes": ["elf_code"]},
"layout": {"type": "symstore"},
"url": f"{hitcounter.url}/msdl/",
}
],
**WINDOWS_DATA,
)
service = symbolicator()
service.wait_healthcheck()
response = service.post("/symbolicate", json=input)
response.raise_for_status()
assert response.json() == MISSING_FILE
assert not hitcounter.hits
def test_timeouts(symbolicator, hitcounter):
hitcounter.before_request = lambda: time.sleep(3)
request_id = None
responses = []
service = symbolicator()
service.wait_healthcheck()
for _ in range(10):
if request_id:
response = service.get("/requests/{}?timeout=1".format(request_id))
else:
input = dict(
sources=[
{
"type": "http",
"id": "microsoft",
"filters": {"filetypes": ["pdb", "pe"]},
"layout": {"type": "symstore"},
"url": f"{hitcounter.url}/msdl/",
}
],
**WINDOWS_DATA,
)
response = service.post("/symbolicate?timeout=1", json=input)
response.raise_for_status()
response = response.json()
responses.append(response)
if response["status"] == "completed":
break
elif response["status"] == "pending":
request_id = response["request_id"]
else:
assert False
for response in responses[:-1]:
assert response["status"] == "pending"
assert response["request_id"] == request_id
assert responses[-1] == SUCCESS_WINDOWS
assert len(responses) > 1
assert hitcounter.hits == {
"/msdl/wkernel32.pdb/FF9F9F7841DB88F0CDEDA9E1E9BFF3B51/wkernel32.pd_": 1,
# XXX(markus): Symbolicator opens a cachefile twice if it maps
# successfully. With caches this doesn't matter, but without caches
# enabled Symbolicator effectively downloads every item twice
"/msdl/wkernel32.pdb/FF9F9F7841DB88F0CDEDA9E1E9BFF3B51/wkernel32.pdb": 2,
}
@pytest.mark.parametrize("bucket_type", ["http", "sentry"])
@pytest.mark.parametrize("statuscode", [400, 500, 404])
def test_unreachable_bucket(symbolicator, hitcounter, statuscode, bucket_type):
input = dict(
sources=[
{
"type": bucket_type,
"id": "broken",
"layout": {"type": "symstore"}, # only relevant for http type
"url": f"{hitcounter.url}/respond_statuscode/{statuscode}/",
"token": "123abc", # only relevant for sentry type
}
],
**WINDOWS_DATA,
)
service = symbolicator()
service.wait_healthcheck()
response = service.post("/symbolicate", json=input)
response.raise_for_status()
response = response.json()
# TODO(markus): Better error reporting
assert response == MISSING_FILE
def test_malformed_objects(symbolicator, hitcounter):
input = dict(
sources=[
{
"type": "http",
"id": "broken",
"layout": {"type": "symstore"},
"url": f"{hitcounter.url}/garbage_data/",
}
],
**WINDOWS_DATA,
)
service = symbolicator()
service.wait_healthcheck()
response = service.post("/symbolicate", json=input)
response.raise_for_status()
response = response.json()
assert response == MALFORMED_FILE
@pytest.mark.parametrize(
"patterns,output",
[
[["?:/windows/**"], SUCCESS_WINDOWS],
[["?:/windows/*"], SUCCESS_WINDOWS],
[[], SUCCESS_WINDOWS],
[["?:/windows/"], MISSING_FILE],
[["d:/windows/**"], MISSING_FILE],
],
)
def test_path_patterns(symbolicator, hitcounter, patterns, output):
input = dict(
sources=[
{
"type": "http",
"id": "microsoft",
"layout": {"type": "symstore"},
"filters": {"path_patterns": patterns},
"url": f"{hitcounter.url}/msdl/",
}
],
**WINDOWS_DATA,
)
service = symbolicator()
service.wait_healthcheck()
response = service.post("/symbolicate", json=input)
response.raise_for_status()
assert response.json() == output
def test_redirects(symbolicator, hitcounter):
input = dict(
sources=[
{
"type": "http",
"id": "microsoft",
"layout": {"type": "symstore"},
"url": f"{hitcounter.url}/redirect/msdl/",
}
],
**WINDOWS_DATA,
)
service = symbolicator()
service.wait_healthcheck()
response = service.post("/symbolicate", json=input)
response.raise_for_status()
assert response.json() == SUCCESS_WINDOWS
@pytest.mark.parametrize("value", [True, False])
@pytest.mark.parametrize("hostname", ["dev.getsentry.net", "localhost", "127.0.0.1"])
def test_reserved_ip_addresses(symbolicator, hitcounter, value, hostname):
service = symbolicator(connect_to_reserved_ips=value)
service.wait_healthcheck()
url = hitcounter.url.replace("localhost", hostname).replace("127.0.0.1", hostname)
assert hostname in url
input = dict(
sources=[
{
"type": "http",
"id": "microsoft",
"layout": {"type": "symstore"},
"url": f"{url}/msdl/",
}
],
**WINDOWS_DATA,
)
response = service.post("/symbolicate", json=input)
response.raise_for_status()
if value:
assert response.json() == SUCCESS_WINDOWS
else:
assert response.json() == MISSING_FILE
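# The `symbolicator` and `hitcounter` fixtures used above are assumed to come
# from this suite's conftest.py. The polling pattern that test_timeouts
# exercises can be factored as below (sketch; not used by the tests, shown for
# clarity):
def _poll_until_complete(service, payload):
    response = service.post("/symbolicate?timeout=1", json=payload)
    response.raise_for_status()
    body = response.json()
    while body["status"] == "pending":
        response = service.get("/requests/{}?timeout=1".format(body["request_id"]))
        response.raise_for_status()
        body = response.json()
    return body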
|
controller.py
|
import re
import shutil
import time
import traceback
from subprocess import Popen, STDOUT
from threading import Thread
from typing import List, Set, Type, Tuple, Dict
from waffles.api.abstract.controller import SoftwareManager, SearchResult, ApplicationContext, UpgradeRequirements, \
UpgradeRequirement, TransactionResult, SoftwareAction
from waffles.api.abstract.disk import DiskCacheLoader
from waffles.api.abstract.handler import ProcessWatcher, TaskManager
from waffles.api.abstract.model import SoftwarePackage, PackageUpdate, PackageHistory, PackageSuggestion, \
CustomSoftwareAction
from waffles.api.abstract.view import ViewComponent, TabGroupComponent, MessageType
from waffles.api.exception import NoInternetException
from waffles.commons.boot import CreateConfigFile
from waffles.commons.html import bold
from waffles.view.core.config import CoreConfigManager
from waffles.view.core.settings import GenericSettingsManager
from waffles.view.core.update import check_for_update
from waffles.view.util import resource
from waffles.view.util.resource import get_path
from waffles.view.util.util import clean_app_files, restart_app
RE_IS_URL = re.compile(r'^https?://.+')
class GenericUpgradeRequirements(UpgradeRequirements):
def __init__(self, to_install: List[UpgradeRequirement], to_remove: List[UpgradeRequirement],
to_upgrade: List[UpgradeRequirement], cannot_upgrade: List[SoftwarePackage],
sub_requirements: Dict[SoftwareManager, UpgradeRequirements]):
super(GenericUpgradeRequirements, self).__init__(to_install=to_install, to_upgrade=to_upgrade,
to_remove=to_remove, cannot_upgrade=cannot_upgrade)
self.sub_requirements = sub_requirements
class GenericSoftwareManager(SoftwareManager):
def __init__(self, managers: List[SoftwareManager], context: ApplicationContext, config: dict,
settings_manager: GenericSettingsManager = None):
super(GenericSoftwareManager, self).__init__(context=context)
self.managers = managers
self.map = {t: m for m in self.managers for t in m.get_managed_types()}
self._available_cache = {} if config['system']['single_dependency_checking'] else None
self.thread_prepare = None
self.i18n = context.i18n
self.disk_loader_factory = context.disk_loader_factory
self.logger = context.logger
self._already_prepared = []
self.working_managers = []
self.config = config
self.settings_manager = settings_manager
self.http_client = context.http_client
self.configman = CoreConfigManager()
self.extra_actions = [CustomSoftwareAction(i18n_label_key='action.reset',
i18n_status_key='action.reset.status',
manager_method='reset',
manager=self,
icon_path=resource.get_path('img/logo.svg'),
requires_root=False,
refresh=False)]
self.dynamic_extra_actions = {CustomSoftwareAction(i18n_label_key='action.backups',
i18n_status_key='action.backups.status',
manager_method='launch_timeshift',
manager=self,
icon_path='timeshift',
requires_root=False,
refresh=False): self.is_backups_action_available}
def _is_timeshift_launcher_available(self) -> bool:
return bool(shutil.which('timeshift-launcher'))
def is_backups_action_available(self, app_config: dict) -> bool:
return bool(app_config['backup']['enabled']) and self._is_timeshift_launcher_available()
def reset_cache(self):
if self._available_cache is not None:
self._available_cache = {}
self.working_managers.clear()
def launch_timeshift(self, root_password: str, watcher: ProcessWatcher):
if self._is_timeshift_launcher_available():
try:
Popen(['timeshift-launcher'], stderr=STDOUT)
return True
except:
traceback.print_exc()
watcher.show_message(title=self.i18n["error"].capitalize(),
body=self.i18n['action.backups.tool_error'].format(bold('Timeshift')),
type_=MessageType.ERROR)
return False
else:
watcher.show_message(title=self.i18n["error"].capitalize(),
body=self.i18n['action.backups.tool_error'].format(bold('Timeshift')),
type_=MessageType.ERROR)
return False
def _sort(self, apps: List[SoftwarePackage], word: str) -> List[SoftwarePackage]:
exact_name_matches, contains_name_matches, others = [], [], []
for app in apps:
lower_name = app.name.lower()
if word == lower_name:
exact_name_matches.append(app)
elif word in lower_name:
contains_name_matches.append(app)
else:
others.append(app)
res = []
for app_list in (exact_name_matches, contains_name_matches, others):
app_list.sort(key=lambda a: a.name.lower())
res.extend(app_list)
return res
def _can_work(self, man: SoftwareManager):
if self._available_cache is not None:
available = False
for t in man.get_managed_types():
available = self._available_cache.get(t)
if available is None:
available = man.is_enabled() and man.can_work()
self._available_cache[t] = available
if available:
break
else:
available = man.is_enabled() and man.can_work()
if available:
if man not in self.working_managers:
self.working_managers.append(man)
else:
if man in self.working_managers:
self.working_managers.remove(man)
return available
def _search(self, word: str, is_url: bool, man: SoftwareManager, disk_loader, res: SearchResult):
if self._can_work(man):
mti = time.time()
apps_found = man.search(words=word, disk_loader=disk_loader, is_url=is_url)
mtf = time.time()
self.logger.info(man.__class__.__name__ + " took {0:.8f} seconds".format(mtf - mti))
res.installed.extend(apps_found.installed)
res.new.extend(apps_found.new)
def search(self, words: str, disk_loader: DiskCacheLoader = None, limit: int = -1, is_url: bool = False) -> SearchResult:
ti = time.time()
self._wait_to_be_ready()
res = SearchResult.empty()
if self.context.is_internet_available():
norm_word = words.strip().lower()
url_words = RE_IS_URL.match(norm_word)
disk_loader = self.disk_loader_factory.new()
disk_loader.start()
threads = []
for man in self.managers:
t = Thread(target=self._search, args=(norm_word, url_words, man, disk_loader, res))
t.start()
threads.append(t)
for t in threads:
t.join()
if disk_loader:
disk_loader.stop_working()
disk_loader.join()
res.installed = self._sort(res.installed, norm_word)
res.new = self._sort(res.new, norm_word)
else:
raise NoInternetException()
res.update_total()
tf = time.time()
self.logger.info('Took {0:.8f} seconds'.format(tf - ti))
return res
def _wait_to_be_ready(self):
if self.thread_prepare:
self.thread_prepare.join()
self.thread_prepare = None
def set_enabled(self, enabled: bool):
pass
def can_work(self) -> bool:
return True
def _get_package_lower_name(self, pkg: SoftwarePackage):
return pkg.name.lower()
def read_installed(self, disk_loader: DiskCacheLoader = None, limit: int = -1, only_apps: bool = False, pkg_types: Set[Type[SoftwarePackage]] = None, internet_available: bool = None) -> SearchResult:
ti = time.time()
self._wait_to_be_ready()
res = SearchResult([], None, 0)
disk_loader = None
net_available = self.context.is_internet_available()
if not pkg_types: # any type
for man in self.managers:
if self._can_work(man):
if not disk_loader:
disk_loader = self.disk_loader_factory.new()
disk_loader.start()
mti = time.time()
man_res = man.read_installed(disk_loader=disk_loader, pkg_types=None, internet_available=net_available)
mtf = time.time()
self.logger.info(man.__class__.__name__ + " took {0:.2f} seconds".format(mtf - mti))
res.installed.extend(man_res.installed)
res.total += man_res.total
else:
man_already_used = []
for t in pkg_types:
man = self.map.get(t)
if man and (man not in man_already_used) and self._can_work(man):
if not disk_loader:
disk_loader = self.disk_loader_factory.new()
disk_loader.start()
mti = time.time()
man_res = man.read_installed(disk_loader=disk_loader, pkg_types=None, internet_available=net_available)
mtf = time.time()
self.logger.info(man.__class__.__name__ + " took {0:.2f} seconds".format(mtf - mti))
res.installed.extend(man_res.installed)
res.total += man_res.total
if disk_loader:
disk_loader.stop_working()
disk_loader.join()
if res.installed:
for p in res.installed:
if p.is_update_ignored():
if p.categories is None:
p.categories = ['updates_ignored']
elif 'updates_ignored' not in p.categories:
p.categories.append('updates_ignored')
res.installed.sort(key=self._get_package_lower_name)
tf = time.time()
self.logger.info('Took {0:.2f} seconds'.format(tf - ti))
return res
def downgrade(self, app: SoftwarePackage, root_password: str, handler: ProcessWatcher) -> bool:
man = self._get_manager_for(app)
if man and app.can_be_downgraded():
mti = time.time()
res = man.downgrade(app, root_password, handler)
mtf = time.time()
self.logger.info('Took {0:.2f} seconds'.format(mtf - mti))
return res
else:
raise Exception("downgrade is not possible for {}".format(app.__class__.__name__))
def clean_cache_for(self, app: SoftwarePackage):
man = self._get_manager_for(app)
if man:
return man.clean_cache_for(app)
def upgrade(self, requirements: GenericUpgradeRequirements, root_password: str, handler: ProcessWatcher) -> bool:
for man, man_reqs in requirements.sub_requirements.items():
res = man.upgrade(man_reqs, root_password, handler)
if not res:
return False
return True
def _fill_post_transaction_status(self, pkg: SoftwarePackage, installed: bool):
pkg.installed = installed
pkg.update = False
if pkg.latest_version:
pkg.version = pkg.latest_version
def _update_post_transaction_status(self, res: TransactionResult):
if res.success:
if res.installed:
for p in res.installed:
self._fill_post_transaction_status(p, True)
if res.removed:
for p in res.removed:
self._fill_post_transaction_status(p, False)
def uninstall(self, pkg: SoftwarePackage, root_password: str, handler: ProcessWatcher, disk_loader: DiskCacheLoader = None) -> TransactionResult:
man = self._get_manager_for(pkg)
if man:
ti = time.time()
disk_loader = self.disk_loader_factory.new()
disk_loader.start()
self.logger.info("Uninstalling {}".format(pkg.name))
try:
res = man.uninstall(pkg, root_password, handler, disk_loader)
disk_loader.stop_working()
disk_loader.join()
self._update_post_transaction_status(res)
return res
except:
traceback.print_exc()
return TransactionResult(success=False, installed=[], removed=[])
finally:
tf = time.time()
self.logger.info('Uninstallation of {} took {:.2f} minutes'.format(pkg, (tf - ti) / 60))
def install(self, app: SoftwarePackage, root_password: str, disk_loader: DiskCacheLoader, handler: ProcessWatcher) -> TransactionResult:
man = self._get_manager_for(app)
if man:
ti = time.time()
disk_loader = self.disk_loader_factory.new()
disk_loader.start()
try:
self.logger.info('Installing {}'.format(app))
res = man.install(app, root_password, disk_loader, handler)
disk_loader.stop_working()
disk_loader.join()
self._update_post_transaction_status(res)
return res
except:
traceback.print_exc()
return TransactionResult(success=False, installed=[], removed=[])
finally:
tf = time.time()
self.logger.info('Installation of {} took {:.2f} minutes'.format(app, (tf - ti) / 60))
def get_info(self, app: SoftwarePackage):
man = self._get_manager_for(app)
if man:
return man.get_info(app)
def get_history(self, app: SoftwarePackage) -> PackageHistory:
man = self._get_manager_for(app)
if man:
mti = time.time()
history = man.get_history(app)
mtf = time.time()
self.logger.info(man.__class__.__name__ + " took {0:.2f} seconds".format(mtf - mti))
return history
def get_managed_types(self) -> Set[Type[SoftwarePackage]]:
available_types = set()
for man in self.get_working_managers():
available_types.update(man.get_managed_types())
return available_types
def is_enabled(self):
return True
def _get_manager_for(self, app: SoftwarePackage) -> SoftwareManager:
man = self.map[app.__class__]
return man if man and self._can_work(man) else None
def cache_to_disk(self, pkg: SoftwarePackage, icon_bytes: bytes, only_icon: bool):
if pkg.supports_disk_cache():
man = self._get_manager_for(pkg)
if man:
return man.cache_to_disk(pkg, icon_bytes=icon_bytes, only_icon=only_icon)
def requires_root(self, action: SoftwareAction, app: SoftwarePackage) -> bool:
if app is None:
if self.managers:
for man in self.managers:
if self._can_work(man):
if man.requires_root(action, app):
return True
return False
else:
man = self._get_manager_for(app)
if man:
return man.requires_root(action, app)
def prepare(self, task_manager: TaskManager, root_password: str, internet_available: bool):
ti = time.time()
self.logger.info("Initializing")
taskman = task_manager if task_manager else TaskManager() # empty task manager to prevent null pointers
create_config = CreateConfigFile(taskman=taskman, configman=self.configman, i18n=self.i18n,
task_icon_path=get_path('img/logo.svg'), logger=self.logger)
create_config.start()
if self.managers:
internet_on = self.context.is_internet_available()
prepare_tasks = []
for man in self.managers:
if man not in self._already_prepared and self._can_work(man):
t = Thread(target=man.prepare, args=(taskman, root_password, internet_on), daemon=True)
t.start()
prepare_tasks.append(t)
self._already_prepared.append(man)
for t in prepare_tasks:
t.join()
tf = time.time()
self.logger.info("Finished. Took {0:.2f} seconds".format(tf - ti))
def cache_available_managers(self):
if self.managers:
for man in self.managers:
self._can_work(man)
def list_updates(self, internet_available: bool = None) -> List[PackageUpdate]:
self._wait_to_be_ready()
updates = []
if self.managers:
net_available = self.context.is_internet_available()
for man in self.managers:
if self._can_work(man):
man_updates = man.list_updates(internet_available=net_available)
if man_updates:
updates.extend(man_updates)
return updates
def list_warnings(self, internet_available: bool = None) -> List[str]:
warnings = []
int_available = self.context.is_internet_available()
if int_available:
updates_msg = check_for_update(self.logger, self.http_client, self.i18n)
if updates_msg:
warnings.append(updates_msg)
if self.managers:
for man in self.managers:
if man.is_enabled():
man_warnings = man.list_warnings(internet_available=int_available)
if man_warnings:
warnings.extend(man_warnings)
return warnings
def _fill_suggestions(self, suggestions: list, man: SoftwareManager, limit: int, filter_installed: bool):
if self._can_work(man):
mti = time.time()
man_sugs = man.list_suggestions(limit=limit, filter_installed=filter_installed)
mtf = time.time()
self.logger.info(man.__class__.__name__ + ' took {0:.5f} seconds'.format(mtf - mti))
if man_sugs:
if 0 < limit < len(man_sugs):
man_sugs = man_sugs[0:limit]
suggestions.extend(man_sugs)
def list_suggestions(self, limit: int, filter_installed: bool) -> List[PackageSuggestion]:
if bool(self.config['suggestions']['enabled']):
if self.managers and self.context.is_internet_available():
suggestions, threads = [], []
for man in self.managers:
t = Thread(target=self._fill_suggestions, args=(suggestions, man, int(self.config['suggestions']['by_type']), filter_installed))
t.start()
threads.append(t)
for t in threads:
t.join()
if suggestions:
suggestions.sort(key=lambda s: s.priority.value, reverse=True)
return suggestions
return []
def execute_custom_action(self, action: CustomSoftwareAction, pkg: SoftwarePackage, root_password: str, watcher: ProcessWatcher):
if action.requires_internet and not self.context.is_internet_available():
raise NoInternetException()
man = action.manager if action.manager else self._get_manager_for(pkg)
if man:
method = getattr(man, action.manager_method)
kwargs = {'root_password': root_password, 'watcher': watcher}
if pkg:
kwargs['pkg'] = pkg
return method(**kwargs)
def is_default_enabled(self) -> bool:
return True
def launch(self, pkg: SoftwarePackage):
self._wait_to_be_ready()
man = self._get_manager_for(pkg)
if man:
self.logger.info('Launching {}'.format(pkg))
man.launch(pkg)
def get_screenshots(self, pkg: SoftwarePackage):
man = self._get_manager_for(pkg)
if man:
return man.get_screenshots(pkg)
def get_working_managers(self):
return [m for m in self.managers if self._can_work(m)]
def get_settings(self, screen_width: int, screen_height: int) -> ViewComponent:
if self.settings_manager is None:
self.settings_manager = GenericSettingsManager(managers=self.managers,
working_managers=self.working_managers,
logger=self.logger,
i18n=self.i18n,
file_downloader=self.context.file_downloader,
configman=self.configman)
else:
self.settings_manager.managers = self.managers
self.settings_manager.working_managers = self.working_managers
return self.settings_manager.get_settings(screen_width=screen_width, screen_height=screen_height)
def save_settings(self, component: TabGroupComponent) -> Tuple[bool, List[str]]:
return self.settings_manager.save_settings(component)
def _map_pkgs_by_manager(self, pkgs: List[SoftwarePackage], pkg_filters: list = None) -> Dict[SoftwareManager, List[SoftwarePackage]]:
by_manager = {}
for pkg in pkgs:
if pkg_filters and not all(f(pkg) for f in pkg_filters):
continue
man = self._get_manager_for(pkg)
if man:
man_pkgs = by_manager.get(man)
if man_pkgs is None:
man_pkgs = []
by_manager[man] = man_pkgs
man_pkgs.append(pkg)
return by_manager
def get_upgrade_requirements(self, pkgs: List[SoftwarePackage], root_password: str, watcher: ProcessWatcher) -> UpgradeRequirements:
by_manager = self._map_pkgs_by_manager(pkgs)
res = GenericUpgradeRequirements([], [], [], [], {})
if by_manager:
for man, pkgs in by_manager.items():
ti = time.time()
man_reqs = man.get_upgrade_requirements(pkgs, root_password, watcher)
tf = time.time()
self.logger.info(man.__class__.__name__ + " took {0:.2f} seconds".format(tf - ti))
if not man_reqs:
return # it means the process should be stopped
if man_reqs:
res.sub_requirements[man] = man_reqs
if man_reqs.to_install:
res.to_install.extend(man_reqs.to_install)
if man_reqs.to_remove:
res.to_remove.extend(man_reqs.to_remove)
if man_reqs.to_upgrade:
res.to_upgrade.extend(man_reqs.to_upgrade)
if man_reqs.cannot_upgrade:
res.cannot_upgrade.extend(man_reqs.cannot_upgrade)
return res
def reset(self, root_password: str, watcher: ProcessWatcher) -> bool:
body = '<p>{}</p><p>{}</p>'.format(self.i18n['action.reset.body_1'].format(bold(self.context.app_name)),
self.i18n['action.reset.body_2'])
if watcher.request_confirmation(title=self.i18n['action.reset'],
body=body,
confirmation_label=self.i18n['proceed'].capitalize(),
deny_label=self.i18n['cancel'].capitalize()):
try:
clean_app_files(managers=self.managers, logs=False)
restart_app()
except:
return False
return True
def get_custom_actions(self) -> List[CustomSoftwareAction]:
actions = []
if self.managers:
working_managers = []
for man in self.managers:
if self._can_work(man):
working_managers.append(man)
if working_managers:
working_managers.sort(key=lambda m: m.__class__.__name__)
for man in working_managers:
man_actions = man.get_custom_actions()
if man_actions:
actions.extend(man_actions)
app_config = self.configman.get_config()
for action, available in self.dynamic_extra_actions.items():
if available(app_config):
actions.append(action)
actions.extend(self.extra_actions)
return actions
def _fill_sizes(self, man: SoftwareManager, pkgs: List[SoftwarePackage]):
ti = time.time()
man.fill_sizes(pkgs)
tf = time.time()
self.logger.info(man.__class__.__name__ + " took {0:.2f} seconds".format(tf - ti))
def fill_sizes(self, pkgs: List[SoftwarePackage]):
by_manager = self._map_pkgs_by_manager(pkgs, pkg_filters=[lambda p: p.size is None])
if by_manager:
threads = []
for man, man_pkgs in by_manager.items():
if man_pkgs:
t = Thread(target=self._fill_sizes, args=(man, man_pkgs), daemon=True)
t.start()
threads.append(t)
for t in threads:
t.join()
def ignore_update(self, pkg: SoftwarePackage):
manager = self._get_manager_for(pkg)
if manager:
manager.ignore_update(pkg)
if pkg.is_update_ignored():
if pkg.categories is None:
pkg.categories = ['updates_ignored']
elif 'updates_ignored' not in pkg.categories:
pkg.categories.append('updates_ignored')
def revert_ignored_update(self, pkg: SoftwarePackage):
manager = self._get_manager_for(pkg)
if manager:
manager.revert_ignored_update(pkg)
if not pkg.is_update_ignored() and pkg.categories and 'updates_ignored' in pkg.categories:
pkg.categories.remove('updates_ignored')
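# Usage sketch (an assumption -- the waffles application wires concrete
# managers and the ApplicationContext; all names below are illustrative only):
#
#   generic = GenericSoftwareManager(managers=[flatpak_man, snap_man],
#                                    context=app_context, config=app_config)
#   result = generic.search('firefox')
#   updates = generic.list_updates()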
|
sphinx_.py
|
"""Interface with Sphinx."""
import datetime
import logging
import multiprocessing
import os
import sys
from shutil import copyfile, rmtree
from sphinx import application, locale
from sphinx.cmd.build import build_main, make_main
from sphinx.builders.html import StandaloneHTMLBuilder
from sphinx.config import Config as SphinxConfig
from sphinx.errors import SphinxError
from sphinx.jinja2glue import SphinxFileSystemLoader
from sphinx.util.i18n import format_date
from sphinxcontrib.versioning import __version__
from sphinxcontrib.versioning.lib import Config, HandledError, TempDir
from sphinxcontrib.versioning.versions import Versions
SC_VERSIONING_VERSIONS = list() # Updated after forking.
STATIC_DIR = os.path.join(os.path.dirname(__file__), '_static')
class EventHandlers(object):
"""Hold Sphinx event handlers as static or class methods.
:ivar multiprocessing.queues.Queue ABORT_AFTER_READ: Communication channel to parent process.
:ivar bool BANNER_GREATEST_TAG: Banner URLs point to greatest/highest (semver) tag.
:ivar str BANNER_MAIN_VERSION: Banner URLs point to this remote name (from Versions.__getitem__()).
:ivar bool BANNER_RECENT_TAG: Banner URLs point to most recently committed tag.
:ivar str CURRENT_VERSION: Current version being built.
:ivar bool IS_ROOT: Value for context['scv_is_root'].
:ivar bool SHOW_BANNER: Display the banner.
:ivar sphinxcontrib.versioning.versions.Versions VERSIONS: Versions class instance.
"""
ABORT_AFTER_READ = None
BANNER_GREATEST_TAG = False
BANNER_MAIN_VERSION = None
BANNER_RECENT_TAG = False
CURRENT_VERSION = None
IS_ROOT = False
SHOW_BANNER = False
VERSIONS = None
@staticmethod
def builder_inited(app):
"""Update the Sphinx builder.
:param sphinx.application.Sphinx app: Sphinx application object.
"""
# Add this extension's _templates directory to Sphinx.
templates_dir = os.path.join(os.path.dirname(__file__), '_templates')
if app.builder.name != "latex":
app.builder.templates.pathchain.insert(0, templates_dir)
app.builder.templates.loaders.insert(0, SphinxFileSystemLoader(templates_dir))
app.builder.templates.templatepathlen += 1
# Add versions.html to sidebar.
if '**' not in app.config.html_sidebars:
# default_sidebars was deprecated in Sphinx 1.6+, so use it when it is still
# available (for backwards compatibility) and fall back to a bare list otherwise.
try:
app.config.html_sidebars['**'] = StandaloneHTMLBuilder.default_sidebars + ['versions.html']
except AttributeError:
app.config.html_sidebars['**'] = ['versions.html']
elif 'versions.html' not in app.config.html_sidebars['**']:
app.config.html_sidebars['**'].append('versions.html')
@classmethod
def env_updated(cls, app, env):
"""Abort Sphinx after initializing config and discovering all pages to build.
:param sphinx.application.Sphinx app: Sphinx application object.
:param sphinx.environment.BuildEnvironment env: Sphinx build environment.
"""
if cls.ABORT_AFTER_READ:
config = {n: getattr(app.config, n) for n in (a for a in dir(app.config) if a.startswith('scv_'))}
config['found_docs'] = tuple(str(d) for d in env.found_docs)
config['master_doc'] = str(app.config.master_doc)
cls.ABORT_AFTER_READ.put(config)
sys.exit(0)
@classmethod
def html_page_context(cls, app, pagename, templatename, context, doctree):
"""Update the Jinja2 HTML context, exposes the Versions class instance to it.
:param sphinx.application.Sphinx app: Sphinx application object.
:param str pagename: Name of the page being rendered (without .html or any file extension).
:param str templatename: Page name with .html.
:param dict context: Jinja2 HTML context.
:param docutils.nodes.document doctree: Tree of docutils nodes.
"""
assert templatename or doctree # Unused, for linting.
cls.VERSIONS.context = context
versions = cls.VERSIONS
this_remote = versions[cls.CURRENT_VERSION]
banner_main_remote = versions[cls.BANNER_MAIN_VERSION] if cls.SHOW_BANNER else None
# Update Jinja2 context.
context['bitbucket_version'] = cls.CURRENT_VERSION
context['current_version'] = cls.CURRENT_VERSION
context['github_version'] = cls.CURRENT_VERSION
context['html_theme'] = app.config.html_theme
context['scv_banner_greatest_tag'] = cls.BANNER_GREATEST_TAG
context['scv_banner_main_ref_is_branch'] = banner_main_remote['kind'] == 'heads' if cls.SHOW_BANNER else None
context['scv_banner_main_ref_is_tag'] = banner_main_remote['kind'] == 'tags' if cls.SHOW_BANNER else None
context['scv_banner_main_version'] = banner_main_remote['name'] if cls.SHOW_BANNER else None
context['scv_banner_recent_tag'] = cls.BANNER_RECENT_TAG
context['scv_is_branch'] = this_remote['kind'] == 'heads'
context['scv_is_greatest_tag'] = this_remote == versions.greatest_tag_remote
context['scv_is_recent_branch'] = this_remote == versions.recent_branch_remote
context['scv_is_recent_ref'] = this_remote == versions.recent_remote
context['scv_is_recent_tag'] = this_remote == versions.recent_tag_remote
context['scv_is_root'] = cls.IS_ROOT
context['scv_is_tag'] = this_remote['kind'] == 'tags'
context['scv_show_banner'] = cls.SHOW_BANNER
context['versions'] = versions
context['vhasdoc'] = versions.vhasdoc
context['vpathto'] = versions.vpathto
# Insert banner into body.
if cls.SHOW_BANNER and 'body' in context:
parsed = app.builder.templates.render('banner.html', context)
context['body'] = parsed + context['body']
# Handle overridden css_files.
css_files = context.setdefault('css_files', list())
if '_static/banner.css' not in css_files:
css_files.append('_static/banner.css')
# Handle overridden html_static_path.
if STATIC_DIR not in app.config.html_static_path:
app.config.html_static_path.append(STATIC_DIR)
# Reset last_updated with file's mtime (will be last git commit authored date).
if app.config.html_last_updated_fmt is not None:
file_path = app.env.doc2path(pagename)
if os.path.isfile(file_path):
lufmt = app.config.html_last_updated_fmt or getattr(locale, '_')('%b %d, %Y')
mtime = datetime.datetime.fromtimestamp(os.path.getmtime(file_path))
context['last_updated'] = format_date(lufmt, mtime, language=app.config.language)
def setup(app):
"""Called by Sphinx during phase 0 (initialization).
:param sphinx.application.Sphinx app: Sphinx application object.
:returns: Extension version.
:rtype: dict
"""
# Used internally. For rebuilding all pages when one or more versions fail.
app.add_config_value('sphinxcontrib_versioning_versions', SC_VERSIONING_VERSIONS, 'html')
# Needed for banner.
app.config.html_static_path.append(STATIC_DIR)
app.add_stylesheet('banner.css')
# Tell Sphinx which config values can be set by the user.
for name, default in Config():
app.add_config_value('scv_{}'.format(name), default, 'html')
# Event handlers.
app.connect('builder-inited', EventHandlers.builder_inited)
app.connect('env-updated', EventHandlers.env_updated)
app.connect('html-page-context', EventHandlers.html_page_context)
return dict(version=__version__)
class ConfigInject(SphinxConfig):
"""Inject this extension info self.extensions. Append after user's extensions."""
def __init__(self, *args):
"""Constructor."""
super(ConfigInject, self).__init__(*args)
self.extensions.append('sphinxcontrib.versioning.sphinx_')
def _build(argv, config, versions, current_name, is_root):
"""Build Sphinx docs via multiprocessing for isolation.
:param tuple argv: Arguments to pass to Sphinx.
:param sphinxcontrib.versioning.lib.Config config: Runtime configuration.
:param sphinxcontrib.versioning.versions.Versions versions: Versions class instance.
:param str current_name: The ref name of the current version being built.
:param bool is_root: Is this build in the web root?
"""
# Patch.
application.Config = ConfigInject
if config.show_banner:
EventHandlers.BANNER_GREATEST_TAG = config.banner_greatest_tag
EventHandlers.BANNER_MAIN_VERSION = config.banner_main_ref
EventHandlers.BANNER_RECENT_TAG = config.banner_recent_tag
EventHandlers.SHOW_BANNER = True
EventHandlers.CURRENT_VERSION = current_name
EventHandlers.IS_ROOT = is_root
EventHandlers.VERSIONS = versions
SC_VERSIONING_VERSIONS[:] = [p for r in versions.remotes for p in sorted(r.items()) if p[0] not in ('sha', 'date')]
# Update argv.
if config.verbose > 1:
argv += ('-v',) * (config.verbose - 1)
if config.no_colors:
argv += ('-N',)
if config.overflow:
argv += config.overflow
# Build.
result = build_main(argv)
if result != 0:
raise SphinxError
# Build the PDF if required
if config.pdf_file:
args = list(argv)
args.insert(0, "latexpdf")  # builder type
args.insert(0, "ignore")  # first element is ignored by make_main
result = make_main(args)
# Copy the PDF into the HTML build's _static directory
latex_dir = os.path.join(argv[1], "latex")
copyfile(os.path.join(latex_dir, config.pdf_file), os.path.join(argv[1], "_static", config.pdf_file))
rmtree(latex_dir)
if result != 0:
raise SphinxError
def _read_config(argv, config, current_name, queue):
"""Read the Sphinx config via multiprocessing for isolation.
:param tuple argv: Arguments to pass to Sphinx.
:param sphinxcontrib.versioning.lib.Config config: Runtime configuration.
:param str current_name: The ref name of the current version being built.
:param multiprocessing.queues.Queue queue: Communication channel to parent process.
"""
# Patch.
EventHandlers.ABORT_AFTER_READ = queue
# Run.
_build(argv, config, Versions(list()), current_name, False)
def build(source, target, versions, current_name, is_root):
"""Build Sphinx docs for one version. Includes Versions class instance with names/urls in the HTML context.
:raise HandledError: If sphinx-build fails. Will be logged before raising.
:param str source: Source directory to pass to sphinx-build.
:param str target: Destination directory to write documentation to (passed to sphinx-build).
:param sphinxcontrib.versioning.versions.Versions versions: Versions class instance.
:param str current_name: The ref name of the current version being built.
:param bool is_root: Is this build in the web root?
"""
log = logging.getLogger(__name__)
argv = (source, target)
config = Config.from_context()
log.debug('Running sphinx-build for %s with args: %s', current_name, str(argv))
child = multiprocessing.Process(target=_build, args=(argv, config, versions, current_name, is_root))
child.start()
child.join() # Block.
if child.exitcode != 0:
log.error('sphinx-build failed for branch/tag: %s', current_name)
raise HandledError
def read_config(source, current_name):
"""Read the Sphinx config for one version.
:raise HandledError: If sphinx-build fails. Will be logged before raising.
:param str source: Source directory to pass to sphinx-build.
:param str current_name: The ref name of the current version being built.
:return: Specific Sphinx config values.
:rtype: dict
"""
log = logging.getLogger(__name__)
queue = multiprocessing.Queue()
config = Config.from_context()
with TempDir() as temp_dir:
argv = (source, temp_dir)
log.debug('Running sphinx-build for config values with args: %s', str(argv))
child = multiprocessing.Process(target=_read_config, args=(argv, config, current_name, queue))
child.start()
child.join() # Block.
if child.exitcode != 0:
log.error('sphinx-build failed for branch/tag while reading config: %s', current_name)
raise HandledError
config = queue.get()
return config
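# Usage sketch (an assumption -- sphinxcontrib-versioning's CLI normally drives
# these helpers; 'docs' and 'master' below are illustrative):
#
#   versions = Versions(remotes)  # remotes gathered from git elsewhere
#   config_values = read_config('docs', current_name='master')
#   build('docs', '/tmp/html/master', versions, current_name='master', is_root=True)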
|
chat_server.py
|
"""
Author: Levi
Email: lvze@tedu.cn
Time : 2020-12-15
Env : Python3.6
socket and process exercise
"""
from socket import *
from multiprocessing import Process
# Server address
HOST = "0.0.0.0"
PORT = 8888
ADDR = (HOST, PORT)
# Structure storing user info: {name: address}
user = {}
def do_login(sock, name, address):
if name in user or "管理" in name:  # names containing "管理" ("admin") are reserved
sock.sendto(b"FAIL", address)
else:
sock.sendto(b"OK", address)
# Notify the other users first
msg = "Welcome %s to the chat room" % name
for key, value in user.items():
sock.sendto(msg.encode(), value)
# Add the user
user[name] = address
def do_chat(sock, name, content):
msg = "%s : %s" % (name, content)
for key, value in user.items():
# Skip the sender
if key != name:
sock.sendto(msg.encode(), value)
def do_exit(sock, name):
del user[name]  # remove the user from the dict
msg = "%s 退出了聊天" % name
# 通知其他人
for key, value in user.items():
sock.sendto(msg.encode(), value)
def handle(sock):
# Receive user requests in a loop
while True:
data, addr = sock.recvfrom(1024)
tmp = data.decode().split(' ', 2)
# Dispatch on the request type
if tmp[0] == "LOGIN":
# tmp->[LOGIN,name]
do_login(sock, tmp[1], addr)
elif tmp[0] == "CHAT":
# tmp->[CHAT,name,xxxx]
do_chat(sock, tmp[1], tmp[2])
elif tmp[0] == "EXIT":
# tmp->[EXIT,name]
do_exit(sock, tmp[1])
# Overall program structure
def main():
# Create a UDP socket
sock = socket(AF_INET, SOCK_DGRAM)
sock.bind(ADDR)
p = Process(target=handle, args=(sock,), daemon=True)
p.start()
# The parent process sends administrator messages
while True:
content = input("Admin message: ")
if content == "exit":
break
msg = "CHAT 管理员消息 "+content
# 发送给子进程
sock.sendto(msg.encode(),ADDR)
if __name__ == '__main__':
main()  # start the server
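# Matching client sketch (an assumption -- any UDP client speaking the
# LOGIN/CHAT/EXIT protocol above will do; the address and name are illustrative):
#
#   from socket import socket, AF_INET, SOCK_DGRAM
#   s = socket(AF_INET, SOCK_DGRAM)
#   s.sendto(b"LOGIN alice", ("127.0.0.1", 8888))
#   print(s.recvfrom(1024)[0])  # b"OK" on success, b"FAIL" otherwise
#   s.sendto("CHAT alice hello".encode(), ("127.0.0.1", 8888))
#   s.sendto(b"EXIT alice", ("127.0.0.1", 8888))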
|
load_testing.py
|
import requests
import threading
import datetime
def req():
for i in range(40):
pload = {"input": "def foo():\n\tprint()", "in_lang": "py", "out_lang": "js"}
url = 'https://cjsback.herokuapp.com/'
r = requests.post(url, data=pload)
print(r.json())
threads = []
begin_time = 0
for i in range(10):
if i == 1:
# Start timing once the first thread has been launched
begin_time = datetime.datetime.now()
t = threading.Thread(target=req)
threads.append(t)
t.start()
for thread in threads:
thread.join()
print(datetime.datetime.now()-begin_time)
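# Variant sketch: one requests.Session per thread reuses TCP connections across
# the 40 calls (an assumption about intent; the original opens a new connection
# per request). Not wired into the threads above.
def req_with_session():
    pload = {"input": "def foo():\n\tprint()", "in_lang": "py", "out_lang": "js"}
    url = 'https://cjsback.herokuapp.com/'
    with requests.Session() as s:
        for _ in range(40):
            print(s.post(url, data=pload).json())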
|
main.py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# @Time : 2019/1/14 4:35 PM
# @Author : w8ay
# @File : main.py
import os
import sys
import threading
import time
from config import THREAD_NUM, DEBUG, NODE_NAME
from lib.data import PATHS, logger
from lib.engine import Schedular
from lib.redis import redis_con
from thirdpart.requests import patch_all
def module_path():
"""
This will get us the program's directory
"""
return os.path.dirname(os.path.realpath(__file__))
def main():
PATHS.ROOT_PATH = module_path()
PATHS.PLUGIN_PATH = os.path.join(PATHS.ROOT_PATH, "pocs")
PATHS.OUTPUT_PATH = os.path.join(PATHS.ROOT_PATH, "output")
PATHS.DATA_PATH = os.path.join(PATHS.ROOT_PATH, "data")
patch_all()
logger.info("Hello W12SCAN !")
# Normalize domains (uniform format: no trailing "/" for domains or sub-paths), recognize IP/CIDR notation, normalize IPs
# Fetch scan targets from redis
def redis_get():
list_name = "w12scan_scanned"
while 1:
target = redis_con.blpop(list_name)[1]
schedular.put_target(target)
def debug_get():
target = "http://stun.tuniu.com"
schedular.put_target(target)
def node_register():
first_blood = True
while 1:
if first_blood:
dd = {
"last_time": time.time(),
"tasks": 0,
"running": 0,
"finished": 0
}
redis_con.hmset(NODE_NAME, dd)
first_blood = False
else:
redis_con.hset(NODE_NAME, "last_time", time.time())
time.sleep(50 * 5)
schedular = Schedular(threadnum=THREAD_NUM)
schedular.start()
# Start the task-dispatch scheduler
if DEBUG:
func_target = debug_get
else:
func_target = redis_get
node = threading.Thread(target=node_register)
node.start()
t = threading.Thread(target=func_target, name='LoopThread')
t.start()
while 1:
schedular.run()
if __name__ == '__main__':
try:
main()
except Exception as e:
logger.error("main error:{} {}".format(Exception, e))
logger.error(repr(sys.exc_info()))
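# Feeding the scanner (sketch; assumes the redis connection from lib.redis and
# the queue name consumed by redis_get above; the target URL is illustrative):
#
#   redis_con.rpush("w12scan_scanned", "http://example.com")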
|
main.py
|
from pynput.keyboard import Key, Listener
import sys
import os
import time
from threading import Thread
import json
save_file = "spc-clicker.sav"
space_counter = 0
bot_number = 0
bot_price = 10
click_multi_number = 0
click_multi_price = 100
# Load from the save file
if os.path.exists(save_file):
with open(save_file, "r") as f:
# Convert the JSON to a dictionary
a = f.read()
data = json.loads(a)
# Assign all of the variables with the info from the file
space_counter = float(data["space"])
bot_number = int(data["bot_num"])
bot_price = int(data["bot_price"])
click_multi_number = int(data["click_multi"])
click_multi_price = int(data["click_multi_price"])
def save(space_counter, bot_number, bot_price, click_multi_number, click_multi_price):
# Write the regular info into the file
with open(save_file, "w") as f:
write_text = f"space {space_counter}\nbot_num {bot_number}\nbot_price {bot_price}\nclick_multi {click_multi_number}\nclick_multi_price {click_multi_price}"
f.write(write_text)
# Read back what was just written and convert it to a dictionary
data = {}
with open(save_file, "r") as f:
for line in f:
a, b = line.strip().split(None, 1)
data[a] = b.strip()
# Write the JSON to the file, replacing the original text in it
with open(save_file, "w") as f:
json.dump(data, f, indent=4, sort_keys=False)
# Get OS
clear = ""
os_ = sys.platform
if os_ == "linux" or os_ == "darwin":
clear = "clear"
elif os_ == "win32":
clear = "cls"
def release(key):
global space_counter, t, bot_number, bot_price, click_multi_price, click_multi_number
# Calculate the bot price depending on the number of bots
# Increase the price every 5 bots
bot_price = 10 + (round(bot_number/5)*30)
# Calculate the click multiplier
click_multi_price = click_multi_number * 1000
if click_multi_price == 0:
click_multi_price = 100
if key == Key.space:
# Add a point for every space clicked
space_counter += click_multi_number*5
if click_multi_number == 0:
space_counter+=1
return space_counter
elif key == Key.esc:
# Exit
save(space_counter, bot_number, bot_price, click_multi_number, click_multi_price)
display.terminate()
t.join()
sys.exit("Exit")
try:
if key.char == 'b':
# Buy a bot
if space_counter >= bot_price:
bot_number += 1
space_counter -= bot_price
else:
print("Not enough points")
if key.char == 'n':
# Buy a click multiplier
if space_counter >= click_multi_price:
click_multi_number += 1
space_counter -= click_multi_price
else:
print("Not enough points")
except AttributeError:
pass
# Create a terminatable thread that shows the space_counter variable
class display_class:
def __init__(self):
self._running = True
def terminate(self):
self._running = False
def display(self):
while self._running:
global space_counter, bot_number, bot_price, click_multi_number, click_multi_price
os.system(clear)
# Add points based on the number of bots
space_counter = round(space_counter + int(bot_number)/10, 1)
str_space_counter = str(int(round(space_counter)))
if space_counter >= 1000:
str_space_counter = str(round(space_counter/1000, 1)) + "K"
print(str_space_counter)
print('\nBuy a bot with "b"')
print("Bot price: " + str(int(bot_price)))
print("Bot clicks at the rate : 1 per sec")
print(f"All bots together click at the rate: {bot_number} per sec")
print('\nBuy a click multiplier with "n"')
print("Click multiplier price: " + str(int(click_multi_price)))
click = click_multi_number*5
if click == 0:
click = 1
print(f"1 click = {str(click)} points")
time.sleep(0.1)
display = display_class()
t = Thread(target = display.display)
t.start()
# Listen for key presses
with Listener(on_release=release) as listener:
listener.join()
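# Illustrative note: special keys (Key.space, Key.esc) carry no .char attribute,
# which is why release() above wraps key.char access in try/except AttributeError.
# Minimal standalone version of that pattern:
#
#     from pynput.keyboard import Key, Listener
#
#     def on_release(key):
#         if key == Key.esc:
#             return False  # returning False stops the listener
#         try:
#             print("character key:", key.char)
#         except AttributeError:
#             print("special key:", key)
#
#     with Listener(on_release=on_release) as listener:
#         listener.join()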
|
UDPNode.py
|
#!/usr/bin/env python3
import sys
import struct
import socket
import queue
import threading
import utility
import time
# Message types
PKT_TYPE_UPDATE = 1
PKT_TYPE_KEEP_ALIVE = 2
PKT_TYPE_ACK_KEEP_ALIVE = 3
PKT_TYPE_FLOOD = 4
PKT_TYPE_DATA_MSG = 5
PKT_TYPE_COST_CHANGE = 6
PKT_TYPE_DEAD = 7
# Hops executed during a flood
HOP_NUMBER = 10
SKIPPED_UPDATES_AFTER_FLOOD = 3
# Data size definitions in bytes
TUPLE_COUNT_SIZE = 2
TUPLE_SIZE = 10
PKT_TYPE_SIZE = 1
BUFFER_SIZE = 2048 # Will be used when reading from a socket
# Time intervals in seconds
SEND_NODE_AWAKEN_INTERVAL = 0.5
SEND_TABLE_UPDATE_INTERVAL = 30
SEND_KEEP_ALIVE_INTERVAL = SEND_TABLE_UPDATE_INTERVAL * 2
IGNORE_AFTER_FLOOD_INTERVAL = SEND_TABLE_UPDATE_INTERVAL * 3
# Various timeouts in seconds
SOCKET_TIMEOUT = 1
KEEP_ALIVE_TIMEOUT = 200
KEEP_ALIVE_RETRIES = 100
class UDPNode:
def __init__(self, ip, mask, port, neighbors):
# Simple data
self.port = port
self.ip = ip
self.mask = mask
self.sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
self.sock.bind((self.ip, self.port))
self.sock.setblocking(True)
self.sock.settimeout(SOCKET_TIMEOUT)
# Turns off many frequent prints; it does not suppress logging
self.print_updates = True
# Structures
# Reachability table: ip, port : mask, (ip, port), cost
self.reachability_table = {}
# Neighbors: ip, port : mask, cost, current_retries (0 if node is dead), Timer obj
self.neighbors = {}
for (n_ip, n_mask, n_port), n_cost in neighbors.items():
self.neighbors[(n_ip, n_port)] = (n_mask, n_cost, 0, None)
# Queue to hold incoming messages
# Will be flushed when encountering a flood
self.message_queue = queue.Queue()
# Used when waking nodes
self.unawakened_neighbors = list(self.neighbors.keys())
# Locks
self.reachability_table_lock = threading.Lock()
self.neighbors_lock = threading.Lock()
self.message_queue_lock = threading.Lock()
self.unawakened_neighbors_lock = threading.Lock()
# Events
self.stopper = threading.Event()
self.ignore_updates = threading.Event()
self.continue_keep_alives = threading.Event()
# Threads
self.connection_handler_thread = threading.Thread(target=self.handle_incoming_connections_loop)
self.message_reader_thread = threading.Thread(target=self.read_messages_loop)
self.keep_alive_handler_thread = threading.Thread(target=self.send_keep_alive_loop)
self.update_handler_thread = threading.Thread(target=self.send_updates_loop)
self.command_handler_thread = threading.Thread(target=self.handle_console_commands)
# Prints identifying the node
utility.log_message(f"Welcome to node {ip}:{port}/{mask}!", self)
utility.log_message(f"\nThis node's neighbors:", self)
self.print_neighbors_table()
utility.log_message("\nAvailable commands are:", self)
utility.log_message(" sendMessage <ip> <port> <message>", self)
utility.log_message(" exit", self)
utility.log_message(" change cost <neighbor ip> <neighbor port> <new cost>", self)
utility.log_message(" printOwn", self)
utility.log_message(" printTable", self)
utility.log_message(" printNeighbors", self)
utility.log_message(" prints <on|off>\n", self)
def start_node(self):
# Start the thread that handles incoming messages
self.connection_handler_thread.start()
# Start the thread that reads messages and puts them in a queue
self.message_reader_thread.start()
# Check for live neighbors
self.find_awake_neighbors()
# Start the thread that will listen and respond to console commands
self.command_handler_thread.start()
# Start the thread that loops to manage subsequent keep alives
self.keep_alive_handler_thread.start()
# Start the thread that periodically sends updates
self.update_handler_thread.start()
def read_messages_loop(self):
while not self.stopper.is_set():
try:
message, address = self.sock.recvfrom(BUFFER_SIZE)
except socket.timeout:
continue
except ConnectionResetError:
continue
if self.ignore_updates.is_set():
# Continue without putting the message in the queue if a flood occurred recently
continue
message_type = int.from_bytes(message[0:PKT_TYPE_SIZE], byteorder='big', signed=False)
if message_type == PKT_TYPE_FLOOD or message_type == PKT_TYPE_DEAD:
# Flood messages have higher priority and the queue will need to be flushed no matter what, so delete it
# and put the flood message first
with self.message_queue_lock:
self.message_queue = queue.Queue()
self.message_queue.put((message, address))
else:
# All other messages have the same priority
with self.message_queue_lock:
self.message_queue.put((message, address))
utility.log_message("Finished the read messages loop!", self)
def find_awake_neighbors(self):
# Halt infinite keep alives
self.continue_keep_alives.clear()
# Assume all neighbors are dead
with self.unawakened_neighbors_lock:
self.unawakened_neighbors = list(self.neighbors.keys())
# Try to find neighbors until they are all alive or the maximum amount of retries is met
current_tries = 0
while current_tries < KEEP_ALIVE_RETRIES and self.unawakened_neighbors:
current_tries += 1
with self.unawakened_neighbors_lock:
for (ip, port) in self.unawakened_neighbors:
utility.log_message(f"Waking {ip}:{port}", self)
self.send_keep_alive(ip, port)
# Sleep for the timeout duration before trying again
time.sleep(KEEP_ALIVE_TIMEOUT)
with self.unawakened_neighbors_lock:
if not self.unawakened_neighbors:
utility.log_message("All neighbors have awakened!", self)
else:
unawakened_str = "Unawakened neighbors: "
for (ip, port) in self.unawakened_neighbors:
# Set nodes as dead
with self.neighbors_lock:
neighbor = self.neighbors[ip, port]
self.neighbors[ip, port] = (neighbor[0], neighbor[1], 0, None)
# Add to string to inform user
unawakened_str += f"{ip}:{port} "
utility.log_message_force(unawakened_str, self)
# Continue infinite keep alives
self.continue_keep_alives.set()
def send_updates_loop(self):
self.send_update()
while not self.stopper.wait(SEND_TABLE_UPDATE_INTERVAL):
self.send_update()
utility.log_message("Finished the update sending loop!", self)
def send_update(self):
for (ip, port) in self.neighbors:
self.send_reachability_table(ip, port)
def send_keep_alive_loop(self):
while not self.stopper.wait(SEND_KEEP_ALIVE_INTERVAL):
self.continue_keep_alives.wait()
with self.neighbors_lock:
for (ip, port), (mask, cost, current_retries, _) in self.neighbors.items():
if current_retries > 0:
utility.log_message(f"Sending keep alive to {ip}:{port}...", self)
# Create a timer to implement the timeout, will execute code to handle the timeout after it
# triggers
# If an ack is received this timer will be cancelled
timeout_timer = threading.Timer(KEEP_ALIVE_TIMEOUT,
self.handle_keep_alive_timeout, [], {"ip": ip, "port": port})
timeout_timer.start()
# Save the timer in that neighbor's tuple so it can be retrieved and cancelled if/when necessary
self.neighbors[(ip, port)] = (mask, cost, current_retries, timeout_timer)
self.send_keep_alive(ip, port)
utility.log_message("Finished the send keep alive loop!", self)
def handle_keep_alive_timeout(self, **kwargs):
# Get the parameters from the kwargs dictionary
ip = kwargs["ip"]
port = kwargs["port"]
with self.neighbors_lock:
# Check the neighbor's retry status
neighbor = self.neighbors[ip, port]
if neighbor[2] == 1:
# If decreasing the remaining retries would set it to 0, remove the entry and start a flood
utility.log_message(f"Keep alive message to {ip}:{port} timed out! No more retries remaining, deleting "
f"entry and starting flood...", self)
self.neighbors[ip, port] = (neighbor[0], neighbor[1], neighbor[2] - 1, None)
self.remove_reachability_table_entry(ip, port)
self.send_flood_message(HOP_NUMBER)
elif neighbor[2] > 0:
# If the neighbor is not already at 0 retries, decrease the remaining retries
self.neighbors[ip, port] = (neighbor[0], neighbor[1], neighbor[2]-1, None)
utility.log_message(f"Keep alive message to {ip}:{port} timed out! {neighbor[2]} retries remaining...",
self)
def handle_incoming_connections_loop(self):
while not self.stopper.is_set():
self.receive_message()
utility.log_message("Finished the handle incoming connections loop!", self)
def receive_message(self):
# Pull the next message from the queue; a standard packet does not exceed 1500 bytes
try:
message, address = self.message_queue.get(block=True, timeout=SOCKET_TIMEOUT)
except queue.Empty:
return
message_type = int.from_bytes(message[0:PKT_TYPE_SIZE], byteorder='big', signed=False)
if message_type == PKT_TYPE_UPDATE:
tuple_count = struct.unpack('!H', message[PKT_TYPE_SIZE:PKT_TYPE_SIZE + 2])[0]
utility.log_message(f"Received a table update from {address[0]}:{address[1]} of size "
f"{len(message)} with {tuple_count} tuples.", self)
# Decode the received tuples and update the reachability table if necessary
self.decode_tuples(message[PKT_TYPE_SIZE + TUPLE_COUNT_SIZE:], address)
elif message_type == PKT_TYPE_KEEP_ALIVE:
utility.log_message(f"Received a keep alive from {address[0]}:{address[1]}.", self)
self.send_ack_keep_alive(address[0], address[1])
elif message_type == PKT_TYPE_ACK_KEEP_ALIVE:
utility.log_message(f"Received a keep alive ack from {address[0]}:{address[1]}.", self)
# Check if this is the first time the node has replied
if address in self.unawakened_neighbors:
with self.unawakened_neighbors_lock:
self.unawakened_neighbors.remove(address)
with self.neighbors_lock:
# Cancel the timer
neighbor = self.neighbors[address]
try:
neighbor[3].cancel()
except AttributeError:
pass
# If the node was thought dead re-add it to the reachability table
with self.reachability_table_lock:
self.reachability_table[address] = (neighbor[0], address, neighbor[1])
# Reset the retry number
self.neighbors[address] = (neighbor[0], neighbor[1], KEEP_ALIVE_RETRIES, None)
elif message_type == PKT_TYPE_FLOOD:
hops = struct.unpack("!B", message[1:2])[0]
utility.log_message_force(f"Received a FLOOD with {hops} hops remaining from {address[0]}:{address[1]}."
f"\nFlushing reachability table..."
f"\nWill ignore updates for {IGNORE_AFTER_FLOOD_INTERVAL} seconds.", self)
# Continue the flood with one less hop
if hops < 0:
utility.log_message_force(f"Received a flood with too few hops from {address}!")
elif hops > 255:
utility.log_message_force(f"Received a flood with too many hops from {address}!")
elif hops != 0:
self.send_flood_message(hops - 1)
elif message_type == PKT_TYPE_DATA_MSG:
ip_bytes = message[1:5]
ip = f"{ip_bytes[0]}.{ip_bytes[1]}.{ip_bytes[2]}.{ip_bytes[3]}"
port = int.from_bytes(message[5:7], byteorder='big', signed=False)
size = int.from_bytes(message[7:8], byteorder='big', signed=False)
str_message = message[8:].decode()
if ip == self.ip and port == self.port:
utility.log_message_force(f"Received the data message {str_message} from {address[0]}:{address[1]}!", self)
else:
utility.log_message_force(f"Received the message {str_message} headed for {ip}:{port} from "
f"{address[0]}:{address[1]}! Rerouting...", self)
self.send_data_message(ip, port, str_message)
elif message_type == PKT_TYPE_DEAD:
utility.log_message(f"Neighbor {address[0]}:{address[1]} will DIE!"
f"\nFlushing reachability table and starting flood..."
f"\nWill ignore updates for {IGNORE_AFTER_FLOOD_INTERVAL} seconds.", self)
# Start a flood with neighbors
self.send_flood_message(HOP_NUMBER)
elif message_type == PKT_TYPE_COST_CHANGE:
new_cost = int.from_bytes(message[1:4], byteorder='big', signed=False)
# Change the cost
neighbor = self.neighbors[address]
self.neighbors[address] = (neighbor[0], new_cost, neighbor[2], neighbor[3])
# Check if the cost went up or down:
with self.neighbors_lock:
if neighbor[1] > new_cost:
# Cost went down, no problem
utility.log_message(f"Cost of neighbor {address[0]}:{address[1]} went down to {new_cost}!", self)
# Change the cost of the neighbor in the reachability table, as this will not propagate
# automatically, since nodes do not receive their own entry in an update
with self.reachability_table_lock:
table_entry = self.reachability_table[address]
self.reachability_table[address] = (table_entry[0], table_entry[1], new_cost)
else:
# Cost went up, catastrophe
utility.log_message(f"Cost of neighbor {address[0]}:{address[1]} went up to {new_cost}!"
f"\nFlushing reachability table and starting flood...", self)
self.send_flood_message(HOP_NUMBER)
def reset_ignore_updates(self):
utility.log_message("Resuming message listening...", self)
# Continue reading messages
self.ignore_updates.clear()
# Awaken neighbors again
self.find_awake_neighbors()
def send_message(self, ip, port, message):
self.sock.sendto(message, (ip, port))
def send_reachability_table(self, ip, port):
self.reachability_table_lock.acquire()
table_size = len(self.reachability_table)
# Should not send an entry with the receiver's own address
if (ip, port) in self.reachability_table:
table_size -= 1
if table_size <= 0:
self.reachability_table_lock.release()
return
encoded_message = bytearray(PKT_TYPE_SIZE + TUPLE_COUNT_SIZE + TUPLE_SIZE * table_size)
# Message type
struct.pack_into("!B", encoded_message, 0, PKT_TYPE_UPDATE)
# 2 bytes for the amount of tuples
struct.pack_into("!H", encoded_message, PKT_TYPE_SIZE, table_size)
# Iterate the reachability table, writing each tuple to the encoded_message buffer
offset = PKT_TYPE_SIZE + TUPLE_COUNT_SIZE # points to the next empty space in the buffer
for (r_ip, r_port), (r_mask, _, r_cost) in self.reachability_table.items():
# Add entry to message only if it does not refer to the receiving node
if r_ip == ip and r_port == port:
continue
ip_tuple = tuple([int(tok) for tok in r_ip.split('.')])
encoded_message[offset:offset + TUPLE_SIZE] = utility.encode_tuple(ip_tuple, r_port, r_mask, r_cost)
offset += TUPLE_SIZE
self.reachability_table_lock.release()
utility.log_message(f"Sending reachability table of {len(encoded_message)} bytes to {ip}:{port}", self)
self.send_message(ip, port, encoded_message)
def handle_console_commands(self):
while not self.stopper.is_set():
try:
command = input("Enter your command...\n> ")
except EOFError:
utility.log_message(f"EOFile while expecting user input...", self)
continue
command = command.strip().split(" ")
if len(command) == 0:
utility.log_message_force("Please enter a valid command.", self)
continue
elif command[0] == "sendMessage":
if len(command) != 4:
utility.log_message_force("Please enter a valid command.", self)
continue
else:
self.send_data_message(command[1], int(command[2]), command[3])
elif command[0] == "exit" or command[0] == "deleteNode":
self.stop_node()
elif command[0] == "printTable":
self.print_reachability_table()
elif command[0] == "printOwn":
utility.log_message_force(f"This node's information: {self.ip}:{self.port}/{self.mask}", self)
elif command[0] == "printNeighbors":
self.print_neighbors_table()
elif command[0] == "changeCost":
if len(command) != 4:
utility.log_message_force("Please enter a valid command.", self)
else:
ip = command[1]
port = int(command[2])
new_cost = int(command[3])
with self.neighbors_lock:
if (ip, port) not in self.neighbors:
utility.log_message_force(f"The node {ip}:{port} is not a neighbor, try again.", self)
else:
# Change the cost
neighbor = self.neighbors[(ip, port)]
self.neighbors[(ip, port)] = (neighbor[0], new_cost, neighbor[2], neighbor[3])
# Notify the node
self.send_cost_change(ip, port, new_cost)
elif command[0] == "prints":
if len(command) != 2:
utility.log_message_force("Please enter a valid command.", self)
elif command[1] == "on":
self.print_updates = True
elif command[1] == "off":
self.print_updates = False
else:
utility.log_message_force("Please enter a valid command.", self)
else:
utility.log_message_force("Unrecognized command, try again.", self)
def decode_tuples(self, message, origin_node):
# Ignore updates that do not originate from a neighbor
if origin_node not in self.neighbors:
utility.log_message(f"Discarding update from {origin_node[0]}:{origin_node[1]} as it is not a neighbor.",
self)
return
offset = 0
while offset < len(message):
# Unpack the binary
tuple_bytes = struct.unpack('!BBBBBBBBBB', message[offset:offset + TUPLE_SIZE])
# Get each of the tuple's values
ip_bytes = tuple_bytes[:4]
ip = f"{ip_bytes[0]}.{ip_bytes[1]}.{ip_bytes[2]}.{ip_bytes[3]}"
mask = tuple_bytes[4]
port = int.from_bytes(tuple_bytes[5:7], byteorder='big', signed=False)
cost = int.from_bytes(tuple_bytes[7:], byteorder='big', signed=False)
offset += TUPLE_SIZE
# utility.log_message(f"ADDRESS: {ip}, SUBNET MASK: {mask}, COST: {cost}", self)
self.update_reachability_table(ip, port, mask, cost, origin_node)
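# Illustrative sketch (utility.encode_tuple is not shown in this file, so this
# is an assumed counterpart to decode_tuples above): one TUPLE_SIZE (10) byte
# entry packs 4 ip bytes + 1 mask byte + 2 port bytes + 3 cost bytes, big-endian.
#
#     import struct
#
#     def encode_tuple(ip_tuple, port, mask, cost):
#         packed = struct.pack("!BBBBB", *ip_tuple, mask)  # ip octets + mask
#         packed += port.to_bytes(2, byteorder="big")      # port
#         packed += cost.to_bytes(3, byteorder="big")      # cost
#         return packed
#
#     assert len(encode_tuple((10, 0, 0, 1), 5000, 24, 7)) == 10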
def update_reachability_table(self, ip, port, mask, cost, through_node):
with self.neighbors_lock:
total_cost = cost + self.neighbors[through_node][1]
# Write to the reachability table,
# as many threads may perform read/write we need to lock it
with self.reachability_table_lock:
if (ip, port) not in self.reachability_table or self.reachability_table[(ip, port)][2] > total_cost:
utility.log_message(f"Changing cost of {ip}:{port} passing through {through_node}.", self)
self.reachability_table[(ip, port)] = (mask, through_node, total_cost)
def remove_reachability_table_entry(self, ip, port):
with self.reachability_table_lock:
if (ip, port) in self.reachability_table:
del self.reachability_table[(ip, port)]
utility.log_message(f"DISCONNECT: Deleted {ip}:{port} from the reachability table.", self)
def send_flood_message(self, hops):
message = bytearray(2)
struct.pack_into("!B", message, 0, PKT_TYPE_FLOOD)
struct.pack_into("!B", message, 1, hops)
for ip, port in self.neighbors:
utility.log_message(f"Sending flood message of {len(message)} bytes to {ip}:{port}", self)
self.send_message(ip, port, message)
# Set the event to indicate that updates should be ignored
self.ignore_updates.set()
# Halt keep alives
self.continue_keep_alives.clear()
# Clear the reachability table and message queue
with self.reachability_table_lock:
self.reachability_table.clear()
with self.message_queue_lock:
self.message_queue = queue.Queue()
# Start a timer to clear the previous event so updates can continue
continue_updates_timer = threading.Timer(IGNORE_AFTER_FLOOD_INTERVAL, self.reset_ignore_updates)
continue_updates_timer.start()
def send_ack_keep_alive(self, ip, port):
message = bytearray(1)
struct.pack_into("!B", message, 0, PKT_TYPE_ACK_KEEP_ALIVE)
utility.log_message(f"Sending keep alive ACK message to {len(message)} bytes to {ip}:{port}", self)
self.send_message(ip, port, message)
def send_cost_change(self, ip, port, new_cost):
message = bytearray(1)
struct.pack_into("!B", message, 0, PKT_TYPE_COST_CHANGE)
new_cost_bytes = bytearray(4)
struct.pack_into("!I", new_cost_bytes, 0, new_cost)
utility.log_message(f"Sending cost change message of {len(message)} bytes to {ip}:{port}", self)
self.send_message(ip, port, message+new_cost_bytes[1:])
def send_keep_alive(self, ip, port):
message = bytearray(1)
struct.pack_into("!B", message, 0, PKT_TYPE_KEEP_ALIVE)
utility.log_message(f"Sending keep alive message of {len(message)} bytes to {ip}:{port}", self)
self.send_message(ip, port, message)
def send_data_message(self, ip, port, str_message):
bytes_message = str_message.encode()
ip_tuple = tuple([int(tok) for tok in ip.split('.')])
header = bytearray(8)
struct.pack_into("!B", header, 0, PKT_TYPE_DATA_MSG)
struct.pack_into("!BBBB", header, 1, ip_tuple[0], ip_tuple[1], ip_tuple[2], ip_tuple[3])
struct.pack_into("!H", header, 5, port)
struct.pack_into("!B", header, 7, len(bytes_message))
if (ip, port) in self.reachability_table:
route_address = self.reachability_table[ip, port][1]
utility.log_message_force(f"Routing the message {str_message} through node {route_address[0]}:"
f"{route_address[1]}", self)
self.send_message(route_address[0], route_address[1], header+bytes_message)
else:
utility.log_message_force(f"Received a message headed for {ip}:{port} but this node cannot reach it!", self)
def send_node_death_message(self, ip, port):
message = bytearray(1)
struct.pack_into("!B", message, 0, PKT_TYPE_DEAD)
utility.log_message(f"Sending node death message of {len(message)} bytes to {ip}:{port}", self)
self.send_message(ip, port, message)
def stop_node(self):
utility.log_message_force("Killing node, waiting for threads to finish...", self)
# Set this flag to false, stopping all loops
self.stopper.set()
# Join all threads except command console handler, as this is that thread
self.connection_handler_thread.join()
self.message_reader_thread.join()
# Clear event that halts keep alives so that thread can join
self.continue_keep_alives.set()
try:
self.keep_alive_handler_thread.join()
except RuntimeError:
utility.log_message_force("Keep alive thread had not been started, no join needed.", self)
try:
self.update_handler_thread.join()
except RuntimeError:
utility.log_message_force("Update handler thread had not been started, no join needed.", self)
# Send a message to all neighbors indicating that this node will die
self.reachability_table_lock.acquire()
for ip, port in self.neighbors:
self.send_node_death_message(ip, port)
self.reachability_table_lock.release()
def print_reachability_table(self):
utility.log_message_force("Current reachability table:", self)
self.reachability_table_lock.acquire()
if not self.reachability_table:
utility.log_message_force("The reachability table is empty.", self)
else:
i = 1
for (ip, port), (mask, (ip2, port2), cost) in self.reachability_table.items():
utility.log_message_force(f"{i} - Destiny: {ip}:{port}/{mask}, through: {ip2}:{port2}, cost: {cost}.", self)
i += 1
self.reachability_table_lock.release()
def print_neighbors_table(self):
utility.log_message_force("Neighbors:", self)
self.neighbors_lock.acquire()  # guard the neighbors table, not the reachability table
if not self.neighbors:
utility.log_message_force("The neighbors table is empty.", self)
else:
for (ip, port), (mask, cost, current_retries, _) in self.neighbors.items():
utility.log_message_force(f"Address: {ip}:{port}, mask: {mask}, cost: {cost}, current keep alive "
f"retries: {current_retries}/{KEEP_ALIVE_RETRIES}", self)
self.neighbors_lock.release()
if __name__ == "__main__":
if len(sys.argv) < 4:
print("Incorrect argument number, exiting...")
sys.exit(1)
# Parse neighbors
neighbors_string = {}
for i in range(1, (len(sys.argv) - 4) // 4 + 1):
index = i*4
neighbors_string[sys.argv[index], int(sys.argv[index + 1]), int(sys.argv[index + 2])] = int(sys.argv[index + 3])
node = UDPNode(sys.argv[1], int(sys.argv[2]), int(sys.argv[3]), neighbors_string)
node.start_node()
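# Illustrative usage (derived from the argv parsing above): positional arguments
# are <ip> <mask> <port>, followed by zero or more neighbor quadruples
# <n_ip> <n_mask> <n_port> <n_cost>:
#
#     python3 UDPNode.py 127.0.0.1 24 5000 127.0.0.1 24 5001 10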
|
app.py
|
import socket
import subprocess
import os
import pwd
import pdb
import sys
import traceback
from threading import Thread
from library import get_username
from client_thread import client_threading
sockfd=socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sockfd.bind(('',12348))
sockfd.listen(4)
print("My echo server is listening on port:12348")
while True:
conn, client_addr = sockfd.accept()
print("Conencted with"+client_addr[0])
try:
Thread(target=client_threading,args=(conn,client_addr)).start()
except Exception:
print("Thread did not start.")
traceback.print_exc()
sockfd.close()
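# Illustrative client sketch (client_threading is defined in client_thread.py,
# not shown here; this assumes it echoes received data back):
#
#     import socket
#
#     with socket.create_connection(("127.0.0.1", 12348)) as s:
#         s.sendall(b"hello\n")
#         print(s.recv(1024))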
|
test_qt_notifications.py
|
import threading
import warnings
from concurrent.futures import Future
from unittest.mock import patch
import dask.array as da
import pytest
from qtpy.QtCore import Qt, QThread
from qtpy.QtWidgets import QPushButton
from napari._qt.dialogs.qt_notification import NapariQtNotification
from napari._tests.utils import DEFAULT_TIMEOUT_SECS
from napari.utils.notifications import (
ErrorNotification,
Notification,
NotificationSeverity,
notification_manager,
)
def _threading_warn():
thr = threading.Thread(target=_warn)
thr.start()
thr.join(timeout=DEFAULT_TIMEOUT_SECS)
def _warn():
warnings.warn('warning!')
def _threading_raise():
thr = threading.Thread(target=_raise)
thr.start()
thr.join(timeout=DEFAULT_TIMEOUT_SECS)
def _raise():
raise ValueError("error!")
@pytest.fixture
def clean_current(monkeypatch, qtbot):
from napari._qt.qt_main_window import _QtMainWindow
def none_return(*_, **__):
return None
base_show = NapariQtNotification.show
def store_widget(self, *args, **kwargs):
qtbot.addWidget(self)
base_show(self, *args, **kwargs)
# monkeypatch.setattr(qt_notification.QPropertyAnimation, "start", none_return)
monkeypatch.setattr(_QtMainWindow, "current", none_return)
monkeypatch.setattr(NapariQtNotification, "show", store_widget)
@pytest.mark.parametrize(
"raise_func,warn_func",
[(_raise, _warn), (_threading_raise, _threading_warn)],
)
def test_notification_manager_via_gui(
qtbot, raise_func, warn_func, clean_current
):
"""
Test that the notification_manager intercepts `sys.excepthook` and
`threading.excepthook`.
"""
errButton = QPushButton()
warnButton = QPushButton()
errButton.clicked.connect(raise_func)
warnButton.clicked.connect(warn_func)
with notification_manager:
for btt, expected_message in [
(errButton, 'error!'),
(warnButton, 'warning!'),
]:
notification_manager.records = []
qtbot.mouseClick(btt, Qt.LeftButton)
assert len(notification_manager.records) == 1
assert notification_manager.records[0].message == expected_message
notification_manager.records = []
@patch('napari._qt.dialogs.qt_notification.QDialog.show')
def test_show_notification_from_thread(mock_show, monkeypatch, qtbot):
from napari.settings import get_settings
settings = get_settings()
monkeypatch.setattr(
settings.application,
'gui_notification_level',
NotificationSeverity.INFO,
)
class CustomThread(QThread):
def run(self):
notif = Notification(
'hi',
NotificationSeverity.INFO,
actions=[('click', lambda x: None)],
)
res = NapariQtNotification.show_notification(notif)
assert isinstance(res, Future)
assert res.result(timeout=DEFAULT_TIMEOUT_SECS) is None
mock_show.assert_called_once()
thread = CustomThread()
with qtbot.waitSignal(thread.finished):
thread.start()
@pytest.mark.parametrize('severity', NotificationSeverity.__members__)
@patch('napari._qt.dialogs.qt_notification.QDialog.show')
def test_notification_display(mock_show, severity, monkeypatch, qtbot):
"""Test that NapariQtNotification can present a Notification event.
NOTE: in napari.utils._tests.test_notification_manager, we already test
that the notification manager successfully overrides sys.excepthook,
and warnings.showwarning... and that it emits an event which is an instance
of napari.utils.notifications.Notification.
in `get_app()`, we connect `notification_manager.notification_ready` to
`NapariQtNotification.show_notification`, so all we have to test here is
that show_notification is capable of receiving various event types.
(we don't need to test that )
"""
from napari.settings import get_settings
settings = get_settings()
monkeypatch.delenv('NAPARI_CATCH_ERRORS', raising=False)
monkeypatch.setattr(
settings.application,
'gui_notification_level',
NotificationSeverity.INFO,
)
notif = Notification('hi', severity, actions=[('click', lambda x: None)])
NapariQtNotification.show_notification(notif)
if NotificationSeverity(severity) >= NotificationSeverity.INFO:
mock_show.assert_called_once()
else:
mock_show.assert_not_called()
dialog = NapariQtNotification.from_notification(notif)
assert not dialog.property('expanded')
dialog.toggle_expansion()
assert dialog.property('expanded')
dialog.toggle_expansion()
assert not dialog.property('expanded')
dialog.close()
dialog.deleteLater()
@patch('napari._qt.dialogs.qt_notification.QDialog.show')
def test_notification_error(mock_show, monkeypatch, clean_current):
from napari.settings import get_settings
settings = get_settings()
monkeypatch.delenv('NAPARI_CATCH_ERRORS', raising=False)
monkeypatch.setattr(
settings.application,
'gui_notification_level',
NotificationSeverity.INFO,
)
try:
raise ValueError('error!')
except ValueError as e:
notif = ErrorNotification(e)
dialog = NapariQtNotification.from_notification(notif)
bttn = dialog.row2_widget.findChild(QPushButton)
assert bttn.text() == 'View Traceback'
mock_show.assert_not_called()
bttn.click()
mock_show.assert_called_once()
@pytest.mark.sync_only
def test_notifications_error_with_threading(make_napari_viewer):
"""Test notifications of `threading` threads, using a dask example."""
random_image = da.random.random((50, 50))
with notification_manager:
viewer = make_napari_viewer()
viewer.add_image(random_image)
result = da.divide(random_image, da.zeros((50, 50)))
viewer.add_image(result)
assert len(notification_manager.records) >= 1
notification_manager.records = []
|
scan.py
|
#!/usr/bin/env python
# encoding=utf-8
#codeby 道长且阻
#email ydhcui@suliu.net/QQ664284092
import socket
import re
import os
import time
import json
import urllib
import threading
import datetime
import copy
import queue
from lib import requests
from lib.dns.resolver import Resolver
from core.websearch import BaiduEngine,SogouEngine,SoEngine,BingEngine
from core.plugin import PluginsManage,BaseHttpPlugin,BaseWebPlugin,BaseHostPlugin
from core.crawler import Crawler
#from core.nmapscan import PortScan
from core.portscan import PortScan
from core.util import gethosts,getfiles,getports,getdomain,CoroutinePool
from core.base import BaseHost,BaseWebSite
from core.log import logging
from service import app
import models
import settings
requests.packages.urllib3.disable_warnings()
class BaseScan(object):
def __init__(self,taskid):
M = models.ScanTask
self.Q = M.get(M.task_id==taskid)
self.T = app.AsyncResult(taskid)
self.settings = {}
self.target = None
self.args = {}
av = {}
for d in str(self.Q.task_args).strip().split():
if d is None or d == 'None':
continue
d = d[1:].split('=')
av[d[0]] = d[1]
self.args = av
self.target = str(self.Q.task_host).strip()
'''
int(self.args.get('filter',1)) # whether to apply filtering
int(self.args.get('write',1)) # whether to overwrite previous scan results
int(self.args.get('ping',0)) # whether to ping-filter live hosts
int(self.args.get('threads',100)) # number of scan threads
int(self.args.get('timeout',10)) # timeout in seconds
str(self.args.get('port','')) # ports to scan
int(self.args.get('level',1)) # whether to scan POST requests
str(self.args.get('plug','')) # plugins to run
dict(self.args.get('headers',"{}")) # custom request headers
'''
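# Example (illustrative): a task_args string like "-threads=50 -timeout=10" is
# split on whitespace, the leading '-' is stripped, and each token is split on
# '=', yielding self.args == {'threads': '50', 'timeout': '10'}.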
def scan(self):
pass
def set_settings(self,*args,**kwargs):
self.settings.update(kwargs)
def start(self):
self.Q.task_code = 'working'
self.Q.task_pid = str(os.getpid())
self.Q.save()
print(self.Q.task_pid)
try:
self.auths = self.get_auth()
self.scan()
except Exception as e:
print(e)
self.Q.task_code = str(e)
finally:
self.Q.finishdate = datetime.datetime.now()
self.Q.task_pid = '0'
self.Q.task_code = 'finish'
self.Q.save()
def get_auth(self,pwds=None):
'''Fetch the project's usernames and passwords'''
pwds = getfiles(settings.DATAPATH + '/pass.txt')
MD = models.DictResult
auths = set()
# Read this project's usernames and passwords from the database
userquery = MD.select().where((MD.projectid == self.Q.projectid)&(MD.dict_key == 'user'))
pwdquery = MD.select().where((MD.projectid == self.Q.projectid)&(MD.dict_key == 'pwd'))
for u in userquery:
for p in pwdquery:
auths.add((str(u.dict_value),str(p.dict_value)))
#for u in userquery:
# auths.add((str(u.dict_value),None))
for p in pwdquery:
auths.add((None,str(p.dict_value)))
if pwds: # local password list
for pwd in pwds:
auths.add((None,pwd))
return auths
def payloadverify(self,plug,host):
'''Plugin verification'''
logging.info('check %s-%s-%s'%(plug.__class__,host.host,host.port))
filter = int(self.args.get('filter',1)) # whether filtering is required
try:
socket.setdefaulttimeout(360)
if not filter or plug.filter(host):
logging.info('filter %s-%s-%s'%(plug.__class__,host.host,host.port))
for user,pwd in self.auths if plug.BRUTE else [('','123456')]:
if user:
verify = plug.verify(host,user=user,pwd=pwd)
else:
verify = plug.verify(host,pwd=pwd)
if verify:
logging.warn('verify %s-%s-%s-%s-%s'%(plug.__class__,host.host,host.port,user,pwd))
return self.callback_bug(plug)
except Exception as e:
logging.error(str(e))
def selecthttp(self,q):
'''Fetch header info from HTTP services'''
h = str(q.host)
p = str(q.port)
pto = 'https' if '443' in p else 'http'
url = '%s://%s:%s/'%(pto, h, p)
self.writewebsite(BaseWebSite(url,load=False))
q.port_type = 'tcp/http'
q.save()
def writewebsite(self,w):
logging.info("Writewebsite %s %s %s %s "%(w.status_code,w.host,w.port,w.domain))
if w.status_code != 0:
r,cd = models.HttpResult.get_or_create(host_ip=w.host,port=w.port)
r.state = w.status_code
r.banner = w.server
r.domain = w.domain
r.xpoweredby= w.xpoweredby
r.title = w.title
r.headers = w.headers
r.content = w.content
r.updatedate= datetime.datetime.now()
r.save()
return w.status_code
def callback_bug(self,payload):
'''Callback that records vulnerability info'''
RV,cd = models.Vulnerable.get_or_create(vul_name = payload.bugname)
if cd:
RV.vul_desc = payload.bugdesc
RV.vul_plan = payload.bugplan
RV.vul_rank = payload.bugrank
RV.vul_owasp = payload.bugowasp
RV.vul_number = payload.bugnumber
RV.save()
addr = payload.bugaddr
RB,cd = models.BugResult.get_or_create(
taskid = self.Q,
projectid = self.Q.projectid,
userid = self.Q.projectid.project_user,
vulid = RV,
bug_addr = addr)
RB.bug_tag = payload.bugtag
RB.bug_note = payload.bugnote
RB.request = payload.bugreq
RB.response = payload.bugres
RB.updatedate = datetime.datetime.now()
RB.save()
def writehost(self,ret):
'''Write port scan results'''
for host,value in ret.items():
RH,created = models.HostResult.get_or_create(projectid = self.Q.projectid, host_ip = host)
RH.userid = self.Q.projectid.project_user
RH.host_name = value['hostname']
#RH.os_version = value['status']
RH.mac_addr = value['mac']
RH.updatedate = datetime.datetime.now()
RH.note = value['status']
RH.os_type = value['ostype']
RH.save()
for host,port,protocol,state,service,product,extrainfo,version,data in value['ports']:
RP,created = models.PortResult.get_or_create(hostid=RH,port=port)
RP.host = RH.host_ip
RP.port_type = protocol
RP.port_state = state
RP.service_name = service
RP.soft_name = product
RP.soft_type = extrainfo
RP.soft_ver = version
RP.response = str(data)
RP.updatedate = datetime.datetime.now()
RP.save()
def portscan(self,target):
'''Port scan'''
write = int(self.args.get('write',1))
ping = int(self.args.get('ping',0))
threads = int(self.args.get('threads',100))
timeout = int(self.args.get('timeout',5))
ports = self.args.get('port',None)
logging.info('[portscan][host:%s][port:%s][write:%s][ping:%s][threads:%s][timeout:%s]'%(target,ports,write,ping,threads,timeout))
ps = PortScan(
target,
ports = ports,
neping = ping,
threads = threads,
timeout = timeout)
self.writehost(ps.scan())
class HttpScan(BaseScan):
def webscan(self):
for payload in BaseWebPlugin.payloads():
self.payloadverify(payload,self.crawle.website)
def httpscan(self):
while self.crawle.ISSTART or not self.crawle.ResQueue.empty():
try:
req,res = self.crawle.ResQueue.get(block=False)
req = copy.deepcopy(req)
res = copy.deepcopy(res)
for payload in BaseHttpPlugin.payloads():
payload.filter(self.crawle,req,res) \
and payload.verify(self.crawle,req,res) \
and self.callback_bug(payload)
except queue.Empty:
pass
except Exception as e:
logging.error(str(e))
def scan(self):
level = int(self.args.get('level',1)) # POST scanning
if not self.target.startswith(('http','HTTP')):
self.target = 'http://' + self.target
if not self.target.endswith('/'):
self.target += '/'
'''
for target in gethosts(self.target):
self.portscan(target)
pass
'''
headers = json.loads(self.args.get('headers',"{}"))
self.crawle = Crawler(self.target,headers=headers)
self.crawle.settings.update(level=level)
#self.crawle.settings.update(proxy={'http':'http://127.0.0.1:1111','https':'http://127.0.0.1:1111'})
self.crawle.settings.update(self.args)
th=[]
th.append(threading.Thread(target=self.crawle.run1))
th.append(threading.Thread(target=self.webscan))
th.append(threading.Thread(target=self.httpscan))
for t in th:
#t.daemon = True
t.start()
for t in th:
t.join()
# Write the http result once the scan has finished
self.writewebsite(self.crawle.website)
class ServiceScan(BaseScan):
def scan(self):
# When liveness scanning is disabled, split the IPs and scan them one by one so progress is fully saved
ping = int(self.args.get('ping',1))
for target in [self.target] if ping else gethosts(self.target):
self.portscan(target)
'''
MP = models.PortResult
sw = MP.port_type != 'tcp/http'
sw &= MP.service_name == 'http'
#pool = CoroutinePool(10)
for q in MP.select().where(sw):
#pool.spawn(self.selecthttp,q)
self.selecthttp(q)
#pool.join()
'''
class HostsScan(BaseScan):
def scan(self):
MP = models.Project
MH = models.HostResult
MR = models.PortResult
'''
ping = int(self.args.get('ping',0))
for target in [self.target] if ping else gethosts(self.target):
self.portscan(target)
'''
ret = []
payloads = BaseHostPlugin.payloads() + BaseWebPlugin.payloads()
for plug in payloads:
for H in gethosts(self.target):
for P in MR.select().join(MH).where((MH.host_ip == H)&(MH.projectid == self.Q.projectid)):
if isinstance(plug,BaseHostPlugin):
host = BaseHost(str(P.host),str(P.port),service=str(P.service_name))
ret.append((plug,host))
elif str(P.service_name) == 'http':
hp = 'https' if '443' in str(P.port) else 'http'
url = '%s://%s:%s/'%(hp,str(P.host),str(P.port))
host = BaseWebSite(url)
ret.append((plug,host))
pool = CoroutinePool(len(payloads))
for plug,host in ret:
pool.spawn(self.payloadverify,plug,host)
pool.join()
class PluginsScan(BaseScan):
def scan(self):
MP = models.Project
MH = models.HostResult
MR = models.PortResult
plug_names = self.args.get('plug','').split(',')
for plug_name in plug_names:
logging.info('Scan plug name: %s'%plug_name)
hosts = self.target
ret = []
try:
R = MP.get(MP.project_id == hosts)
for H in MH.select().where(MH.projectid == R):
ret.append(str(H.host_ip))
except MP.DoesNotExist:
for H in gethosts(self.target):
ret.append(H)
wret = []
hret = []
for H in ret:
for P in MR.select().join(MH).where((MH.host_ip == H)&(MH.projectid == self.Q.projectid)):
if str(P.service_name) == 'http':
hp = 'https' if '443' in str(P.port) else 'http'
url = '%s://%s:%s/'%(hp,str(P.host),str(P.port))
host = BaseWebSite(url)
wret.append(host)
else:
host = BaseHost(str(P.host),str(P.port),service=str(P.service_name))
hret.append(host)
ret = []
for plug in PluginsManage.get_plugins(plug_name):
if isinstance(plug,BaseHostPlugin):
for host in hret:
ret.append((plug,host))
elif isinstance(plug,BaseWebPlugin):
for host in wret:
ret.append((plug,host))
pool = CoroutinePool(10)
for plug,host in ret:
pool.spawn(self.payloadverify,plug,host)
pool.join()
class DomainScan(BaseScan):
namelist = getfiles(settings.DATAPATH + '/subdomain.txt')
def recv(self,domain):
try:
answers = self.resolvers.query(domain)
except:
answers = []
return answers
def baiduce(self,target):
try:
res = requests.get('http://ce.baidu.com/index/getRelatedSites?site_address=%s'%target)
res = json.loads(res.text)
for subdomain in [v.get('domain') for v in res.get('data',[])]:
for answer in self.recv(subdomain):
self.result.add((subdomain,answer.address))
except:pass
def brute(self,target):
target = target.strip()
for subdomain in self.namelist:
subdomain = subdomain.strip() + '.' + target
for answer in self.recv(subdomain):
self.result.add((subdomain,answer.address))
def scan(self):
h = self.target
h = h if 'http' in h else 'http://%s'%h
target = getdomain(h)
self.resolvers = Resolver()
self.answers = []
self.result = set()
self.baiduce(target)
self.brute(target)
self.writehost([(h,80,1,'http','',d) for d,h in self.result])
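# Illustrative sketch (standalone, using the dnspython package instead of the
# bundled lib.dns; the function name is hypothetical): the same brute-force idea
# as DomainScan.brute above, resolving wordlist candidates against a domain.
#
#     import dns.resolver
#
#     def brute_subdomains(domain, words):
#         resolver = dns.resolver.Resolver()
#         found = set()
#         for w in words:
#             name = "%s.%s" % (w.strip(), domain)
#             try:
#                 for answer in resolver.resolve(name, "A"):
#                     found.add((name, answer.address))
#             except Exception:
#                 pass
#         return found
#
#     print(brute_subdomains("example.com", ["www", "mail", "dev"]))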
|
store.py
|
import datetime
import json
import threading
import uuid
from collections import defaultdict
from copy import deepcopy
from dictdiffer import diff
from inspect import signature
from threading import Lock
from pathlib import Path
from tzlocal import get_localzone
from .logger import logger
from .settings import CACHE_DIR
from .utils import extract_id
class MissingClass(object):
def __bool__(self):
return False
Missing = MissingClass()
class Callback(object):
def __init__(
self, callback, record, callback_id=None, extra_kwargs={}, watch_children=True
):
self.callback = callback
self.record = record
self.callback_id = callback_id or str(uuid.uuid4())
self.extra_kwargs = extra_kwargs
def __call__(self, difference, old_val, new_val):
kwargs = {}
kwargs.update(self.extra_kwargs)
kwargs["record"] = self.record
kwargs["callback_id"] = self.callback_id
kwargs["difference"] = difference
kwargs["changes"] = self.record._convert_diff_to_changelist(
difference, old_val, new_val
)
logger.debug("Firing callback {} with kwargs: {}".format(self.callback, kwargs))
# trim down the parameters we'll be passing, to include only those the callback will accept
params = signature(self.callback).parameters
if not any(["**" in str(param) for param in params.values()]):
# there's no "**kwargs" in the callback signature, so remove any unaccepted params
for arg in list(kwargs.keys()):
if arg not in params:
del kwargs[arg]
# perform the callback, gracefully handling any exceptions
try:
# trigger the callback within its own thread, so it won't block others if it's long-running
threading.Thread(target=self.callback, kwargs=kwargs, daemon=True).start()
except Exception as e:
logger.error(
"Error while processing callback for {}: {}".format(
repr(self.record), repr(e)
)
)
def __eq__(self, val):
if isinstance(val, str):
return self.callback_id.startswith(val)
elif isinstance(val, Callback):
return self.callback_id == val.callback_id
else:
return False
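# Illustrative usage sketch (store, record and my_handler are hypothetical):
# a callback receives only the kwargs its signature accepts, thanks to the
# parameter filtering in Callback.__call__ above.
#
#     def my_handler(difference, changes):
#         print(difference, changes)
#
#     store.add_callback(record, my_handler, callback_id="my-watch")
#     store.remove_callbacks(record._table, record.id, "my-watch")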
class RecordStore(object):
def __init__(self, client, cache_key=None):
self._mutex = Lock()
self._client = client
self._cache_key = cache_key
self._values = defaultdict(lambda: defaultdict(dict))
self._role = defaultdict(lambda: defaultdict(str))
self._collection_row_ids = {}
self._callbacks = defaultdict(lambda: defaultdict(list))
self._records_to_refresh = {}
self._pages_to_refresh = []
with self._mutex:
self._load_cache()
def _get(self, table, id):
return self._values[table].get(id, Missing)
def add_callback(self, record, callback, callback_id=None, extra_kwargs={}):
assert callable(
callback
), "The callback must be a 'callable' object, such as a function."
self.remove_callbacks(record._table, record.id, callback_id)
callback_obj = Callback(
callback, record, callback_id=callback_id, extra_kwargs=extra_kwargs
)
self._callbacks[record._table][record.id].append(callback_obj)
return callback_obj
def remove_callbacks(self, table, id, callback_or_callback_id_prefix=""):
"""
Remove all callbacks for the record specified by `table` and `id` that have a callback_id
starting with the string `callback_or_callback_id_prefix`, or are equal to the provided callback.
"""
if callback_or_callback_id_prefix is None:
return
callbacks = self._callbacks[table][id]
while callback_or_callback_id_prefix in callbacks:
callbacks.remove(callback_or_callback_id_prefix)
def _get_cache_path(self, attribute):
return str(
Path(CACHE_DIR).joinpath("{}{}.json".format(self._cache_key, attribute))
)
def _load_cache(self, attributes=("_values", "_role", "_collection_row_ids")):
if not self._cache_key:
return
for attr in attributes:
try:
with open(self._get_cache_path(attr)) as f:
if attr == "_collection_row_ids":
self._collection_row_ids.update(json.load(f))
else:
for k, v in json.load(f).items():
getattr(self, attr)[k].update(v)
except (FileNotFoundError, ValueError):
pass
def set_collection_rows(self, collection_id, row_ids):
if collection_id in self._collection_row_ids:
old_ids = set(self._collection_row_ids[collection_id])
new_ids = set(row_ids)
added = new_ids - old_ids
removed = old_ids - new_ids
for id in added:
self._trigger_callbacks(
"collection",
collection_id,
[("row_added", "rows", id)],
old_ids,
new_ids,
)
for id in removed:
self._trigger_callbacks(
"collection",
collection_id,
[("row_removed", "rows", id)],
old_ids,
new_ids,
)
self._collection_row_ids[collection_id] = row_ids
self._save_cache("_collection_row_ids")
def get_collection_rows(self, collection_id):
return self._collection_row_ids.get(collection_id, [])
def _save_cache(self, attribute):
if not self._cache_key:
return
with open(self._get_cache_path(attribute), "w") as f:
json.dump(getattr(self, attribute), f)
def _trigger_callbacks(self, table, id, difference, old_val, new_val):
for callback_obj in self._callbacks[table][id]:
callback_obj(difference, old_val, new_val)
def get_role(self, table, id, force_refresh=False):
self.get(table, id, force_refresh=force_refresh)
return self._role[table].get(id, None)
def get(self, table, id, force_refresh=False):
id = extract_id(id)
# look up the record in the current local dataset
result = self._get(table, id)
# if it's not found, try refreshing the record from the server
if result is Missing or force_refresh:
if table == "block":
self.call_load_page_chunk(id)
else:
self.call_get_record_values(**{table: id})
result = self._get(table, id)
return result if result is not Missing else None
def _update_record(self, table, id, value=None, role=None):
callback_queue = []
with self._mutex:
if role:
logger.debug("Updating 'role' for {}/{} to {}".format(table, id, role))
self._role[table][id] = role
self._save_cache("_role")
if value:
logger.debug(
"Updating 'value' for {}/{} to {}".format(table, id, value)
)
old_val = self._values[table][id]
difference = list(
diff(
old_val,
value,
ignore=["version", "last_edited_time", "last_edited_by"],
expand=True,
)
)
self._values[table][id] = value
self._save_cache("_values")
if old_val and difference:
logger.debug("Value changed! Difference: {}".format(difference))
callback_queue.append((table, id, difference, old_val, value))
# run callbacks outside the mutex to avoid lockups
for cb in callback_queue:
self._trigger_callbacks(*cb)
def call_get_record_values(self, **kwargs):
"""
Call the server's getRecordValues endpoint to update the local record store. The keyword arguments map
table names into lists of (or singular) record IDs to load for that table. Use True to refresh all known
records for that table.
"""
requestlist = []
for table, ids in kwargs.items():
# ensure "ids" is a proper list
if ids is True:
ids = list(self._values.get(table, {}).keys())
if isinstance(ids, str):
ids = [ids]
# if we're in a transaction, add the requested IDs to a queue to refresh when the transaction completes
if self._client.in_transaction():
self._records_to_refresh[table] = list(
set(self._records_to_refresh.get(table, []) + ids)
)
continue
requestlist += [{"table": table, "id": extract_id(id)} for id in ids]
if requestlist:
logger.debug(
"Calling 'getRecordValues' endpoint for requests: {}".format(
requestlist
)
)
results = self._client.post(
"getRecordValues", {"requests": requestlist}
).json()["results"]
for request, result in zip(requestlist, results):
self._update_record(
request["table"],
request["id"],
value=result.get("value"),
role=result.get("role"),
)
def get_current_version(self, table, id):
values = self._get(table, id)
if values and "version" in values:
return values["version"]
else:
return -1
def call_load_page_chunk(self, page_id):
if self._client.in_transaction():
self._pages_to_refresh.append(page_id)
return
data = {
"pageId": page_id,
"limit": 100,
"cursor": {"stack": []},
"chunkNumber": 0,
"verticalColumns": False,
}
recordmap = self._client.post("loadPageChunk", data).json()["recordMap"]
self.store_recordmap(recordmap)
def store_recordmap(self, recordmap):
for table, records in recordmap.items():
if not isinstance(records, dict):
continue
for id, record in records.items():
if not isinstance(record, dict):
continue
self._update_record(
table, id, value=record.get("value"), role=record.get("role")
)
def call_query_collection(
self,
collection_id,
collection_view_id,
search="",
type="table",
aggregate=[],
aggregations=[],
filter={},
sort=[],
calendar_by="",
group_by="",
):
assert not (
aggregate and aggregations
), "Use only one of `aggregate` or `aggregations` (old vs new format)"
# convert singletons into lists if needed
if isinstance(aggregate, dict):
aggregate = [aggregate]
if isinstance(sort, dict):
sort = [sort]
data = {
"collectionId": collection_id,
"collectionViewId": collection_view_id,
"loader": {
"limit": 1000000,
"loadContentCover": True,
"searchQuery": search,
"userLocale": "en",
"userTimeZone": str(get_localzone()),
"type": type,
},
"query": {
"aggregate": aggregate,
"aggregations": aggregations,
"filter": filter,
"sort": sort,
},
}
response = self._client.post("queryCollection", data).json()
self.store_recordmap(response["recordMap"])
return response["result"]
def handle_post_transaction_refreshing(self):
for block_id in self._pages_to_refresh:
self.call_load_page_chunk(block_id)
self._pages_to_refresh = []
self.call_get_record_values(**self._records_to_refresh)
self._records_to_refresh = {}
def run_local_operations(self, operations):
"""
Called to simulate the results of running the operations on the server, to keep the record store in sync
even when we haven't completed a refresh (or we did a refresh but the database hadn't actually updated yet...)
"""
for operation in operations:
self.run_local_operation(**operation)
def run_local_operation(self, table, id, path, command, args):
with self._mutex:
path = deepcopy(path)
new_val = deepcopy(self._values[table][id])
ref = new_val
# loop and descend down the path until it's consumed, or if we're doing a "set", there's one key left
while (len(path) > 1) or (path and command != "set"):
comp = path.pop(0)
if comp not in ref:
ref[comp] = [] if "list" in command else {}
ref = ref[comp]
if command == "update":
assert isinstance(ref, dict)
ref.update(args)
elif command == "set":
assert isinstance(ref, dict)
if path:
ref[path[0]] = args
else:
# this is the case of "setting the top level" (i.e. creating a record)
ref.clear()
ref.update(args)
elif command == "listAfter":
assert isinstance(ref, list)
if "after" in args:
ref.insert(ref.index(args["after"]) + 1, args["id"])
else:
ref.append(args["id"])
elif command == "listBefore":
assert isinstance(ref, list)
if "before" in args:
ref.insert(ref.index(args["before"]), args["id"])
else:
ref.insert(0, args["id"])
elif command == "listRemove":
try:
ref.remove(args["id"])
except ValueError:
pass
self._update_record(table, id, value=new_val)
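# Illustrative sketch (ids are hypothetical): the operation shape consumed by
# run_local_operations above, here a "listAfter" on a block's content list:
#
#     op = {
#         "table": "block",
#         "id": "aaaa-bbbb",
#         "path": ["content"],
#         "command": "listAfter",
#         "args": {"after": "cccc-dddd", "id": "eeee-ffff"},
#     }
#     store.run_local_operations([op])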
|
__main__.py
|
import sys
import multiprocessing as mp
import importlib.resources
import signal
from pystray import Icon, Menu, MenuItem
import click
from . import FatalError, run_server, PORT, server_process, \
viewer, warning, local_storage, shutdown_server, load_png_logo
from .viewer import ViewerSettings
WEBVIEW_PROCESSES = []
def spawn_webview(settings, force=False):
global WEBVIEW_PROCESSES
if not force and any(proc.is_alive() for proc in WEBVIEW_PROCESSES):
warning("Webview already exists")
return
# We spawn a SUBPROCESS to deal with the webview.
# By spawning a subprocess, we can fix the issues with pystray & webview integration.
# Issues like closing apps & messing with GTK context magically disappear.
p.start()
print(f"Spawned webview process: {p.pid}")
WEBVIEW_PROCESSES.append(p)
def exit(icon):
shutdown_server()
for proc in WEBVIEW_PROCESSES:
proc.terminate()
if icon is not None:
icon.stop()
sys.exit(0)
def respawn_webview(force=False):
spawn_webview(ViewerSettings(
backend=_CURRENT_BACKEND,
show_startup_msg=False,
loading_wait=0
), force=force)
def handle_user_signal(signum, _frame):
if signum == signal.SIGUSR1:
respawn_webview(force=True)
_GUI_MODE = False
_CURRENT_BACKEND = None
@click.command('mathicsd')
@click.option('--backend', default=viewer.detect_default_backend(),
help="Explicitly specify the webview backend to use.")
@click.option('--hide-startup-msg', is_flag=True,
help="Suppress the default startup message")
@click.option('--skip-loading-screen', is_flag=True,
help="Skip the loading screen")
@click.option('--gui', is_flag=True,
help="Use GUI mode (show dialogs for errors)")
def run(backend, hide_startup_msg, skip_loading_screen, gui):
global _CURRENT_BACKEND, _GUI_MODE
if gui:
_GUI_MODE = True
_CURRENT_BACKEND = backend
if (existing_proc := server_process()) is not None:
print("WARNING: Found existing process. Just spawning a new webview.")
parent = existing_proc.parent()
if parent is None:
raise FatalError("Could not find parent of already-existing server process.....")
if 'mathicsd' in parent.cmdline():
parent.send_signal(signal.SIGUSR1)
return
else:
warning("Killing old server")
existing_proc.terminate()
existing_proc.wait()
run_server()
# TODO: I don't believe SIGUSR1 is portable to Windows...
signal.signal(signal.SIGUSR1, handle_user_signal)
spawn_webview(ViewerSettings(
backend=backend, show_startup_msg=not hide_startup_msg,
loading_wait=0 if skip_loading_screen else 3
))
icon = Icon(
"Mathics Daemon", load_png_logo(),
menu=Menu(
MenuItem(
"Open Viewer",
lambda: respawn_webview()
),
MenuItem(
"Quit (Stop Server)",
lambda icon, _: exit(icon)
)
)
)
try:
icon.run()
except KeyboardInterrupt as e:
# TODO: Broken
print("Interrupt:", e)
exit(icon)
def display_gui_error(msg):
from tkinter import messagebox, Tk
Tk().withdraw()
messagebox.showerror("Fatal Error!", msg)
if __name__ == "__main__":
# We don't want resources (like )
# TODO: Investigate forkserver?
mp.set_start_method('spawn')
try:
# TODO: How to run click so it doesn't catch KeyboardInterrupt?????
run.main(standalone_mode=False)
except FatalError as e:
display_gui_error(str(e))
exit(None)
except KeyboardInterrupt:
exit(None) # No icon yet
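# Illustrative sketch (standalone; not portable to Windows, as the TODO above
# notes): the single-instance handoff relies on SIGUSR1. A second launch signals
# the first process, whose handler respawns the webview:
#
#     import os, signal
#
#     def on_usr1(signum, _frame):
#         print("respawn webview here")
#
#     signal.signal(signal.SIGUSR1, on_usr1)
#     os.kill(os.getpid(), signal.SIGUSR1)  # what a second launch effectively does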
|
test_mp_full.py
|
"""
multiproc full tests.
"""
import importlib
import multiprocessing
import platform
import pytest
import time
import wandb
from wandb.errors import UsageError
import sys
def train(add_val):
time.sleep(1)
wandb.log(dict(mystep=1, val=2 + add_val))
wandb.log(dict(mystep=2, val=8 + add_val))
wandb.log(dict(mystep=3, val=3 + add_val))
wandb.log(dict(val2=4 + add_val))
wandb.log(dict(val2=1 + add_val))
time.sleep(1)
def test_multiproc_default(live_mock_server, test_settings, parse_ctx):
run = wandb.init(settings=test_settings)
train(0)
run.finish()
ctx_util = parse_ctx(live_mock_server.get_ctx())
summary = ctx_util.summary
s = {k: v for k, v in dict(summary).items() if not k.startswith("_")}
assert dict(val=3, val2=1, mystep=3) == s
@pytest.mark.skipif(platform.system() == "Windows", reason="fork needed")
@pytest.mark.skipif(sys.version_info >= (3, 10), reason="flaky?")
def test_multiproc_ignore(live_mock_server, test_settings, parse_ctx):
run = wandb.init(settings=test_settings)
train(0)
procs = []
for i in range(2):
procs.append(multiprocessing.Process(target=train, kwargs=dict(add_val=100)))
try:
for p in procs:
p.start()
finally:
for p in procs:
p.join()
assert p.exitcode == 0
run.finish()
ctx_util = parse_ctx(live_mock_server.get_ctx())
summary = ctx_util.summary
s = {k: v for k, v in dict(summary).items() if not k.startswith("_")}
assert dict(val=3, val2=1, mystep=3) == s
@pytest.mark.flaky
@pytest.mark.skipif(platform.system() == "Windows", reason="fork needed")
@pytest.mark.xfail(platform.system() == "Darwin", reason="console parse_ctx issues")
def test_multiproc_strict(live_mock_server, test_settings, parse_ctx):
test_settings.update(strict="true", source=wandb.sdk.wandb_settings.Source.INIT)
run = wandb.init(settings=test_settings)
train(0)
procs = []
for i in range(2):
procs.append(multiprocessing.Process(target=train, kwargs=dict(add_val=100)))
try:
for p in procs:
p.start()
finally:
for p in procs:
p.join()
# expect fail
assert p.exitcode != 0
run.finish()
ctx_util = parse_ctx(live_mock_server.get_ctx())
summary = ctx_util.summary
s = {k: v for k, v in dict(summary).items() if not k.startswith("_")}
assert dict(val=3, val2=1, mystep=3) == s
def test_multiproc_strict_bad(live_mock_server, test_settings, parse_ctx):
with pytest.raises(UsageError):
test_settings.update(strict="bad")
def test_multiproc_spawn(runner, test_settings):
# WB5640. Before the WB5640 fix this code fragment would raise an
# exception, this test checks that it runs without error
with runner.isolated_filesystem():
from .utils import test_mod
test_mod.main()
sys.modules["__main__"].__spec__ = importlib.machinery.ModuleSpec(
name="tests.utils.test_mod", loader=importlib.machinery.BuiltinImporter
)
test_mod.main()
sys.modules["__main__"].__spec__ = None
# run this to get credit for the diff
test_mod.mp_func()
def test_missing_attach_id(live_mock_server, test_settings):
run = wandb.init(settings=test_settings)
with pytest.raises(UsageError):
wandb._attach(attach_id=None, run_id=None)
run.finish()
|
test_tracer.py
|
import time
import opentracing
from opentracing import (
child_of,
Format,
InvalidCarrierException,
UnsupportedFormatException,
SpanContextCorruptedException,
)
import ddtrace
from ddtrace.ext.priority import AUTO_KEEP
from ddtrace.opentracer import Tracer, set_global_tracer
from ddtrace.opentracer.span_context import SpanContext
from ddtrace.propagation.http import HTTP_HEADER_TRACE_ID
from ddtrace.settings import ConfigException
import mock
import pytest
class TestTracerConfig(object):
def test_config(self):
"""Test the configuration of the tracer"""
config = {"enabled": True}
tracer = Tracer(service_name="myservice", config=config)
assert tracer._service_name == "myservice"
assert tracer._enabled is True
def test_no_service_name(self):
"""A service_name should be generated if one is not provided."""
tracer = Tracer()
assert tracer._service_name == "pytest"
def test_multiple_tracer_configs(self):
"""Ensure that a tracer config is a copy of the passed config."""
config = {"enabled": True}
tracer1 = Tracer(service_name="serv1", config=config)
assert tracer1._service_name == "serv1"
config["enabled"] = False
tracer2 = Tracer(service_name="serv2", config=config)
# Ensure tracer1's config was not mutated
assert tracer1._service_name == "serv1"
assert tracer1._enabled is True
assert tracer2._service_name == "serv2"
assert tracer2._enabled is False
def test_invalid_config_key(self):
"""A config with an invalid key should raise a ConfigException."""
config = {"enabeld": False}
# Without the debug flag, this should not raise an error
tracer = Tracer(service_name="mysvc", config=config)
# With debug flag should raise an error
config["debug"] = True
with pytest.raises(ConfigException) as ce_info:
tracer = Tracer(config=config)
assert "enabeld" in str(ce_info)
assert tracer is not None
# Test with multiple incorrect keys
config["setttings"] = {}
with pytest.raises(ConfigException) as ce_info:
tracer = Tracer(service_name="mysvc", config=config)
assert ["enabeld", "setttings"] in str(ce_info)
assert tracer is not None
def test_global_tags(self):
"""Global tags should be passed from the opentracer to the tracer."""
config = {
"global_tags": {"tag1": "value1", "tag2": 2,},
}
tracer = Tracer(service_name="mysvc", config=config)
with tracer.start_span("myop") as span:
# global tags should be attached to all generated datadog spans
assert span._dd_span.get_tag("tag1") == "value1"
assert span._dd_span.get_metric("tag2") == 2
with tracer.start_span("myop2") as span2:
assert span2._dd_span.get_tag("tag1") == "value1"
assert span2._dd_span.get_metric("tag2") == 2
class TestTracer(object):
def test_start_span(self, ot_tracer, writer):
"""Start and finish a span."""
with ot_tracer.start_span("myop") as span:
pass
# span should be finished when the context manager exits
assert span.finished
spans = writer.pop()
assert len(spans) == 1
def test_start_span_references(self, ot_tracer, writer):
"""Start a span using references."""
with ot_tracer.start_span("one", references=[child_of()]):
pass
spans = writer.pop()
assert spans[0].parent_id is None
root = ot_tracer.start_active_span("root")
# create a child using a parent reference that is not the context parent
with ot_tracer.start_active_span("one"):
with ot_tracer.start_active_span("two", references=[child_of(root.span)]):
pass
root.close()
spans = writer.pop()
assert spans[2].parent_id is spans[0].span_id
def test_start_span_custom_start_time(self, ot_tracer):
"""Start a span with a custom start time."""
t = 100
with mock.patch("ddtrace.span.time_ns") as time:
time.return_value = 102 * 1e9
with ot_tracer.start_span("myop", start_time=t) as span:
pass
assert span._dd_span.start == t
assert span._dd_span.duration == 2
def test_start_span_with_spancontext(self, ot_tracer, writer):
"""Start and finish a span using a span context as the child_of
reference.
"""
with ot_tracer.start_span("myop") as span:
with ot_tracer.start_span("myop", child_of=span.context) as span2:
pass
# span should be finished when the context manager exits
assert span.finished
assert span2.finished
spans = writer.pop()
assert len(spans) == 2
# ensure proper parenting
assert spans[1].parent_id is spans[0].span_id
def test_start_span_with_tags(self, ot_tracer):
"""Create a span with initial tags."""
tags = {"key": "value", "key2": "value2"}
with ot_tracer.start_span("myop", tags=tags) as span:
pass
assert span._dd_span.get_tag("key") == "value"
assert span._dd_span.get_tag("key2") == "value2"
def test_start_span_with_resource_name_tag(self, ot_tracer):
"""Create a span with the tag to set the resource name"""
tags = {"resource.name": "value", "key2": "value2"}
with ot_tracer.start_span("myop", tags=tags) as span:
pass
# Span resource name should be set to tag value, and should not get set as
# a tag on the underlying span.
assert span._dd_span.resource == "value"
assert span._dd_span.get_tag("resource.name") is None
# Other tags are set as normal
assert span._dd_span.get_tag("key2") == "value2"
def test_start_active_span_multi_child(self, ot_tracer, writer):
"""Start and finish multiple child spans.
This should ensure that child spans can be created 2 levels deep.
"""
with ot_tracer.start_active_span("myfirstop") as scope1:
time.sleep(0.009)
with ot_tracer.start_active_span("mysecondop") as scope2:
time.sleep(0.007)
with ot_tracer.start_active_span("mythirdop") as scope3:
time.sleep(0.005)
# spans should be finished when the context manager exits
assert scope1.span.finished
assert scope2.span.finished
assert scope3.span.finished
spans = writer.pop()
# check spans are captured in the trace
assert scope1.span._dd_span is spans[0]
assert scope2.span._dd_span is spans[1]
assert scope3.span._dd_span is spans[2]
# ensure proper parenting
assert spans[1].parent_id is spans[0].span_id
assert spans[2].parent_id is spans[1].span_id
# sanity check a lower bound on the durations
assert spans[0].duration >= 0.009 + 0.007 + 0.005
assert spans[1].duration >= 0.007 + 0.005
assert spans[2].duration >= 0.005
def test_start_active_span_multi_child_siblings(self, ot_tracer, writer):
"""Start and finish multiple span at the same level.
This should test to ensure a parent can have multiple child spans at the
same level.
"""
with ot_tracer.start_active_span("myfirstop") as scope1:
time.sleep(0.009)
with ot_tracer.start_active_span("mysecondop") as scope2:
time.sleep(0.007)
with ot_tracer.start_active_span("mythirdop") as scope3:
time.sleep(0.005)
# spans should be finished when the context manager exits
assert scope1.span.finished
assert scope2.span.finished
assert scope3.span.finished
spans = writer.pop()
# check spans are captured in the trace
assert scope1.span._dd_span is spans[0]
assert scope2.span._dd_span is spans[1]
assert scope3.span._dd_span is spans[2]
# ensure proper parenting
assert spans[1].parent_id is spans[0].span_id
assert spans[2].parent_id is spans[0].span_id
# sanity check a lower bound on the durations
assert spans[0].duration >= 0.009 + 0.007 + 0.005
assert spans[1].duration >= 0.007
assert spans[2].duration >= 0.005
def test_start_span_manual_child_of(self, ot_tracer, writer):
"""Start spans without using a scope manager.
Spans should be created without parents since there will be no call
for the active span.
"""
root = ot_tracer.start_span("zero")
with ot_tracer.start_span("one", child_of=root):
with ot_tracer.start_span("two", child_of=root):
with ot_tracer.start_span("three", child_of=root):
pass
root.finish()
spans = writer.pop()
assert spans[0].parent_id is None
# ensure each child span is a child of root
assert spans[1].parent_id is root._dd_span.span_id
assert spans[2].parent_id is root._dd_span.span_id
assert spans[3].parent_id is root._dd_span.span_id
assert spans[0].trace_id == spans[1].trace_id and spans[1].trace_id == spans[2].trace_id
def test_start_span_no_active_span(self, ot_tracer, writer):
"""Start spans without using a scope manager.
Spans should be created without parents since there will be no call
for the active span.
"""
with ot_tracer.start_span("one", ignore_active_span=True):
with ot_tracer.start_span("two", ignore_active_span=True):
pass
with ot_tracer.start_span("three", ignore_active_span=True):
pass
spans = writer.pop()
# ensure each span does not have a parent
assert spans[0].parent_id is None
assert spans[1].parent_id is None
assert spans[2].parent_id is None
# and that each span is a new trace
assert (
spans[0].trace_id != spans[1].trace_id
and spans[1].trace_id != spans[2].trace_id
and spans[0].trace_id != spans[2].trace_id
)
def test_start_active_span_child_finish_after_parent(self, ot_tracer, writer):
"""Start a child span and finish it after its parent."""
span1 = ot_tracer.start_active_span("one").span
span2 = ot_tracer.start_active_span("two").span
span1.finish()
time.sleep(0.005)
span2.finish()
spans = writer.pop()
assert len(spans) == 2
assert spans[0].parent_id is None
assert spans[1].parent_id is span1._dd_span.span_id
assert spans[1].duration > spans[0].duration
def test_start_span_multi_intertwined(self, ot_tracer, writer):
"""Start multiple spans at the top level intertwined.
Alternate calling between two traces.
"""
import threading
# synchronize threads with a threading event object
event = threading.Event()
def trace_one():
id = 11 # noqa: A001
with ot_tracer.start_active_span(str(id)):
id += 1
with ot_tracer.start_active_span(str(id)):
id += 1
with ot_tracer.start_active_span(str(id)):
event.set()
def trace_two():
id = 21 # noqa: A001
event.wait()
with ot_tracer.start_active_span(str(id)):
id += 1
with ot_tracer.start_active_span(str(id)):
id += 1
with ot_tracer.start_active_span(str(id)):
pass
# the ordering should be
# t1.span1/t2.span1, t2.span2, t1.span2, t1.span3, t2.span3
t1 = threading.Thread(target=trace_one)
t2 = threading.Thread(target=trace_two)
t1.start()
t2.start()
# wait for threads to finish
t1.join()
t2.join()
spans = writer.pop()
# trace_one will finish before trace_two so its spans should be written
# before the spans from trace_two; let's confirm this
assert spans[0].name == "11"
assert spans[1].name == "12"
assert spans[2].name == "13"
assert spans[3].name == "21"
assert spans[4].name == "22"
assert spans[5].name == "23"
# next let's ensure that each span has the correct parent:
# trace_one
assert spans[0].parent_id is None
assert spans[1].parent_id is spans[0].span_id
assert spans[2].parent_id is spans[1].span_id
# trace_two
assert spans[3].parent_id is None
assert spans[4].parent_id is spans[3].span_id
assert spans[5].parent_id is spans[3].span_id
# finally we should ensure that the trace_ids are reasonable
# trace_one
assert spans[0].trace_id == spans[1].trace_id and spans[1].trace_id == spans[2].trace_id
# traces should be independent
assert spans[2].trace_id != spans[3].trace_id
# trace_two
assert spans[3].trace_id == spans[4].trace_id and spans[4].trace_id == spans[5].trace_id
def test_start_active_span(self, ot_tracer, writer):
with ot_tracer.start_active_span("one") as scope:
pass
assert scope.span._dd_span.name == "one"
assert scope.span.finished
spans = writer.pop()
assert spans
def test_start_active_span_finish_on_close(self, ot_tracer, writer):
with ot_tracer.start_active_span("one", finish_on_close=False) as scope:
pass
assert scope.span._dd_span.name == "one"
assert not scope.span.finished
spans = writer.pop()
assert not spans
def test_start_active_span_nested(self, ot_tracer):
"""Test the active span of multiple nested calls of start_active_span."""
with ot_tracer.start_active_span("one") as outer_scope:
assert ot_tracer.active_span == outer_scope.span
with ot_tracer.start_active_span("two") as inner_scope:
assert ot_tracer.active_span == inner_scope.span
with ot_tracer.start_active_span("three") as innest_scope: # why isn't it innest? innermost so verbose
assert ot_tracer.active_span == innest_scope.span
with ot_tracer.start_active_span("two") as inner_scope:
assert ot_tracer.active_span == inner_scope.span
assert ot_tracer.active_span == outer_scope.span
assert ot_tracer.active_span is None
def test_start_active_span_trace(self, ot_tracer, writer):
"""Test the active span of multiple nested calls of start_active_span."""
with ot_tracer.start_active_span("one") as outer_scope:
outer_scope.span.set_tag("outer", 2)
with ot_tracer.start_active_span("two") as inner_scope:
inner_scope.span.set_tag("inner", 3)
with ot_tracer.start_active_span("two") as inner_scope:
inner_scope.span.set_tag("inner", 3)
with ot_tracer.start_active_span("three") as innest_scope:
innest_scope.span.set_tag("innerest", 4)
spans = writer.pop()
assert spans[0].parent_id is None
assert spans[1].parent_id is spans[0].span_id
assert spans[2].parent_id is spans[0].span_id
assert spans[3].parent_id is spans[2].span_id
@pytest.fixture
def nop_span_ctx():
return SpanContext(sampling_priority=AUTO_KEEP)
class TestTracerSpanContextPropagation(object):
"""Test the injection and extration of a span context from a tracer."""
def test_invalid_format(self, ot_tracer, nop_span_ctx):
"""An invalid format should raise an UnsupportedFormatException."""
# test inject
with pytest.raises(UnsupportedFormatException):
ot_tracer.inject(nop_span_ctx, None, {})
# test extract
with pytest.raises(UnsupportedFormatException):
ot_tracer.extract(None, {})
def test_inject_invalid_carrier(self, ot_tracer, nop_span_ctx):
"""Only dicts should be supported as a carrier."""
with pytest.raises(InvalidCarrierException):
ot_tracer.inject(nop_span_ctx, Format.HTTP_HEADERS, None)
def test_extract_invalid_carrier(self, ot_tracer):
"""Only dicts should be supported as a carrier."""
with pytest.raises(InvalidCarrierException):
ot_tracer.extract(Format.HTTP_HEADERS, None)
def test_http_headers_base(self, ot_tracer):
"""extract should undo inject for http headers."""
span_ctx = SpanContext(trace_id=123, span_id=456)
carrier = {}
ot_tracer.inject(span_ctx, Format.HTTP_HEADERS, carrier)
assert len(carrier.keys()) > 0
ext_span_ctx = ot_tracer.extract(Format.HTTP_HEADERS, carrier)
assert ext_span_ctx._dd_context.trace_id == 123
assert ext_span_ctx._dd_context.span_id == 456
def test_http_headers_baggage(self, ot_tracer):
"""extract should undo inject for http headers."""
span_ctx = SpanContext(trace_id=123, span_id=456, baggage={"test": 4, "test2": "string"})
carrier = {}
ot_tracer.inject(span_ctx, Format.HTTP_HEADERS, carrier)
assert len(carrier.keys()) > 0
ext_span_ctx = ot_tracer.extract(Format.HTTP_HEADERS, carrier)
assert ext_span_ctx._dd_context.trace_id == 123
assert ext_span_ctx._dd_context.span_id == 456
assert ext_span_ctx.baggage == span_ctx.baggage
def test_empty_propagated_context(self, ot_tracer):
"""An empty propagated context should raise a
SpanContextCorruptedException when extracted.
"""
carrier = {}
with pytest.raises(SpanContextCorruptedException):
ot_tracer.extract(Format.HTTP_HEADERS, carrier)
def test_text(self, ot_tracer):
"""extract should undo inject for http headers"""
span_ctx = SpanContext(trace_id=123, span_id=456, baggage={"test": 4, "test2": "string"})
carrier = {}
ot_tracer.inject(span_ctx, Format.TEXT_MAP, carrier)
assert len(carrier.keys()) > 0
ext_span_ctx = ot_tracer.extract(Format.TEXT_MAP, carrier)
assert ext_span_ctx._dd_context.trace_id == 123
assert ext_span_ctx._dd_context.span_id == 456
assert ext_span_ctx.baggage == span_ctx.baggage
def test_corrupted_propagated_context(self, ot_tracer):
"""Corrupted context should raise a SpanContextCorruptedException."""
span_ctx = SpanContext(trace_id=123, span_id=456, baggage={"test": 4, "test2": "string"})
carrier = {}
ot_tracer.inject(span_ctx, Format.TEXT_MAP, carrier)
assert len(carrier.keys()) > 0
# manually alter a key in the carrier baggage
del carrier[HTTP_HEADER_TRACE_ID]
corrupted_key = HTTP_HEADER_TRACE_ID[2:]
carrier[corrupted_key] = 123
with pytest.raises(SpanContextCorruptedException):
ot_tracer.extract(Format.TEXT_MAP, carrier)
def test_immutable_span_context(self, ot_tracer):
"""Span contexts should be immutable."""
with ot_tracer.start_span("root") as root:
ctx_before = root.context
root.set_baggage_item("test", 2)
assert ctx_before is not root.context
with ot_tracer.start_span("child") as level1:
with ot_tracer.start_span("child") as level2:
pass
assert root.context is not level1.context
assert level2.context is not level1.context
assert level2.context is not root.context
def test_inherited_baggage(self, ot_tracer):
"""Baggage should be inherited by child spans."""
with ot_tracer.start_active_span("root") as root:
# this should be passed down to the child
root.span.set_baggage_item("root", 1)
root.span.set_baggage_item("root2", 1)
with ot_tracer.start_active_span("child") as level1:
level1.span.set_baggage_item("level1", 1)
with ot_tracer.start_active_span("child") as level2:
level2.span.set_baggage_item("level2", 1)
# ensure immutability
assert level1.span.context is not root.span.context
assert level2.span.context is not level1.span.context
# level1 should have inherited the baggage of root
assert level1.span.get_baggage_item("root")
assert level1.span.get_baggage_item("root2")
# level2 should have inherited the baggage of root and level1 (plus its own)
assert level2.span.get_baggage_item("root")
assert level2.span.get_baggage_item("root2")
assert level2.span.get_baggage_item("level1")
assert level2.span.get_baggage_item("level2")
class TestTracerCompatibility(object):
"""Ensure that our opentracer produces results in the underlying datadog tracer."""
def test_required_dd_fields(self):
"""Ensure required fields needed for successful tracing are possessed
by the underlying datadog tracer.
"""
# a service name is required
tracer = Tracer("service")
with tracer.start_span("my_span") as span:
assert span._dd_span.service
def test_set_global_tracer():
"""Sanity check for set_global_tracer"""
my_tracer = Tracer("service")
set_global_tracer(my_tracer)
assert opentracing.tracer is my_tracer
assert ddtrace.tracer is my_tracer._dd_tracer
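# Quick reference (a sketch distilled from the tests above; the service
# name and config values are illustrative, not an exhaustive API listing):
#
# tracer = Tracer(service_name="myservice", config={"enabled": True})
# set_global_tracer(tracer)  # installs it as opentracing.tracer
# with tracer.start_active_span("op") as scope:
#     scope.span.set_tag("key", "value")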
|
basic_sensor.py
|
"""
Basic sensor that attaches to the robot at the end effector.
Functionality:
- Placeholder sensor
- Reads the position of the robot in the global frame (knows which robot it is attached to)
- Publishes sensor data by default; can be turned on/off via the sensor/ID/state topic
Future features:
- Real sensor producing simulated data
"""
import json
import threading
import time
from sim.src.pose import Pose
from sim.src.robot import Robot
from sim.src.sensor import Sensor
from sim.infra.mqtt_publisher import MQTTPublisher
from sim.infra.mqtt_subscriber import MQTTSubscriber
from sim.infra.topic_utils import SENSOR_PREFIX, STATE_SUFFIX, DETECTION_SUFFIX, gen_influxdb_time
from sim.utils.constants import BASIC_SENSOR_DETECTION_FREQ
from sim.utils.enums import MQTTPahoRC, ObjState, SensorType
class BasicSensor(Sensor):
def __init__(self, robot, placement_pose, ID):
super().__init__(robot, placement_pose, ID, SensorType.BASIC)
self.state = ObjState.ON
self.detection_topic = SENSOR_PREFIX + str(self.id) + DETECTION_SUFFIX
self.detection_pub = MQTTPublisher(self.detection_topic)
self.state_topic = SENSOR_PREFIX + str(self.id) + STATE_SUFFIX
self.state_sub = MQTTSubscriber(self.state_topic)
self.state_sub.subscribe_topic(self.control_state)
self.disabled = False
def disable(self):
"""
Disable pubsub and turn off
"""
self.disabled = True
self.state = ObjState.OFF
self.pub_thread.join()
self.state_sub.disconnect()
self.detection_pub.stop_publish()
self.detection_pub.disconnect()
def control_state(self, client, userdata, message):
"""
Callback for subscription to sensor/ID/state
"""
msg = json.loads(message.payload.decode("utf-8"))
if ObjState(msg["control_state"]) == ObjState.ON:
# Turn on and enable publisher
self.state = ObjState.ON
self.detection_pub.start_publish()
elif ObjState(msg["control_state"]) == ObjState.OFF:
# Turn off and disable publisher
self.state = ObjState.OFF
self.detection_pub.stop_publish()
def operate(self):
"""
Periodic publish operation
"""
self.disabled = False
self.state = ObjState.ON
self.pub_thread = threading.Thread(target=self._publish_thread)
self.pub_thread.start()
def _publish_thread(self):
self.detection_pub.start_publish()
period = 1.0 / BASIC_SENSOR_DETECTION_FREQ
while not self.disabled:
begin_time = time.time()
self.detection_pub.publish_topic(self._detect_msg())
# Sleep for the remainder of the loop period (clamped at 0 if publishing overran)
time.sleep(max(0.0, period - (time.time() - begin_time)))
def _read(self) -> Pose:
"""
Placeholder read: returns the current robot pose in the global frame.
A real sensor would simulate a measurement here.
"""
return self.robot.pose
def _detect_msg(self) -> str:
"""
Builds the detection message with the sensed location (currently in the global frame)
"""
sensor_pose = self._read()
return json.dumps({"time": gen_influxdb_time(),
"detection": {"x": sensor_pose.x, "y": sensor_pose.y, "z": sensor_pose.z}})
|
eval_branch_pairs_synapses.py
|
import os
import sys
import glob
import multiprocessing as mp
import numpy as np
import matplotlib.pyplot as plt
import util
import util_feature_IO
def printUsageAndExit():
print("eval_branch_pairs_synapses.py network-dir mode num-workers")
print()
print("mode: aggregate, plot")
exit()
def getFiles(networkDir, gridDescriptor, boundDescriptor, synapticSide):
files = glob.glob(os.path.join(networkDir, "subcellular_features_{}synaptic_{}_{}".format(synapticSide, gridDescriptor, boundDescriptor), "*.csv"))
return files
def getEmptyStats():
return {
"branchesPre": 0,
"neuronsPre": 0,
"branchesPost": 0,
"neuronsPost": 0,
"boutons": 0
}
def processBatch(batchIndex, results, files, synapticSide):
stats = {}
for i in range(0, len(files)):
if(i % 50 == 0):
print("batch", batchIndex, "item", i, "of", len(files))
filename = files[i]
if(synapticSide == "pre"):
features = util_feature_IO.readAxonFeatures(filename)
for cube, branches in features.items():
if(cube not in stats):
stats[cube] = getEmptyStats()
for branch in branches:
stats[cube]["branchesPre"] += 1
stats[cube]["boutons"] += branch["boutons"]
stats[cube]["neuronsPre"] += 1
else:
features = util_feature_IO.readDendriteFeatures(filename)
for cube, branches in features.items():
if(cube not in stats):
stats[cube] = getEmptyStats()
stats[cube]["branchesPost"] += len(branches)
stats[cube]["neuronsPost"] += 1
results[batchIndex] = stats
def mergeResults(results):
statsMerged = {}
for statsSingle in results.values():
for cube, values in statsSingle.items():
if (cube not in statsMerged):
statsMerged[cube] = getEmptyStats()
statsMerged[cube]['branchesPre'] += values["branchesPre"]
statsMerged[cube]['neuronsPre'] += values["neuronsPre"]
statsMerged[cube]['branchesPost'] += values["branchesPost"]
statsMerged[cube]['neuronsPost'] += values["neuronsPost"]
statsMerged[cube]['boutons'] += values["boutons"]
return statsMerged
def writeCubeStats(filename, stats):
with open(filename, "w+") as f:
f.write("ix,iy,iz,branches_pre,neurons_pre,branches_post,neurons_post,boutons\n")
for cube, values in stats.items():
f.write("{},{},{},{},{},{},{},{:.4f}\n".format(cube[0], cube[1], cube[2], values["branchesPre"], values["neuronsPre"], values["branchesPost"], values["neuronsPost"], values["boutons"]))
def aggregate(outfolder, gridDescriptor, filesPre, filesPost, numWorkers):
batchesPre = np.array_split(filesPre, numWorkers)
batchesPost = np.array_split(filesPost, numWorkers)
processes = []
manager = mp.Manager()
results = manager.dict()
for i in range(0, len(batchesPre)):
p = mp.Process(target=processBatch, args=(i, results, batchesPre[i], "pre", ))
processes.append(p)
p.start()
for i in range(len(batchesPre), len(batchesPre)+len(batchesPost)):
p = mp.Process(target=processBatch, args=(i, results, batchesPost[i-len(batchesPre)], "post", ))
processes.append(p)
p.start()
for p in processes:
p.join()
merged = mergeResults(results)
writeCubeStats(os.path.join(outfolder, "cube-stats_{}.csv".format(gridDescriptor)), merged)
def createPlot(outfolder, gridDescriptors):
idx = np.arange(len(gridDescriptors))
pairsMean = []
boutonsMean = []
for gridDescriptor in gridDescriptors:
D = np.loadtxt(os.path.join(outfolder, "cube-stats_{}.csv".format(gridDescriptor)), delimiter=",", skiprows=1, usecols=(3,4,5))
pairs = np.multiply(D[:,0],D[:,1])
boutons = D[:,2]
pairsMean.append(np.mean(pairs))
boutonsMean.append(np.mean(boutons))
plt.plot(idx, pairsMean, marker="o", label="branch pairs")
plt.plot(idx, boutonsMean, marker="o", label="synapses")
plt.legend()
plt.yscale("log")
plt.xlabel("overlap volume (min: 1-1-1; max 100-100-100)")
plt.savefig(os.path.join(outfolder, "branchPairsSynapses.png"))
if __name__ == "__main__":
if(len(sys.argv) != 4):
printUsageAndExit()
networkDir = sys.argv[1]
mode = sys.argv[2]
numWorkers = int(sys.argv[3])
gridDescriptors = ["100-100-100", "50-50-50", "25-25-25", "10-10-10", "5-5-5", "1-1-1"]
boundDescriptor = "ref-volume"
outfolder = os.path.join(networkDir, "eval", "branch_pairs_synapses")
if(mode == "aggregate"):
util.makeCleanDir(outfolder)
for gridDescriptor in gridDescriptors:
filesPre = getFiles(networkDir, gridDescriptor, boundDescriptor, "pre")
filesPost = getFiles(networkDir, gridDescriptor, boundDescriptor, "post")
aggregate(outfolder, gridDescriptor, filesPre, filesPost, numWorkers)
elif(mode == "plot"):
gridDescriptors.reverse()
createPlot(outfolder, gridDescriptors)
else:
raise RuntimeError("invalid mode: {}".format(mode))
|
txnTest.py
|
#!/usr/bin/env python2
import sys, pdb
import bitcoin
import bitcoin.rpc
import bitcoin.core
import bitcoin.wallet
import time
import types
import datetime
from decimal import *
import httplib
import socket
import random
import threading
#bitcoin.SelectParams('testnet')
bitcoin.SelectParams('regtest')
BTC = 100000000
mBTC = 100000
uBTC = 100
DEFAULT_TX_FEE = 10
RPC_TIMEOUT=300
PerfectFractions = True
cnxn = None
def rpcRetry(fn):
global cnxn
while 1:
try:
ret = fn(cnxn)
return ret
except httplib.BadStatusLine as e:
cnxn = bitcoin.rpc.Proxy()
except httplib.ImproperConnectionState as e:
cnxn = bitcoin.rpc.Proxy()
except (socket.error,socket.timeout) as e: # connection refused. Sleep and retry
while 1:
try:
time.sleep(30)
cnxn = bitcoin.rpc.Proxy()
break
except:
pass
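# Example usage (hypothetical call shown): wrap any RPC invocation in a
# lambda so it is retried across dropped connections and bitcoind restarts:
#   height = rpcRetry(lambda c: c._call("getblockcount"))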
def Repeat(wallet, fee, verbose = False, tps = 10000):
start = time.time()
i = 0
sleepAmt = 0.0
for tx in wallet:
inp = []
amount = Decimal(0)
if tx["spendable"] is True:
if (i!=0) and (i & 255) == 0:
end = time.time()
interval = end - start
start = end
curtps = 256.0/interval
print ("%d: issued 256 payments in %f seconds. %f payments/sec" % (i, interval, curtps))
if curtps > tps: # Super simple proportional control algorithm
sleepAmt += (curtps-tps) * .0001
elif curtps < tps:
sleepAmt -= (tps-curtps) * .0001
i+=1
inp.append({"txid":bitcoin.core.b2lx(tx["outpoint"].hash),"vout":tx["outpoint"].n})
amount += tx["amount"]
amount -= fee
out = { str(tx["address"]): str(amount/BTC) }
if verbose:
print("%d: Send %s to %s" % (i, str(out), str(tx["address"])))
txn = rpcRetry(lambda x: x._call("createrawtransaction",inp, out))
signedtxn = rpcRetry(lambda x: x._call("signrawtransaction",str(txn)))
if signedtxn["complete"]:
try:
rpcRetry(lambda x: x._call("sendrawtransaction", signedtxn["hex"]))
except bitcoin.rpc.JSONRPCError as e:
print("Exception: %s" % str(e))
else:
print("tx not complete %s" % str(signedtxn))
if sleepAmt > 0:
time.sleep(sleepAmt)
def main(op, params=None):
global cnxn
cnxn = bitcoin.rpc.Proxy(timeout=RPC_TIMEOUT)
# try:
# print ("Balance: ", cnxn.getbalance())
# except ValueError as v:
# print(str(v))
# pdb.set_trace()
if op=="unspent":
if len(params):
amt = int(params[0])
else:
amt = 10000
wallet = cnxn.listunspent()
print ("This wallet has %d unspent outputs." % len(wallet))
spendable = filter(lambda x: x["spendable"], wallet)
print (" spendable txos: %d" % len(spendable))
satSpendable = 0
utxoOverAmt = 0
largest = 0
for s in spendable:
satSpendable += s["amount"]
if s["amount"]>=amt:
utxoOverAmt+=1
if s["amount"] > largest:
largest = s["amount"]
print (" spendable satoshis: %d" % satSpendable)
print (" UTXOs over %d: %d" % (amt, utxoOverAmt) )
print (" largest UTXO: %d" % largest)
wallet.sort(key=lambda x: x["amount"], reverse=True)
print (" 20 largest utxos:")
for w in wallet[0:20]:
print(" %d" % w["amount"])
if op=="repeat":
getcontext().prec = 8
fee = Decimal(0)
threaded = False
if len(params):
fee = Decimal(params[0])
if len(params)>1: # was transactions per second set?
tps = int(params[1])
else:
tps = 10000
if len(params)>2:
threaded = params[2] in ["True", "true", "TRUE", "threaded", "1"]
while 1:
print("starting over")
wallet = cnxn.listunspent()
ntx = len(wallet)
if threaded or ntx > 100000000: # don't use the threaded version yet -- bitcoind not parallelized anyway
print("Repeating %d tx threaded" % len(wallet))
splits = [ntx/4, ntx/2, ntx*3/4]
th = []
th.append( threading.Thread(target=lambda: Repeat(wallet[:splits[0]], fee)))
th.append( threading.Thread(target=lambda: Repeat(wallet[splits[0]:splits[1]], fee)))
th.append( threading.Thread(target=lambda: Repeat(wallet[splits[1]:splits[2]], fee)))
th.append( threading.Thread(target=lambda: Repeat(wallet[splits[2]:], fee)))
for t in th:
t.start()
for t in th:
t.join()
else:
print("Repeating %d tx sequential" % len(wallet))
Repeat(wallet, fee, False, tps)
if op=="join":
addrs = [cnxn.getnewaddress(),cnxn.getnewaddress()]
if len(params):
amt = int(params[0])
else:
amt = 100
if len(params)>1:
repeat = int(params[1])
else:
repeat = 1
wallet = cnxn.listunspent()
# print "This wallet has %d unspent outputs. Joining the %d at offset %d." % (len(wallet),amt, offset)
# consolidate(wallet[offset:offset+amt],cnxn.getnewaddress(), cnxn)
print ("This wallet has %d unspent outputs. Joining %d, %d times." % (len(wallet),amt, repeat))
offset = 100
for cnt in range(0,repeat):
print (cnt)
bigAmt = wallet[0]
itr = 0
idx = 0
for tx in wallet: # Find a larger utxo that will pay for a lot of dust
if tx["spendable"] is True and bigAmt["amount"] < tx["amount"]:
bigAmt = tx
idx = itr
itr += 1
del wallet[idx]
print (str(bigAmt))
consolidate(wallet[offset:offset+amt] + [bigAmt],addrs[0], cnxn)
del wallet[offset:offset+amt] # delete all the entries I just used
offset+=amt
if offset > len(wallet): break
#wallet = cnxn.listunspent()
#addrs = [cnxn.getnewaddress() for i in range(0,10)]
#split([wallet[0]],addrs, cnxn)
if op=="spamtill":
if len(params):
poolSize = int(params[0])
else:
poolSize = None
amt = None
addrs = [cnxn.getnewaddress() for i in range(0,25)]
while 1:
try:
spamTx(cnxn,50000,addrs, amt,False,mempoolTarget=poolSize)
except bitcoin.rpc.JSONRPCError as e:
print ("Out of addresses. Sleeping")
time.sleep(60)
except httplib.BadStatusLine as e:
cnxn = bitcoin.rpc.Proxy()
except (socket.error,socket.timeout) as e: # connection refused. Sleep and retry
while 1:
try:
time.sleep(30)
cnxn = bitcoin.rpc.Proxy()
break
except:
pass
if op=="spam":
if len(params):
amt = int(params[0])
else:
amt = None
addrs = [cnxn.rawgetnewaddress() for i in range(0,5)]
# addrs = cnxn.getaddressesbyaccount("")
while 1:
try:
spamTx(cnxn,50000,addrs, amt,False)
except bitcoin.rpc.JSONRPCError as e:
print ("Out of addresses. Sleeping")
time.sleep(60)
except httplib.BadStatusLine as e:
cnxn = bitcoin.rpc.Proxy()
except (socket.error,socket.timeout) as e: # connection refused. Sleep and retry
while 1:
try:
time.sleep(30)
cnxn = bitcoin.rpc.Proxy()
break
except:
pass
if op=="sweep": # [minAmount] [group]
addr = cnxn.getnewaddress()
if len(params):
amt = int(params[0])
else:
amt = 10000000
if len(params)>1:
group = int(params[1])
else:
group = 50
wallet = cnxn.listunspent()
offset = 100
spend = []
for tx in wallet:
if tx["spendable"] is True and tx["amount"] < amt and tx["confirmations"] > 0:
# print (str(tx))
spend.append(tx)
if len(spend)>=group:
rpcRetry(lambda x: consolidate(spend, addr, x,100*len(spend)))
spend=[]
if len(spend):
rpcRetry(lambda x: consolidate(spend, addr, x,100*len(spend)))
if op=="split": # split [nSplits] [fee] [minimum amount to split]
if len(params):
nSplits = int(params[0])
else:
nSplits = 25
if len(params)>1:
fee = int(params[1])
else:
fee = 100
minAmount = nSplits*(BTC/10000)
if len(params)>2:
minAmount = int(params[2])
wallet = cnxn.listunspent()
j = 0
addrs = [cnxn.rawgetnewaddress() for i in range(0,nSplits)]
for w in wallet:
j+=1
if w['amount'] > minAmount:
if 1: # try:
split([w],addrs, cnxn, fee)
print ("%d: split %d satoshi into %d addrs fee %d (%d sat per output)" % (j, w['amount'],nSplits, fee, w['amount']/nSplits))
else: # :except bitcoin.rpc.JSONRPCError as e:
print ("\n%d: Exception %s" % (j,str(e)))
pdb.set_trace()
else:
print ("address has only %d satoshi" % w['amount'])
# else: print "Split: %d" % j
if op=="info":
blkid = cnxn.getbestblockhash()
blk = cnxn.getblock(blkid)
txn = blk.vtx[0]
print (txn.vin)
print (txn.vout)
# cnxn.sendrawtransaction(txn) # u'transaction already in block chain' code: -27
#pdb.set_trace()
def generate(amt=1, conn=None):
if conn is None: conn = cnxn # fall back to the global RPC proxy
conn._call("generate", amt)
def spamTx(bu, numTx,addrp,amt = None,gen=False, mempoolTarget=None):
addr = addrp
print ("SPAM")
lastGenerate = -1
start = time.time()
if amt == None:
randAmt = True
else: randAmt = False
for i in range(0, numTx):
if (i!=0) and (i & 255) == 0:
end = time.time()
interval = end - start
start = end
print ("issued 256 payments in %f seconds. %f payments/sec" % (interval, 256.0/interval))
if mempoolTarget: # if the mempool is too big, wait for it to be reduced
while True:
time.sleep(10) # give time for other threads to run and sync tx from other nodes
mempoolData=bu._call("getmempoolinfo")
mempoolBytes = mempoolData["bytes"]
if mempoolBytes < mempoolTarget:
break
blockNum = bu._call("getblockcount")
print("mempool is %d bytes, %d tx. block %d. Waiting..." % (mempoolBytes, mempoolData["size"], blockNum))
if addrp is None:
print ("creating new address")
addr = bu._call('getnewaddress')
if type(addrp) is types.ListType:
addr = addrp[i%len(addrp)]
if type(addrp) is types.ListType:
change = addrp[(i+3)%len(addrp)]
else:
change = None
if randAmt:
amt = random.randint(100*uBTC, BTC/2)
print ("Count ", i, "Send %d to %s" % (amt, str(addr)))
try:
bu.sendtoaddress(addr, amt) # giga-perf stuff: bu.gigasendtoaddress(addr, amt, "", "", False, change)
except bitcoin.rpc.JSONRPCError as e:
print("except:", str(e))
if "Fee is larger" in str(e) and randAmt:
pass
elif gen and i > lastGenerate: # Out of TxOuts in the wallet so commit these txn
generate()
print ("\nGenerated at count %d. Interval %d" % (i, i-lastGenerate))
lastGenerate = i
else:
print ("\n%d: Exception %s" % (i,str(e)))
raise
finally:
pass
def split(frm, toAddrs, cnxn, txfee=DEFAULT_TX_FEE):
inp = []
getcontext().prec = 8
amount = Decimal(0)
for tx in frm:
# inp.append({"txid":str(tx["txid"]),"vout":tx["vout"]})
inp.append({"txid":bitcoin.core.b2lx(tx["outpoint"].hash),"vout":tx["outpoint"].n})
amount += tx["amount"]
outp = {} # = { str(toAddr): str((amount-txfee)/BTC) }
getcontext().prec = 8
amtPer = (Decimal(amount-txfee)/len(toAddrs)).to_integral_value()
# print ("amount: ", amount, " amount per: ", amtPer, "from :", len(frm), "to: ", len(toAddrs), "tx fee: ", txfee)
sum = Decimal(0)
for a in toAddrs[0:-1]:
outp[str(a)] = str(amtPer/BTC)
sum += Decimal(str(amtPer/BTC))
a = toAddrs[-1]
lastAmtPer = amount - sum*BTC - txfee
# print ("final amt: ", lastAmtPer)
outp[str(a)] = str(lastAmtPer/BTC)
tally = Decimal(0)
for key,val in outp.items():
tally += Decimal(val)
# print("Final tally: ", str(tally))
if tally > amount:
print("Bug: sum of splits is > input")
pdb.set_trace()
try:
txn = cnxn._call("createrawtransaction",inp, outp)
signedtxn = cnxn._call("signrawtransaction",str(txn))
if signedtxn["complete"]:
cnxn._call("sendrawtransaction", signedtxn["hex"])
except bitcoin.rpc.JSONRPCError as e:
print (str(e))
def consolidate(frm, toAddr, cnxn, txfee=DEFAULT_TX_FEE):
#out = bitcoin.core.CTxOut(frm["amount"],toAddr)
#script = bitcoin.core.CScript()
# bitcoin.wallet.CBitcoinAddress(toAddr)
# pdb.set_trace()
inp = []
amount = Decimal(0)
for tx in frm:
# pdb.set_trace()
if tx["spendable"] is True and tx["confirmations"] > 0:
inp.append({"txid":bitcoin.core.b2lx(tx["outpoint"].hash),"vout":tx["outpoint"].n})
amount += tx["amount"]
#out = bitcoin.core.CMutableTxOut(frm["amount"],toAddr.to_scriptPubKey())
if PerfectFractions:
outamt = str((amount-txfee)/BTC)
else:
outamt = float((amount-txfee)/BTC)
out = { str(toAddr): outamt }
#txn = bitcoin.core.CMutableTransaction(inp,[out])
#print(inp)
print("%d inputs -> %s" % (len(inp), out))
txn = cnxn._call("createrawtransaction",inp, out)
signedtxn = cnxn._call("signrawtransaction",str(txn))
if signedtxn["complete"]:
cnxn._call("sendrawtransaction", signedtxn["hex"])
def consolidate2(frm, toAddr, cnxn):
#out = bitcoin.core.CTxOut(frm["amount"],toAddr)
#script = bitcoin.core.CScript()
# bitcoin.wallet.CBitcoinAddress(toAddr)
inp = []
for tx in frm["txids"]:
txinfo = cnxn.gettransaction(tx)
print (txinfo)
vout = None
for d in txinfo["details"]:
if d["address"] == frm["address"]:
vout = d["vout"]
break
if not vout is None:
inp.append({"txid":str(tx),"vout":vout})
pdb.set_trace()
#out = bitcoin.core.CMutableTxOut(frm["amount"],toAddr.to_scriptPubKey())
out = { str(toAddr): str(frm["amount"]) }
#txn = bitcoin.core.CMutableTransaction(inp,[out])
txn = cnxn._call("createrawtransaction",inp, out)
signedtxn = cnxn._call("signrawtransaction",str(txn))
cnxn.sendrawtransaction(signedtxn)
def consolidate2(frm, toAddr, cnxn):
pdb.set_trace()
#out = bitcoin.core.CTxOut(frm["amount"],toAddr)
#script = bitcoin.core.CScript()
# bitcoin.wallet.CBitcoinAddress(toAddr)
inp = []
for tx in frm["txids"]:
txinfo = cnxn.gettransaction(tx)
print (txinfo)
vout = None
for d in txinfo["details"]:
if d["address"] == frm["address"]:
vout = d["vout"]
break
if not vout is None:
inp.append(bitcoin.core.CMutableTxIn(bitcoin.core.COutPoint(tx, vout)))
out = bitcoin.core.CMutableTxOut(frm["amount"],toAddr.to_scriptPubKey())
txn = bitcoin.core.CMutableTransaction(inp,[out])
cnxn.sendrawtransaction(txn)
# python txnTest.py nol split
# python txnTest.py nol spam
# python txnTest.py nol unspent
# python txnTest.py nol join 100 1000
if __name__ == "__main__":
idx = 1
if len(sys.argv) > 1:
if sys.argv[1] == "help":
print("./txnTest.py <network> <operation> [operation specific arguments]")
print(' network can be: "testnet", "regtest", "nol", "main"')
print(' operation can be: "split", "join", "spam", "unspent", "info"')
print(" split: create more UTXOs.")
print(" parameters: [nSplits: takes every UTXO that has sufficient balance and splits it into this many more UTXOs, default 25]")
print(" example: ./txnTest.py nol split 10")
print(" join: consolidate UTXOs.")
print(" parameters: <nJoin: take this many UTXOs and join them into 1> <nRepeat: repeat the join this many times>")
print(" example that joins 50 UTXOs into one output 2 times: ./txnTest.py nol join 50 2")
print(" spam: generate a lot of transactions, by paying to myself.")
print(" example: ./txnTest.py nol spam")
sys.exit(1)
if sys.argv[idx] == "testnet":
bitcoin.SelectParams('testnet')
idx+=1
elif sys.argv[idx] == "regtest":
bitcoin.SelectParams('regtest')
idx+=1
elif sys.argv[idx] == "nol":
bitcoin.SelectParams('nol')
idx+=1
elif sys.argv[idx] == "main":
bitcoin.SelectParams('mainnet')
idx+=1
else:
print("Invalid network %s" % sys.argv[idx])
sys.exit(-1)
if len(sys.argv) > idx:
op = sys.argv[idx]
else: op = "info"
main(op, sys.argv[idx+1:])
def Test():
pdb.set_trace()
if 1:
bitcoin.SelectParams('nol')
main("repeat",[])
# main("spam")
# main("sweep",[100000,20])
|
doudizhu_random_multi_process.py
|
import time
import multiprocessing
import rlcard3
from rlcard3.agents.random_agent import RandomAgent
from rlcard3.utils.utils import set_global_seed, assign_task
if __name__ == '__main__':
# Timer start
start = time.time()
# Avoid RuntimeError
multiprocessing.freeze_support()
# Set the number of processes
process_num = 8
# Set episode_num
episode_num = 10000
# Assign tasks
per_tasks = assign_task(episode_num, process_num)
# Set game and make environment
game = 'doudizhu'
env = rlcard3.make(game)
# Set global seed
set_global_seed(1)
# Set up agents
agent_num = env.player_num
env.set_agents([RandomAgent(action_num=env.action_num)
for _ in range(agent_num)])
# Set up a shared list to collect trajectories
manager = multiprocessing.Manager()
trajectories_set = manager.list()
# Generate Processes
processes = []
for p in range(process_num):
process = multiprocessing.Process(target=env.run_multi, args=(per_tasks[p], trajectories_set))
processes.append(process)
# Run processes
for p in processes:
p.start()
for p in processes:
p.join()
end = time.time()
print('run time:', end-start)
|
emailb0mb3r.py
|
#!/usr/bin/python
# this python script sends multiple messages to a target.
import getpass
import smtplib
from email.message import EmailMessage
import threading
import random
import time
import colorama
import sys
from colorama import Style
from colorama import Fore
colorama.init()
err = (Fore.RED+Style.BRIGHT+"[-]"+Style.RESET_ALL)
suc = (Fore.BLUE+Style.BRIGHT+"[+]"+Style.RESET_ALL)
text_colour = Fore.YELLOW+Style.BRIGHT
print(Fore.RED + f"""
██▓ ███▄ █ █████▒▓█████ ▄████▄ ▄▄▄█████▓ ██▓ ▒█████ ███▄ █
▓██▒ ██ ▀█ █ ▓██ ▒ ▓█ ▀ ▒██▀ ▀█ ▓ ██▒ ▓▒▓██▒▒██▒ ██▒ ██ ▀█ █
▒██▒▓██ ▀█ ██▒▒████ ░ ▒███ ▒▓█ ▄ ▒ ▓██░ ▒░▒██▒▒██░ ██▒▓██ ▀█ ██▒
░██░▓██▒ ▐▌██▒░▓█▒ ░ ▒▓█ ▄ ▒▓▓▄ ▄██▒░ ▓██▓ ░ ░██░▒██ ██░▓██▒ ▐▌██▒
░██░▒██░ ▓██░░▒█░ ░▒████▒▒ ▓███▀ ░ ▒██▒ ░ ░██░░ ████▓▒░▒██░ ▓██░
░▓ ░ ▒░ ▒ ▒ ▒ ░ ░░ ▒░ ░░ ░▒ ▒ ░ ▒ ░░ ░▓ ░ ▒░▒░▒░ ░ ▒░ ▒ ▒
▒ ░░ ░░ ░ ▒░ ░ ░ ░ ░ ░ ▒ ░ ▒ ░ ░ ▒ ▒░ ░ ░░ ░ ▒░
▒ ░ ░ ░ ░ ░ ░ ░ ░ ░ ▒ ░░ ░ ░ ▒ ░ ░ ░
░ ░ ░ ░░ ░ ░ ░ ░ ░
░
███▄ ▄███▓ ▄▄▄ ██▓ ██▓ ▄▄▄▄ ▒█████ ███▄ ▄███▓ ▄▄▄▄ ▓█████ ██▀███
▓██▒▀█▀ ██▒▒████▄ ▓██▒▓██▒ ▓█████▄ ▒██▒ ██▒▓██▒▀█▀ ██▒▓█████▄ ▓█ ▀ ▓██ ▒ ██▒
▓██ ▓██░▒██ ▀█▄ ▒██▒▒██░ ▒██▒ ▄██▒██░ ██▒▓██ ▓██░▒██▒ ▄██▒███ ▓██ ░▄█ ▒
▒██ ▒██ ░██▄▄▄▄██ ░██░▒██░ ▒██░█▀ ▒██ ██░▒██ ▒██ ▒██░█▀ ▒▓█ ▄ ▒██▀▀█▄
▒██▒ ░██▒ ▓█ ▓██▒░██░░██████▒ ░▓█ ▀█▓░ ████▓▒░▒██▒ ░██▒░▓█ ▀█▓░▒████▒░██▓ ▒██▒
░ ▒░ ░ ░ ▒▒ ▓▒█░░▓ ░ ▒░▓ ░ ░▒▓███▀▒░ ▒░▒░▒░ ░ ▒░ ░ ░░▒▓███▀▒░░ ▒░ ░░ ▒▓ ░▒▓░
░ ░ ░ ▒ ▒▒ ░ ▒ ░░ ░ ▒ ░ ▒░▒ ░ ░ ▒ ▒░ ░ ░ ░▒░▒ ░ ░ ░ ░ ░▒ ░ ▒░
░ ░ ░ ▒ ▒ ░ ░ ░ ░ ░ ░ ░ ░ ▒ ░ ░ ░ ░ ░ ░░ ░
░ ░ ░ ░ ░ ░ ░ ░ ░ ░ ░ ░ ░ ░
░ ░
""" + Style.RESET_ALL+text_colour+" #Coded my McSam\n")
def email_details():
global from_email, to_email, password, subject, email_message, number, email, err, suc, arrow
print(text_colour+"\nEnter the email address you want to send the messages from."+Style.RESET_ALL)
from_email = str(input("[>] ")) # alllow the user to enter the sender's email address
print(text_colour+"\nEnter your Password here."+Style.RESET_ALL)
password = str(getpass.getpass("[>] ")) # the sender's password
print(text_colour+"\nEnter the email you want to bomb."+Style.RESET_ALL)
to_email = str(input("[>] ")) # allow the user to enter the targets email.
print(text_colour+"\nEnter the subject of your mail."+Style.RESET_ALL)
subject = str(input("[>] "))
print(text_colour+"\nEnter your message here."+Style.RESET_ALL)
email_message = str(input("[>] "))
try:
print(text_colour+"\nEnter the number of mails you want to send."+Style.RESET_ALL)
number = int(input("[>] "))
except ValueError:
print(text_colour+"Invalid Input!!")
print(text_colour+"\nEnter the number of mails you want to send."+Style.RESET_ALL)
number = int(input("[>] "))
email = EmailMessage()
email["from"] = from_email
email["to"] = to_email
email.set_content(email_message)
# The body of the email.
def change_subject():
lowerchars = [' 1',' 2',' 3',' 4',' 5',' 6',' 7',' 8',' 9',' 10',' 11',' 12',' 13',' 14',' 15',' 16',' 17',' 18',' 19',' 20',' 21',' 22',' 23',' 24',' 25',' 26','A','B','C','D','E','F','G','H','I','J','K','L','M','N','O','P','Q','R','S','T','U','V','W','X','Y','Z','!','?','%','&','*','@','#']
smaple_list = random.sample(lowerchars,1)
subject_2 = subject + smaple_list[0]
return subject_2
def mail_stuff(smtp_host, smtp_port, subject_1):
global from_email, to_email, password, email_message, number
email["subject"] = subject_1
try:
with (smtplib.SMTP(host=smtp_host, port=smtp_port)) as smtpObj:
smtpObj.ehlo()
smtpObj.starttls()
smtpObj.login(from_email, password)
smtpObj.send_message(email)
print(suc + " Successfully sent email.")
except KeyboardInterrupt:
print(err + " Cancelled!")
sys.exit()
except smtplib.SMTPAuthenticationError:
print(err + " The login creditials may be wrong.")
except TimeoutError:
print(err + " A timeout error occured!")
except:
print(err + " Unable to send message.")
del email["subject"]
def program():
global from_email, to_email, password, subject_1, email_message, number, lowerchars, subject
if smtp_sever.lower() == "gmail":
smtp_host = "smtp.gmail.com"
smtp_port = 587
email_details()
threads = []
for i in range(number):
thread = threading.Thread(target=mail_stuff(smtp_host,smtp_port,change_subject()))
threads.append(thread)
for i in range(number):
threads[i].start()
else:
print(text_colour+"\nEnter the smtp host."+Style.RESET_ALL)
smtp_host = str(input("[>] "))
print(text_colour+"\nEnter the smtp port."+Style.RESET_ALL)
smtp_port = int(input("[>] "))
email_details()
threads = []
for i in range(number):
thread = threading.Thread(target=mail_stuff, args=(smtp_host,smtp_port,change_subject(),))
threads.append(thread)
for i in range(number):
threads[i].start()
try:
print(text_colour+"Enter the name of the smtp relay you want to use."+Style.RESET_ALL)
smtp_sever = input("[>] ") # the smtp relay needed to deliver the message
program()
except KeyboardInterrupt:
print("\n"+err+" Cancelled!")
|
resnet50_waa.py
|
'''
Copyright 2019 Xilinx Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
'''
from ctypes import *
import cv2
import numpy as np
import runner
import xir.graph
import pathlib
import xir.subgraph
import os
import input_fn
import math
import threading
import time
import sys
import waa_rt
'''
Calculate softmax
data: data to be calculated
size: data size
return: softmax result
'''
def CPUCalcSoftmax(data, size):
total = 0.0
result = [0.0 for i in range(size)]
for i in range(size):
result[i] = math.exp(data[i])
total += result[i]
for i in range(size):
result[i] /= total
return result
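# A vectorized, numerically stable alternative (a sketch using the numpy
# already imported above; subtracting the max avoids overflow in exp):
def NumpyCalcSoftmax(data):
e = np.exp(np.asarray(data, dtype=np.float64) - np.max(data))
return e / e.sum()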
def get_script_directory():
path = os.getcwd()
return path
'''
Get top-k results according to their probability
datain: softmax result data
filePath: path to the file that records the class labels
'''
def TopK(datain,size,filePath):
cnt=[i for i in range(size) ]
pair=zip(datain,cnt)
pair=sorted(pair,reverse=True)
softmax_new,cnt_new=zip(*pair)
fp=open(filePath, "r")
data1=fp.readlines()
fp.close()
for i in range(5):
flag=0
for line in data1:
if flag==cnt_new[i]:
print("Top[%d] %f %s" %(i, (softmax_new[i]),(line.strip)("\n")))
flag=flag+1
SCRIPT_DIR = get_script_directory()
calib_image_dir = SCRIPT_DIR + "/images/"
label_file="./words.txt"
IMAGE_WIDTH = 224
IMAGE_HEIGHT = 224
global threadnum
threadnum = 0
'''
run resnet50 with batch
dpu: dpu runner
img: imagelist to be run
cnt: threadnum
'''
def runResnet50(dpu,img,cnt):
"""get tensor"""
inputTensors = dpu.get_input_tensors()
outputTensors = dpu.get_output_tensors()
outputHeight = outputTensors[0].dims[1]
outputWidth = outputTensors[0].dims[2]
outputChannel = outputTensors[0].dims[3]
outputSize = outputHeight*outputWidth*outputChannel
softmax = np.empty(outputSize)
batchSize = inputTensors[0].dims[0]
n_of_images = len(img)
count = 0
while count < cnt:
runSize = batchSize
shapeIn = (runSize,) + tuple([inputTensors[0].dims[i] for i in range(inputTensors[0].ndim)][1:])
"""prepare batch input/output """
outputData = []
inputData = []
outputData.append(np.empty((runSize,outputHeight,outputWidth,outputChannel), dtype = np.float32, order = 'C'))
inputData.append(np.empty((shapeIn), dtype = np.float32, order = 'C'))
"""init input image to input buffer """
for j in range(runSize):
imageRun = inputData[0]
imageRun[j,...] = img[(count+j)% n_of_images].reshape(inputTensors[0].dims[1],inputTensors[0].dims[2],inputTensors[0].dims[3])
"""run with batch """
job_id = dpu.execute_async(inputData,outputData)
dpu.wait(job_id)
for j in range(len(outputData)):
outputData[j] = outputData[j].reshape(runSize, outputSize)
"""softmax calculate with batch """
for j in range(runSize):
softmax = CPUCalcSoftmax(outputData[0][j], outputSize)
#TopK(softmax, outputSize, label_file)
count = count + runSize
def get_subgraph (g):
sub = []
root = g.get_root_subgraph()
sub = [ s for s in root.children
if s.metadata.get_attr_str ("device") == "DPU"]
return sub
def main(argv):
global threadnum
listimage=os.listdir(calib_image_dir)
threadAll = []
threadnum = int(argv[1])
i = 0
global runTotall
runTotall = len(listimage)
g = xir.graph.Graph.deserialize(pathlib.Path(argv[2]))
subgraphs = get_subgraph (g)
assert len(subgraphs) == 1 # only one DPU kernel
all_dpu_runners = []
for i in range(int(threadnum)):
all_dpu_runners.append(runner.Runner(subgraphs[0], "run"))
"""image list to be run """
xclbin_p=str("/mnt/dpu.xclbin")
kernelName_p="pp_pipeline_accel"
deviceIdx_p=0
fpga_pp = waa_rt.PreProcess(xclbin_p,kernelName_p,deviceIdx_p)
time1 = int(round(time.time() * 1000))
img = []
for i in range(runTotall):
path = os.path.join(calib_image_dir,listimage[i])
image = cv2.imread(path)
rows, cols, channels = image.shape
image = fpga_pp.preprocess_input(image, rows, cols)
img.append(image)
time_pre = int(round(time.time() * 1000))
start = 0
for i in range(int(threadnum)):
if (i==threadnum-1):
end = len(img)
else:
end = start+(len(img)//threadnum)
t1 = threading.Thread(target=runResnet50, args=(all_dpu_runners[i], img[start:end], len(img[start:end])))
threadAll.append(t1)
start = end
for x in threadAll:
x.start()
for x in threadAll:
x.join()
time2 = int(round(time.time() * 1000))
timetotal = time2 - time1
fps = float(runTotall * 1000 / timetotal)
#print("Pre time: %d ms" %(time_pre - time1))
#print("DPU + post time: %d ms" %(time2 - time_pre))
#print("Total time : %d ms" %timetotal)
#print("Total frames : %d" %len(img))
print("Performance : %.2f FPS" %fps)
if __name__ == "__main__":
if len(sys.argv) != 3:
print("please input thread number and json file path.")
else :
main(sys.argv)
|
test_client.py
|
#!/usr/bin/env python
# Copyright 2012 James McCauley
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at:
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
This is NOT a POX component. It's a little tool to test out the messenger.
"""
import socket
import threading
import json
class JSONDestreamer (object):
import json
decoder = json.JSONDecoder()
def __init__ (self, callback = None):
self._buf = ''
self.callback = callback if callback else self.rx
def push (self, data):
if len(self._buf) == 0:
data = data.lstrip()
self._buf += data
try:
while len(self._buf) > 0:
r,off = self.decoder.raw_decode(self._buf)
self._buf = self._buf[off:].lstrip()
self.callback(r)
except ValueError:
pass
def rx (self, data):
import json
print "Recv:", json.dumps(data, indent=4)
jd = JSONDestreamer()
done = False
def reader (socket):
global done
while True:
d = socket.recv(1024)
if d == "":
done = True
break
jd.push(d)
cur_chan = None
def channel (ch):
global cur_chan
cur_chan = ch
import readline
def main (addr = "127.0.0.1", port = 7790):
port = int(port)
print "Connecting to %s:%i" % (addr,port)
sock = socket.create_connection((addr, port))
t = threading.Thread(target=reader, args=(sock,))
t.daemon = True
t.start()
while not done:
try:
#print ">",
m = raw_input()
if len(m) == 0: continue
m = eval(m)
if not isinstance(m, dict):
continue
if cur_chan is not None and 'CHANNEL' not in m:
m['CHANNEL'] = cur_chan
m = json.dumps(m)
sock.send(m)
except EOFError:
break
except KeyboardInterrupt:
break
except:
import traceback
traceback.print_exc()
if __name__ == "__main__":
import sys
main(*sys.argv[1:])
else:
# This will get run if you try to run this as a POX component.
def launch ():
from pox.core import core
log = core.getLogger()
log.critical("This isn't a POX component.")
log.critical("Please see the documentation.")
raise RuntimeError("This isn't a POX component.")
|
client_socket.py
|
"""
client_socket.py:
Socket used to attach to the TCP server as a client and read/write data.
"""
import select
import socket
import threading
from fprime.constants import DATA_ENCODING
from fprime_gds.common.handlers import DataHandler
# Constants for public use
GUI_TAG = "GUI"
FSW_TAG = "FSW"
class ThreadedTCPSocketClient(DataHandler):
"""
Threaded TCP client that connects to the socket server that serves packets from the flight
software
"""
def __init__(self, sock=None, dest=FSW_TAG):
"""
Threaded client socket constructor
Keyword Arguments:
sock {Socket} -- A socket for the client to use. Creates its own
if None (default: {None})
"""
if sock is None:
self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
else:
self.sock = sock
# NOTE can't do this b/c EINPROGRESS: self.sock.setblocking(0)
self.dest = dest
self.__distributors = []
self.__select_timeout = 1
self.__data_recv_thread = threading.Thread(target=self.recv)
self.stop_event = threading.Event()
def get_data_bytes(self, string_data):
"""
Convert the data from string to bytes
:param string_data: data in string format
:return: data in bytes format
"""
return string_data.encode(DATA_ENCODING)
def get_data_string(self, bytes_data):
"""
Convert the data from bytes to string
:param bytes_data: data in bytes format
:return: data in string format
"""
return bytes_data.decode(DATA_ENCODING)
def register_distributor(self, distributor):
"""Registers a fprime.gds.distributor object with this socket
Arguments:
fprime.gds.distributor {Distributor} -- Distributor must implement data_callback
"""
self.__distributors.append(distributor)
def register_to_server(self, register_as):
"""
Registers the caller to the server as type register_as
This function assumes the socket connects to an fprime TCP server
Args:
register_as (string): How to identify this process to the TCP server
Can be either "FSW" or "GUI"
"""
data = "Register %s\n" % register_as
self.sock.send(self.get_data_bytes(data))
def connect(self, host, port):
"""Connect to host at given port and start the threaded recv method.
Arguments:
host {string} -- IP of the host server
port {int} -- Port of the host server
"""
try:
self.sock.connect((host, port))
self.__data_recv_thread.start()
except OSError:
print("There was a problem connecting to the TCP Server")
exit(-1)
def disconnect(self):
"""Disconnect the socket client from the server and stop the internal thread."""
self.stop_event.set()
self.__data_recv_thread.join()
self.sock.close()
def data_callback(self, data, sender=None):
"""
Handles incoming data by sending it to a socket.
:param data: data to send to the client socket
:param sender: sender source of the data
"""
self.send(data, self.dest)
def send(self, data, dest):
"""
Send data to the server
All necessary headers are added in this function.
Arguments:
data {binary} -- The data to send (What you want the destination
to receive)
dest {String} -- Where to send the data to. Either "FSW" or "GUI"
"""
self.sock.send(b"A5A5 %s %s" % (self.get_data_bytes(dest), data))
def recv(self):
"""
Method run constantly by the enclosing thread. Looks for data from the server.
"""
while not self.stop_event.is_set():
ready = select.select([self.sock], [], [], self.__select_timeout)
if ready[0]:
chunk = self.sock.recv(1024)
for d in self.__distributors:
d.on_recv(chunk)
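# Minimal usage sketch (assumes an fprime TCP server is listening; host,
# port, and payload below are illustrative):
#
# if __name__ == "__main__":
#     client = ThreadedTCPSocketClient()
#     client.connect("127.0.0.1", 50000)
#     client.register_to_server(GUI_TAG)  # identify this process to the server
#     client.send(b"hello", FSW_TAG)      # framed with the A5A5 header by send()
#     client.disconnect()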
|
cbas_secondary_indexes.py
|
from cbas_base import *
# from couchbase import FMT_BYTES
import threading
import random
class CBASSecondaryIndexes(CBASBaseTest):
def setUp(self):
self.input = TestInputSingleton.input
if "default_bucket" not in self.input.test_params:
self.input.test_params.update({"default_bucket": False})
super(CBASSecondaryIndexes, self).setUp()
self.load_sample_buckets(servers=[self.master],
bucketName=self.cb_bucket_name,
total_items=self.beer_sample_docs_count)
if "add_all_cbas_nodes" in self.input.test_params and \
self.input.test_params["add_all_cbas_nodes"] and len(
self.cbas_servers) > 1:
self.add_all_nodes_then_rebalance(self.cbas_servers)
self.cbas_util.createConn(self.cb_bucket_name)
# Create bucket on CBAS
self.cbas_util.create_bucket_on_cbas(cbas_bucket_name=self.cbas_bucket_name,
cb_bucket_name=self.cb_bucket_name,
cb_server_ip=self.cb_server_ip)
# Create dataset on the CBAS bucket
self.cbas_util.create_dataset_on_bucket(
cbas_bucket_name=self.cb_bucket_name,
cbas_dataset_name=self.cbas_dataset_name)
def tearDown(self):
super(CBASSecondaryIndexes, self).tearDown()
def verify_index_used(self, statement, index_used=False, index_name=None):
statement = 'EXPLAIN %s'%statement
status, metrics, errors, results, _ = self.cbas_util.execute_statement_on_cbas_util(
statement)
self.assertEquals(status, "success")
if status == 'success':
self.assertEquals(errors, None)
if index_used:
self.assertTrue("index-search" in str(results))
self.assertFalse("data-scan" in str(results))
self.log.info("INDEX-SEARCH is found in EXPLAIN hence indexed data will be scanned to serve %s"%statement)
if index_name:
self.assertTrue(index_name in str(results))
else:
self.assertTrue("data-scan" in str(results))
self.assertFalse("index-search" in str(results))
self.log.info("DATA-SCAN is found in EXPLAIN hence index is not used to serve %s"%statement)
def test_create_index(self):
'''
Steps :
1. Create bucket in CBAS, create dataset
2. Create index on various fields as passed in the parameters
3. Validate if the index is created and the index definition has the expected fields
Author : Mihir Kamdar
Created date : 8/1/2017
'''
# Create Index
index_fields = ""
for index_field in self.index_fields:
index_fields += index_field + ","
index_fields = index_fields[:-1]
create_idx_statement = "create index {0} on {1}({2});".format(
self.index_name, self.cbas_dataset_name, index_fields)
status, metrics, errors, results, _ = self.cbas_util.execute_statement_on_cbas_util(
create_idx_statement)
self.assertTrue(status == "success", "Create Index query failed")
self.assertTrue(
self.cbas_util.verify_index_created(self.index_name, self.index_fields,
self.cbas_dataset_name)[0])
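# For reference, with self.index_fields = ["city:string", "abv:bigint"] the
# statement built above is (a representative example, not captured from a run):
#   create index <index_name> on <cbas_dataset_name>(city:string,abv:bigint);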
def test_create_index_without_if_not_exists(self):
'''
Steps :
1. Create bucket in CBAS, create dataset
2. Create index
3. Again create an index with the same name without using IF_NOT_EXISTS clause
4. Validate that the error msg is as expected
Author : Mihir Kamdar
Created date : 8/1/2017
'''
# Create Index
index_fields = ""
for index_field in self.index_fields:
index_fields += index_field + ","
index_fields = index_fields[:-1]
create_idx_statement = "create index {0} on {1}({2});".format(
self.index_name, self.cbas_dataset_name, index_fields)
status, metrics, errors, results, _ = self.cbas_util.execute_statement_on_cbas_util(
create_idx_statement)
self.assertTrue(status == "success", "Create Index query failed")
self.assertTrue(
self.cbas_util.verify_index_created(self.index_name, self.index_fields,
self.cbas_dataset_name)[0])
# Create another index with same name
status, metrics, errors, results, _ = self.cbas_util.execute_statement_on_cbas_util(
create_idx_statement)
self.assertTrue(self.cbas_util.validate_error_in_response(status, errors, self.expected_error),
"Error msg not matching expected error msg")
def test_create_index_with_if_not_exists(self):
'''
Steps :
1. Create bucket in CBAS, create dataset
2. Create index
3. Again create an index with the same name using IF_NOT_EXISTS clause
4. Validate that there is no error
Author : Mihir Kamdar
Created date : 8/1/2017
'''
# Create Index
index_fields = ""
for index_field in self.index_fields:
index_fields += index_field + ","
index_fields = index_fields[:-1]
create_idx_statement = "create index {0} IF NOT EXISTS on {1}({2});".format(
self.index_name, self.cbas_dataset_name, index_fields)
status, metrics, errors, results, _ = self.cbas_util.execute_statement_on_cbas_util(
create_idx_statement)
self.assertTrue(status == "success", "Create Index query failed")
self.assertTrue(
self.cbas_util.verify_index_created(self.index_name, self.index_fields,
self.cbas_dataset_name)[0])
# Create another index with same name
status, metrics, errors, results, _ = self.cbas_util.execute_statement_on_cbas_util(
create_idx_statement)
self.assertTrue(status == "success", "Create Index query failed")
self.assertTrue(
self.cbas_util.verify_index_created(self.index_name, self.index_fields,
self.cbas_dataset_name)[0])
def test_create_index_with_if_not_exists_different_fields(self):
'''
Steps :
1. Create bucket in CBAS, create dataset
2. Create index
3. Again create an index with the same name but with different fields using IF_NOT_EXISTS clause
4. Validate there is no error
5. The index definition should not change.
Author : Mihir Kamdar
Created date : 8/1/2017
'''
index_field1 = "city:string"
index_field2 = "abv:bigint"
# Create Index
create_idx_statement = "create index {0} IF NOT EXISTS on {1}({2});".format(
self.index_name, self.cbas_dataset_name, index_field1)
status, metrics, errors, results, _ = self.cbas_util.execute_statement_on_cbas_util(
create_idx_statement)
self.assertTrue(status == "success", "Create Index query failed")
self.assertTrue(
self.cbas_util.verify_index_created(self.index_name, [index_field1],
self.cbas_dataset_name)[0])
# Create another index with same name
create_idx_statement = "create index {0} IF NOT EXISTS on {1}({2});".format(
self.index_name, self.cbas_dataset_name, index_field2)
status, metrics, errors, results, _ = self.cbas_util.execute_statement_on_cbas_util(
create_idx_statement)
self.assertTrue(status == "success", "Create Index query failed")
# The index definition should be based on the older field, it should not change
self.assertTrue(
self.cbas_util.verify_index_created(self.index_name, [index_field1],
self.cbas_dataset_name)[0])
def test_multiple_composite_index_with_overlapping_fields(self):
'''
Steps :
1. Create bucket in CBAS, create dataset
2. Create index
3. Again create a composite index
4. Now create another composite index with some overlapping fields
5. Both the indexes should get created successfully
Author : Mihir Kamdar
Created date : 8/1/2017
'''
index_fields1 = ["city:string", "abv:bigint"]
index_fields2 = ["abv:bigint", "geo.lat:double"]
# Create Index
index_fields = ""
for index_field in index_fields1:
index_fields += index_field + ","
index_fields = index_fields[:-1]
create_idx_statement = "create index {0} IF NOT EXISTS on {1}({2});".format(
self.index_name + "1", self.cbas_dataset_name, index_fields)
status, metrics, errors, results, _ = self.cbas_util.execute_statement_on_cbas_util(
create_idx_statement)
self.assertTrue(status == "success", "Create Index query failed")
self.assertTrue(
self.cbas_util.verify_index_created(self.index_name + "1", index_fields1,
self.cbas_dataset_name)[0])
# Create another composite index with overlapping fields
index_fields = ""
for index_field in index_fields2:
index_fields += index_field + ","
index_fields = index_fields[:-1]
create_idx_statement = "create index {0} IF NOT EXISTS on {1}({2});".format(
self.index_name + "2", self.cbas_dataset_name, index_fields)
status, metrics, errors, results, _ = self.cbas_util.execute_statement_on_cbas_util(
create_idx_statement)
self.assertTrue(status == "success", "Create Index query failed")
self.assertTrue(
self.cbas_util.verify_index_created(self.index_name + "2", index_fields2,
self.cbas_dataset_name)[0])
def test_create_index_non_empty_dataset(self):
'''
Steps :
1. Create bucket in CBAS, create dataset, connect to the bucket, disconnect from bucket
2. Create index
3. Validate the index is created correctly
Author : Mihir Kamdar
Created date : 8/1/2017
'''
# Connect to Bucket
result = self.cbas_util.connect_to_bucket(cbas_bucket_name=
self.cbas_bucket_name,
cb_bucket_password=self.cb_bucket_password)
# Allow ingestion to complete
self.sleep(30)
# Disconnect from bucket
result = self.cbas_util.disconnect_from_bucket(cbas_bucket_name=
self.cbas_bucket_name)
# Create Index
index_fields = ""
for index_field in self.index_fields:
index_fields += index_field + ","
index_fields = index_fields[:-1]
create_idx_statement = "create index {0} on {1}({2});".format(
self.index_name, self.cbas_dataset_name, index_fields)
status, metrics, errors, results, _ = self.cbas_util.execute_statement_on_cbas_util(
create_idx_statement)
self.assertTrue(status == "success", "Create Index query failed")
self.assertTrue(
self.cbas_util.verify_index_created(self.index_name, self.index_fields,
self.cbas_dataset_name)[0])
def test_create_index_with_bucket_connected(self):
'''
Steps :
1. Create bucket in CBAS, create dataset, connect to the bucket
2. Create index
3. Create index should fail.
4. Validate that the error msg is as expected
Author : Mihir Kamdar
Created date : 8/1/2017
'''
# Connect to Bucket
self.cbas_util.connect_to_bucket(cbas_bucket_name=
self.cbas_bucket_name,
cb_bucket_password=self.cb_bucket_password)
# Allow ingestion to complete
self.sleep(30)
# Create Index
index_fields = ""
for index_field in self.index_fields:
index_fields += index_field + ","
index_fields = index_fields[:-1]
create_idx_statement = "create index {0} on {1}({2});".format(
self.index_name, self.cbas_dataset_name, index_fields)
status, metrics, errors, results, _ = self.cbas_util.execute_statement_on_cbas_util(
create_idx_statement)
self.assertTrue(self.cbas_util.validate_error_in_response(status, errors, self.expected_error))
def test_drop_index(self):
'''
Steps :
1. Create bucket in CBAS, create dataset
2. Create index
3. Validate the index is created correctly
4. Drop index
5. Validate that the index is dropped
Author : Mihir Kamdar
Created date : 8/1/2017
'''
# Create Index
index_fields = ""
for index_field in self.index_fields:
index_fields += index_field + ","
index_fields = index_fields[:-1]
create_idx_statement = "create index {0} on {1}({2});".format(
self.index_name, self.cbas_dataset_name, index_fields)
status, metrics, errors, results, _ = self.cbas_util.execute_statement_on_cbas_util(
create_idx_statement)
self.assertTrue(status == "success", "Create Index query failed")
self.assertTrue(
self.cbas_util.verify_index_created(self.index_name, self.index_fields,
self.cbas_dataset_name)[0])
drop_idx_statement = "drop index {0}.{1};".format(
self.cbas_dataset_name, self.index_name)
status, metrics, errors, results, _ = self.cbas_util.execute_statement_on_cbas_util(
drop_idx_statement)
self.assertTrue(status == "success", "Drop Index query failed")
self.assertFalse(
self.cbas_util.verify_index_created(self.index_name, self.index_fields,
self.cbas_dataset_name)[0])
def test_drop_non_existing_index(self):
'''
Steps :
1. Create bucket in CBAS, create dataset
2. Drop a non-existing index without using IF_EXISTS clause
3. Validate that the error msg is as expected
4. Drop a non-existing index using IF_EXISTS clause
5. Validate there is no error
Author : Mihir Kamdar
Created date : 8/1/2017
'''
# Drop non-existing index without IF EXISTS
drop_idx_statement = "drop index {0}.{1};".format(
self.cbas_dataset_name, self.index_name)
status, metrics, errors, results, _ = self.cbas_util.execute_statement_on_cbas_util(
drop_idx_statement)
self.assertTrue(self.cbas_util.validate_error_in_response(status, errors, self.expected_error))
# Drop non-existing index with IF EXISTS
drop_idx_statement = "drop index {0}.{1} IF EXISTS;".format(
self.cbas_dataset_name, self.index_name)
status, metrics, errors, results, _ = self.cbas_util.execute_statement_on_cbas_util(
drop_idx_statement)
self.assertEqual(status, "success",
"Drop non existent index with IF EXISTS fails")
def test_drop_dataset_drops_index(self):
'''
Steps :
1. Create bucket in CBAS, create dataset
2. Create index
3. Validate the index is created correctly
4. Drop dataset
5. Validate that the index is also dropped
Author : Mihir Kamdar
Created date : 8/1/2017
'''
# Create Index
index_fields = ""
for index_field in self.index_fields:
index_fields += index_field + ","
index_fields = index_fields[:-1]
create_idx_statement = "create index {0} on {1}({2});".format(
self.index_name, self.cbas_dataset_name, index_fields)
status, metrics, errors, results, _ = self.cbas_util.execute_statement_on_cbas_util(
create_idx_statement)
self.assertTrue(status == "success", "Create Index query failed")
self.assertTrue(
self.cbas_util.verify_index_created(self.index_name, self.index_fields,
self.cbas_dataset_name)[0])
# Drop dataset
self.cbas_util.drop_dataset(self.cbas_dataset_name)
# Check that the index no longer exists
self.assertFalse(
self.cbas_util.verify_index_created(self.index_name, self.index_fields,
self.cbas_dataset_name)[0])
def test_drop_non_empty_index(self):
'''
Steps :
1. Create bucket in CBAS, create dataset
2. Create index
3. Validate the index is created correctly
4. Connect dataset, disconnect dataset
5. Drop index
6. Validate that the index is dropped
Author : Mihir Kamdar
Created date : 8/1/2017
'''
# Create Index
index_fields = ""
for index_field in self.index_fields:
index_fields += index_field + ","
index_fields = index_fields[:-1]
create_idx_statement = "create index {0} on {1}({2});".format(
self.index_name, self.cbas_dataset_name, index_fields)
status, metrics, errors, results, _ = self.cbas_util.execute_statement_on_cbas_util(
create_idx_statement)
self.assertTrue(status == "success", "Create Index query failed")
self.assertTrue(
self.cbas_util.verify_index_created(self.index_name, self.index_fields,
self.cbas_dataset_name)[0])
# Connect to Bucket
self.cbas_util.connect_to_bucket(cbas_bucket_name=
self.cbas_bucket_name,
cb_bucket_password=self.cb_bucket_password)
# Allow ingestion to complete
self.sleep(30)
# Disconnect from bucket
self.cbas_util.disconnect_from_bucket(cbas_bucket_name=
self.cbas_bucket_name)
drop_idx_statement = "drop index {0}.{1};".format(
self.cbas_dataset_name, self.index_name)
status, metrics, errors, results, _ = self.cbas_util.execute_statement_on_cbas_util(
drop_idx_statement)
self.assertTrue(status == "success", "Drop Index query failed")
self.assertFalse(
self.cbas_util.verify_index_created(self.index_name,
self.index_fields,
self.cbas_dataset_name)[0])
def _direct_client(self, server, bucket, timeout=30):
# CREATE SDK CLIENT
if self.sdk_client_type == "java":
try:
from sdk_client import SDKClient
scheme = "couchbase"
host = self.master.ip
if self.master.ip == "127.0.0.1":
scheme = "http"
host = "{0}:{1}".format(self.master.ip, self.master.port)
return SDKClient(scheme=scheme, hosts=[host], bucket=bucket,
password=self.master.rest_password)
except Exception as ex:
self.log.error("cannot load sdk client due to error {0}"
.format(str(ex)))
# USE MC BIN CLIENT WHEN NOT USING SDK CLIENT
return self.direct_mc_bin_client(server, bucket, timeout=timeout)
def test_index_population(self):
'''
Steps :
1. Upsert a document whose field value may or may not fit the index type
2. Create index, connect the CBAS bucket and allow ingestion to complete
3. Query by a fitting and by a non-fitting value and verify the counts
4. Verify via EXPLAIN whether the index served the query
'''
# Create Index
# to_verify=0
search_by = self.input.param("search_by", '')
exp_number = self.input.param("exp_number", 0)
not_fit_value = self.input.param("not_fit_value", '')
expected_status = self.input.param("status", 'success')
binary = self.input.param("binary", False)
index_used = self.input.param("index_used", False)
if ";" in str(not_fit_value):
not_fit_value = not_fit_value.split(';')
testuser = [{'id': self.cb_bucket_name, 'name': self.cb_bucket_name, 'password': 'password'}]
rolelist = [{'id': self.cb_bucket_name, 'name': self.cb_bucket_name, 'roles': 'admin'}]
self.add_built_in_server_user(testuser=testuser, rolelist=rolelist)
self.client = self._direct_client(self.master, self.cb_bucket_name)
k = 'test_index_population'
index_fields = ""
for index_field in self.index_fields:
index_fields += index_field + ","
index_fields = index_fields[:-1]
if binary:
self.client.upsert('utf16_doc', not_fit_value.encode('utf16'))
else:
if "." in index_fields.split(":")[0]:
self.client.upsert(k, {index_fields.split(":")[0].split(".")[0]:{index_fields.split(":")[0].split(".")[1] : not_fit_value}})
else:
self.client.upsert(k, {index_fields.split(":")[0] : not_fit_value})
self.client.close()
if index_fields.split(":")[1] == "string" and isinstance(not_fit_value,str) or \
index_fields.split(":")[1] == "double" and isinstance(not_fit_value,(float,int)) or \
index_fields.split(":")[1] == "bigint" and isinstance(not_fit_value,(float,int)):
index_used=True
create_idx_statement = "create index {0} on {1}({2});".format(
self.index_name, self.cbas_dataset_name, index_fields)
status, metrics, errors, results, _ = self.cbas_util.execute_statement_on_cbas_util(
create_idx_statement)
self.assertTrue(status == "success", "Create Index query failed")
self.assertTrue(
self.cbas_util.verify_index_created(self.index_name, self.index_fields,
self.cbas_dataset_name)[0])
self.cbas_util.connect_to_bucket(cbas_bucket_name=
self.cbas_bucket_name,
cb_bucket_password=self.cb_bucket_password)
self.sleep(20)
if isinstance(search_by, basestring):
statement = 'SELECT count(*) FROM `{0}` where {1}="{2}"'.format(self.cbas_dataset_name, index_fields.split(":")[0], search_by)
else:
statement = 'SELECT count(*) FROM `{0}` where {1}={2}'.format(self.cbas_dataset_name,
index_fields.split(":")[0], search_by)
status, metrics, errors, results, _ = self.cbas_util.execute_statement_on_cbas_util(
statement)
self.assertEquals(status, "success")
self.assertEquals(errors, None)
self.assertEquals(results, [{'$1': exp_number}])
if isinstance(not_fit_value,str):
statement = 'SELECT count(*) FROM `{0}` where {1}="{2}"'.format(self.cbas_dataset_name,
index_fields.split(":")[0], not_fit_value)
else:
statement = 'SELECT count(*) FROM `{0}` where {1}={2}'.format(self.cbas_dataset_name,
index_fields.split(":")[0], not_fit_value)
status, metrics, errors, results, _ = self.cbas_util.execute_statement_on_cbas_util(
statement)
self.assertEquals(status, expected_status)
if status == 'success':
self.assertEquals(errors, None)
self.assertEquals(results, [{'$1': 1}])
self.log.info("Verify whether statement %s used index or not. Indexed: %s"%(statement,index_fields))
self.verify_index_used(statement, index_used, self.index_name)
# https://issues.couchbase.com/browse/MB-25646
# https://issues.couchbase.com/browse/MB-25657
def test_index_population_thread(self):
to_verify = 0
index_used = self.input.param("index_used", False)
def update_data(client, index_fields):
for _ in xrange(100):
if index_fields.split(":")[-1] == 'double':
not_fit_value = random.choice([False, "sdfs", 11111])
elif index_fields.split(":")[-1] == 'string':
not_fit_value = random.choice([False, 11111, 36.6])
elif index_fields.split(":")[-1] == 'bigint':
not_fit_value = random.choice([False, "sdfs", 36.6])
perc = random.randrange(0, 100)
if perc > 75:
# 25% with binary data
# client.upsert('utf16_doc', str(not_fit_value).encode('utf16'), format=FMT_BYTES)
client.upsert(k, {index_fields.split(":")[0]: not_fit_value})
else:
# 10% field removed
client.upsert(k, {index_fields.split(":")[0] + "_NEW_FIELD": not_fit_value})
# Create Index
search_by = self.input.param("search_by", '')
exp_number = self.input.param("exp_number", 0)
not_fit_value = self.input.param("not_fit_value", '')
expected_status = self.input.param("status", 'success')
if ";" in not_fit_value:
not_fit_value = not_fit_value.split(';')
testuser = [{'id': self.cb_bucket_name, 'name': self.cb_bucket_name, 'password': 'password'}]
rolelist = [{'id': self.cb_bucket_name, 'name': self.cb_bucket_name, 'roles': 'admin'}]
self.add_built_in_server_user(testuser=testuser, rolelist=rolelist)
self.client = self._direct_client(self.master, self.cb_bucket_name)
k = 'test_index_population_thread'
index_fields = ""
for index_field in self.index_fields:
index_fields += index_field + ","
index_fields = index_fields[:-1]
create_idx_statement = "create index {0} on {1}({2});".format(
self.index_name, self.cbas_dataset_name, index_fields)
status, metrics, errors, results, _ = self.cbas_util.execute_statement_on_cbas_util(
create_idx_statement)
self.assertTrue(status == "success", "Create Index query failed")
self.assertTrue(
self.cbas_util.verify_index_created(self.index_name, self.index_fields,
self.cbas_dataset_name)[0])
self.cbas_util.connect_to_bucket(cbas_bucket_name=
self.cbas_bucket_name,
cb_bucket_password=self.cb_bucket_password)
self.sleep(10)
d = threading.Thread(name='daemon', target=update_data, args=(self.client, index_fields,))
d.setDaemon(True)
d.start()
for i in xrange(10):
if isinstance(search_by, basestring):
statement = 'SELECT count(*) FROM `{0}` where {1}="{2}"'.format(self.cbas_dataset_name,
index_fields.split(":")[0], search_by)
else:
statement = 'SELECT count(*) FROM `{0}` where {1}={2}'.format(self.cbas_dataset_name,
index_fields.split(":")[0], search_by)
status, metrics, errors, results, _ = self.cbas_util.execute_statement_on_cbas_util(
statement)
self.assertEquals(status, "success")
self.assertEquals(errors, None)
self.assertEquals(results, [{'$1': exp_number}])
if isinstance(not_fit_value,str):
statement = 'SELECT count(*) FROM `{0}` where {1}="{2}"'.format(self.cbas_dataset_name,
index_fields.split(":")[0], not_fit_value)
else:
statement = 'SELECT count(*) FROM `{0}` where {1}={2}'.format(self.cbas_dataset_name,
index_fields.split(":")[0], not_fit_value)
status, metrics, errors, results, _ = self.cbas_util.execute_statement_on_cbas_util(
statement)
self.assertEquals(status, expected_status)
if status == 'success':
self.assertEquals(errors, None)
self.assertEquals(results, [{'$1': 0}])
self.log.info("Verify whether statement %s used index or not. Indexed: %s"%(statement,index_fields))
self.verify_index_used(statement, index_used, self.index_name)
self.client.close()
def test_index_population_where_statements(self):
exp_number = self.input.param("exp_number", 0)
where_statement = self.input.param("where_statement", '').replace('_EQ_', '=')
index_used = self.input.param("index_used", False)
testuser = [{'id': self.cb_bucket_name, 'name': self.cb_bucket_name, 'password': 'password'}]
rolelist = [{'id': self.cb_bucket_name, 'name': self.cb_bucket_name, 'roles': 'admin'}]
self.add_built_in_server_user(testuser=testuser, rolelist=rolelist)
index_fields = ""
for index_field in self.index_fields:
index_fields += index_field + ","
index_fields = index_fields[:-1]
create_idx_statement = "create index {0} on {1}({2});".format(
self.index_name, self.cbas_dataset_name, index_fields)
status, metrics, errors, results, _ = self.cbas_util.execute_statement_on_cbas_util(
create_idx_statement)
self.assertTrue(status == "success", "Create Index query failed")
self.assertTrue(
self.cbas_util.verify_index_created(self.index_name, self.index_fields,
self.cbas_dataset_name)[0])
self.cbas_util.connect_to_bucket(cbas_bucket_name=
self.cbas_bucket_name,
cb_bucket_password=self.cb_bucket_password)
self.sleep(20)
statement = 'SELECT count(*) FROM `{0}` where {1};'.format(self.cbas_dataset_name, where_statement)
status, metrics, errors, results, _ = self.cbas_util.execute_statement_on_cbas_util(
statement)
self.assertEquals(status, "success")
self.assertEquals(errors, None)
self.assertEquals(results, [{'$1': exp_number}])
self.log.info("Verify whether statement %s used index or not. Indexed: %s"%(statement,index_fields))
self.verify_index_used(statement, index_used, self.index_name)
def test_index_population_joins(self):
exp_number = self.input.param("exp_number", 0)
self.index_name2 = self.input.param('index_name2', None)
self.index_fields2 = self.input.param('index_fields2', None)
if self.index_fields2:
self.index_fields2 = self.index_fields2.split("-")
statement = self.input.param("statement", '').replace('_EQ_', '=').replace('_COMMA_', ',')
testuser = [{'id': self.cb_bucket_name, 'name': self.cb_bucket_name, 'password': 'password'}]
rolelist = [{'id': self.cb_bucket_name, 'name': self.cb_bucket_name, 'roles': 'admin'}]
self.add_built_in_server_user(testuser=testuser, rolelist=rolelist)
index_fields = ""
for index_field in self.index_fields:
index_fields += index_field + ","
index_fields = index_fields[:-1]
create_idx_statement = "create index {0} on {1}({2});".format(
self.index_name, self.cbas_dataset_name, index_fields)
status, metrics, errors, results, _ = self.cbas_util.execute_statement_on_cbas_util(
create_idx_statement)
self.assertTrue(status == "success", "Create Index query failed")
self.assertTrue(
self.cbas_util.verify_index_created(self.index_name, self.index_fields,
self.cbas_dataset_name)[0])
index_fields2 = ""
for index_field in self.index_fields2:
index_fields2 += index_field + ","
index_fields2 = index_fields2[:-1]
create_idx_statement = "create index {0} on {1}({2});".format(
self.index_name2, self.cbas_dataset_name, index_fields2)
status, metrics, errors, results, _ = self.cbas_util.execute_statement_on_cbas_util(
create_idx_statement)
self.assertTrue(status == "success", "Create Index query failed")
self.assertTrue(
self.cbas_util.verify_index_created(self.index_name2, self.index_fields2,
self.cbas_dataset_name)[0])
self.cbas_util.connect_to_bucket(cbas_bucket_name=
self.cbas_bucket_name,
cb_bucket_password=self.cb_bucket_password)
self.sleep(20)
status, metrics, errors, results, _ = self.cbas_util.execute_statement_on_cbas_util(
statement)
self.assertEquals(status, "success")
self.assertEquals(errors, None)
self.assertEquals(len(results), exp_number)
# https://issues.couchbase.com/browse/MB-25695
def test_index_metadata(self):
self.buckets = [Bucket(name="beer-sample")]
self.perform_doc_ops_in_all_cb_buckets(100000, "create", start_key=0, end_key=100000)
index_fields = ""
for index_field in self.index_fields:
index_fields += index_field + ","
index_fields = index_fields[:-1]
create_idx_statement = "create index {0} on {1}({2});".format(
self.index_name, self.cbas_dataset_name, index_fields)
status, metrics, errors, results, _ = self.cbas_util.execute_statement_on_cbas_util(
create_idx_statement)
self.assertTrue(status == "success", "Create Index query failed")
self.cbas_util.connect_to_bucket(cbas_bucket_name=
self.cbas_bucket_name,
cb_bucket_password=self.cb_bucket_password)
self.cbas_util.wait_for_ingestion_complete([self.cbas_dataset_name], 107303)
statement = 'SELECT count(*) FROM `{0}`'.format(self.cbas_dataset_name)
#
_, result = self.cbas_util.verify_index_created(self.index_name, self.index_fields,
self.cbas_dataset_name)
self.assertEquals(result[0]['Index']['DatasetName'], self.cbas_dataset_name)
self.assertEquals(result[0]['Index']['DataverseName'], 'Default')
self.assertEquals(result[0]['Index']['IndexName'], self.index_name)
self.assertEquals(result[0]['Index']['IndexStructure'], 'BTREE')
self.assertEquals(result[0]['Index']['IsPrimary'], False)
self.assertEquals(result[0]['Index']['PendingOp'], 0)
self.assertEquals(result[0]['Index']['SearchKey'], [index_field.split(":")[:-1]])
self.assertEquals(result[0]['Index']['SearchKeyType'], index_field.split(":")[1:])
status, metrics, errors, results, _ = self.cbas_util.execute_statement_on_cbas_util(
statement)
self.assertEquals(status, "success")
self.assertEquals(errors, None)
self.assertEquals(results, [{'$1': 107303}])
self.cbas_util.disconnect_from_bucket(cbas_bucket_name=
self.cbas_bucket_name)
drop_idx_statement = "drop index {0}.{1};".format(self.cbas_dataset_name, self.index_name)
status, metrics, errors, results, _ = self.cbas_util.execute_statement_on_cbas_util(
drop_idx_statement)
_, result = self.cbas_util.verify_index_created(self.index_name, self.index_fields,
self.cbas_dataset_name)
self.assertEquals(result, [])
status, metrics, errors, results, _ = self.cbas_util.execute_statement_on_cbas_util(
statement)
self.assertEquals(status, "success")
self.assertEquals(errors, None)
self.assertEquals(results, [{'$1': 107303}])
|
MultiWorker.py
|
import multiprocessing as mp
import typing as t
from threading import Thread
from typeguard import typechecked
class MultiWorker:
_sentinel = None
_finished_adding = False
@typechecked
def __init__(
self,
job: t.Callable[..., t.Any],
init: t.Optional[t.Callable[..., t.Any]] = None,
init_args: t.Union[tuple, t.Any] = (),
worker_count: int = mp.cpu_count()) -> None:
assert job is not None
assert init_args is not None
assert worker_count > 0
self._tasks = mp.Queue()
self._results = mp.Queue()
self._workers = []
for _ in range(worker_count):
self._workers.append(mp.Process(target = MultiWorker._worker, args = (job, init, init_args, self._tasks, self._results)))
self._overlord = Thread(target = MultiWorker._overlord, args = (self._workers, self._tasks, self._results))
@staticmethod
def _worker(job: callable, init: callable, init_args: tuple, tasks: mp.Queue, results: mp.Queue) -> None:
state = None
if init is not None:
if not isinstance(init_args, tuple):
state = init(init_args)
elif len(init_args) == 0:
state = init()
else:
state = init(*init_args)
while True:
item = tasks.get()
if item is MultiWorker._sentinel:
tasks.put(MultiWorker._sentinel)
break
else:
if state is None:
result = job(item)
else:
result = job(state, item)
results.put(result)
@staticmethod
def _overlord(workers: t.List[mp.Process], tasks: mp.Queue, results: mp.Queue) -> None:
for worker in workers:
worker.join()
results.put(MultiWorker._sentinel)
tasks.close()
@typechecked
def start(self) -> None:
for worker in self._workers:
worker.start()
self._overlord.start()
@typechecked
def add_task(self, item) -> None:
self._tasks.put(item)
@typechecked
def finished_adding_tasks(self) -> None:
if not self._finished_adding:
self._finished_adding = True
self._tasks.put(MultiWorker._sentinel)
@typechecked
def get_results(self) -> t.Iterator:
while True:
item = self._results.get()
if item is MultiWorker._sentinel:
break
else:
yield item
self._results.close()
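# A minimal usage sketch (assumptions: run as a script; the job is a module-level
# function so it can be pickled on spawn-based platforms).
def _square(x):
    return x * x

if __name__ == "__main__":
    mw = MultiWorker(job = _square, worker_count = 2)
    mw.start()
    for i in range(10):
        mw.add_task(i)
    mw.finished_adding_tasks()
    print(sorted(mw.get_results()))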
|
controller.py
|
import sys
import os
sys.path.append('scripts/')
from arduino_comms import Database, Monitor
from comms_emulate import EmuSystem
import configparser
from time import strftime, localtime, sleep
import threading
import pickle
import ast
class aeroD:
def __init__(self, interval=8):
self.interval = interval
self._running = True
with open('scripts/notes.txt', 'r') as notes_file:
contents = notes_file.read()
self.noteTemplates = ast.literal_eval(contents)
self.noteBuffer = []
self.mon = pickle.load(open('monitor.p', 'rb'))
self.conf = configparser.ConfigParser()
self.db = Database('datadb')
self.thread = threading.Thread(target=self.startThread, args=())
self.thread.daemon = True
self.thread.start()
def startThread(self):
while self._running:
self.updateDB()
sleep(self.interval)
print("Exiting Thread")
pickle.dump(self.mon, (open('monitor.p', 'wb')))
def stop(self):
self._running = False
self.thread.join()
def updateDB(self):
data = self.mon.getAllSensors()
self.db.insertData(data)
lastpoint = self.db.readLast()
self.sysCheckup(lastpoint)
def sysCheckup(self, data):
timeNow = strftime('%H:%M', localtime())
self.conf.read('aerodoc.ini')
if float(data['PH']) <= float(self.conf['CONTROLLER']['ph_low']):
self.mon.pumpSwitch(1, True)
print("PH Up Activated")
self.createNote('ph_low')
elif float(self.conf['CONTROLLER']['ph_up']) <= float(data['PH']):
self.mon.pumpSwitch(2, True)
print("PH Down Activated")
self.createNote('ph_up')
elif (float(self.conf['CONTROLLER']['ph_low']) <= float(data['PH']) <=
float(self.conf['CONTROLLER']['ph_up'])):
self.mon.pumpSwitch(1, False)
self.mon.pumpSwitch(2, False)
if float(data['EC']) <= float(self.conf['CONTROLLER']['ec_low']):
self.mon.pumpSwitch(3, True)
print("Activating EC Up")
self.createNote('ec_low')
else:
self.mon.pumpSwitch(3, False)
print("EC deactivated")
if (not self.mon.fanStatus) and (float(self.conf['CONTROLLER']['temp_up']) <= float(data['temp']) or
float(self.conf['CONTROLLER']['hum_up']) <= float(data['hum'])):
self.mon.fanSwitch(True)
print("Fan Activated")
self.createNote('temp_up')
if ((not self.mon.fanStatus) and float(self.conf['CONTROLLER']['hum_up']) <= float(data['hum'])):
self.createNote('hum_up')
if (not self.mon.lightStatus) and (self.conf['CONTROLLER']['light_start'] <= timeNow <=
self.conf['CONTROLLER']['light_stop']):
self.mon.lightSwitch(True)
print("Light Activated")
self.createNote('light_on')
elif (self.mon.lightStatus and
(timeNow <= self.conf['CONTROLLER']['light_start'] or
self.conf['CONTROLLER']['light_stop'] <= timeNow)):
self.mon.lightSwitch(False)
print("Light Deactivated")
self.createNote('light_off')
def createNote(self, note_type):
try:
self.noteBuffer = pickle.load(open('noteBuffer.p', 'rb'))
note = self.noteTemplates[note_type]
note['time'] = strftime('%Y-%m-%d %H:%M', localtime())
self.noteBuffer.append(note)
pickle.dump(self.noteBuffer, open('noteBuffer.p', 'wb'))
except Exception as err:
print('Error grabbing note: {}'.format(err))
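# A minimal lifecycle sketch (assumptions: monitor.p, scripts/notes.txt,
# aerodoc.ini and the 'datadb' database already exist, as __init__ requires):
#   controller = aeroD(interval=8)
#   ...                 # the daemon thread polls sensors and updates the DB
#   controller.stop()   # ends the loop, joins the thread, re-pickles monitor.p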
|
test_integration.py
|
from __future__ import absolute_import, division, print_function
import sys
from threading import Thread, Lock
import json
import warnings
import time
import stripe
import pytest
if sys.version_info[0] < 3:
from BaseHTTPServer import BaseHTTPRequestHandler, HTTPServer
else:
from http.server import BaseHTTPRequestHandler, HTTPServer
class TestIntegration(object):
@pytest.fixture(autouse=True)
def close_mock_server(self):
yield
if self.mock_server:
self.mock_server.shutdown()
self.mock_server.server_close()
self.mock_server_thread.join()
@pytest.fixture(autouse=True)
def setup_stripe(self):
orig_attrs = {
"api_base": stripe.api_base,
"api_key": stripe.api_key,
"default_http_client": stripe.default_http_client,
"enable_telemetry": stripe.enable_telemetry,
"max_network_retries": stripe.max_network_retries,
"proxy": stripe.proxy,
}
stripe.api_base = "http://localhost:12111" # stripe-mock
stripe.api_key = "sk_test_123"
stripe.default_http_client = None
stripe.enable_telemetry = False
stripe.max_network_retries = 3
stripe.proxy = None
yield
stripe.api_base = orig_attrs["api_base"]
stripe.api_key = orig_attrs["api_key"]
stripe.default_http_client = orig_attrs["default_http_client"]
stripe.enable_telemetry = orig_attrs["enable_telemetry"]
stripe.max_network_retries = orig_attrs["max_network_retries"]
stripe.proxy = orig_attrs["proxy"]
def setup_mock_server(self, handler):
# Configure mock server.
# Passing 0 as the port will cause a random free port to be chosen.
self.mock_server = HTTPServer(("localhost", 0), handler)
_, self.mock_server_port = self.mock_server.server_address
# Start running mock server in a separate thread.
# Daemon threads automatically shut down when the main process exits.
self.mock_server_thread = Thread(target=self.mock_server.serve_forever)
self.mock_server_thread.setDaemon(True)
self.mock_server_thread.start()
def test_hits_api_base(self):
class MockServerRequestHandler(BaseHTTPRequestHandler):
num_requests = 0
def do_GET(self):
self.__class__.num_requests += 1
self.send_response(200)
self.send_header(
"Content-Type", "application/json; charset=utf-8"
)
self.end_headers()
self.wfile.write(json.dumps({}).encode("utf-8"))
return
self.setup_mock_server(MockServerRequestHandler)
stripe.api_base = "http://localhost:%s" % self.mock_server_port
stripe.Balance.retrieve()
assert MockServerRequestHandler.num_requests == 1
def test_hits_proxy_through_default_http_client(self):
class MockServerRequestHandler(BaseHTTPRequestHandler):
num_requests = 0
def do_GET(self):
self.__class__.num_requests += 1
self.send_response(200)
self.send_header(
"Content-Type", "application/json; charset=utf-8"
)
self.end_headers()
self.wfile.write(json.dumps({}).encode("utf-8"))
return
self.setup_mock_server(MockServerRequestHandler)
stripe.proxy = "http://localhost:%s" % self.mock_server_port
stripe.Balance.retrieve()
assert MockServerRequestHandler.num_requests == 1
stripe.proxy = "http://bad-url"
with warnings.catch_warnings(record=True) as w:
stripe.Balance.retrieve()
assert len(w) == 1
assert "stripe.proxy was updated after sending a request" in str(
w[0].message
)
assert MockServerRequestHandler.num_requests == 2
def test_hits_proxy_through_custom_client(self):
class MockServerRequestHandler(BaseHTTPRequestHandler):
num_requests = 0
def do_GET(self):
self.__class__.num_requests += 1
self.send_response(200)
self.send_header(
"Content-Type", "application/json; charset=utf-8"
)
self.end_headers()
self.wfile.write(json.dumps({}).encode("utf-8"))
return
self.setup_mock_server(MockServerRequestHandler)
stripe.default_http_client = stripe.http_client.new_default_http_client(
proxy="http://localhost:%s" % self.mock_server_port
)
stripe.Balance.retrieve()
assert MockServerRequestHandler.num_requests == 1
def test_passes_client_telemetry_when_enabled(self):
class MockServerRequestHandler(BaseHTTPRequestHandler):
num_requests = 0
def do_GET(self):
try:
self.__class__.num_requests += 1
req_num = self.__class__.num_requests
if req_num == 1:
time.sleep(31 / 1000) # 31 ms
assert not self.headers.get(
"X-Stripe-Client-Telemetry"
)
elif req_num == 2:
assert self.headers.get("X-Stripe-Client-Telemetry")
telemetry = json.loads(
self.headers.get("x-stripe-client-telemetry")
)
assert "last_request_metrics" in telemetry
req_id = telemetry["last_request_metrics"][
"request_id"
]
duration_ms = telemetry["last_request_metrics"][
"request_duration_ms"
]
assert req_id == "req_1"
# The first request took 31 ms, so the client perceived
# latency shouldn't be outside this range.
assert 30 < duration_ms < 300
else:
assert False, (
"Should not have reached request %d" % req_num
)
self.send_response(200)
self.send_header(
"Content-Type", "application/json; charset=utf-8"
)
self.send_header("Request-Id", "req_%d" % req_num)
self.end_headers()
self.wfile.write(json.dumps({}).encode("utf-8"))
except AssertionError as ex:
# Throwing assertions on the server side causes a
# connection error to be logged instead of an assertion
# failure. Instead, we return the assertion failure as
# json so it can be logged as a StripeError.
self.send_response(400)
self.send_header(
"Content-Type", "application/json; charset=utf-8"
)
self.end_headers()
self.wfile.write(
json.dumps(
{
"error": {
"type": "invalid_request_error",
"message": str(ex),
}
}
).encode("utf-8")
)
self.setup_mock_server(MockServerRequestHandler)
stripe.api_base = "http://localhost:%s" % self.mock_server_port
stripe.enable_telemetry = True
stripe.Balance.retrieve()
stripe.Balance.retrieve()
assert MockServerRequestHandler.num_requests == 2
def test_uses_thread_local_client_telemetry(self):
class MockServerRequestHandler(BaseHTTPRequestHandler):
num_requests = 0
seen_metrics = set()
stats_lock = Lock()
def do_GET(self):
with self.__class__.stats_lock:
self.__class__.num_requests += 1
req_num = self.__class__.num_requests
if self.headers.get("X-Stripe-Client-Telemetry"):
telemetry = json.loads(
self.headers.get("X-Stripe-Client-Telemetry")
)
req_id = telemetry["last_request_metrics"]["request_id"]
with self.__class__.stats_lock:
self.__class__.seen_metrics.add(req_id)
self.send_response(200)
self.send_header(
"Content-Type", "application/json; charset=utf-8"
)
self.send_header("Request-Id", "req_%d" % req_num)
self.end_headers()
self.wfile.write(json.dumps({}).encode("utf-8"))
self.setup_mock_server(MockServerRequestHandler)
stripe.api_base = "http://localhost:%s" % self.mock_server_port
stripe.enable_telemetry = True
stripe.default_http_client = stripe.http_client.RequestsClient()
def work():
stripe.Balance.retrieve()
stripe.Balance.retrieve()
threads = [Thread(target=work) for i in range(10)]
for t in threads:
t.start()
for t in threads:
t.join()
assert MockServerRequestHandler.num_requests == 20
assert len(MockServerRequestHandler.seen_metrics) == 10
|
mips.py
|
# Preprocessor for MIPS assignments
from multiprocessing import Process
import helpers.common as common
import re
def processToken(token):
# are we a directive
directives = [".data", ".text", ".kdata", ".ktext", ".set", ".ascii", ".asciiz", ".byte", ".halfword", ".word", ".space", ".align", ".double", ".extern", ".float", ".globl", ".half", "eret"]
if token in directives:
return "!"
# are we an instruction
instructions = ["add", "addu", "addi", "addiu", "and", "andi", "div", "divu", "mult", "multu", "nor", "or", "ori", "sll", "sllv", "sra", "srav", "srl", "srlv", "sub", "subu", "xor", "xori", "lhi", "llo", "slt", "sltu", "slti", "sltiu", "beq", "bgtz", "blez", "bne", "j", "jal", "jalr", "jr", "lb", "lbu", "lh", "lhu", "lw", "sb", "sh", "sw", "mfhi", "mflo", "mthi", "mtlo", "trap", "mul", "lui", "syscall", "abs", "rem", "move", "clear", "not", "li", "la", "bgt", "blt", "bge", "ble", "break", "daddi", "bal", "bgtu", "bltu", "bgeu", "bleu"]
if token in instructions:
if token in ["andi", "divu", "multu", "ori", "subu", "addu", "addi", "xori"]:
token = token[:-1]
return chr(ord("(") + instructions.index(token))
# are we a register
isNumRegister = re.match("^\$[0-3]?[0-9]$", token) != None
isNamedRegister = re.match("^\$[vatsk][0-9]$", token) != None
isSpecialRegister = token in ["$zero", "$lo", "$hi", "$at", "$gp", "$sp", "$fp", "$ra"]
if isNumRegister or isNamedRegister or isSpecialRegister:
return "$"
# are we a number/hex/char/string/literal
isNumber = re.match("^(-?)[0-9]+(\.[0-9]*)?$", token) != None
isHex = re.match("^0x[0-9a-f]+$", token) != None
isChar = re.match("^'\\\?.'$", token) != None
isString = re.match("^\".*\"$", token) != None
if isNumber or isChar or isString or isHex:
return "#"
# are we a label declaration
if re.match("^[_a-z][_a-z0-9\.]*(:|=)$",token) != None:
return "%"
# are we a label use
if re.match("^[_a-z][_a-z0-9\.]*$", token) != None:
return "&"
# WTF
# print "No match for token {}".format(token)
return '?'
def processTokens(tokens):
result = ""
for token in tokens:
result += processToken(token.strip())
return result
def processMIPS(text):
# convert to lowercase, remove comments:
text = text.lower()
text = re.sub("#.*\n", "\n", text)
# handle spacing around ":"
text = re.sub("\s:", ":", text)
text = re.sub(":", ": ", text)
# replace certain punctuation with spaces
text = re.sub(",|\(|\)|;"," ", text)
# reduce long whitespace to a single space
text = re.sub("\s+", " ", text)
# get each token
return processTokens(text.strip().split(" "))
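# A minimal sketch (hypothetical input; the exact output characters depend on the
# instruction table above) showing the fingerprint processMIPS produces:
if __name__ == "__main__":
    sample = "main: li $v0, 10  # exit\n    syscall\n"
    print(processMIPS(sample))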
def doAssignment(students, assign, helpers):
helpers.printf("processing '{}' in parellel...\n".format(assign.name))
# for each student
for student in students:
# for each entry
entries = assign.args["entries"]
for entry in entries:
sources = entry["sources"]
# try to read the text
text = helpers.readFromAssignment(student, assign.name, sources[0])
if text is not None:
# process the file
result = processMIPS(text)
# write the result
safeFilename = common.makeFilenameSafe(sources[0]) + "mips.txt"
helpers.writeToPreprocessed(result, student, assign.name, safeFilename)
# all done
helpers.printf("Finished '{}'!\n".format(assign.name))
def run(students, assignments, args, helpers):
# threads to join later
threads = []
# for each assignment
for assign in assignments:
t = Process(target=doAssignment, args=(students, assign, helpers))
threads.append(t)
t.start()
# join the threads
for t in threads:
t.join()
# all done here
return True
|
downloader.py
|
import logging
import os
import threading
import requests
import cfg
from .app_constants import *
class Downloader:
"""
Description: Downloader downloads image resources from urls and saves them into a user-specified system path. \
It employs four threads (NUM_DL_THREADS) to download the resources concurrently. These consumer \
threads pick up urls from the "url_queue", which is filled by the FileParser class (the producer).
Version: 1.0
Comment:
"""
def __init__ ( self, url_queue ):
self.logger = logging.getLogger ( __name__ )
# contains only serviceable urls in a queue
# FileParser is the producer of urls in this queue, while Downloader threads are consumers
self.url_queue = url_queue
# this will be used, if because of any reason function "get_filename_from_url"
# could not derive a file name
self.custom_file_id = 0
# default image extension in case downloading file is missing
self.default_image_ext = "jfif"
# this is used by downloader threads to get a lock over self.custom_file_id
self.mutex = threading.Lock ( )
# Number of downloader threads
self.NUM_DL_THREADS = 4
# dl_thread_list contains the downloader thread instances, which are created by \
# create_downloader_threads and started by start_downloader_threads
self.dl_thread_list = [ ]
self.create_downloader_threads ( )
def create_downloader_threads ( self ):
"""
Creates the downloader threads
:return:
"""
for _ in range ( self.NUM_DL_THREADS ):
dwnld_th_inst = threading.Thread ( target=self.thread_downloader )
self.dl_thread_list.append ( dwnld_th_inst )
def start_downloader_threads ( self ):
"""
Starts the downloader threads
:return:
"""
for i in range ( self.NUM_DL_THREADS ):
self.dl_thread_list[ i ].start ( )
def wait_for_downloader_threads ( self ):
"""
Waits for and releases resources held by downloader threads
:return:
"""
for i in range ( self.NUM_DL_THREADS ):
self.dl_thread_list[ i ].join ( )
# Last message for user
print ( "Download dir is {}".format ( cfg.APP_CFG[ IMAGE_SAVE_DIR ] ) )
print ( "Log dir is {}".format ( cfg.APP_CFG[ LOG_DIR ] ) )
def thread_downloader ( self ):
"""
This function details the functionality of a downloader thread. Each thread fetches items \
from self.url_queue and download the image resources.
:return:
"""
while True:
url = self.url_queue.get ( block=True, timeout=None )
# Exit point of a thread: on "EXIT", put the sentinel back so the other threads can also consume it and exit
if url == "EXIT":
self.url_queue.put ( item="EXIT", block=True, timeout=None )
break
self.download_image ( url )
def download_image ( self, url, reattempt_count=cfg.APP_CFG.get ( MAX_DOWNLOAD_REATTEMPTS ) ):
"""
This function downloads image resource from web and saves it into IMAGE_SAVE_DIR directory
:param url: str
:param reattempt_count: int (Number of times an url will attempted to be fetched in case of failure)
:return: True (If successful download) / False (If download fails)
"""
# stream=True is set on the request, this avoids reading the content at once into memory for large responses.
# timeout parameter specifies Requests to stop waiting for a response after a given number of seconds.
try:
response = requests.get (
url,
allow_redirects=True,
stream=True,
timeout=cfg.APP_CFG[ URL_TIMEOUT ],
proxies=cfg.APP_CFG[ SYSTEM_PROXY ]
)
# Raises stored HTTPError, if one occurred.
response.raise_for_status ( )
# Reference: http://docs.python-requests.org/en/master/api/#exceptions
except requests.exceptions.Timeout as t_err: # Maybe set up for a retry, or continue in a retry loop
self.logger.info (
"For URL: {0} - An exception of type {1} occurred. Arguments:\n{2!r}".format ( url,
type ( t_err ).__name__,
t_err.args ) )
if not reattempt_count:
self.logger.debug ( "URL {} has not been downloaded.".format ( url ) )
return False
return self.download_image ( url, reattempt_count - 1 )
except (requests.exceptions.ConnectionError, # connection-related errors
requests.exceptions.HTTPError, # 401 Unauthorized
requests.exceptions.URLRequired, # invalid URL
requests.exceptions.TooManyRedirects, # request exceeds the configured number of max redirections
requests.exceptions.RequestException # Mother of all requests exceptions. it's doomsday :D
) as err:
self.logger.info (
"For URL: {0} - An exception of type {1} occurred. Arguments:\n{2!r}".format ( url,
type ( err ).__name__,
err.args ) )
self.logger.debug ( "URL {} has not been downloaded.".format ( url ) )
return False
if response.status_code != 200:
self.logger.debug (
"For URL: %s - Received status code %s. Reason: %s" % (url, response.status_code, response.reason) )
return False
path = cfg.APP_CFG[ IMAGE_SAVE_DIR ] + self.get_dl_filename_from_url ( url )
# It is strongly recommended that we open files in binary mode as per requests documentation
# Reference: http://docs.python-requests.org/en/master/user/quickstart/
with open ( path, 'wb' ) as fp:
# Iterates over the response data. When stream=True is set on the request, this avoids \
# reading the content at once into memory for large responses. The chunk size is the \
# number of bytes it should read into memory.
# iter_content automatically decodes the gzip and deflate transfer-encodings.
chunk_size = 1024
for data_block in response.iter_content ( chunk_size ):
fp.write ( data_block )
return True
def create_custom_dl_file_name ( self ):
"""
Create a customized name for a downloading image file. It is a thread safe function.
:return: str
"""
self.mutex.acquire ( )
curr_file_id = self.custom_file_id
self.custom_file_id += 1
self.mutex.release ( )
custom_dl_file_name = "application_image_" + str ( curr_file_id ) + "." + self.default_image_ext
return custom_dl_file_name
def check_create_dup_dl_file_name ( self, dl_file_name ):
"""
Checks whether the download file already exists in the IMAGE_SAVE_DIR directory. If it does not exist, returns \
the same dl_file_name. If it already exists, returns a customized dl_file_name for this image.
:param dl_file_name: Name of the downloading image file (str)
:return: unique file name for the downloading image in the IMAGE_SAVE_DIR directory (str)
"""
file_path = cfg.APP_CFG[ IMAGE_SAVE_DIR ] + "/" + dl_file_name
if not os.path.isfile ( file_path ): return dl_file_name
return self.create_custom_dl_file_name ( )
def get_dl_filename_from_url ( self, url ):
"""
Finds download filename from url. If it is not possible to get a file name from url then it assigns one.
:param url: string
:return: dl_file_name: string (e.g; "/application_image_1.jfif" or "/tiger_image.jfif")
"""
# stripping the rightmost '/' char in url if it exists
url = url.rstrip ( '/' )
dl_file_name = url.split ( "/" )[ -1 ]
if not dl_file_name:
dl_file_name = self.create_custom_dl_file_name ( )
# a web image may lack an extension, so verify it and assign a default one if missing
file_name_ext = dl_file_name.rsplit ( '.', 1 )
file_extension = file_name_ext[ -1 ]
# Available image file formats: http://preservationtutorial.library.cornell.edu/presentation/table7-1.html
if file_extension not in (
"tif", "tiff", "gif", "jpeg", "jpg", "jif", "jfif", "jp2", "jpx", "j2k", "j2c", "fpx", "pcd", "png"):
# assigning default image extension
file_extension = self.default_image_ext
dl_file_name = file_name_ext[ 0 ] + "." + file_extension
dl_file_name = self.check_create_dup_dl_file_name ( dl_file_name )
return "/" + dl_file_name
|
pykms_GuiMisc.py
|
#!/usr/bin/env python3
import os
import re
import sys
from collections import Counter
from time import sleep
import threading
import tkinter as tk
from tkinter import ttk
import tkinter.font as tkFont
from pykms_Format import MsgMap, unshell_message, unformat_message
#------------------------------------------------------------------------------------------------------------------------------------------------------------
# https://stackoverflow.com/questions/3221956/how-do-i-display-tooltips-in-tkinter
class ToolTip(object):
""" Create a tooltip for a given widget """
def __init__(self, widget, bg = '#FFFFEA', pad = (5, 3, 5, 3), text = 'widget info', waittime = 400, wraplength = 250):
self.waittime = waittime # ms
self.wraplength = wraplength # pixels
self.widget = widget
self.text = text
self.widget.bind("<Enter>", self.onEnter)
self.widget.bind("<Leave>", self.onLeave)
self.widget.bind("<ButtonPress>", self.onLeave)
self.bg = bg
self.pad = pad
self.id = None
self.tw = None
def onEnter(self, event = None):
self.schedule()
def onLeave(self, event = None):
self.unschedule()
self.hide()
def schedule(self):
self.unschedule()
self.id = self.widget.after(self.waittime, self.show)
def unschedule(self):
id_ = self.id
self.id = None
if id_:
self.widget.after_cancel(id_)
def show(self):
def tip_pos_calculator(widget, label, tip_delta = (10, 5), pad = (5, 3, 5, 3)):
w = widget
s_width, s_height = w.winfo_screenwidth(), w.winfo_screenheight()
width, height = (pad[0] + label.winfo_reqwidth() + pad[2],
pad[1] + label.winfo_reqheight() + pad[3])
mouse_x, mouse_y = w.winfo_pointerxy()
x1, y1 = mouse_x + tip_delta[0], mouse_y + tip_delta[1]
x2, y2 = x1 + width, y1 + height
x_delta = x2 - s_width
if x_delta < 0:
x_delta = 0
y_delta = y2 - s_height
if y_delta < 0:
y_delta = 0
offscreen = (x_delta, y_delta) != (0, 0)
if offscreen:
if x_delta:
x1 = mouse_x - tip_delta[0] - width
if y_delta:
y1 = mouse_y - tip_delta[1] - height
offscreen_again = y1 < 0 # out on the top
if offscreen_again:
# No further checks will be done.
# TIP:
# A further mod might automagically augment the
# wraplength when the tooltip is too high to be
# kept inside the screen.
y1 = 0
return x1, y1
bg = self.bg
pad = self.pad
widget = self.widget
# creates a toplevel window
self.tw = tk.Toplevel(widget)
# leaves only the label and removes the app window
self.tw.wm_overrideredirect(True)
win = tk.Frame(self.tw, background = bg, borderwidth = 0)
label = ttk.Label(win, text = self.text, justify = tk.LEFT, background = bg, relief = tk.SOLID, borderwidth = 0,
wraplength = self.wraplength)
label.grid(padx = (pad[0], pad[2]), pady = (pad[1], pad[3]), sticky=tk.NSEW)
win.grid()
x, y = tip_pos_calculator(widget, label)
self.tw.wm_geometry("+%d+%d" % (x, y))
def hide(self):
tw = self.tw
if tw:
tw.destroy()
self.tw = None
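# A minimal standalone sketch (hypothetical demo; the real GUI wires tooltips up
# elsewhere):
#   root = tk.Tk()
#   button = ttk.Button(root, text = "Hover me")
#   button.pack(padx = 20, pady = 20)
#   ToolTip(button, text = "This is a tooltip")
#   root.mainloop()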
##-----------------------------------------------------------------------------------------------------------------------------------------------------------
class TextRedirect(object):
class Pretty(object):
grpmsg = unformat_message([MsgMap[1], MsgMap[7], MsgMap[12], MsgMap[20]])
arrows = [ item[0] for item in grpmsg ]
clt_msg_nonewline = [ item[1] for item in grpmsg ]
arrows = list(set(arrows))
lenarrow = len(arrows[0])
srv_msg_nonewline = [ item[0] for item in unformat_message([MsgMap[2], MsgMap[5], MsgMap[13], MsgMap[18]]) ]
msg_align = [ msg[0].replace('\t', '').replace('\n', '') for msg in unformat_message([MsgMap[-2], MsgMap[-4]]) ]
def __init__(self, srv_text_space, clt_text_space, customcolors):
self.srv_text_space = srv_text_space
self.clt_text_space = clt_text_space
self.customcolors = customcolors
def textbox_write(self, tag, message, color, extras):
widget = self.textbox_choose(message)
self.w_maxpix, self.h_maxpix = widget.winfo_width(), widget.winfo_height()
self.xfont = tkFont.Font(font = widget['font'])
widget.configure(state = 'normal')
widget.insert('end', self.textbox_format(message), tag)
self.textbox_color(tag, widget, color, self.customcolors['black'], extras)
widget.after(100, widget.see, 'end')
widget.configure(state = 'disabled')
def textbox_choose(self, message):
if any(item.startswith('logsrv') for item in [message, self.str_to_print]):
self.srv_text_space.focus_set()
self.where = "srv"
return self.srv_text_space
elif any(item.startswith('logclt') for item in [message, self.str_to_print]):
self.clt_text_space.focus_set()
self.where = "clt"
return self.clt_text_space
def textbox_color(self, tag, widget, forecolor = 'white', backcolor = 'black', extras = []):
for extra in extras:
if extra == 'bold':
self.xfont.configure(weight = "bold")
elif extra == 'italic':
self.xfont.configure(slant = "italic")
elif extra == 'underlined':
self.xfont.configure(underline = True)
elif extra == 'strike':
self.xfont.configure(overstrike = True)
elif extra == 'reverse':
forecolor, backcolor = backcolor, forecolor
widget.tag_configure(tag, foreground = forecolor, background = backcolor, font = self.xfont)
widget.tag_add(tag, "insert linestart", "insert lineend")
def textbox_newline(self, message):
if not message.endswith('\n'):
return message + '\n'
else:
return message
def textbox_format(self, message):
# vertical align.
self.w_maxpix = self.w_maxpix - 5 # pixel reduction for distance from border.
w_fontpix, h_fontpix = (self.xfont.measure('0'), self.xfont.metrics('linespace'))
msg_unformat = message.replace('\t', '').replace('\n', '')
lenfixed_chars = int((self.w_maxpix / w_fontpix) - len(msg_unformat))
if message in self.srv_msg_nonewline + self.clt_msg_nonewline:
lung = lenfixed_chars - self.lenarrow
if message in self.clt_msg_nonewline:
message = self.textbox_newline(message)
else:
lung = lenfixed_chars
if (self.where == "srv") or (self.where == "clt" and message not in self.arrows):
message = self.textbox_newline(message)
# horizontal align.
if msg_unformat in self.msg_align:
msg_strip = message.lstrip('\n')
message = '\n' * (len(message) - len(msg_strip) + TextRedirect.Pretty.newlinecut[0]) + msg_strip
TextRedirect.Pretty.newlinecut.pop(0)
count = Counter(message)
countab = (count['\t'] if count['\t'] != 0 else 1)
message = message.replace('\t' * countab, ' ' * lung)
return message
def textbox_do(self):
msgs, TextRedirect.Pretty.tag_num = unshell_message(self.str_to_print, TextRedirect.Pretty.tag_num)
for tag in msgs:
self.textbox_write(tag, msgs[tag]['text'], self.customcolors[msgs[tag]['color']], msgs[tag]['extra'])
def flush(self):
pass
def write(self, string):
if string != '\n':
self.str_to_print = string
self.textbox_do()
class Stderr(Pretty):
def __init__(self, srv_text_space, clt_text_space, customcolors, side):
self.srv_text_space = srv_text_space
self.clt_text_space = clt_text_space
self.customcolors = customcolors
self.side = side
self.tag_err = 'STDERR'
self.xfont = tkFont.Font(font = self.srv_text_space['font'])
def textbox_choose(self, message):
if self.side == "srv":
return self.srv_text_space
elif self.side == "clt":
return self.clt_text_space
def write(self, string):
widget = self.textbox_choose(string)
self.textbox_color(self.tag_err, widget, self.customcolors['red'], self.customcolors['black'])
widget.configure(state = 'normal')
widget.insert('end', string, self.tag_err)
widget.see('end')
widget.configure(state = 'disabled')
class Log(Pretty):
def textbox_format(self, message):
if message.startswith('logsrv'):
message = message.replace('logsrv ', '')
if message.startswith('logclt'):
message = message.replace('logclt ', '')
return message + '\n'
##-----------------------------------------------------------------------------------------------------------------------------------------------------------
class TextDoubleScroll(tk.Frame):
def __init__(self, master, **kwargs):
""" Initialize.
- horizontal scrollbar
- vertical scrollbar
- text widget
"""
tk.Frame.__init__(self, master)
self.master = master
self.textbox = tk.Text(self.master, **kwargs)
self.sizegrip = ttk.Sizegrip(self.master)
self.hs = ttk.Scrollbar(self.master, orient = "horizontal", command = self.on_scrollbar_x)
self.vs = ttk.Scrollbar(self.master, orient = "vertical", command = self.on_scrollbar_y)
self.textbox.configure(yscrollcommand = self.on_textscroll, xscrollcommand = self.hs.set)
def on_scrollbar_x(self, *args):
""" Horizontally scrolls text widget. """
self.textbox.xview(*args)
def on_scrollbar_y(self, *args):
""" Vertically scrolls text widget. """
self.textbox.yview(*args)
def on_textscroll(self, *args):
""" Moves the scrollbar and scrolls text widget when the mousewheel is moved on a text widget. """
self.vs.set(*args)
self.on_scrollbar_y('moveto', args[0])
def put(self, **kwargs):
""" Grid the scrollbars and textbox correctly. """
self.textbox.grid(row = 0, column = 0, padx = 3, pady = 3, sticky = "nsew")
self.vs.grid(row = 0, column = 1, sticky = "ns")
self.hs.grid(row = 1, column = 0, sticky = "we")
self.sizegrip.grid(row = 1, column = 1, sticky = "news")
def get(self):
""" Return the "frame" useful to place inner controls. """
return self.textbox
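# Usage sketch (added for illustration; `root` and the option values are
# hypothetical): create the widget, grid it with put(), then write through
# the inner tk.Text returned by get().
#
#   root = tk.Tk()
#   holder = TextDoubleScroll(root, width = 80, height = 24)
#   holder.put()                 # grids the textbox plus both scrollbars
#   textbox = holder.get()       # inner tk.Text, e.g. for insert() and tags
#   textbox.insert('end', 'hello')
#   root.mainloop()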
##-----------------------------------------------------------------------------------------------------------------------------------------------------------
def custom_background(window):
# first level canvas.
allwidgets = window.grid_slaves(0,0)[0].grid_slaves() + window.grid_slaves(0,0)[0].place_slaves()
widgets_alphalow = [ widget for widget in allwidgets if widget.winfo_class() == 'Canvas']
widgets_alphahigh = []
# sub-level canvas.
for side in ["Srv", "Clt"]:
widgets_alphahigh.append(window.pagewidgets[side]["BtnWin"])
for position in ["Left", "Right"]:
widgets_alphahigh.append(window.pagewidgets[side]["AniWin"][position])
for pagename in window.pagewidgets[side]["PageWin"].keys():
widgets_alphalow.append(window.pagewidgets[side]["PageWin"][pagename])
try:
from PIL import Image, ImageTk
# Open Image.
img = Image.open(os.path.dirname(os.path.abspath( __file__ )) + "/graphics/pykms_Keys.gif")
img = img.convert('RGBA')
# Resize image.
img = img.resize((window.winfo_width(), window.winfo_height()), Image.ANTIALIAS)  # resize() returns a new image, it does not modify in place
# Put semi-transparent background chunks.
window.backcrops_alphalow, window.backcrops_alphahigh = ([] for _ in range(2))
def cutter(master, image, widgets, crops, alpha):
for widget in widgets:
x, y, w, h = master.get_position(widget)
cropped = image.crop((x, y, x + w, y + h))
cropped.putalpha(alpha)
crops.append(ImageTk.PhotoImage(cropped))
# Second loop keeps the PhotoImage references alive, preventing them from being garbage-collected.
for crop, widget in zip(crops, widgets):
widget.create_image(1, 1, image = crop, anchor = 'nw')
cutter(window, img, widgets_alphalow, window.backcrops_alphalow, 36)
cutter(window, img, widgets_alphahigh, window.backcrops_alphahigh, 96)
# Put semi-transparent background overall.
img.putalpha(128)
window.backimg = ImageTk.PhotoImage(img)
window.masterwin.create_image(1, 1, image = window.backimg, anchor = 'nw')
except ImportError:
for widget in widgets_alphalow + widgets_alphahigh:
widget.configure(background = window.customcolors['lavender'])
# Hide client.
window.clt_on_show(force_remove = True)
# Show Gui.
window.deiconify()
##-----------------------------------------------------------------------------------------------------------------------------------------------------------
class Animation(object):
def __init__(self, gifpath, master, widget, loop = False):
from PIL import Image, ImageTk, ImageSequence
self.master = master
self.widget = widget
self.loop = loop
self.cancelid = None
self.flagstop = False
self.index = 0
self.frames = []
img = Image.open(gifpath)
size = img.size
for frame in ImageSequence.Iterator(img):
static_img = ImageTk.PhotoImage(frame.convert('RGBA'))
try:
static_img.delay = int(frame.info['duration'])
except KeyError:
static_img.delay = 100
self.frames.append(static_img)
self.widget.configure(width = size[0], height = size[1])
self.initialize()
def initialize(self):
self.widget.configure(image = self.frames[0])
self.widget.image = self.frames[0]
def deanimate(self):
while not self.flagstop:
sleep(0.01)  # yield briefly instead of busy-waiting on the flag
self.flagstop = False
self.index = 0
self.widget.configure(relief = "raised")
def animate(self):
frame = self.frames[self.index]
self.widget.configure(image = frame, relief = "sunken")
self.index += 1
self.cancelid = self.master.after(frame.delay, self.animate)
if self.index == len(self.frames):
if self.loop:
self.index = 0
else:
self.stop()
def start(self, event = None):
if str(self.widget['state']) != 'disabled':
if self.cancelid is None:
if not self.loop:
self.btnani_thread = threading.Thread(target = self.deanimate, name = "Thread-BtnAni")
self.btnani_thread.daemon = True  # setDaemon() is deprecated
self.btnani_thread.start()
self.cancelid = self.master.after(self.frames[0].delay, self.animate)
def stop(self, event = None):
if self.cancelid:
self.master.after_cancel(self.cancelid)
self.cancelid = None
self.flagstop = True
self.initialize()
def custom_pages(window, side):
buttons = window.pagewidgets[side]["BtnAni"]
labels = window.pagewidgets[side]["LblAni"]
for position in buttons.keys():
buttons[position].config(anchor = "center",
font = window.customfonts['btn'],
background = window.customcolors['white'],
activebackground = window.customcolors['white'],
borderwidth = 2)
try:
anibtn = Animation(os.path.dirname(os.path.abspath( __file__ )) + "/graphics/pykms_Keyhole_%s.gif" %position,
window, buttons[position], loop = False)
anilbl = Animation(os.path.dirname(os.path.abspath( __file__ )) + "/graphics/pykms_Arrow_%s.gif" %position,
window, labels[position], loop = True)
def animationwait(master, button, btn_animation, lbl_animation):
while btn_animation.cancelid:
pass
sleep(1)
x, y = master.winfo_pointerxy()
if master.winfo_containing(x, y) == button:
lbl_animation.start()
def animationcombo(master, button, btn_animation, lbl_animation):
wait_thread = threading.Thread(target = animationwait,
args = (master, button, btn_animation, lbl_animation),
name = "Thread-WaitAni")
wait_thread.daemon = True  # setDaemon() is deprecated
wait_thread.start()
lbl_animation.stop()
btn_animation.start()
buttons[position].bind("<ButtonPress>", lambda event, anim1 = anibtn, anim2 = anilbl,
bt = buttons[position], win = window:
animationcombo(win, bt, anim1, anim2))
buttons[position].bind("<Enter>", anilbl.start)
buttons[position].bind("<Leave>", anilbl.stop)
except ImportError:
buttons[position].config(activebackground = window.customcolors['blue'],
foreground = window.customcolors['blue'])
labels[position].config(background = window.customcolors['lavender'])
if position == "Left":
buttons[position].config(text = '<<')
elif position == "Right":
buttons[position].config(text = '>>')
##-----------------------------------------------------------------------------------------------------------------------------------------------------------
class ListboxOfRadiobuttons(tk.Frame):
def __init__(self, master, radios, font, changed, **kwargs):
tk.Frame.__init__(self, master)
self.master = master
self.radios = radios
self.font = font
self.changed = changed
self.scrollv = tk.Scrollbar(self, orient = "vertical")
self.textbox = tk.Text(self, yscrollcommand = self.scrollv.set, **kwargs)
self.scrollv.config(command = self.textbox.yview)
# layout.
self.scrollv.pack(side = "right", fill = "y")
self.textbox.pack(side = "left", fill = "both", expand = True)
# create radiobuttons.
self.radiovar = tk.StringVar()
self.radiovar.set('FILE')
self.create()
def create(self):
self.rdbtns = []
for n, nameradio in enumerate(self.radios):
rdbtn = tk.Radiobutton(self, text = nameradio, value = nameradio, variable = self.radiovar,
font = self.font, indicatoron = 0, width = 15,
borderwidth = 3, selectcolor = 'yellow', command = self.change)
self.textbox.window_create("end", window = rdbtn)
# force one radiobutton per line
if n != len(self.radios) - 1:
self.textbox.insert("end", "\n")
self.rdbtns.append(rdbtn)
self.textbox.configure(state = "disabled")
def change(self):
st = self.state()
for widget, default in self.changed:
wclass = widget.winfo_class()
if st in ['STDOUT', 'FILEOFF']:
if wclass == 'Entry':
widget.delete(0, 'end')
widget.configure(state = "disabled")
elif wclass == 'TCombobox':
if st == 'STDOUT':
widget.set(default)
widget.configure(state = "readonly")
elif st == 'FILEOFF':
widget.set('')
widget.configure(state = "disabled")
elif st in ['FILE', 'FILESTDOUT', 'STDOUTOFF']:
if wclass == 'Entry':
widget.configure(state = "normal")
widget.delete(0, 'end')
widget.insert('end', default)
widget.xview_moveto(1)
elif wclass == 'TCombobox':
widget.configure(state = "readonly")
widget.set(default)
elif wclass == 'Button':
widget.configure(state = "normal")
def configure(self, state):
for rb in self.rdbtns:
rb.configure(state = state)
def state(self):
return self.radiovar.get()
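# Usage sketch (added for illustration; widget names and defaults are
# hypothetical): `changed` pairs each dependent widget with the default value
# to restore when the selected radio changes.
#
#   lb = ListboxOfRadiobuttons(parent,
#                              radios = ['FILE', 'STDOUT', 'FILESTDOUT'],
#                              font = myfont,
#                              changed = [(path_entry, 'pykms_logserver.log')],
#                              width = 15, height = 3)
#   lb.pack()
#   current = lb.state()         # currently selected radio value, e.g. 'FILE'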
|
hello_world__Process.py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
__author__ = 'ipetrash'
from multiprocessing import Process
def f(name):
print('Hello,', name)
if __name__ == '__main__':
p = Process(target=f, args=('bob',))
p.start()
p.join()
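# Note (added): the __main__ guard is not optional here. On platforms that use
# the "spawn" start method (Windows, and macOS since Python 3.8) the child
# process re-imports this module, and without the guard it would recursively
# spawn new processes.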
|
test_shm.py
|
import pytest
import time
import numpy as np
import torch
from multiprocessing import Process
from ding.envs.env_manager.subprocess_env_manager import ShmBuffer
def writer(shm):
while True:
shm.fill(np.random.random(size=(4, 84, 84)).astype(np.float32))
time.sleep(1)
@pytest.mark.unittest
def test_shm():
shm = ShmBuffer(dtype=np.float32, shape=(4, 84, 84), copy_on_get=False)
writer_process = Process(target=writer, args=(shm, ))
writer_process.start()
time.sleep(0.1)
data1 = shm.get()
time.sleep(1)
data2 = shm.get()
# same memory
assert (data1 == data2).all()
time.sleep(1)
data3 = shm.get().copy()
time.sleep(1)
data4 = shm.get()
assert (data3 != data4).all()
writer_process.terminate()
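# Note (added): with copy_on_get=False, shm.get() evidently returns a view over
# the shared buffer rather than a copy, so data1 and data2 alias the same
# memory and both reflect the writer's latest fill; data3 is an explicit
# .copy(), which is why it diverges from data4 after the next write.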
|
keep_alive.py
|
# file name is keep_alive.py
from flask import Flask
from threading import Thread
app = Flask('')
@app.route('/')
def main():
return "Your bot is alive!"
def run():
app.run(host="0.0.0.0", port=8080)
def keep_alive():
server = Thread(target=run)
server.start()
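# Usage sketch (added for illustration; `bot` and TOKEN are hypothetical):
# call keep_alive() before the blocking bot loop so an external uptime
# monitor can ping the Flask endpoint and keep the host awake.
#
#   from keep_alive import keep_alive
#   keep_alive()      # starts the Flask server in a background thread
#   bot.run(TOKEN)    # blocking call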
|
ping.py
|
#!/usr/bin/env python3
# -*- coding: UTF-8 -*-
import ipaddress
import threading
from queue import Queue
from scapy.all import IP, ICMP, sr1, getmacbyip
try:
from method.sub.ipget import cidr_ip
except ImportError:
from sub.ipget import cidr_ip
class Ping:
"""A class to recieve IPs of host in current network."""
def __init__(self, verbose, threads=100) -> None:
"""
Constructs all the necessary attributes.
verbose: bool
permission for output of additional info about packets
threads: int
amount of threads (default - 100)
ip: string
IPs of hosts in CIDR notation
hosts: list
list of IPs of network to check
res: list
list of active IPs
"""
self.verbose = verbose
self.threads = threads
self.print_lock = threading.Lock()
self.q = Queue()
ip = cidr_ip()
self.hosts = [str(ip) for ip in ipaddress.IPv4Network(ip)]
self.res = []
def scan(self, ipaddr) -> None:
"""Scans network and catches active IPs."""
if getmacbyip(ipaddr) is not None:  # skip hosts whose MAC cannot be resolved
icmp = IP(dst=ipaddr)/ICMP()  # ICMP echo request to send
ans = sr1(icmp, timeout=5, verbose=self.verbose)  # send the request and wait for one reply
if ans:
self.res.append(ipaddr)  # keep the answering host's IP
def threader(self) -> None:
"""Creates a single thread."""
while True:
current = self.q.get()
self.scan(current)
self.q.task_done()
def main(self) -> list:
"""Makes (magic) threads work and returns IPs."""
for _ in range(self.threads):
t = threading.Thread(target=self.threader)
t.daemon = True
t.start()
for curr in self.hosts:
self.q.put(curr)
self.q.join()
return self.res
if __name__ == "__main__":
scan = Ping(verbose=False, threads=100)
print(scan.main())
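# Note (added): getmacbyip() issues ARP requests and sr1() sends raw ICMP
# packets, both of which normally require root/administrator privileges, so
# run this scanner with elevated rights.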
|
test.py
|
from contextlib import contextmanager
from ctypes import cdll
from ctypes.util import find_library
import datetime
import functools
import hashlib
import hmac
import os
import socket
import sqlite3
import tempfile
import threading
import unittest
import urllib.parse
import uuid
import httpx
from sqlite_s3_query import sqlite_s3_query, sqlite_s3_query_multi
class TestSqliteS3Query(unittest.TestCase):
def test_sqlite3_installed_on_ci(self):
ci = os.environ.get('CI', '')
sqlite3_version = os.environ.get('SQLITE3_VERSION', 'default')
if ci and sqlite3_version != 'default':
libsqlite3 = cdll.LoadLibrary(find_library('sqlite3'))
self.assertEqual(libsqlite3.sqlite3_libversion_number(), int(sqlite3_version))
def test_without_versioning(self):
db = get_db([
"CREATE TABLE my_table (my_col_a text, my_col_b text);",
] + [
"INSERT INTO my_table VALUES " + ','.join(["('some-text-a', 'some-text-b')"] * 500),
])
put_object_without_versioning('bucket-without-versioning', 'my.db', db)
with self.assertRaisesRegex(Exception, 'The bucket must have versioning enabled'):
sqlite_s3_query('http://localhost:9000/bucket-without-versioning/my.db', get_credentials=lambda now: (
'us-east-1',
'AKIAIOSFODNN7EXAMPLE',
'wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY',
None,
)).__enter__()
def test_select(self):
db = get_db([
"CREATE TABLE my_table (my_col_a text, my_col_b text);",
] + [
"INSERT INTO my_table VALUES " + ','.join(["('some-text-a', 'some-text-b')"] * 500),
])
put_object_with_versioning('my-bucket', 'my.db', db)
with sqlite_s3_query('http://localhost:9000/my-bucket/my.db', get_credentials=lambda now: (
'us-east-1',
'AKIAIOSFODNN7EXAMPLE',
'wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY',
None,
)) as query:
with query('SELECT my_col_a FROM my_table') as (columns, rows):
rows = list(rows)
self.assertEqual(rows, [('some-text-a',)] * 500)
with sqlite_s3_query('http://localhost:9000/my-bucket/my.db', get_credentials=lambda now: (
'us-east-1',
'AKIAIOSFODNN7EXAMPLE',
'wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY',
None,
)) as query:
with \
query('SELECT my_col_a FROM my_table') as (columns_a, rows_a), \
query('SELECT my_col_b FROM my_table') as (columns_b, rows_b):
rows = [
(next(rows_a)[0], next(rows_b)[0])
for i in range(0, 500)
]
self.assertEqual(rows, [('some-text-a','some-text-b')] * 500)
with sqlite_s3_query('http://localhost:9000/my-bucket/my.db', get_credentials=lambda now: (
'us-east-1',
'AKIAIOSFODNN7EXAMPLE',
'wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY',
None,
)) as query:
db = get_db([
"CREATE TABLE my_table (my_col_a text, my_col_b text);",
] + [
"INSERT INTO my_table VALUES " + ','.join(["('some-new-a', 'some-new-b')"] * 500),
])
put_object_with_versioning('my-bucket', 'my.db', db)
with query('SELECT my_col_a FROM my_table') as (columns, rows):
rows = list(rows)
self.assertEqual(rows, [('some-text-a',)] * 500)
with self.assertRaisesRegex(Exception, 'Attempting to use finalized statement'):
with sqlite_s3_query('http://localhost:9000/my-bucket/my.db', get_credentials=lambda now: (
'us-east-1',
'AKIAIOSFODNN7EXAMPLE',
'wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY',
None,
)) as query:
with query('SELECT my_col_a FROM my_table') as (columns, rows):
for row in rows:
break
next(rows)
with self.assertRaisesRegex(Exception, 'Attempting to use finalized statement'):
with sqlite_s3_query('http://localhost:9000/my-bucket/my.db', get_credentials=lambda now: (
'us-east-1',
'AKIAIOSFODNN7EXAMPLE',
'wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY',
None,
)) as query:
with query('SELECT my_col_a FROM my_table') as (columns, rows):
pass
next(rows)
def test_select_multi(self):
db = get_db([
"CREATE TABLE my_table (my_col_a text, my_col_b text);",
] + [
"INSERT INTO my_table VALUES " + ','.join(["('some-text-a', 'some-text-b')"] * 500),
])
put_object_with_versioning('my-bucket', 'my.db', db)
with sqlite_s3_query_multi('http://localhost:9000/my-bucket/my.db', get_credentials=lambda now: (
'us-east-1',
'AKIAIOSFODNN7EXAMPLE',
'wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY',
None,
)) as query:
rows_list = [
list(rows)
for (columns, rows) in query('''
SELECT my_col_a FROM my_table;
SELECT my_col_a FROM my_table LIMIT 10;
''')
]
self.assertEqual(rows_list, [[('some-text-a',)] * 500, [('some-text-a',)] * 10])
with self.assertRaisesRegex(Exception, 'Just after creating context'):
with sqlite_s3_query_multi('http://localhost:9000/my-bucket/my.db', get_credentials=lambda now: (
'us-east-1',
'AKIAIOSFODNN7EXAMPLE',
'wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY',
None,
)) as query:
raise Exception('Just after creating context')
with self.assertRaisesRegex(Exception, 'Just after iterating statements'):
with sqlite_s3_query_multi('http://localhost:9000/my-bucket/my.db', get_credentials=lambda now: (
'us-east-1',
'AKIAIOSFODNN7EXAMPLE',
'wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY',
None,
)) as query:
for (columns, rows) in query('''
SELECT my_col_a FROM my_table;
SELECT my_col_a FROM my_table LIMIT 10;
'''):
raise Exception('Just after iterating statements')
with self.assertRaisesRegex(Exception, 'Just after iterating first row'):
with sqlite_s3_query_multi('http://localhost:9000/my-bucket/my.db', get_credentials=lambda now: (
'us-east-1',
'AKIAIOSFODNN7EXAMPLE',
'wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY',
None,
)) as query:
for (columns, rows) in query('''
SELECT my_col_a FROM my_table;
SELECT my_col_a FROM my_table LIMIT 10;
'''):
for row in rows:
raise Exception('Just after iterating first row')
with self.assertRaisesRegex(Exception, 'Multiple open statements'):
with sqlite_s3_query_multi('http://localhost:9000/my-bucket/my.db', get_credentials=lambda now: (
'us-east-1',
'AKIAIOSFODNN7EXAMPLE',
'wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY',
None,
)) as query:
it = iter(query('''
SELECT my_col_a FROM my_table;
SELECT my_col_a FROM my_table LIMIT 10;
'''))
columns_1, rows_1 = next(it)
for row in rows_1:
break
columns_2, rows_2 = next(it)
for row in rows_2:
raise Exception('Multiple open statements')
with self.assertRaisesRegex(Exception, 'Attempting to use finalized statement'):
with sqlite_s3_query_multi('http://localhost:9000/my-bucket/my.db', get_credentials=lambda now: (
'us-east-1',
'AKIAIOSFODNN7EXAMPLE',
'wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY',
None,
)) as query:
for columns, rows in query('''
SELECT my_col_a FROM my_table;
SELECT my_col_a FROM my_table LIMIT 10;
'''):
pass
rows_list = list(rows)
def test_placeholder(self):
db = get_db([
"CREATE TABLE my_table (my_col_a text, my_col_b text);",
] + [
"INSERT INTO my_table VALUES ('a','b'),('c','d')",
])
put_object_with_versioning('my-bucket', 'my.db', db)
with sqlite_s3_query('http://localhost:9000/my-bucket/my.db', get_credentials=lambda now: (
'us-east-1',
'AKIAIOSFODNN7EXAMPLE',
'wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY',
None,
)) as query:
with query("SELECT my_col_a FROM my_table WHERE my_col_b = ?", params=(('d',))) as (columns, rows):
rows = list(rows)
self.assertEqual(rows, [('c',)])
def test_partial(self):
db = get_db([
"CREATE TABLE my_table (my_col_a text, my_col_b text);",
] + [
"INSERT INTO my_table VALUES ('a','b'),('c','d')",
])
put_object_with_versioning('my-bucket', 'my.db', db)
query_my_db = functools.partial(sqlite_s3_query,
url='http://localhost:9000/my-bucket/my.db',
get_credentials=lambda now: (
'us-east-1',
'AKIAIOSFODNN7EXAMPLE',
'wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY',
None,
)
)
with query_my_db() as query:
with query("SELECT my_col_a FROM my_table WHERE my_col_b = ?", params=(('d',))) as (columns, rows):
rows = list(rows)
self.assertEqual(rows, [('c',)])
def test_time_and_non_python_identifier(self):
db = get_db(["CREATE TABLE my_table (my_col_a text, my_col_b text);"])
put_object_with_versioning('my-bucket', 'my.db', db)
with sqlite_s3_query('http://localhost:9000/my-bucket/my.db', get_credentials=lambda now: (
'us-east-1',
'AKIAIOSFODNN7EXAMPLE',
'wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY',
None,
)) as query:
now = datetime.datetime.utcnow()
with query("SELECT date('now'), time('now')") as (columns, rows):
rows = list(rows)
self.assertEqual(rows, [(now.strftime('%Y-%m-%d'), now.strftime('%H:%M:%S'))])
self.assertEqual(columns, ("date('now')", "time('now')"))
def test_non_existant_table(self):
db = get_db(["CREATE TABLE my_table (my_col_a text, my_col_b text);"])
put_object_with_versioning('my-bucket', 'my.db', db)
with sqlite_s3_query('http://localhost:9000/my-bucket/my.db', get_credentials=lambda now: (
'us-east-1',
'AKIAIOSFODNN7EXAMPLE',
'wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY',
None,
)) as query:
with self.assertRaisesRegex(Exception, 'no such table: non_table'):
query("SELECT * FROM non_table").__enter__()
def test_empty_object(self):
db = get_db(["CREATE TABLE my_table (my_col_a text, my_col_b text);"])
put_object_with_versioning('my-bucket', 'my.db', b'')
with self.assertRaisesRegex(Exception, 'disk I/O error'):
sqlite_s3_query('http://localhost:9000/my-bucket/my.db', get_credentials=lambda now: (
'us-east-1',
'AKIAIOSFODNN7EXAMPLE',
'wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY',
None,
)).__enter__()
def test_bad_db_header(self):
db = get_db(["CREATE TABLE my_table (my_col_a text, my_col_b text);"])
put_object_with_versioning('my-bucket', 'my.db', b'*' * 100)
with sqlite_s3_query('http://localhost:9000/my-bucket/my.db', get_credentials=lambda now: (
'us-east-1',
'AKIAIOSFODNN7EXAMPLE',
'wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY',
None,
)) as query:
with self.assertRaisesRegex(Exception, 'disk I/O error'):
query("SELECT * FROM non_table").__enter__()
def test_bad_db_second_half(self):
db = get_db(["CREATE TABLE my_table (my_col_a text, my_col_b text);"] + [
"INSERT INTO my_table VALUES " + ','.join(["('some-text-a', 'some-text-b')"] * 500),
] * 10)
half_len = int(len(db) / 2)
db = db[:half_len] + len(db[half_len:]) * b'-'
put_object_with_versioning('my-bucket', 'my.db', db)
with sqlite_s3_query('http://localhost:9000/my-bucket/my.db', get_credentials=lambda now: (
'us-east-1',
'AKIAIOSFODNN7EXAMPLE',
'wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY',
None,
)) as query:
with self.assertRaisesRegex(Exception, 'database disk image is malformed'):
with query("SELECT * FROM my_table") as (columns, rows):
list(rows)
def test_num_connections(self):
num_connections = 0
@contextmanager
def server():
nonlocal num_connections
def _run(server_sock):
nonlocal num_connections
while True:
try:
downstream_sock, _ = server_sock.accept()
except Exception:
break
num_connections += 1
connection_t = threading.Thread(target=handle_downstream, args=(downstream_sock,))
connection_t.start()
with shutdown(get_new_socket()) as server_sock:
server_sock.bind(('127.0.0.1', 9001))
server_sock.listen(socket.IPPROTO_TCP)
threading.Thread(target=_run, args=(server_sock,)).start()
yield server_sock
def get_http_client():
@contextmanager
def client():
with httpx.Client() as original_client:
class Client():
def stream(self, method, url, params, headers):
parsed_url = urllib.parse.urlparse(url)
url = urllib.parse.urlunparse(parsed_url._replace(netloc='localhost:9001'))
return original_client.stream(method, url, params=params, headers=headers + (('host', 'localhost:9000'),))
yield Client()
return client()
with server() as server_sock:
db = get_db([
"CREATE TABLE my_table (my_col_a text, my_col_b text);",
] + [
"INSERT INTO my_table VALUES " + ','.join(["('some-text-a', 'some-text-b')"] * 500),
])
put_object_with_versioning('my-bucket', 'my.db', db)
with sqlite_s3_query('http://localhost:9000/my-bucket/my.db', get_credentials=lambda now: (
'us-east-1',
'AKIAIOSFODNN7EXAMPLE',
'wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY',
None,
), get_http_client=get_http_client) as query:
with query('SELECT my_col_a FROM my_table') as (columns, rows):
rows = list(rows)
self.assertEqual(rows, [('some-text-a',)] * 500)
self.assertEqual(num_connections, 1)
def test_streaming(self):
rows_count = 0
rows_yielded_at_request = []
def get_http_client():
@contextmanager
def client():
with httpx.Client() as original_client:
class Client():
@contextmanager
def stream(self, method, url, params, headers):
rows_yielded_at_request.append(
(rows_count, dict(headers).get('range'))
)
with original_client.stream(method, url,
params=params, headers=headers
) as response:
yield response
yield Client()
return client()
db = get_db([
"PRAGMA page_size = 4096;",
"CREATE TABLE my_table (my_col_a text, my_col_b text);",
] + [
"INSERT INTO my_table VALUES " + ','.join(["('some-text-a', 'some-text-b')"] * 500),
])
put_object_with_versioning('my-bucket', 'my.db', db)
with sqlite_s3_query('http://localhost:9000/my-bucket/my.db', get_credentials=lambda now: (
'us-east-1',
'AKIAIOSFODNN7EXAMPLE',
'wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY',
None,
), get_http_client=get_http_client) as query:
with query('SELECT my_col_a FROM my_table') as (cols, rows):
for row in rows:
rows_count += 1
self.assertIn(rows_yielded_at_request, ([
(0, None),
(0, 'bytes=0-99'),
(0, 'bytes=0-4095'),
(0, 'bytes=24-39'), # For older SQLite that doesn't support immutable files
(0, 'bytes=4096-8191'),
(0, 'bytes=8192-12287'),
(140, 'bytes=12288-16383'),
(276, 'bytes=16384-20479'),
(412, 'bytes=20480-24575'),
], [
(0, None),
(0, 'bytes=0-99'),
(0, 'bytes=0-4095'),
(0, 'bytes=4096-8191'),
(0, 'bytes=8192-12287'),
(140, 'bytes=12288-16383'),
(276, 'bytes=16384-20479'),
(412, 'bytes=20480-24575'),
]))
# Documenting the difference between the above and a query that is not streaming: a
# query with an ORDER BY on a column that does not have an index requires SQLite to
# fetch all the pages before yielding any rows to client code.
rows_count = 0
rows_yielded_at_request.clear()
with sqlite_s3_query('http://localhost:9000/my-bucket/my.db', get_credentials=lambda now: (
'us-east-1',
'AKIAIOSFODNN7EXAMPLE',
'wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY',
None,
), get_http_client=get_http_client) as query:
with query('SELECT my_col_a FROM my_table ORDER BY my_col_a') as (cols, rows):
for row in rows:
rows_count += 1
self.assertIn(rows_yielded_at_request, ([
(0, None),
(0, 'bytes=0-99'),
(0, 'bytes=0-4095'),
(0, 'bytes=24-39'), # For older SQLite that doesn't support immutable files
(0, 'bytes=4096-8191'),
(0, 'bytes=8192-12287'),
(0, 'bytes=12288-16383'),
(0, 'bytes=16384-20479'),
(0, 'bytes=20480-24575'),
], [
(0, None),
(0, 'bytes=0-99'),
(0, 'bytes=0-4095'),
(0, 'bytes=4096-8191'),
(0, 'bytes=8192-12287'),
(0, 'bytes=12288-16383'),
(0, 'bytes=16384-20479'),
(0, 'bytes=20480-24575'),
]))
def test_too_many_bytes(self):
@contextmanager
def server():
def _run(server_sock):
while True:
try:
downstream_sock, _ = server_sock.accept()
except Exception:
break
connection_t = threading.Thread(target=handle_downstream, args=(downstream_sock,))
connection_t.start()
with shutdown(get_new_socket()) as server_sock:
server_sock.bind(('127.0.0.1', 9001))
server_sock.listen(socket.IPPROTO_TCP)
threading.Thread(target=_run, args=(server_sock,)).start()
yield server_sock
def get_http_client():
@contextmanager
def client():
with httpx.Client() as original_client:
class Client():
@contextmanager
def stream(self, method, url, params, headers):
parsed_url = urllib.parse.urlparse(url)
url = urllib.parse.urlunparse(parsed_url._replace(netloc='localhost:9001'))
range_query = dict(headers).get('range')
is_query = range_query and range_query != 'bytes=0-99'
with original_client.stream(method, url,
params=params, headers=headers + (('host', 'localhost:9000'),)
) as response:
chunks = response.iter_bytes()
def iter_bytes(chunk_size=None):
yield from chunks
if is_query:
yield b'e'
response.iter_bytes = iter_bytes
yield response
yield Client()
return client()
with server() as server_sock:
db = get_db([
"CREATE TABLE my_table (my_col_a text, my_col_b text);",
] + [
"INSERT INTO my_table VALUES " + ','.join(["('some-text-a', 'some-text-b')"] * 500),
])
put_object_with_versioning('my-bucket', 'my.db', db)
with sqlite_s3_query('http://localhost:9000/my-bucket/my.db', get_credentials=lambda now: (
'us-east-1',
'AKIAIOSFODNN7EXAMPLE',
'wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY',
None,
), get_http_client=get_http_client) as query:
with self.assertRaisesRegex(Exception, 'disk I/O error'):
query('SELECT my_col_a FROM my_table').__enter__()
def test_disconnection(self):
@contextmanager
def server():
def _run(server_sock):
while True:
try:
downstream_sock, _ = server_sock.accept()
except Exception:
break
downstream_sock.close()
connection_t = threading.Thread(target=handle_downstream, args=(downstream_sock,))
connection_t.start()
with shutdown(get_new_socket()) as server_sock:
server_sock.bind(('127.0.0.1', 9001))
server_sock.listen(socket.IPPROTO_TCP)
threading.Thread(target=_run, args=(server_sock,)).start()
yield server_sock
def get_http_client():
@contextmanager
def client():
with httpx.Client() as original_client:
class Client():
def stream(self, method, url, headers, params):
parsed_url = urllib.parse.urlparse(url)
url = urllib.parse.urlunparse(parsed_url._replace(netloc='localhost:9001'))
return original_client.stream(method, url, headers=headers + (('host', 'localhost:9000'),))
yield Client()
return client()
db = get_db([
"CREATE TABLE my_table (my_col_a text, my_col_b text);",
] + [
"INSERT INTO my_table VALUES " + ','.join(["('some-text-a', 'some-text-b')"] * 500),
])
put_object_with_versioning('my-bucket', 'my.db', db)
with server() as server_sock:
with self.assertRaisesRegex(Exception, 'Connection'):
sqlite_s3_query('http://localhost:9000/my-bucket/my.db', get_credentials=lambda now: (
'us-east-1',
'AKIAIOSFODNN7EXAMPLE',
'wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY',
None,
), get_http_client=get_http_client).__enter__()
def put_object_without_versioning(bucket, key, content):
create_bucket(bucket)
url = f'http://127.0.0.1:9000/{bucket}/{key}'
body_hash = hashlib.sha256(content).hexdigest()
parsed_url = urllib.parse.urlsplit(url)
headers = aws_sigv4_headers(
'AKIAIOSFODNN7EXAMPLE', 'wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY',
(), 's3', 'us-east-1', parsed_url.netloc, 'PUT', parsed_url.path, (), body_hash,
)
response = httpx.put(url, content=content, headers=headers)
response.raise_for_status()
def put_object_with_versioning(bucket, key, content):
create_bucket(bucket)
enable_versioning(bucket)
url = f'http://127.0.0.1:9000/{bucket}/{key}'
body_hash = hashlib.sha256(content).hexdigest()
parsed_url = urllib.parse.urlsplit(url)
headers = aws_sigv4_headers(
'AKIAIOSFODNN7EXAMPLE', 'wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY',
(), 's3', 'us-east-1', parsed_url.netloc, 'PUT', parsed_url.path, (), body_hash,
)
response = httpx.put(url, content=content, headers=headers)
response.raise_for_status()
def create_bucket(bucket):
url = f'http://127.0.0.1:9000/{bucket}/'
content = b''
body_hash = hashlib.sha256(content).hexdigest()
parsed_url = urllib.parse.urlsplit(url)
headers = aws_sigv4_headers(
'AKIAIOSFODNN7EXAMPLE', 'wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY',
(), 's3', 'us-east-1', parsed_url.netloc, 'PUT', parsed_url.path, (), body_hash,
)
response = httpx.put(url, content=content, headers=headers)
def enable_versioning(bucket):
content = '''
<VersioningConfiguration xmlns="http://s3.amazonaws.com/doc/2006-03-01/">
<Status>Enabled</Status>
</VersioningConfiguration>
'''.encode()
url = f'http://127.0.0.1:9000/{bucket}/?versioning'
body_hash = hashlib.sha256(content).hexdigest()
parsed_url = urllib.parse.urlsplit(url)
headers = aws_sigv4_headers(
'AKIAIOSFODNN7EXAMPLE', 'wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY',
(), 's3', 'us-east-1', parsed_url.netloc, 'PUT', parsed_url.path, (('versioning', ''),), body_hash,
)
response = httpx.put(url, content=content, headers=headers)
response.raise_for_status()
def aws_sigv4_headers(access_key_id, secret_access_key, pre_auth_headers,
service, region, host, method, path, params, body_hash):
algorithm = 'AWS4-HMAC-SHA256'
now = datetime.datetime.utcnow()
amzdate = now.strftime('%Y%m%dT%H%M%SZ')
datestamp = now.strftime('%Y%m%d')
credential_scope = f'{datestamp}/{region}/{service}/aws4_request'
pre_auth_headers_lower = tuple((
(header_key.lower(), ' '.join(header_value.split()))
for header_key, header_value in pre_auth_headers
))
required_headers = (
('host', host),
('x-amz-content-sha256', body_hash),
('x-amz-date', amzdate),
)
headers = sorted(pre_auth_headers_lower + required_headers)
signed_headers = ';'.join(key for key, _ in headers)
def signature():
def canonical_request():
canonical_uri = urllib.parse.quote(path, safe='/~')
quoted_params = sorted(
(urllib.parse.quote(key, safe='~'), urllib.parse.quote(value, safe='~'))
for key, value in params
)
canonical_querystring = '&'.join(f'{key}={value}' for key, value in quoted_params)
canonical_headers = ''.join(f'{key}:{value}\n' for key, value in headers)
return f'{method}\n{canonical_uri}\n{canonical_querystring}\n' + \
f'{canonical_headers}\n{signed_headers}\n{body_hash}'
def sign(key, msg):
return hmac.new(key, msg.encode('ascii'), hashlib.sha256).digest()
string_to_sign = f'{algorithm}\n{amzdate}\n{credential_scope}\n' + \
hashlib.sha256(canonical_request().encode('ascii')).hexdigest()
date_key = sign(('AWS4' + secret_access_key).encode('ascii'), datestamp)
region_key = sign(date_key, region)
service_key = sign(region_key, service)
request_key = sign(service_key, 'aws4_request')
return sign(request_key, string_to_sign).hex()
return (
(b'authorization', (
f'{algorithm} Credential={access_key_id}/{credential_scope}, '
f'SignedHeaders={signed_headers}, Signature=' + signature()).encode('ascii')
),
(b'x-amz-date', amzdate.encode('ascii')),
(b'x-amz-content-sha256', body_hash.encode('ascii')),
) + pre_auth_headers
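# Note (added): the derivation above follows AWS Signature Version 4: HMAC
# keys are chained date -> region -> service -> "aws4_request", and the final
# signature is the hex HMAC-SHA256 of the string-to-sign under that key.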
def get_db(sqls):
with tempfile.NamedTemporaryFile() as fp:
with sqlite3.connect(fp.name, isolation_level=None) as con:
cur = con.cursor()
for sql in sqls:
cur.execute(sql)
with open(fp.name, 'rb') as f:
return f.read()
def get_new_socket():
sock = socket.socket(family=socket.AF_INET, type=socket.SOCK_STREAM,
proto=socket.IPPROTO_TCP)
sock.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)
return sock
def upstream_connect():
upstream_sock = socket.create_connection(('127.0.0.1', 9000))
upstream_sock.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)
return upstream_sock
@contextmanager
def shutdown(sock):
try:
yield sock
finally:
try:
sock.shutdown(socket.SHUT_RDWR)
except OSError:
pass
finally:
sock.close()
def proxy(done, source, target):
try:
chunk = source.recv(1)
while chunk:
target.sendall(chunk)
chunk = source.recv(1)
except OSError:
pass
finally:
done.set()
def handle_downstream(downstream_sock):
with \
shutdown(upstream_connect()) as upstream_sock, \
shutdown(downstream_sock) as downstream_sock:
done = threading.Event()
threading.Thread(target=proxy, args=(done, upstream_sock, downstream_sock)).start()
threading.Thread(target=proxy, args=(done, downstream_sock, upstream_sock)).start()
done.wait()
|
web.py
|
from flask import Flask, request
from requests import get, post
import threading
import os
from main import start_bot
status = ""
app = Flask(__name__)
SITE_NAME = 'http://127.0.0.1:8080/'
@app.route('/jsonrpc',methods=['POST'])
def proxypost():
path="jsonrpc"
#print("post")
#print(f'{SITE_NAME}{path}')
url=f'{SITE_NAME}{path}?'
#print(request.form)
student = request.data
#print(student)
#获取到POST过来的数据,因为我这里传过来的数据需要转换一下编码。根据晶具体情况而定
return (post(url=url,data=student).content)
@app.route('/', methods=['GET'])
def index():
global status
if status == "":
t1 = threading.Thread(target=start_bot)  # target specifies the task the child thread runs; args=<tuple> would pass arguments
t1.start()  # the child thread is only created and run once start() is called
print(t1.is_alive())
status = t1
# threading.enumerate() lists the running threads, including the main thread and child threads
#print(threading.enumerate())
return "Waking up the bot", 200
else:
print(status.is_alive())
if status.is_alive():
return "Bot is already running", 200
else:
t1 = threading.Thread(target=start_bot)  # target specifies the task the child thread runs; args=<tuple> would pass arguments
t1.start()  # the child thread is only created and run once start() is called
print(t1.is_alive())
status = t1
return "Waking the bot up again", 200
@app.route('/jsonrpc/',methods=['GET'])
def proxyget():
path="jsonrpc"
#print(f'{SITE_NAME}{path}')
url=f'{SITE_NAME}{path}?'
#print(request.args)
par=request.args
#http://127.0.0.1:5000/jsonrpc?jsonrpc=2.0&method=aria2.getGlobalStat&id=QXJpYU5nXzE2MTM4ODAwNTBfMC44NTY2NjkzOTUyMjEzNDg3¶ms=WyJ0b2tlbjp3Y3k5ODE1MSJd&
return get(url=url,params=par).content
if __name__ == '__main__':
port = int(os.environ.get('PORT', 5000))
app.run(host='0.0.0.0', port=port, debug=False)
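# Note (added): `status` is read and written without a lock, so two
# overlapping GET requests to '/' can both observe status == "" and start the
# bot twice; guarding the check-and-start with a threading.Lock would make
# startup idempotent.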
|
lambda_executors.py
|
import os
import re
import json
import time
import logging
import threading
import subprocess
# from datetime import datetime
from multiprocessing import Process, Queue
try:
from shlex import quote as cmd_quote
except ImportError:
# for Python 2.7
from pipes import quote as cmd_quote
from localstack import config
from localstack.utils.common import run, TMP_FILES, short_uid, save_file, to_str, cp_r
from localstack.services.install import INSTALL_PATH_LOCALSTACK_FAT_JAR
# constants
LAMBDA_EXECUTOR_JAR = INSTALL_PATH_LOCALSTACK_FAT_JAR
LAMBDA_EXECUTOR_CLASS = 'cloud.localstack.LambdaExecutor'
EVENT_FILE_PATTERN = '%s/lambda.event.*.json' % config.TMP_FOLDER
LAMBDA_RUNTIME_PYTHON27 = 'python2.7'
LAMBDA_RUNTIME_PYTHON36 = 'python3.6'
LAMBDA_RUNTIME_NODEJS = 'nodejs'
LAMBDA_RUNTIME_NODEJS610 = 'nodejs6.10'
LAMBDA_RUNTIME_NODEJS810 = 'nodejs8.10'
LAMBDA_RUNTIME_JAVA8 = 'java8'
LAMBDA_RUNTIME_DOTNETCORE2 = 'dotnetcore2.0'
LAMBDA_RUNTIME_GOLANG = 'go1.x'
LAMBDA_EVENT_FILE = 'event_file.json'
# logger
LOG = logging.getLogger(__name__)
# maximum time a pre-allocated container can sit idle before getting killed
MAX_CONTAINER_IDLE_TIME = 600
class LambdaExecutor(object):
""" Base class for Lambda executors. Subclasses must overwrite the execute method """
def __init__(self):
pass
def execute(self, func_arn, func_details, event, context=None, version=None, asynchronous=False):
raise Exception('Not implemented.')
def startup(self):
pass
def cleanup(self, arn=None):
pass
def run_lambda_executor(self, cmd, env_vars={}, asynchronous=False):
process = run(cmd, asynchronous=True, stderr=subprocess.PIPE, outfile=subprocess.PIPE, env_vars=env_vars)
if asynchronous:
result = '{"asynchronous": "%s"}' % asynchronous
log_output = 'Lambda executed asynchronously'
else:
return_code = process.wait()
result = to_str(process.stdout.read())
log_output = to_str(process.stderr.read())
if return_code != 0:
raise Exception('Lambda process returned error status code: %s. Output:\n%s' %
(return_code, log_output))
return result, log_output
# holds information about an existing container.
class ContainerInfo:
"""
Contains basic information about a docker container.
"""
def __init__(self, name, entry_point):
self.name = name
self.entry_point = entry_point
class LambdaExecutorContainers(LambdaExecutor):
""" Abstract executor class for executing Lambda functions in Docker containers """
def prepare_execution(self, func_arn, env_vars, runtime, command, handler, lambda_cwd):
raise Exception('Not implemented')
def execute(self, func_arn, func_details, event, context=None, version=None, asynchronous=False):
lambda_cwd = func_details.cwd
runtime = func_details.runtime
handler = func_details.handler
environment = func_details.envvars.copy()
# configure USE_SSL in environment
if config.USE_SSL:
environment['USE_SSL'] = '1'
# prepare event body
if not event:
LOG.warning('Empty event body specified for invocation of Lambda "%s"' % func_arn)
event = {}
event_body = json.dumps(event)
event_body_escaped = event_body.replace("'", "\\'")
docker_host = config.DOCKER_HOST_FROM_CONTAINER
# amend the environment variables for execution
environment['AWS_LAMBDA_EVENT_BODY'] = event_body_escaped
environment['HOSTNAME'] = docker_host
environment['LOCALSTACK_HOSTNAME'] = docker_host
if context:
environment['AWS_LAMBDA_FUNCTION_NAME'] = context.function_name
environment['AWS_LAMBDA_FUNCTION_VERSION'] = context.function_version
environment['AWS_LAMBDA_FUNCTION_INVOKED_ARN'] = context.invoked_function_arn
# custom command to execute in the container
command = ''
# if running a Java Lambda, set up classpath arguments
if runtime == LAMBDA_RUNTIME_JAVA8:
# copy executor jar into temp directory
cp_r(LAMBDA_EXECUTOR_JAR, lambda_cwd)
# TODO cleanup once we have custom Java Docker image
taskdir = '/var/task'
save_file(os.path.join(lambda_cwd, LAMBDA_EVENT_FILE), event_body)
command = ("bash -c 'cd %s; java -cp .:`ls *.jar | tr \"\\n\" \":\"` \"%s\" \"%s\" \"%s\"'" %
(taskdir, LAMBDA_EXECUTOR_CLASS, handler, LAMBDA_EVENT_FILE))
# determine the command to be executed (implemented by subclasses)
cmd = self.prepare_execution(func_arn, environment, runtime, command, handler, lambda_cwd)
# lambci writes the Lambda result to stdout and logs to stderr, fetch it from there!
LOG.debug('Running lambda cmd: %s' % cmd)
result, log_output = self.run_lambda_executor(cmd, environment, asynchronous)
LOG.debug('Lambda result / log output:\n%s\n>%s' % (result.strip(), log_output.strip().replace('\n', '\n> ')))
return result, log_output
class LambdaExecutorReuseContainers(LambdaExecutorContainers):
""" Executor class for executing Lambda functions in re-usable Docker containers """
def __init__(self):
super(LambdaExecutorReuseContainers, self).__init__()
# keeps track of each function arn and the last time it was invoked
self.function_invoke_times = {}
# locking thread for creation/destruction of docker containers.
self.docker_container_lock = threading.RLock()
def prepare_execution(self, func_arn, env_vars, runtime, command, handler, lambda_cwd):
# check whether the Lambda has been invoked before
has_been_invoked_before = func_arn in self.function_invoke_times
# set the invocation time
self.function_invoke_times[func_arn] = time.time()
# create/verify the docker container is running.
LOG.debug('Priming docker container with runtime "%s" and arn "%s".', runtime, func_arn)
container_info = self.prime_docker_container(runtime, func_arn, env_vars.items(), lambda_cwd)
# Note: currently "docker exec" does not support --env-file, i.e., environment variables can only be
# passed directly on the command line, using "-e" below. TODO: Update this code once --env-file is
# available for docker exec, to better support very large Lambda events (very long environment values)
exec_env_vars = ' '.join(['-e {}="${}"'.format(k, k) for (k, v) in env_vars.items()])
if not command:
command = '%s %s' % (container_info.entry_point, handler)
# determine files to be copied into the container
copy_command = ''
event_file = os.path.join(lambda_cwd, LAMBDA_EVENT_FILE)
if not has_been_invoked_before:
# if this is the first invocation: copy the entire folder into the container
copy_command = 'docker cp "%s/." "%s:/var/task"; ' % (lambda_cwd, container_info.name)
elif os.path.exists(event_file):
# otherwise, copy only the event file if it exists
copy_command = 'docker cp "%s" "%s:/var/task"; ' % (event_file, container_info.name)
cmd = (
'%s' # copy files command
'docker exec'
' %s' # env variables
' %s' # container name
' %s' # run cmd
) % (copy_command, exec_env_vars, container_info.name, command)
return cmd
def startup(self):
self.cleanup()
# start a process to remove idle containers
self.start_idle_container_destroyer_interval()
def cleanup(self, arn=None):
if arn:
self.function_invoke_times.pop(arn, None)
return self.destroy_docker_container(arn)
self.function_invoke_times = {}
return self.destroy_existing_docker_containers()
def prime_docker_container(self, runtime, func_arn, env_vars, lambda_cwd):
"""
Prepares a persistent docker container for a specific function.
:param runtime: Lambda runtime environment: python2.7, nodejs6.10, etc.
:param func_arn: The ARN of the lambda function.
:param env_vars: The environment variables for the lambda.
:param lambda_cwd: The local directory containing the code for the lambda function.
:return: ContainerInfo class containing the container name and default entry point.
"""
with self.docker_container_lock:
# Get the container name and id.
container_name = self.get_container_name(func_arn)
LOG.debug('Priming docker container: %s' % container_name)
status = self.get_docker_container_status(func_arn)
# Container is not running or doesn't exist.
if status < 1:
# Make sure the container does not exist in any form/state.
self.destroy_docker_container(func_arn)
env_vars_str = ' '.join(['-e {}={}'.format(k, cmd_quote(v)) for (k, v) in env_vars])
network = config.LAMBDA_DOCKER_NETWORK
network_str = ' --network="%s" ' % network if network else ''
# Create and start the container
LOG.debug('Creating container: %s' % container_name)
cmd = (
'docker create'
' --name "%s"'
' --entrypoint /bin/bash' # Load bash when it starts.
' --interactive' # Keeps the container running bash.
' -e AWS_LAMBDA_EVENT_BODY="$AWS_LAMBDA_EVENT_BODY"'
' -e HOSTNAME="$HOSTNAME"'
' -e LOCALSTACK_HOSTNAME="$LOCALSTACK_HOSTNAME"'
' %s' # env_vars
' %s' # network
' lambci/lambda:%s'
) % (container_name, env_vars_str, network_str, runtime)
LOG.debug(cmd)
run(cmd, stderr=subprocess.PIPE, outfile=subprocess.PIPE)
LOG.debug('Copying files to container "%s" from "%s".' % (container_name, lambda_cwd))
cmd = (
'docker cp'
' "%s/." "%s:/var/task"'
) % (lambda_cwd, container_name)
LOG.debug(cmd)
run(cmd, stderr=subprocess.PIPE, outfile=subprocess.PIPE)
LOG.debug('Starting container: %s' % container_name)
cmd = 'docker start %s' % (container_name)
LOG.debug(cmd)
run(cmd, stderr=subprocess.PIPE, outfile=subprocess.PIPE)
# give the container some time to start up
time.sleep(1)
# Get the entry point for the image.
LOG.debug('Getting the entrypoint for image: lambci/lambda:%s' % runtime)
cmd = (
'docker image inspect'
' --format="{{ .ContainerConfig.Entrypoint }}"'
' lambci/lambda:%s'
) % (runtime)
LOG.debug(cmd)
run_result = run(cmd, asynchronous=False, stderr=subprocess.PIPE, outfile=subprocess.PIPE)
entry_point = run_result.strip('[]\n\r ')
container_network = self.get_docker_container_network(func_arn)
LOG.debug('Using entrypoint "%s" for container "%s" on network "%s".'
% (entry_point, container_name, container_network))
return ContainerInfo(container_name, entry_point)
def destroy_docker_container(self, func_arn):
"""
Stops and/or removes a docker container for a specific lambda function ARN.
:param func_arn: The ARN of the lambda function.
:return: None
"""
with self.docker_container_lock:
status = self.get_docker_container_status(func_arn)
# Get the container name and id.
container_name = self.get_container_name(func_arn)
if status == 1:
LOG.debug('Stopping container: %s' % container_name)
cmd = (
'docker stop -t0 %s'
) % (container_name)
LOG.debug(cmd)
run(cmd, asynchronous=False, stderr=subprocess.PIPE, outfile=subprocess.PIPE)
status = self.get_docker_container_status(func_arn)
if status == -1:
LOG.debug('Removing container: %s' % container_name)
cmd = (
'docker rm %s'
) % (container_name)
LOG.debug(cmd)
run(cmd, asynchronous=False, stderr=subprocess.PIPE, outfile=subprocess.PIPE)
def get_all_container_names(self):
"""
Returns a list of container names for lambda containers.
:return: A String[] localstack docker container names for each function.
"""
with self.docker_container_lock:
LOG.debug('Getting all lambda containers names.')
cmd = 'docker ps -a --filter="name=localstack_lambda_*" --format "{{.Names}}"'
LOG.debug(cmd)
cmd_result = run(cmd, asynchronous=False, stderr=subprocess.PIPE, outfile=subprocess.PIPE).strip()
if len(cmd_result) > 0:
container_names = cmd_result.split('\n')
else:
container_names = []
return container_names
def destroy_existing_docker_containers(self):
"""
Stops and/or removes all lambda docker containers for localstack.
:return: None
"""
with self.docker_container_lock:
container_names = self.get_all_container_names()
LOG.debug('Removing %d containers.' % len(container_names))
for container_name in container_names:
cmd = 'docker rm -f %s' % container_name
LOG.debug(cmd)
run(cmd, asynchronous=False, stderr=subprocess.PIPE, outfile=subprocess.PIPE)
def get_docker_container_status(self, func_arn):
"""
Determine the status of a docker container.
:param func_arn: The ARN of the lambda function.
:return: 1 If the container is running,
-1 if the container exists but is not running
0 if the container does not exist.
"""
with self.docker_container_lock:
# Get the container name and id.
container_name = self.get_container_name(func_arn)
# Check if the container is already running.
LOG.debug('Getting container status: %s' % container_name)
cmd = (
'docker ps'
' -a'
' --filter name="%s"'
' --format "{{ .Status }}"'
) % (container_name)
LOG.debug(cmd)
cmd_result = run(cmd, asynchronous=False, stderr=subprocess.PIPE, outfile=subprocess.PIPE)
# If the container doesn't exist. Create and start it.
container_status = cmd_result.strip()
if len(container_status) == 0:
return 0
if container_status.lower().startswith('up '):
return 1
return -1
def get_docker_container_network(self, func_arn):
"""
Determine the network of a docker container.
:param func_arn: The ARN of the lambda function.
:return: name of the container network
"""
with self.docker_container_lock:
status = self.get_docker_container_status(func_arn)
# container does not exist
if status == 0:
return ''
# Get the container name.
container_name = self.get_container_name(func_arn)
# Get the container network
LOG.debug('Getting container network: %s' % container_name)
cmd = (
'docker inspect %s'
' --format "{{ .HostConfig.NetworkMode }}"'
) % (container_name)
LOG.debug(cmd)
cmd_result = run(cmd, asynchronous=False, stderr=subprocess.PIPE, outfile=subprocess.PIPE)
container_network = cmd_result.strip()
return container_network
def idle_container_destroyer(self):
"""
Iterates through all the lambda containers and destroys any container that has
been inactive for longer than MAX_CONTAINER_IDLE_TIME.
:return: None
"""
LOG.info('Checking if there are idle containers.')
current_time = time.time()
for func_arn, last_run_time in self.function_invoke_times.items():
duration = current_time - last_run_time
# not enough idle time has passed
if duration < MAX_CONTAINER_IDLE_TIME:
continue
# container has been idle, destroy it.
self.destroy_docker_container(func_arn)
def start_idle_container_destroyer_interval(self):
"""
Runs idle_container_destroyer() and then re-schedules itself every 60 seconds,
periodically checking for idle containers and destroying them.
:return: None
"""
self.idle_container_destroyer()
threading.Timer(60.0, self.start_idle_container_destroyer_interval).start()
def get_container_name(self, func_arn):
"""
Given a function ARN, returns a valid docker container name.
:param func_arn: The ARN of the lambda function.
:return: A docker compatible name for the arn.
"""
return 'localstack_lambda_' + re.sub(r'[^a-zA-Z0-9_.-]', '_', func_arn)
class LambdaExecutorSeparateContainers(LambdaExecutorContainers):
def prepare_execution(self, func_arn, env_vars, runtime, command, handler, lambda_cwd):
entrypoint = ''
if command:
entrypoint = ' --entrypoint ""'
else:
command = '"%s"' % handler
env_vars_string = ' '.join(['-e {}="${}"'.format(k, k) for (k, v) in env_vars.items()])
network = config.LAMBDA_DOCKER_NETWORK
network_str = ' --network="%s" ' % network if network else ''
if config.LAMBDA_REMOTE_DOCKER:
cmd = (
'CONTAINER_ID="$(docker create'
' %s'
' %s'
' %s' # network
' "lambci/lambda:%s" %s'
')";'
'docker cp "%s/." "$CONTAINER_ID:/var/task";'
'docker start -a "$CONTAINER_ID";'
) % (entrypoint, env_vars_string, network_str, runtime, command, lambda_cwd)
else:
lambda_cwd_on_host = self.get_host_path_for_path_in_docker(lambda_cwd)
cmd = (
'docker run'
'%s -v "%s":/var/task'
' %s'
' %s' # network
' --rm'
' "lambci/lambda:%s" %s'
) % (entrypoint, lambda_cwd_on_host, env_vars_string, network_str, runtime, command)
return cmd
def get_host_path_for_path_in_docker(self, path):
return re.sub(r'^%s/(.*)$' % config.TMP_FOLDER,
r'%s/\1' % config.HOST_TMP_FOLDER, path)
class LambdaExecutorLocal(LambdaExecutor):
def execute(self, func_arn, func_details, event, context=None, version=None, asynchronous=False):
lambda_cwd = func_details.cwd
environment = func_details.envvars.copy()
# execute the Lambda function in a forked sub-process, sync result via queue
queue = Queue()
lambda_function = func_details.function(version)
def do_execute():
# now we're executing in the child process, safe to change CWD and ENV
if lambda_cwd:
os.chdir(lambda_cwd)
if environment:
os.environ.update(environment)
result = lambda_function(event, context)
queue.put(result)
process = Process(target=do_execute)
process.start()  # start() forks as the comment above intends; run() would execute inline and mutate the parent's CWD/env
process.join()
result = queue.get()
# TODO capture log output during local execution?
log_output = ''
return result, log_output
def execute_java_lambda(self, event, context, handler, main_file):
event_file = EVENT_FILE_PATTERN.replace('*', short_uid())
save_file(event_file, json.dumps(event))
TMP_FILES.append(event_file)
class_name = handler.split('::')[0]
classpath = '%s:%s' % (LAMBDA_EXECUTOR_JAR, main_file)
cmd = 'java -cp %s %s %s %s' % (classpath, LAMBDA_EXECUTOR_CLASS, class_name, event_file)
asynchronous = False
# flip asynchronous flag depending on origin
if 'Records' in event:
# TODO: add more event supporting asynchronous lambda execution
if 'Sns' in event['Records'][0]:
asynchronous = True
if 'dynamodb' in event['Records'][0]:
asynchronous = True
result, log_output = self.run_lambda_executor(cmd, asynchronous=asynchronous)
LOG.debug('Lambda result / log output:\n%s\n> %s' % (result.strip(), log_output.strip().replace('\n', '\n> ')))
return result, log_output
# --------------
# GLOBAL STATE
# --------------
EXECUTOR_LOCAL = LambdaExecutorLocal()
EXECUTOR_CONTAINERS_SEPARATE = LambdaExecutorSeparateContainers()
EXECUTOR_CONTAINERS_REUSE = LambdaExecutorReuseContainers()
DEFAULT_EXECUTOR = EXECUTOR_LOCAL
# the keys of AVAILABLE_EXECUTORS map to the LAMBDA_EXECUTOR config variable
AVAILABLE_EXECUTORS = {
'local': EXECUTOR_LOCAL,
'docker': EXECUTOR_CONTAINERS_SEPARATE,
'docker-reuse': EXECUTOR_CONTAINERS_REUSE
}
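# Usage sketch (added for illustration): pick an executor from the
# LAMBDA_EXECUTOR config value, falling back to the local in-process executor.
#
#   executor = AVAILABLE_EXECUTORS.get(config.LAMBDA_EXECUTOR, DEFAULT_EXECUTOR)
#   executor.startup()
#   result, log_output = executor.execute(func_arn, func_details, event)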
|
Chap10_Example10.22.py
|
from threading import *
def mymsgprint():
mychildthread2 = Thread(target=disp2) # DL2
print("Second Child thread name is", mychildthread2.getName(), end='')
if mychildthread2.daemon:
print(" and is a daemon thread and it's parent name is", mychildt1.getName())
else:
print(" and is a non-daemon thread and its parent name is", mychildt1.getName())
mychildthread2.start()
def disp2():
print("Display2 function")
mythreadobj = current_thread()
print("The main thread name is", mythreadobj.getName(), end='')
if mythreadobj.daemon:
print(" and is a daemon thread.")
else:
print(" and is a non-daemon thread.")
mychildt1 = Thread(target=mymsgprint)
mychildt1.daemon = True # DL1
print("Child thread name is", mychildt1.getName(), end='')
if mychildt1.daemon:
print(" and is a daemon thread.")
else:
print(" and is a non-daemon thread.")
mychildt1.start()
mychildt1.join() # make the main thread wait for the child thread to complete.
print("Main Thread Completed!")
|
processor.py
|
import os
import re
import subprocess
import sys
from functools import partial
from threading import Thread
from gooey.gui import events
from gooey.gui.pubsub import pub
from gooey.gui.util.casting import safe_float
from gooey.gui.util.taskkill import taskkill
from gooey.util.functional import unit, bind
class ProcessController(object):
def __init__(self, progress_regex, progress_expr, hide_progress_msg,
encoding, shell=True):
self._process = None
self.progress_regex = progress_regex
self.progress_expr = progress_expr
self.hide_progress_msg = hide_progress_msg
self.encoding = encoding
self.wasForcefullyStopped = False
self.shell_execution = shell
def was_success(self):
self._process.communicate()
return self._process.returncode == 0
def poll(self):
if not self._process:
raise Exception('Not started!')
return self._process.poll()  # without the return, running() would always report the process as alive
def stop(self):
if self.running():
self.wasForcefullyStopped = True
taskkill(self._process.pid)
def running(self):
return self._process and self.poll() is None
def run(self, command):
self.wasForcefullyStopped = False
env = os.environ.copy()
env["GOOEY"] = "1"
env["PYTHONIOENCODING"] = self.encoding
try:
self._process = subprocess.Popen(
command.encode(sys.getfilesystemencoding()),
bufsize=1, stdout=subprocess.PIPE, stdin=subprocess.PIPE,
stderr=subprocess.STDOUT, shell=self.shell_execution, env=env)
except Exception:
self._process = subprocess.Popen(
command,
bufsize=1, stdout=subprocess.PIPE, stdin=subprocess.PIPE,
stderr = subprocess.STDOUT, shell = self.shell_execution, env=env)
t = Thread(target=self._forward_stdout, args=(self._process,))
t.start()
def _forward_stdout(self, process):
'''
Reads the stdout of `process` and forwards lines and progress
to any interested subscribers
'''
while True:
line = process.stdout.readline()
if not line:
break
_progress = self._extract_progress(line)
pub.send_message(events.PROGRESS_UPDATE, progress=_progress)
if _progress is None or self.hide_progress_msg is False:
pub.send_message(events.CONSOLE_UPDATE,
msg=line.decode(self.encoding))
pub.send_message(events.EXECUTION_COMPLETE)
def _extract_progress(self, text):
'''
Finds progress information in the text using the
user-supplied regex and calculation instructions
'''
# monad-ish dispatch to avoid the if/else soup
find = partial(re.search, string=text.strip().decode(self.encoding))
regex = unit(self.progress_regex)
match = bind(regex, find)
result = bind(match, self._calculate_progress)
return result
def _calculate_progress(self, match):
'''
Calculates the final progress value found by the regex
'''
if not self.progress_expr:
return safe_float(match.group(1))
else:
return self._eval_progress(match)
def _eval_progress(self, match):
'''
Runs the user-supplied progress calculation rule
'''
_locals = {k: safe_float(v) for k, v in match.groupdict().items()}
if "x" not in _locals:
_locals["x"] = [safe_float(x) for x in match.groups()]
try:
return int(eval(self.progress_expr, {}, _locals))
except Exception:
return None
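# Illustrative sketch (an assumption about gooey.util.functional.unit/bind,
# which appear to implement a Maybe-style chain where bind short-circuits on
# None). Renamed here to avoid shadowing the real imports:
def _maybe_unit(value):
    # wrap a value; identity in this None-based Maybe encoding
    return value

def _maybe_bind(value, func):
    # apply func only if the chain hasn't already failed with None
    return func(value) if value is not None else None

# Under those assumed semantics, with progress_regex=r"(\d+)/(\d+)" and
# progress_expr="x[0] / x[1] * 100", _extract_progress(b"progress 3/10")
# would evaluate int(3.0 / 10.0 * 100) == 30.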
|
box_rpc.py
|
import copy
import datetime
import socket
import subprocess
import threading
import time
try:
from queue import Queue
except ImportError:
from Queue import Queue
import plumbum
import rpyc
from sqlalchemy import sql
from zeroconf import ServiceBrowser, Zeroconf
import config
from web import db_schema
from common import auto_restart, get_routed_ip, max_mtime
class Box(object):
def __init__(self, name, ip, port, properties, deregister_callback):
self.name = name # name of remote box
self.ip = ip # IP address
self.port = port # port on which atles_remote.py is accepting connections
# callback function for this box to deregister itself w/ zeroconf
self.deregister_callback = deregister_callback
# information on git commit status for remote code
self.gitshort = properties[b'gitshort'].decode()
self.gitlong = properties[b'gitlong'].decode()
# does this box have a display for "background images"
self.hasdisplay = properties[b'hasdisplay']
# username for SSH login to remote box
self.user = properties[b'user'].decode()
# paths to track data directories on remote box
self.appdir = properties[b'appdir'].decode()
# build useful paths (assumes same directory structure on remote)
self.trackdir = self.appdir / config.TRACKDIR.relative_to(config.BASEDIR)
self.archivedir = self.appdir / config.ARCHIVEDIR.relative_to(config.BASEDIR)
self.dbgframedir = self.appdir / config.DBGFRAMEDIR.relative_to(config.BASEDIR)
self.error = None # Internal error message, if any
self.local = None # True if box is actually the local machine
self._tunnel = None # SSH tunnel instance
self._rpc = None # RPC connection instance
def get_info(self):
ret = {
'name': self.name,
'ip': self.ip,
'port': self.port,
'user': self.user,
'hasdisplay': self.hasdisplay,
'connected': self.connected,
'gitshort': self.gitshort,
'gitlong': self.gitlong,
'local': self.local,
'error': self.error,
}
if self.connected:
# verify that we actually are connected
self._ping(timeout=2)
lock_data = self.lock_data()
ret.update({
'exp_running': lock_data.get('running'),
'exp_pid': lock_data.get('pid'),
'exp_starttime': lock_data.get('starttime'),
'exp_runtime': lock_data.get('runtime')
})
else:
ret['exp_running'] = False
return ret
def connect(self, done_callback=None):
self.error = "connecting..."
self.local = (self.ip == get_routed_ip())
if not self.local:
# only connect if it's a separate machine
try:
# -oBatchMode=yes to disable password auth and just fail if key auth fails
self._tunnel = plumbum.SshMachine(self.ip, user=self.user, ssh_opts=['-oBatchMode=yes'])
except (plumbum.machines.session.SSHCommsChannel2Error, plumbum.machines.session.SSHCommsError):
self.error = "SSH connection failure"
self._tunnel = None
return
self._rpc = rpyc.ssh_connect(self._tunnel, self.port)
else:
self._rpc = rpyc.connect("localhost", self.port)
self.error = None
if done_callback is not None:
done_callback(self.name)
def down(self, error=None):
if self._rpc:
try:
self._rpc.close()
except AttributeError:
pass # always throws one in Session.close()... bug?
self._rpc = None
if self._tunnel:
self._tunnel.close()
self._tunnel = None
self.error = error
def sync_data(self):
''' Copy/sync track data from this box to the local track directory.'''
print("{}: Starting sync.".format(self.name))
assert self.connected
# If data is already local, no need to sync
assert not self.local
# Double-check that this box isn't running an experiment
if self.lock_exists():
return
# Copy remote files into an archive dir, then have rsync
# delete the originals after the transfer
cp_cmd = self._tunnel["cp"]
res = cp_cmd("-r", self.trackdir, self.archivedir)
print("{}: cp command got: {}".format(self.name, res))
# Currently does *not* copy the debugframes (following line is
# commented), so they will be removed from remote entirely.
#cp_cmd("-r", self.dbgframedir, self.archivedir)
# NOTE: Source must end with / to copy the *contents* of the folder
# instead of copying the source folder into the destination as a new
# folder there.
cmd = ['rsync', '-rvt', '--remove-source-files', '%s@%s:%s/' % (self.user, self.ip, self.trackdir), str(config.TRACKDIR / self.name)]
res = subprocess.check_output(cmd, stderr=subprocess.STDOUT)
print("{}: rsync trackfiles command got: {}".format(self.name, res))
cmd = ['rsync', '-rvt', '--remove-source-files', '%s@%s:%s/' % (self.user, self.ip, self.dbgframedir), str(config.DBGFRAMEDIR / self.name)] # trailing / copies the directory contents (see note above)
res = subprocess.check_output(cmd, stderr=subprocess.STDOUT)
print("{}: rsync dbgframes command got: {}".format(self.name, res))
def _ping(self, timeout):
'''
Attempt to call a function on the server over this connection with a
given timeout (in seconds).
'''
def timeout_close():
self.down("server timed out; connection closed")
# The server didn't deregister itself, so we need to here
# so that zeroconf will properly add the service when it returns.
self.deregister_callback()
timer = threading.Timer(timeout, timeout_close)
timer.start()
try:
self.lock_exists() # any simple function call; return ignored
finally:
timer.cancel()
@property
def connected(self):
return self._rpc and not self._rpc.closed
def __getattr__(self, name):
'''Return something from self.rpc if it wasn't found in this object
directly. Lets us use one object namespace to access both "local"
methods like sync_data() and remote RPC methods.'''
if self.connected and hasattr(self._rpc.root, name):
return getattr(self._rpc.root, name)
else:
# default behavior
raise AttributeError
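# Minimal standalone sketch (illustrative, not part of the original module) of
# the delegation pattern __getattr__ implements above: unknown attributes fall
# through to a backend object, so one namespace serves local and remote calls.
class _DelegatingProxy(object):
    def __init__(self, backend):
        self._backend = backend
    def __getattr__(self, name):
        # only called when normal lookup fails; AttributeError propagates
        # naturally if the backend lacks the attribute too
        return getattr(self._backend, name)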
class BoxManager(object):
def __init__(self, engine):
self._engine = engine
self._boxes = dict()
self._boxlock = threading.Lock()
self._updatequeue = Queue()
# work around a bug in zeroconf on Cygwin
try:
zeroconf = Zeroconf()
except socket.error:
zeroconf = Zeroconf(["0.0.0.0"])
self._browser = ServiceBrowser(zeroconf, "_atlesbox._tcp.local.", self) # starts its own daemon thread
# start separate thread for:
# - polling boxes
t = threading.Thread(target=auto_restart(self._poll_boxes))
t.daemon = True
t.start()
# - handling the explicit update queue
t = threading.Thread(target=auto_restart(self._watch_queue))
t.daemon = True
t.start()
def add_service(self, zeroconf, type, name):
''' Called automatically by ServiceBrowser. '''
info = zeroconf.get_service_info(type, name)
print("Service %s added, service info: %s" % (name, info))
boxname = info.properties[b'name'].decode()
assert boxname == name.split('.')[0]
# make a function for deregistering this box
def deregister():
# Do just enough to make zeroconf register the service
# when it returns.
# (This is used when a box loses its connection without
# deregistering itself.)
del self._browser.services[info.name.lower()]
newbox = Box(name=boxname,
ip=socket.inet_ntoa(info.address),
port=info.port,
properties=info.properties,
deregister_callback=deregister
)
# connect in a separate thread so we don't have to wait for the connection here
threading.Thread(target=newbox.connect, args=[self._updatequeue.put]).start()
with self._boxlock:
self._boxes[boxname] = newbox
self._updatequeue.put(boxname)
def remove_service(self, zeroconf, type, name):
''' Called automatically by ServiceBrowser. '''
print("Service %s removed" % name)
boxname = name.split('.')[0]
with self._boxlock:
self._boxes[boxname].down()
self._updatequeue.put(boxname)
def get_boxes(self):
with self._boxlock:
return copy.copy(self._boxes)
def _update_box_db(self, box, boxinfo, conn):
# add current time to boxinfo
boxinfo['last_updated'] = time.time()
boxes = db_schema.boxes
# check whether this box is in the database yet
select = sql.select([boxes.c.name]).where(boxes.c.name == box)
box_exists = conn.execute(select).scalar()
if box_exists:
# if so, update
update = boxes.update().where(boxes.c.name == box).values(boxinfo)
conn.execute(update)
else:
# if not, insert
insert = boxes.insert(boxinfo)
conn.execute(insert)
def _update_box_datafiles(self, box, boxinfo, conn):
''' Checks for newer datafiles; syncs if any are found. '''
box_rpc = self._boxes[box]
# Get mtimes of latest remote and local data files
latest_remote = box_rpc.max_datafile_mtime()
if latest_remote is None:
# No files present on remote
return
boxtrackdir = config.TRACKDIR / box
latest_local = max_mtime(boxtrackdir)
# *Ugly* hack to "de-netref" the rpyc-returned object
# Otherwise we can't compare it to a real datetime object...
timetuple = list(latest_remote.timetuple())[:6]
timetuple.append(latest_remote.microsecond)
latest_remote = datetime.datetime(*timetuple)
# If remote has newer, sync and update latest local time
if latest_local is None or latest_local < latest_remote:
box_rpc.sync_data()
# check that update occurred
diff = abs(latest_remote - max_mtime(boxtrackdir))
if diff > datetime.timedelta(seconds=1):
# warn w/ simple print for now
print("Warning: sync may not have occurred for box {}. Got time delta {}.".format(box, diff))
def _update_box(self, box, conn):
# get updated box data
with self._boxlock:
if box in self._boxes:
boxinfo = self._boxes[box].get_info()
else:
boxinfo = {'connected': False}
self._update_box_db(box, boxinfo, conn)
if boxinfo['connected'] \
and not boxinfo['local'] \
and not boxinfo['exp_running'] \
and self._boxes[box].connected \
and not self._boxes[box].lock_exists():
self._update_box_datafiles(box, boxinfo, conn)
def _watch_queue(self):
# Runs in its own thread
# Needs a separate sqlite connection for a separate thread
conn = self._engine.connect()
while True:
box = self._updatequeue.get()
self._update_box(box, conn)
def _poll_boxes(self):
# Runs in its own thread
# Needs a separate sqlite connection for a separate thread
conn = self._engine.connect()
boxes = db_schema.boxes
select = sql.select([boxes.c.name])
while True:
# Poll/update all boxes every 2 seconds
box_names = [row['name'] for row in conn.execute(select)]
# quick sanity check: all boxes in our list of RPC objects must be registered in the DB
for box in self._boxes:
assert box in box_names
for box in box_names:
self._updatequeue.put(box)
time.sleep(2)
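# Minimal standalone sketch (illustrative) of the Timer-based watchdog used in
# Box._ping above: arm a timer before a call that may hang, cancel it on
# completion; if the call stalls past the timeout, the timer fires first.
def _call_with_watchdog(func, timeout, on_timeout):
    timer = threading.Timer(timeout, on_timeout)
    timer.start()
    try:
        return func()
    finally:
        timer.cancel()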
|
test_socket.py
|
import unittest
from test import support
import errno
import io
import itertools
import socket
import select
import tempfile
import time
import traceback
import queue
import sys
import os
import array
import platform
import contextlib
from weakref import proxy
import signal
import math
import pickle
import struct
import random
import string
try:
import multiprocessing
except ImportError:
multiprocessing = False
try:
import fcntl
except ImportError:
fcntl = None
HOST = support.HOST
MSG = 'Michael Gilfix was here\u1234\r\n'.encode('utf-8') ## test unicode string and carriage return
try:
import _thread as thread
import threading
except ImportError:
thread = None
threading = None
try:
import _socket
except ImportError:
_socket = None
def _have_socket_can():
"""Check whether CAN sockets are supported on this host."""
try:
s = socket.socket(socket.PF_CAN, socket.SOCK_RAW, socket.CAN_RAW)
except (AttributeError, OSError):
return False
else:
s.close()
return True
def _have_socket_rds():
"""Check whether RDS sockets are supported on this host."""
try:
s = socket.socket(socket.PF_RDS, socket.SOCK_SEQPACKET, 0)
except (AttributeError, OSError):
return False
else:
s.close()
return True
HAVE_SOCKET_CAN = _have_socket_can()
HAVE_SOCKET_RDS = _have_socket_rds()
# Size in bytes of the int type
SIZEOF_INT = array.array("i").itemsize
class SocketTCPTest(unittest.TestCase):
def setUp(self):
self.serv = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.port = support.bind_port(self.serv)
self.serv.listen()
def tearDown(self):
self.serv.close()
self.serv = None
class SocketUDPTest(unittest.TestCase):
def setUp(self):
self.serv = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
self.port = support.bind_port(self.serv)
def tearDown(self):
self.serv.close()
self.serv = None
class ThreadSafeCleanupTestCase(unittest.TestCase):
"""Subclass of unittest.TestCase with thread-safe cleanup methods.
This subclass protects the addCleanup() and doCleanups() methods
with a recursive lock.
"""
if threading:
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self._cleanup_lock = threading.RLock()
def addCleanup(self, *args, **kwargs):
with self._cleanup_lock:
return super().addCleanup(*args, **kwargs)
def doCleanups(self, *args, **kwargs):
with self._cleanup_lock:
return super().doCleanups(*args, **kwargs)
class SocketCANTest(unittest.TestCase):
"""To be able to run this test, a `vcan0` CAN interface can be created with
the following commands:
# modprobe vcan
# ip link add dev vcan0 type vcan
# ifconfig vcan0 up
"""
interface = 'vcan0'
bufsize = 128
"""The CAN frame structure is defined in <linux/can.h>:
struct can_frame {
canid_t can_id; /* 32 bit CAN_ID + EFF/RTR/ERR flags */
__u8 can_dlc; /* data length code: 0 .. 8 */
__u8 data[8] __attribute__((aligned(8)));
};
"""
can_frame_fmt = "=IB3x8s"
can_frame_size = struct.calcsize(can_frame_fmt)
"""The Broadcast Management Command frame structure is defined
in <linux/can/bcm.h>:
struct bcm_msg_head {
__u32 opcode;
__u32 flags;
__u32 count;
struct timeval ival1, ival2;
canid_t can_id;
__u32 nframes;
struct can_frame frames[0];
}
`bcm_msg_head` must be 8 bytes aligned because of the `frames` member (see
`struct can_frame` definition). Must use native, not standard, types for packing.
"""
bcm_cmd_msg_fmt = "@3I4l2I"
bcm_cmd_msg_fmt += "x" * (struct.calcsize(bcm_cmd_msg_fmt) % 8)
def setUp(self):
self.s = socket.socket(socket.PF_CAN, socket.SOCK_RAW, socket.CAN_RAW)
self.addCleanup(self.s.close)
try:
self.s.bind((self.interface,))
except OSError:
self.skipTest('network interface `%s` does not exist' %
self.interface)
class SocketRDSTest(unittest.TestCase):
"""To be able to run this test, the `rds` kernel module must be loaded:
# modprobe rds
"""
bufsize = 8192
def setUp(self):
self.serv = socket.socket(socket.PF_RDS, socket.SOCK_SEQPACKET, 0)
self.addCleanup(self.serv.close)
try:
self.port = support.bind_port(self.serv)
except OSError:
self.skipTest('unable to bind RDS socket')
class ThreadableTest:
"""Threadable Test class
The ThreadableTest class makes it easy to create a threaded
client/server pair from an existing unit test. To create a
new threaded class from an existing unit test, use multiple
inheritance:
class NewClass (OldClass, ThreadableTest):
pass
This class defines two new fixture functions with obvious
purposes for overriding:
clientSetUp ()
clientTearDown ()
Any new test functions within the class must then define
tests in pairs, where the test name is preceded with a
'_' to indicate the client portion of the test. Ex:
def testFoo(self):
# Server portion
def _testFoo(self):
# Client portion
Any exceptions raised by the clients during their tests
are caught and transferred to the main thread to alert
the testing framework.
Note, the server setup function cannot call any blocking
functions that rely on the client thread during setup,
unless serverExplicitReady() is called just before
the blocking call (such as in setting up a client/server
connection and performing the accept() in setUp()).
"""
def __init__(self):
# Swap the true setup function
self.__setUp = self.setUp
self.__tearDown = self.tearDown
self.setUp = self._setUp
self.tearDown = self._tearDown
def serverExplicitReady(self):
"""This method allows the server to explicitly indicate that
it wants the client thread to proceed. This is useful if the
server is about to execute a blocking routine that is
dependent upon the client thread during its setup routine."""
self.server_ready.set()
def _setUp(self):
self.server_ready = threading.Event()
self.client_ready = threading.Event()
self.done = threading.Event()
self.queue = queue.Queue(1)
self.server_crashed = False
# Do some munging to start the client test.
methodname = self.id()
i = methodname.rfind('.')
methodname = methodname[i+1:]
test_method = getattr(self, '_' + methodname)
self.client_thread = thread.start_new_thread(
self.clientRun, (test_method,))
try:
self.__setUp()
except:
self.server_crashed = True
raise
finally:
self.server_ready.set()
self.client_ready.wait()
def _tearDown(self):
self.__tearDown()
self.done.wait()
if self.queue.qsize():
exc = self.queue.get()
raise exc
def clientRun(self, test_func):
self.server_ready.wait()
self.clientSetUp()
self.client_ready.set()
if self.server_crashed:
self.clientTearDown()
return
if not hasattr(test_func, '__call__'):
raise TypeError("test_func must be a callable function")
try:
test_func()
except BaseException as e:
self.queue.put(e)
finally:
self.clientTearDown()
def clientSetUp(self):
raise NotImplementedError("clientSetUp must be implemented.")
def clientTearDown(self):
self.done.set()
thread.exit()
class ThreadedTCPSocketTest(SocketTCPTest, ThreadableTest):
def __init__(self, methodName='runTest'):
SocketTCPTest.__init__(self, methodName=methodName)
ThreadableTest.__init__(self)
def clientSetUp(self):
self.cli = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
def clientTearDown(self):
self.cli.close()
self.cli = None
ThreadableTest.clientTearDown(self)
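# Illustrative pairing (hypothetical test, not part of the original suite)
# following the testFoo/_testFoo convention documented in ThreadableTest:
# the server half accepts and echoes, the client half connects and checks.
class ExampleEchoTest(ThreadedTCPSocketTest):
    def testEcho(self):                      # server portion
        conn, addr = self.serv.accept()
        conn.sendall(conn.recv(1024))
        conn.close()
    def _testEcho(self):                     # client portion
        self.cli.connect((HOST, self.port))
        self.cli.sendall(b'ping')
        self.assertEqual(self.cli.recv(1024), b'ping')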
class ThreadedUDPSocketTest(SocketUDPTest, ThreadableTest):
def __init__(self, methodName='runTest'):
SocketUDPTest.__init__(self, methodName=methodName)
ThreadableTest.__init__(self)
def clientSetUp(self):
self.cli = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
def clientTearDown(self):
self.cli.close()
self.cli = None
ThreadableTest.clientTearDown(self)
class ThreadedCANSocketTest(SocketCANTest, ThreadableTest):
def __init__(self, methodName='runTest'):
SocketCANTest.__init__(self, methodName=methodName)
ThreadableTest.__init__(self)
def clientSetUp(self):
self.cli = socket.socket(socket.PF_CAN, socket.SOCK_RAW, socket.CAN_RAW)
try:
self.cli.bind((self.interface,))
except OSError:
# skipTest should not be called here, and will be called in the
# server instead
pass
def clientTearDown(self):
self.cli.close()
self.cli = None
ThreadableTest.clientTearDown(self)
class ThreadedRDSSocketTest(SocketRDSTest, ThreadableTest):
def __init__(self, methodName='runTest'):
SocketRDSTest.__init__(self, methodName=methodName)
ThreadableTest.__init__(self)
def clientSetUp(self):
self.cli = socket.socket(socket.PF_RDS, socket.SOCK_SEQPACKET, 0)
try:
# RDS sockets must be bound explicitly to send or receive data
self.cli.bind((HOST, 0))
self.cli_addr = self.cli.getsockname()
except OSError:
# skipTest should not be called here, and will be called in the
# server instead
pass
def clientTearDown(self):
self.cli.close()
self.cli = None
ThreadableTest.clientTearDown(self)
class SocketConnectedTest(ThreadedTCPSocketTest):
"""Socket tests for client-server connection.
self.cli_conn is a client socket connected to the server. The
setUp() method guarantees that it is connected to the server.
"""
def __init__(self, methodName='runTest'):
ThreadedTCPSocketTest.__init__(self, methodName=methodName)
def setUp(self):
ThreadedTCPSocketTest.setUp(self)
# Indicate explicitly we're ready for the client thread to
# proceed and then perform the blocking call to accept
self.serverExplicitReady()
conn, addr = self.serv.accept()
self.cli_conn = conn
def tearDown(self):
self.cli_conn.close()
self.cli_conn = None
ThreadedTCPSocketTest.tearDown(self)
def clientSetUp(self):
ThreadedTCPSocketTest.clientSetUp(self)
self.cli.connect((HOST, self.port))
self.serv_conn = self.cli
def clientTearDown(self):
self.serv_conn.close()
self.serv_conn = None
ThreadedTCPSocketTest.clientTearDown(self)
class SocketPairTest(unittest.TestCase, ThreadableTest):
def __init__(self, methodName='runTest'):
unittest.TestCase.__init__(self, methodName=methodName)
ThreadableTest.__init__(self)
def setUp(self):
self.serv, self.cli = socket.socketpair()
def tearDown(self):
self.serv.close()
self.serv = None
def clientSetUp(self):
pass
def clientTearDown(self):
self.cli.close()
self.cli = None
ThreadableTest.clientTearDown(self)
# The following classes are used by the sendmsg()/recvmsg() tests.
# Combining, for instance, ConnectedStreamTestMixin and TCPTestBase
# gives a drop-in replacement for SocketConnectedTest, but different
# address families can be used, and the attributes serv_addr and
# cli_addr will be set to the addresses of the endpoints.
class SocketTestBase(unittest.TestCase):
"""A base class for socket tests.
Subclasses must provide methods newSocket() to return a new socket
and bindSock(sock) to bind it to an unused address.
Creates a socket self.serv and sets self.serv_addr to its address.
"""
def setUp(self):
self.serv = self.newSocket()
self.bindServer()
def bindServer(self):
"""Bind server socket and set self.serv_addr to its address."""
self.bindSock(self.serv)
self.serv_addr = self.serv.getsockname()
def tearDown(self):
self.serv.close()
self.serv = None
class SocketListeningTestMixin(SocketTestBase):
"""Mixin to listen on the server socket."""
def setUp(self):
super().setUp()
self.serv.listen()
class ThreadedSocketTestMixin(ThreadSafeCleanupTestCase, SocketTestBase,
ThreadableTest):
"""Mixin to add client socket and allow client/server tests.
Client socket is self.cli and its address is self.cli_addr. See
ThreadableTest for usage information.
"""
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
ThreadableTest.__init__(self)
def clientSetUp(self):
self.cli = self.newClientSocket()
self.bindClient()
def newClientSocket(self):
"""Return a new socket for use as client."""
return self.newSocket()
def bindClient(self):
"""Bind client socket and set self.cli_addr to its address."""
self.bindSock(self.cli)
self.cli_addr = self.cli.getsockname()
def clientTearDown(self):
self.cli.close()
self.cli = None
ThreadableTest.clientTearDown(self)
class ConnectedStreamTestMixin(SocketListeningTestMixin,
ThreadedSocketTestMixin):
"""Mixin to allow client/server stream tests with connected client.
Server's socket representing connection to client is self.cli_conn
and client's connection to server is self.serv_conn. (Based on
SocketConnectedTest.)
"""
def setUp(self):
super().setUp()
# Indicate explicitly we're ready for the client thread to
# proceed and then perform the blocking call to accept
self.serverExplicitReady()
conn, addr = self.serv.accept()
self.cli_conn = conn
def tearDown(self):
self.cli_conn.close()
self.cli_conn = None
super().tearDown()
def clientSetUp(self):
super().clientSetUp()
self.cli.connect(self.serv_addr)
self.serv_conn = self.cli
def clientTearDown(self):
self.serv_conn.close()
self.serv_conn = None
super().clientTearDown()
class UnixSocketTestBase(SocketTestBase):
"""Base class for Unix-domain socket tests."""
# This class is used for file descriptor passing tests, so we
# create the sockets in a private directory so that other users
# can't send anything that might be problematic for a privileged
# user running the tests.
def setUp(self):
self.dir_path = tempfile.mkdtemp()
self.addCleanup(os.rmdir, self.dir_path)
super().setUp()
def bindSock(self, sock):
path = tempfile.mktemp(dir=self.dir_path)
sock.bind(path)
self.addCleanup(support.unlink, path)
class UnixStreamBase(UnixSocketTestBase):
"""Base class for Unix-domain SOCK_STREAM tests."""
def newSocket(self):
return socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
class InetTestBase(SocketTestBase):
"""Base class for IPv4 socket tests."""
host = HOST
def setUp(self):
super().setUp()
self.port = self.serv_addr[1]
def bindSock(self, sock):
support.bind_port(sock, host=self.host)
class TCPTestBase(InetTestBase):
"""Base class for TCP-over-IPv4 tests."""
def newSocket(self):
return socket.socket(socket.AF_INET, socket.SOCK_STREAM)
class UDPTestBase(InetTestBase):
"""Base class for UDP-over-IPv4 tests."""
def newSocket(self):
return socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
class SCTPStreamBase(InetTestBase):
"""Base class for SCTP tests in one-to-one (SOCK_STREAM) mode."""
def newSocket(self):
return socket.socket(socket.AF_INET, socket.SOCK_STREAM,
socket.IPPROTO_SCTP)
class Inet6TestBase(InetTestBase):
"""Base class for IPv6 socket tests."""
host = support.HOSTv6
class UDP6TestBase(Inet6TestBase):
"""Base class for UDP-over-IPv6 tests."""
def newSocket(self):
return socket.socket(socket.AF_INET6, socket.SOCK_DGRAM)
# Test-skipping decorators for use with ThreadableTest.
def skipWithClientIf(condition, reason):
"""Skip decorated test if condition is true, add client_skip decorator.
If the decorated object is not a class, sets its attribute
"client_skip" to a decorator which will return an empty function
if the test is to be skipped, or the original function if it is
not. This can be used to avoid running the client part of a
skipped test when using ThreadableTest.
"""
def client_pass(*args, **kwargs):
pass
def skipdec(obj):
retval = unittest.skip(reason)(obj)
if not isinstance(obj, type):
retval.client_skip = lambda f: client_pass
return retval
def noskipdec(obj):
if not (isinstance(obj, type) or hasattr(obj, "client_skip")):
obj.client_skip = lambda f: f
return obj
return skipdec if condition else noskipdec
def requireAttrs(obj, *attributes):
"""Skip decorated test if obj is missing any of the given attributes.
Sets client_skip attribute as skipWithClientIf() does.
"""
missing = [name for name in attributes if not hasattr(obj, name)]
return skipWithClientIf(
missing, "don't have " + ", ".join(name for name in missing))
def requireSocket(*args):
"""Skip decorated test if a socket cannot be created with given arguments.
When an argument is given as a string, will use the value of that
attribute of the socket module, or skip the test if it doesn't
exist. Sets client_skip attribute as skipWithClientIf() does.
"""
err = None
missing = [obj for obj in args if
isinstance(obj, str) and not hasattr(socket, obj)]
if missing:
err = "don't have " + ", ".join(name for name in missing)
else:
callargs = [getattr(socket, obj) if isinstance(obj, str) else obj
for obj in args]
try:
s = socket.socket(*callargs)
except OSError as e:
# XXX: check errno?
err = str(e)
else:
s.close()
return skipWithClientIf(
err is not None,
"can't create socket({0}): {1}".format(
", ".join(str(o) for o in args), err))
#######################################################################
## Begin Tests
class GeneralModuleTests(unittest.TestCase):
def test_SocketType_is_socketobject(self):
import _socket
self.assertTrue(socket.SocketType is _socket.socket)
s = socket.socket()
self.assertIsInstance(s, socket.SocketType)
s.close()
def test_repr(self):
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
with s:
self.assertIn('fd=%i' % s.fileno(), repr(s))
self.assertIn('family=%s' % socket.AF_INET, repr(s))
self.assertIn('type=%s' % socket.SOCK_STREAM, repr(s))
self.assertIn('proto=0', repr(s))
self.assertNotIn('raddr', repr(s))
s.bind(('127.0.0.1', 0))
self.assertIn('laddr', repr(s))
self.assertIn(str(s.getsockname()), repr(s))
self.assertIn('[closed]', repr(s))
self.assertNotIn('laddr', repr(s))
@unittest.skipUnless(_socket is not None, 'need _socket module')
def test_csocket_repr(self):
s = _socket.socket(_socket.AF_INET, _socket.SOCK_STREAM)
try:
expected = ('<socket object, fd=%s, family=%s, type=%s, proto=%s>'
% (s.fileno(), s.family, s.type, s.proto))
self.assertEqual(repr(s), expected)
finally:
s.close()
expected = ('<socket object, fd=-1, family=%s, type=%s, proto=%s>'
% (s.family, s.type, s.proto))
self.assertEqual(repr(s), expected)
def test_weakref(self):
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
p = proxy(s)
self.assertEqual(p.fileno(), s.fileno())
s.close()
s = None
try:
p.fileno()
except ReferenceError:
pass
else:
self.fail('Socket proxy still exists')
def testSocketError(self):
# Testing socket module exceptions
msg = "Error raising socket exception (%s)."
with self.assertRaises(OSError, msg=msg % 'OSError'):
raise OSError
with self.assertRaises(OSError, msg=msg % 'socket.herror'):
raise socket.herror
with self.assertRaises(OSError, msg=msg % 'socket.gaierror'):
raise socket.gaierror
def testSendtoErrors(self):
# Testing that sendto doesn't mask failures. See #10169.
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
self.addCleanup(s.close)
s.bind(('', 0))
sockname = s.getsockname()
# 2 args
with self.assertRaises(TypeError) as cm:
s.sendto('\u2620', sockname)
self.assertEqual(str(cm.exception),
"a bytes-like object is required, not 'str'")
with self.assertRaises(TypeError) as cm:
s.sendto(5j, sockname)
self.assertEqual(str(cm.exception),
"a bytes-like object is required, not 'complex'")
with self.assertRaises(TypeError) as cm:
s.sendto(b'foo', None)
self.assertIn('not NoneType',str(cm.exception))
# 3 args
with self.assertRaises(TypeError) as cm:
s.sendto('\u2620', 0, sockname)
self.assertEqual(str(cm.exception),
"a bytes-like object is required, not 'str'")
with self.assertRaises(TypeError) as cm:
s.sendto(5j, 0, sockname)
self.assertEqual(str(cm.exception),
"a bytes-like object is required, not 'complex'")
with self.assertRaises(TypeError) as cm:
s.sendto(b'foo', 0, None)
self.assertIn('not NoneType', str(cm.exception))
with self.assertRaises(TypeError) as cm:
s.sendto(b'foo', 'bar', sockname)
self.assertIn('an integer is required', str(cm.exception))
with self.assertRaises(TypeError) as cm:
s.sendto(b'foo', None, None)
self.assertIn('an integer is required', str(cm.exception))
# wrong number of args
with self.assertRaises(TypeError) as cm:
s.sendto(b'foo')
self.assertIn('(1 given)', str(cm.exception))
with self.assertRaises(TypeError) as cm:
s.sendto(b'foo', 0, sockname, 4)
self.assertIn('(4 given)', str(cm.exception))
def testCrucialConstants(self):
# Testing for mission critical constants
socket.AF_INET
socket.SOCK_STREAM
socket.SOCK_DGRAM
socket.SOCK_RAW
socket.SOCK_RDM
socket.SOCK_SEQPACKET
socket.SOL_SOCKET
socket.SO_REUSEADDR
def testHostnameRes(self):
# Testing hostname resolution mechanisms
hostname = socket.gethostname()
try:
ip = socket.gethostbyname(hostname)
except OSError:
# Probably name lookup wasn't set up right; skip this test
self.skipTest('name lookup failure')
self.assertTrue(ip.find('.') >= 0, "Error resolving host to ip.")
try:
hname, aliases, ipaddrs = socket.gethostbyaddr(ip)
except OSError:
# Probably a similar problem as above; skip this test
self.skipTest('name lookup failure')
all_host_names = [hostname, hname] + aliases
fqhn = socket.getfqdn(ip)
if fqhn not in all_host_names:
self.fail("Error testing host resolution mechanisms. (fqdn: %s, all: %s)" % (fqhn, repr(all_host_names)))
def test_host_resolution(self):
for addr in ['0.1.1.~1', '1+.1.1.1', '::1q', '::1::2',
'1:1:1:1:1:1:1:1:1']:
self.assertRaises(OSError, socket.gethostbyname, addr)
self.assertRaises(OSError, socket.gethostbyaddr, addr)
for addr in [support.HOST, '10.0.0.1', '255.255.255.255']:
self.assertEqual(socket.gethostbyname(addr), addr)
# we don't test support.HOSTv6 because there's a chance it doesn't have
# a matching name entry (e.g. 'ip6-localhost')
for host in [support.HOST]:
self.assertIn(host, socket.gethostbyaddr(host)[2])
@unittest.skipUnless(hasattr(socket, 'sethostname'), "test needs socket.sethostname()")
@unittest.skipUnless(hasattr(socket, 'gethostname'), "test needs socket.gethostname()")
def test_sethostname(self):
oldhn = socket.gethostname()
try:
socket.sethostname('new')
except OSError as e:
if e.errno == errno.EPERM:
self.skipTest("test should be run as root")
else:
raise
try:
# running test as root!
self.assertEqual(socket.gethostname(), 'new')
# Should work with bytes objects too
socket.sethostname(b'bar')
self.assertEqual(socket.gethostname(), 'bar')
finally:
socket.sethostname(oldhn)
@unittest.skipUnless(hasattr(socket, 'if_nameindex'),
'socket.if_nameindex() not available.')
def testInterfaceNameIndex(self):
interfaces = socket.if_nameindex()
for index, name in interfaces:
self.assertIsInstance(index, int)
self.assertIsInstance(name, str)
# interface indices are non-zero integers
self.assertGreater(index, 0)
_index = socket.if_nametoindex(name)
self.assertIsInstance(_index, int)
self.assertEqual(index, _index)
_name = socket.if_indextoname(index)
self.assertIsInstance(_name, str)
self.assertEqual(name, _name)
@unittest.skipUnless(hasattr(socket, 'if_nameindex'),
'socket.if_nameindex() not available.')
def testInvalidInterfaceNameIndex(self):
# test nonexistent interface index/name
self.assertRaises(OSError, socket.if_indextoname, 0)
self.assertRaises(OSError, socket.if_nametoindex, '_DEADBEEF')
# test with invalid values
self.assertRaises(TypeError, socket.if_nametoindex, 0)
self.assertRaises(TypeError, socket.if_indextoname, '_DEADBEEF')
@unittest.skipUnless(hasattr(sys, 'getrefcount'),
'test needs sys.getrefcount()')
def testRefCountGetNameInfo(self):
# Testing reference count for getnameinfo
try:
# On some versions, this loses a reference
orig = sys.getrefcount(__name__)
socket.getnameinfo(__name__,0)
except TypeError:
if sys.getrefcount(__name__) != orig:
self.fail("socket.getnameinfo loses a reference")
def testInterpreterCrash(self):
# Making sure getnameinfo doesn't crash the interpreter
try:
# On some versions, this crashes the interpreter.
socket.getnameinfo(('x', 0, 0, 0), 0)
except OSError:
pass
def testNtoH(self):
# This just checks that htons etc. are their own inverse,
# when looking at the lower 16 or 32 bits.
sizes = {socket.htonl: 32, socket.ntohl: 32,
socket.htons: 16, socket.ntohs: 16}
for func, size in sizes.items():
mask = (1<<size) - 1
for i in (0, 1, 0xffff, ~0xffff, 2, 0x01234567, 0x76543210):
self.assertEqual(i & mask, func(func(i&mask)) & mask)
swapped = func(mask)
self.assertEqual(swapped & mask, mask)
self.assertRaises(OverflowError, func, 1<<34)
def testNtoHErrors(self):
good_values = [ 1, 2, 3, 1, 2, 3 ]
bad_values = [ -1, -2, -3, -1, -2, -3 ]
for k in good_values:
socket.ntohl(k)
socket.ntohs(k)
socket.htonl(k)
socket.htons(k)
for k in bad_values:
self.assertRaises(OverflowError, socket.ntohl, k)
self.assertRaises(OverflowError, socket.ntohs, k)
self.assertRaises(OverflowError, socket.htonl, k)
self.assertRaises(OverflowError, socket.htons, k)
def testGetServBy(self):
eq = self.assertEqual
# Find one service that exists, then check all the related interfaces.
# I've ordered this by protocols that have both a tcp and udp
# protocol, at least for modern Linuxes.
if (sys.platform.startswith(('freebsd', 'netbsd', 'gnukfreebsd'))
or sys.platform in ('linux', 'darwin')):
# avoid the 'echo' service on this platform, as there is an
# assumption-breaking non-standard port/protocol entry
services = ('daytime', 'qotd', 'domain')
else:
services = ('echo', 'daytime', 'domain')
for service in services:
try:
port = socket.getservbyname(service, 'tcp')
break
except OSError:
pass
else:
raise OSError
# Try same call with optional protocol omitted
port2 = socket.getservbyname(service)
eq(port, port2)
# Try udp, but don't barf if it doesn't exist
try:
udpport = socket.getservbyname(service, 'udp')
except OSError:
udpport = None
else:
eq(udpport, port)
# Now make sure the lookup by port returns the same service name
eq(socket.getservbyport(port2), service)
eq(socket.getservbyport(port, 'tcp'), service)
if udpport is not None:
eq(socket.getservbyport(udpport, 'udp'), service)
# Make sure getservbyport does not accept out of range ports.
self.assertRaises(OverflowError, socket.getservbyport, -1)
self.assertRaises(OverflowError, socket.getservbyport, 65536)
def testDefaultTimeout(self):
# Testing default timeout
# The default timeout should initially be None
self.assertEqual(socket.getdefaulttimeout(), None)
s = socket.socket()
self.assertEqual(s.gettimeout(), None)
s.close()
# Set the default timeout to 10, and see if it propagates
socket.setdefaulttimeout(10)
self.assertEqual(socket.getdefaulttimeout(), 10)
s = socket.socket()
self.assertEqual(s.gettimeout(), 10)
s.close()
# Reset the default timeout to None, and see if it propagates
socket.setdefaulttimeout(None)
self.assertEqual(socket.getdefaulttimeout(), None)
s = socket.socket()
self.assertEqual(s.gettimeout(), None)
s.close()
# Check that setting it to an invalid value raises ValueError
self.assertRaises(ValueError, socket.setdefaulttimeout, -1)
# Check that setting it to an invalid type raises TypeError
self.assertRaises(TypeError, socket.setdefaulttimeout, "spam")
@unittest.skipUnless(hasattr(socket, 'inet_aton'),
'test needs socket.inet_aton()')
def testIPv4_inet_aton_fourbytes(self):
# Test that issue1008086 and issue767150 are fixed.
# It must return 4 bytes.
self.assertEqual(b'\x00'*4, socket.inet_aton('0.0.0.0'))
self.assertEqual(b'\xff'*4, socket.inet_aton('255.255.255.255'))
@unittest.skipUnless(hasattr(socket, 'inet_pton'),
'test needs socket.inet_pton()')
def testIPv4toString(self):
from socket import inet_aton as f, inet_pton, AF_INET
g = lambda a: inet_pton(AF_INET, a)
assertInvalid = lambda func,a: self.assertRaises(
(OSError, ValueError), func, a
)
self.assertEqual(b'\x00\x00\x00\x00', f('0.0.0.0'))
self.assertEqual(b'\xff\x00\xff\x00', f('255.0.255.0'))
self.assertEqual(b'\xaa\xaa\xaa\xaa', f('170.170.170.170'))
self.assertEqual(b'\x01\x02\x03\x04', f('1.2.3.4'))
self.assertEqual(b'\xff\xff\xff\xff', f('255.255.255.255'))
assertInvalid(f, '0.0.0.')
assertInvalid(f, '300.0.0.0')
assertInvalid(f, 'a.0.0.0')
assertInvalid(f, '1.2.3.4.5')
assertInvalid(f, '::1')
self.assertEqual(b'\x00\x00\x00\x00', g('0.0.0.0'))
self.assertEqual(b'\xff\x00\xff\x00', g('255.0.255.0'))
self.assertEqual(b'\xaa\xaa\xaa\xaa', g('170.170.170.170'))
self.assertEqual(b'\xff\xff\xff\xff', g('255.255.255.255'))
assertInvalid(g, '0.0.0.')
assertInvalid(g, '300.0.0.0')
assertInvalid(g, 'a.0.0.0')
assertInvalid(g, '1.2.3.4.5')
assertInvalid(g, '::1')
@unittest.skipUnless(hasattr(socket, 'inet_pton'),
'test needs socket.inet_pton()')
def testIPv6toString(self):
try:
from socket import inet_pton, AF_INET6, has_ipv6
if not has_ipv6:
self.skipTest('IPv6 not available')
except ImportError:
self.skipTest('could not import needed symbols from socket')
if sys.platform == "win32":
try:
inet_pton(AF_INET6, '::')
except OSError as e:
if e.winerror == 10022:
self.skipTest('IPv6 might not be supported')
f = lambda a: inet_pton(AF_INET6, a)
assertInvalid = lambda a: self.assertRaises(
(OSError, ValueError), f, a
)
self.assertEqual(b'\x00' * 16, f('::'))
self.assertEqual(b'\x00' * 16, f('0::0'))
self.assertEqual(b'\x00\x01' + b'\x00' * 14, f('1::'))
self.assertEqual(
b'\x45\xef\x76\xcb\x00\x1a\x56\xef\xaf\xeb\x0b\xac\x19\x24\xae\xae',
f('45ef:76cb:1a:56ef:afeb:bac:1924:aeae')
)
self.assertEqual(
b'\xad\x42\x0a\xbc' + b'\x00' * 4 + b'\x01\x27\x00\x00\x02\x54\x00\x02',
f('ad42:abc::127:0:254:2')
)
self.assertEqual(b'\x00\x12\x00\x0a' + b'\x00' * 12, f('12:a::'))
assertInvalid('0x20::')
assertInvalid(':::')
assertInvalid('::0::')
assertInvalid('1::abc::')
assertInvalid('1::abc::def')
assertInvalid('1:2:3:4:5:6:')
assertInvalid('1:2:3:4:5:6')
assertInvalid('1:2:3:4:5:6:7:8:')
assertInvalid('1:2:3:4:5:6:7:8:0')
self.assertEqual(b'\x00' * 12 + b'\xfe\x2a\x17\x40',
f('::254.42.23.64')
)
self.assertEqual(
b'\x00\x42' + b'\x00' * 8 + b'\xa2\x9b\xfe\x2a\x17\x40',
f('42::a29b:254.42.23.64')
)
self.assertEqual(
b'\x00\x42\xa8\xb9\x00\x00\x00\x02\xff\xff\xa2\x9b\xfe\x2a\x17\x40',
f('42:a8b9:0:2:ffff:a29b:254.42.23.64')
)
assertInvalid('255.254.253.252')
assertInvalid('1::260.2.3.0')
assertInvalid('1::0.be.e.0')
assertInvalid('1:2:3:4:5:6:7:1.2.3.4')
assertInvalid('::1.2.3.4:0')
assertInvalid('0.100.200.0:3:4:5:6:7:8')
@unittest.skipUnless(hasattr(socket, 'inet_ntop'),
'test needs socket.inet_ntop()')
def testStringToIPv4(self):
from socket import inet_ntoa as f, inet_ntop, AF_INET
g = lambda a: inet_ntop(AF_INET, a)
assertInvalid = lambda func,a: self.assertRaises(
(OSError, ValueError), func, a
)
self.assertEqual('1.0.1.0', f(b'\x01\x00\x01\x00'))
self.assertEqual('170.85.170.85', f(b'\xaa\x55\xaa\x55'))
self.assertEqual('255.255.255.255', f(b'\xff\xff\xff\xff'))
self.assertEqual('1.2.3.4', f(b'\x01\x02\x03\x04'))
assertInvalid(f, b'\x00' * 3)
assertInvalid(f, b'\x00' * 5)
assertInvalid(f, b'\x00' * 16)
self.assertEqual('1.0.1.0', g(b'\x01\x00\x01\x00'))
self.assertEqual('170.85.170.85', g(b'\xaa\x55\xaa\x55'))
self.assertEqual('255.255.255.255', g(b'\xff\xff\xff\xff'))
assertInvalid(g, b'\x00' * 3)
assertInvalid(g, b'\x00' * 5)
assertInvalid(g, b'\x00' * 16)
@unittest.skipUnless(hasattr(socket, 'inet_ntop'),
'test needs socket.inet_ntop()')
def testStringToIPv6(self):
try:
from socket import inet_ntop, AF_INET6, has_ipv6
if not has_ipv6:
self.skipTest('IPv6 not available')
except ImportError:
self.skipTest('could not import needed symbols from socket')
if sys.platform == "win32":
try:
inet_ntop(AF_INET6, b'\x00' * 16)
except OSError as e:
if e.winerror == 10022:
self.skipTest('IPv6 might not be supported')
f = lambda a: inet_ntop(AF_INET6, a)
assertInvalid = lambda a: self.assertRaises(
(OSError, ValueError), f, a
)
self.assertEqual('::', f(b'\x00' * 16))
self.assertEqual('::1', f(b'\x00' * 15 + b'\x01'))
self.assertEqual(
'aef:b01:506:1001:ffff:9997:55:170',
f(b'\x0a\xef\x0b\x01\x05\x06\x10\x01\xff\xff\x99\x97\x00\x55\x01\x70')
)
assertInvalid(b'\x12' * 15)
assertInvalid(b'\x12' * 17)
assertInvalid(b'\x12' * 4)
# XXX The following don't test module-level functionality...
def testSockName(self):
# Testing getsockname()
port = support.find_unused_port()
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.addCleanup(sock.close)
sock.bind(("0.0.0.0", port))
name = sock.getsockname()
# XXX(nnorwitz): http://tinyurl.com/os5jz seems to indicate
# it is reasonable to get the host's addr in addition to 0.0.0.0.
# At least for eCos. This is required for the S/390 to pass.
try:
my_ip_addr = socket.gethostbyname(socket.gethostname())
except OSError:
# Probably name lookup wasn't set up right; skip this test
self.skipTest('name lookup failure')
self.assertIn(name[0], ("0.0.0.0", my_ip_addr), '%s invalid' % name[0])
self.assertEqual(name[1], port)
def testGetSockOpt(self):
# Testing getsockopt()
# We know a socket should start with reuse == 0 (SO_REUSEADDR unset)
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.addCleanup(sock.close)
reuse = sock.getsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR)
self.assertFalse(reuse != 0, "initial mode is reuse")
def testSetSockOpt(self):
# Testing setsockopt()
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.addCleanup(sock.close)
sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
reuse = sock.getsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR)
self.assertFalse(reuse == 0, "failed to set reuse mode")
def testSendAfterClose(self):
# testing send() after close() with timeout
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.settimeout(1)
sock.close()
self.assertRaises(OSError, sock.send, b"spam")
def testNewAttributes(self):
# testing .family, .type and .protocol
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.assertEqual(sock.family, socket.AF_INET)
if hasattr(socket, 'SOCK_CLOEXEC'):
self.assertIn(sock.type,
(socket.SOCK_STREAM | socket.SOCK_CLOEXEC,
socket.SOCK_STREAM))
else:
self.assertEqual(sock.type, socket.SOCK_STREAM)
self.assertEqual(sock.proto, 0)
sock.close()
def test_getsockaddrarg(self):
sock = socket.socket()
self.addCleanup(sock.close)
port = support.find_unused_port()
big_port = port + 65536
neg_port = port - 65536
self.assertRaises(OverflowError, sock.bind, (HOST, big_port))
self.assertRaises(OverflowError, sock.bind, (HOST, neg_port))
# Since find_unused_port() is inherently subject to race conditions, we
# call it a couple times if necessary.
for i in itertools.count():
port = support.find_unused_port()
try:
sock.bind((HOST, port))
except OSError as e:
if e.errno != errno.EADDRINUSE or i == 5:
raise
else:
break
@unittest.skipUnless(os.name == "nt", "Windows specific")
def test_sock_ioctl(self):
self.assertTrue(hasattr(socket.socket, 'ioctl'))
self.assertTrue(hasattr(socket, 'SIO_RCVALL'))
self.assertTrue(hasattr(socket, 'RCVALL_ON'))
self.assertTrue(hasattr(socket, 'RCVALL_OFF'))
self.assertTrue(hasattr(socket, 'SIO_KEEPALIVE_VALS'))
s = socket.socket()
self.addCleanup(s.close)
self.assertRaises(ValueError, s.ioctl, -1, None)
s.ioctl(socket.SIO_KEEPALIVE_VALS, (1, 100, 100))
def testGetaddrinfo(self):
try:
socket.getaddrinfo('localhost', 80)
except socket.gaierror as err:
if err.errno == socket.EAI_SERVICE:
# see http://bugs.python.org/issue1282647
self.skipTest("buggy libc version")
raise
# len of every sequence is supposed to be == 5
for info in socket.getaddrinfo(HOST, None):
self.assertEqual(len(info), 5)
# host can be a domain name, a string representation of an
# IPv4/v6 address or None
socket.getaddrinfo('localhost', 80)
socket.getaddrinfo('127.0.0.1', 80)
socket.getaddrinfo(None, 80)
if support.IPV6_ENABLED:
socket.getaddrinfo('::1', 80)
# port can be a string service name such as "http", a numeric
# port number or None
socket.getaddrinfo(HOST, "http")
socket.getaddrinfo(HOST, 80)
socket.getaddrinfo(HOST, None)
# test family and socktype filters
infos = socket.getaddrinfo(HOST, 80, socket.AF_INET, socket.SOCK_STREAM)
for family, type, _, _, _ in infos:
self.assertEqual(family, socket.AF_INET)
self.assertEqual(str(family), 'AddressFamily.AF_INET')
self.assertEqual(type, socket.SOCK_STREAM)
self.assertEqual(str(type), 'SocketKind.SOCK_STREAM')
infos = socket.getaddrinfo(HOST, None, 0, socket.SOCK_STREAM)
for _, socktype, _, _, _ in infos:
self.assertEqual(socktype, socket.SOCK_STREAM)
# test proto and flags arguments
socket.getaddrinfo(HOST, None, 0, 0, socket.SOL_TCP)
socket.getaddrinfo(HOST, None, 0, 0, 0, socket.AI_PASSIVE)
# a server willing to support both IPv4 and IPv6 will
# usually do this
socket.getaddrinfo(None, 0, socket.AF_UNSPEC, socket.SOCK_STREAM, 0,
socket.AI_PASSIVE)
# test keyword arguments
a = socket.getaddrinfo(HOST, None)
b = socket.getaddrinfo(host=HOST, port=None)
self.assertEqual(a, b)
a = socket.getaddrinfo(HOST, None, socket.AF_INET)
b = socket.getaddrinfo(HOST, None, family=socket.AF_INET)
self.assertEqual(a, b)
a = socket.getaddrinfo(HOST, None, 0, socket.SOCK_STREAM)
b = socket.getaddrinfo(HOST, None, type=socket.SOCK_STREAM)
self.assertEqual(a, b)
a = socket.getaddrinfo(HOST, None, 0, 0, socket.SOL_TCP)
b = socket.getaddrinfo(HOST, None, proto=socket.SOL_TCP)
self.assertEqual(a, b)
a = socket.getaddrinfo(HOST, None, 0, 0, 0, socket.AI_PASSIVE)
b = socket.getaddrinfo(HOST, None, flags=socket.AI_PASSIVE)
self.assertEqual(a, b)
a = socket.getaddrinfo(None, 0, socket.AF_UNSPEC, socket.SOCK_STREAM, 0,
socket.AI_PASSIVE)
b = socket.getaddrinfo(host=None, port=0, family=socket.AF_UNSPEC,
type=socket.SOCK_STREAM, proto=0,
flags=socket.AI_PASSIVE)
self.assertEqual(a, b)
# Issue #6697.
self.assertRaises(UnicodeEncodeError, socket.getaddrinfo, 'localhost', '\uD800')
# Issue 17269: test workaround for OS X platform bug segfault
if hasattr(socket, 'AI_NUMERICSERV'):
try:
# The arguments here are undefined and the call may succeed
# or fail. All we care here is that it doesn't segfault.
socket.getaddrinfo("localhost", None, 0, 0, 0,
socket.AI_NUMERICSERV)
except socket.gaierror:
pass
def test_getnameinfo(self):
# only IP addresses are allowed
self.assertRaises(OSError, socket.getnameinfo, ('mail.python.org',0), 0)
@unittest.skipUnless(support.is_resource_enabled('network'),
'network is not enabled')
def test_idna(self):
# Check for internet access before running test (issue #12804).
try:
socket.gethostbyname('python.org')
except socket.gaierror as e:
if e.errno == socket.EAI_NODATA:
self.skipTest('internet access required for this test')
# these should all be successful
domain = 'испытание.pythontest.net'
socket.gethostbyname(domain)
socket.gethostbyname_ex(domain)
socket.getaddrinfo(domain,0,socket.AF_UNSPEC,socket.SOCK_STREAM)
# this may not work if the forward lookup chooses the IPv6 address, as that doesn't
# have a reverse entry yet
# socket.gethostbyaddr('испытание.python.org')
def check_sendall_interrupted(self, with_timeout):
# socketpair() is not strictly required, but it makes things easier.
if not hasattr(signal, 'alarm') or not hasattr(socket, 'socketpair'):
self.skipTest("signal.alarm and socket.socketpair required for this test")
# Our signal handlers clobber the C errno by calling a math function
# with an invalid domain value.
def ok_handler(*args):
self.assertRaises(ValueError, math.acosh, 0)
def raising_handler(*args):
self.assertRaises(ValueError, math.acosh, 0)
1 // 0
c, s = socket.socketpair()
old_alarm = signal.signal(signal.SIGALRM, raising_handler)
try:
if with_timeout:
# Just above the one second minimum for signal.alarm
c.settimeout(1.5)
with self.assertRaises(ZeroDivisionError):
signal.alarm(1)
c.sendall(b"x" * support.SOCK_MAX_SIZE)
if with_timeout:
signal.signal(signal.SIGALRM, ok_handler)
signal.alarm(1)
self.assertRaises(socket.timeout, c.sendall,
b"x" * support.SOCK_MAX_SIZE)
finally:
signal.alarm(0)
signal.signal(signal.SIGALRM, old_alarm)
c.close()
s.close()
def test_sendall_interrupted(self):
self.check_sendall_interrupted(False)
def test_sendall_interrupted_with_timeout(self):
self.check_sendall_interrupted(True)
def test_dealloc_warn(self):
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
r = repr(sock)
with self.assertWarns(ResourceWarning) as cm:
sock = None
support.gc_collect()
self.assertIn(r, str(cm.warning.args[0]))
# An open socket file object gets dereferenced after the socket
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
f = sock.makefile('rb')
r = repr(sock)
sock = None
support.gc_collect()
with self.assertWarns(ResourceWarning):
f = None
support.gc_collect()
def test_name_closed_socketio(self):
with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as sock:
fp = sock.makefile("rb")
fp.close()
self.assertEqual(repr(fp), "<_io.BufferedReader name=-1>")
def test_unusable_closed_socketio(self):
with socket.socket() as sock:
fp = sock.makefile("rb", buffering=0)
self.assertTrue(fp.readable())
self.assertFalse(fp.writable())
self.assertFalse(fp.seekable())
fp.close()
self.assertRaises(ValueError, fp.readable)
self.assertRaises(ValueError, fp.writable)
self.assertRaises(ValueError, fp.seekable)
def test_pickle(self):
sock = socket.socket()
with sock:
for protocol in range(pickle.HIGHEST_PROTOCOL + 1):
self.assertRaises(TypeError, pickle.dumps, sock, protocol)
def test_listen_backlog(self):
for backlog in 0, -1:
with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as srv:
srv.bind((HOST, 0))
srv.listen(backlog)
with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as srv:
srv.bind((HOST, 0))
srv.listen()
@support.cpython_only
def test_listen_backlog_overflow(self):
# Issue 15989
import _testcapi
srv = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
srv.bind((HOST, 0))
self.assertRaises(OverflowError, srv.listen, _testcapi.INT_MAX + 1)
srv.close()
@unittest.skipUnless(support.IPV6_ENABLED, 'IPv6 required for this test.')
def test_flowinfo(self):
self.assertRaises(OverflowError, socket.getnameinfo,
(support.HOSTv6, 0, 0xffffffff), 0)
with socket.socket(socket.AF_INET6, socket.SOCK_STREAM) as s:
self.assertRaises(OverflowError, s.bind, (support.HOSTv6, 0, -10))
def test_str_for_enums(self):
# Make sure that the AF_* and SOCK_* constants have enum-like string
# reprs.
with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
self.assertEqual(str(s.family), 'AddressFamily.AF_INET')
self.assertEqual(str(s.type), 'SocketKind.SOCK_STREAM')
@unittest.skipIf(os.name == 'nt', 'Will not work on Windows')
def test_unknown_socket_family_repr(self):
# Test that when created with a family that's not one of the known
# AF_*/SOCK_* constants, socket.family just returns the number.
#
# To do this we fool socket.socket into believing it already has an
# open fd because on this path it doesn't actually verify the family and
# type, and simply populates the socket object.
#
# On Windows this trick won't work, so the test is skipped.
fd, _ = tempfile.mkstemp()
with socket.socket(family=42424, type=13331, fileno=fd) as s:
self.assertEqual(s.family, 42424)
self.assertEqual(s.type, 13331)
@unittest.skipUnless(HAVE_SOCKET_CAN, 'SocketCan required for this test.')
class BasicCANTest(unittest.TestCase):
def testCrucialConstants(self):
socket.AF_CAN
socket.PF_CAN
socket.CAN_RAW
@unittest.skipUnless(hasattr(socket, "CAN_BCM"),
'socket.CAN_BCM required for this test.')
def testBCMConstants(self):
socket.CAN_BCM
# opcodes
socket.CAN_BCM_TX_SETUP # create (cyclic) transmission task
socket.CAN_BCM_TX_DELETE # remove (cyclic) transmission task
socket.CAN_BCM_TX_READ # read properties of (cyclic) transmission task
socket.CAN_BCM_TX_SEND # send one CAN frame
socket.CAN_BCM_RX_SETUP # create RX content filter subscription
socket.CAN_BCM_RX_DELETE # remove RX content filter subscription
socket.CAN_BCM_RX_READ # read properties of RX content filter subscription
socket.CAN_BCM_TX_STATUS # reply to TX_READ request
socket.CAN_BCM_TX_EXPIRED # notification on performed transmissions (count=0)
socket.CAN_BCM_RX_STATUS # reply to RX_READ request
socket.CAN_BCM_RX_TIMEOUT # cyclic message is absent
socket.CAN_BCM_RX_CHANGED # updated CAN frame (detected content change)
def testCreateSocket(self):
with socket.socket(socket.PF_CAN, socket.SOCK_RAW, socket.CAN_RAW) as s:
pass
@unittest.skipUnless(hasattr(socket, "CAN_BCM"),
'socket.CAN_BCM required for this test.')
def testCreateBCMSocket(self):
with socket.socket(socket.PF_CAN, socket.SOCK_DGRAM, socket.CAN_BCM) as s:
pass
def testBindAny(self):
with socket.socket(socket.PF_CAN, socket.SOCK_RAW, socket.CAN_RAW) as s:
s.bind(('', ))
def testTooLongInterfaceName(self):
# most systems limit IFNAMSIZ to 16, take 1024 to be sure
with socket.socket(socket.PF_CAN, socket.SOCK_RAW, socket.CAN_RAW) as s:
self.assertRaisesRegex(OSError, 'interface name too long',
s.bind, ('x' * 1024,))
@unittest.skipUnless(hasattr(socket, "CAN_RAW_LOOPBACK"),
'socket.CAN_RAW_LOOPBACK required for this test.')
def testLoopback(self):
with socket.socket(socket.PF_CAN, socket.SOCK_RAW, socket.CAN_RAW) as s:
for loopback in (0, 1):
s.setsockopt(socket.SOL_CAN_RAW, socket.CAN_RAW_LOOPBACK,
loopback)
self.assertEqual(loopback,
s.getsockopt(socket.SOL_CAN_RAW, socket.CAN_RAW_LOOPBACK))
@unittest.skipUnless(hasattr(socket, "CAN_RAW_FILTER"),
'socket.CAN_RAW_FILTER required for this test.')
def testFilter(self):
can_id, can_mask = 0x200, 0x700
can_filter = struct.pack("=II", can_id, can_mask)
with socket.socket(socket.PF_CAN, socket.SOCK_RAW, socket.CAN_RAW) as s:
s.setsockopt(socket.SOL_CAN_RAW, socket.CAN_RAW_FILTER, can_filter)
self.assertEqual(can_filter,
s.getsockopt(socket.SOL_CAN_RAW, socket.CAN_RAW_FILTER, 8))
@unittest.skipUnless(HAVE_SOCKET_CAN, 'SocketCan required for this test.')
@unittest.skipUnless(thread, 'Threading required for this test.')
class CANTest(ThreadedCANSocketTest):
def __init__(self, methodName='runTest'):
ThreadedCANSocketTest.__init__(self, methodName=methodName)
@classmethod
def build_can_frame(cls, can_id, data):
"""Build a CAN frame."""
can_dlc = len(data)
data = data.ljust(8, b'\x00')
return struct.pack(cls.can_frame_fmt, can_id, can_dlc, data)
@classmethod
def dissect_can_frame(cls, frame):
"""Dissect a CAN frame."""
can_id, can_dlc, data = struct.unpack(cls.can_frame_fmt, frame)
return (can_id, can_dlc, data[:can_dlc])
def testSendFrame(self):
cf, addr = self.s.recvfrom(self.bufsize)
self.assertEqual(self.cf, cf)
self.assertEqual(addr[0], self.interface)
self.assertEqual(addr[1], socket.AF_CAN)
def _testSendFrame(self):
self.cf = self.build_can_frame(0x00, b'\x01\x02\x03\x04\x05')
self.cli.send(self.cf)
def testSendMaxFrame(self):
cf, addr = self.s.recvfrom(self.bufsize)
self.assertEqual(self.cf, cf)
def _testSendMaxFrame(self):
self.cf = self.build_can_frame(0x00, b'\x07' * 8)
self.cli.send(self.cf)
def testSendMultiFrames(self):
cf, addr = self.s.recvfrom(self.bufsize)
self.assertEqual(self.cf1, cf)
cf, addr = self.s.recvfrom(self.bufsize)
self.assertEqual(self.cf2, cf)
def _testSendMultiFrames(self):
self.cf1 = self.build_can_frame(0x07, b'\x44\x33\x22\x11')
self.cli.send(self.cf1)
self.cf2 = self.build_can_frame(0x12, b'\x99\x22\x33')
self.cli.send(self.cf2)
@unittest.skipUnless(hasattr(socket, "CAN_BCM"),
'socket.CAN_BCM required for this test.')
def _testBCM(self):
cf, addr = self.cli.recvfrom(self.bufsize)
self.assertEqual(self.cf, cf)
can_id, can_dlc, data = self.dissect_can_frame(cf)
self.assertEqual(self.can_id, can_id)
self.assertEqual(self.data, data)
@unittest.skipUnless(hasattr(socket, "CAN_BCM"),
'socket.CAN_BCM required for this test.')
def testBCM(self):
bcm = socket.socket(socket.PF_CAN, socket.SOCK_DGRAM, socket.CAN_BCM)
self.addCleanup(bcm.close)
bcm.connect((self.interface,))
self.can_id = 0x123
self.data = bytes([0xc0, 0xff, 0xee])
self.cf = self.build_can_frame(self.can_id, self.data)
opcode = socket.CAN_BCM_TX_SEND
flags = 0
count = 0
ival1_seconds = ival1_usec = ival2_seconds = ival2_usec = 0
bcm_can_id = 0x0222
nframes = 1
self.assertEqual(len(self.cf), 16)
header = struct.pack(self.bcm_cmd_msg_fmt,
opcode,
flags,
count,
ival1_seconds,
ival1_usec,
ival2_seconds,
ival2_usec,
bcm_can_id,
nframes,
)
header_plus_frame = header + self.cf
bytes_sent = bcm.send(header_plus_frame)
self.assertEqual(bytes_sent, len(header_plus_frame))
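# A BCM message is a bcm_msg_head header (packed via
# self.bcm_cmd_msg_fmt on the base test class) followed by nframes
# CAN frames; CAN_BCM_TX_SEND with nframes=1, as above, asks the
# broadcast manager to transmit the single appended frame once,
# immediately.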
@unittest.skipUnless(HAVE_SOCKET_RDS, 'RDS sockets required for this test.')
class BasicRDSTest(unittest.TestCase):
def testCrucialConstants(self):
socket.AF_RDS
socket.PF_RDS
def testCreateSocket(self):
with socket.socket(socket.PF_RDS, socket.SOCK_SEQPACKET, 0) as s:
pass
def testSocketBufferSize(self):
bufsize = 16384
with socket.socket(socket.PF_RDS, socket.SOCK_SEQPACKET, 0) as s:
s.setsockopt(socket.SOL_SOCKET, socket.SO_RCVBUF, bufsize)
s.setsockopt(socket.SOL_SOCKET, socket.SO_SNDBUF, bufsize)
@unittest.skipUnless(HAVE_SOCKET_RDS, 'RDS sockets required for this test.')
@unittest.skipUnless(thread, 'Threading required for this test.')
class RDSTest(ThreadedRDSSocketTest):
def __init__(self, methodName='runTest'):
ThreadedRDSSocketTest.__init__(self, methodName=methodName)
def setUp(self):
super().setUp()
self.evt = threading.Event()
def testSendAndRecv(self):
data, addr = self.serv.recvfrom(self.bufsize)
self.assertEqual(self.data, data)
self.assertEqual(self.cli_addr, addr)
def _testSendAndRecv(self):
self.data = b'spam'
self.cli.sendto(self.data, 0, (HOST, self.port))
def testPeek(self):
data, addr = self.serv.recvfrom(self.bufsize, socket.MSG_PEEK)
self.assertEqual(self.data, data)
data, addr = self.serv.recvfrom(self.bufsize)
self.assertEqual(self.data, data)
def _testPeek(self):
self.data = b'spam'
self.cli.sendto(self.data, 0, (HOST, self.port))
@requireAttrs(socket.socket, 'recvmsg')
def testSendAndRecvMsg(self):
data, ancdata, msg_flags, addr = self.serv.recvmsg(self.bufsize)
self.assertEqual(self.data, data)
@requireAttrs(socket.socket, 'sendmsg')
def _testSendAndRecvMsg(self):
self.data = b'hello ' * 10
self.cli.sendmsg([self.data], (), 0, (HOST, self.port))
def testSendAndRecvMulti(self):
data, addr = self.serv.recvfrom(self.bufsize)
self.assertEqual(self.data1, data)
data, addr = self.serv.recvfrom(self.bufsize)
self.assertEqual(self.data2, data)
def _testSendAndRecvMulti(self):
self.data1 = b'bacon'
self.cli.sendto(self.data1, 0, (HOST, self.port))
self.data2 = b'egg'
self.cli.sendto(self.data2, 0, (HOST, self.port))
def testSelect(self):
r, w, x = select.select([self.serv], [], [], 3.0)
self.assertIn(self.serv, r)
data, addr = self.serv.recvfrom(self.bufsize)
self.assertEqual(self.data, data)
def _testSelect(self):
self.data = b'select'
self.cli.sendto(self.data, 0, (HOST, self.port))
def testCongestion(self):
# wait until the sender is done
self.evt.wait()
def _testCongestion(self):
# test the behavior in case of congestion
self.data = b'fill'
self.cli.setblocking(False)
try:
# try to lower the receiver's socket buffer size
self.cli.setsockopt(socket.SOL_SOCKET, socket.SO_RCVBUF, 16384)
except OSError:
pass
with self.assertRaises(OSError) as cm:
try:
# fill the receiver's socket buffer
while True:
self.cli.sendto(self.data, 0, (HOST, self.port))
finally:
# signal the receiver we're done
self.evt.set()
# sendto() should have failed with ENOBUFS
self.assertEqual(cm.exception.errno, errno.ENOBUFS)
# and we should have received a congestion notification through poll
r, w, x = select.select([self.serv], [], [], 3.0)
self.assertIn(self.serv, r)
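# Design note: the sender loops on a non-blocking socket until the
# RDS stack reports ENOBUFS, and only then releases the receiver via
# self.evt, so testCongestion cannot finish before congestion has
# actually been provoked.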
@unittest.skipUnless(thread, 'Threading required for this test.')
class BasicTCPTest(SocketConnectedTest):
def __init__(self, methodName='runTest'):
SocketConnectedTest.__init__(self, methodName=methodName)
def testRecv(self):
# Testing large receive over TCP
msg = self.cli_conn.recv(1024)
self.assertEqual(msg, MSG)
def _testRecv(self):
self.serv_conn.send(MSG)
def testOverFlowRecv(self):
# Testing receive in chunks over TCP
seg1 = self.cli_conn.recv(len(MSG) - 3)
seg2 = self.cli_conn.recv(1024)
msg = seg1 + seg2
self.assertEqual(msg, MSG)
def _testOverFlowRecv(self):
self.serv_conn.send(MSG)
def testRecvFrom(self):
# Testing large recvfrom() over TCP
msg, addr = self.cli_conn.recvfrom(1024)
self.assertEqual(msg, MSG)
def _testRecvFrom(self):
self.serv_conn.send(MSG)
def testOverFlowRecvFrom(self):
# Testing recvfrom() in chunks over TCP
seg1, addr = self.cli_conn.recvfrom(len(MSG)-3)
seg2, addr = self.cli_conn.recvfrom(1024)
msg = seg1 + seg2
self.assertEqual(msg, MSG)
def _testOverFlowRecvFrom(self):
self.serv_conn.send(MSG)
def testSendAll(self):
# Testing sendall() with a 2048 byte string over TCP
msg = b''
while True:
read = self.cli_conn.recv(1024)
if not read:
break
msg += read
self.assertEqual(msg, b'f' * 2048)
def _testSendAll(self):
big_chunk = b'f' * 2048
self.serv_conn.sendall(big_chunk)
def testFromFd(self):
# Testing fromfd()
fd = self.cli_conn.fileno()
sock = socket.fromfd(fd, socket.AF_INET, socket.SOCK_STREAM)
self.addCleanup(sock.close)
self.assertIsInstance(sock, socket.socket)
msg = sock.recv(1024)
self.assertEqual(msg, MSG)
def _testFromFd(self):
self.serv_conn.send(MSG)
def testDup(self):
# Testing dup()
sock = self.cli_conn.dup()
self.addCleanup(sock.close)
msg = sock.recv(1024)
self.assertEqual(msg, MSG)
def _testDup(self):
self.serv_conn.send(MSG)
def testShutdown(self):
# Testing shutdown()
msg = self.cli_conn.recv(1024)
self.assertEqual(msg, MSG)
# wait for _testShutdown to finish: on OS X, when the server
# closes the connection the client also becomes disconnected,
# and the client's shutdown call will fail. (Issue #4397.)
self.done.wait()
def _testShutdown(self):
self.serv_conn.send(MSG)
self.serv_conn.shutdown(2)
testShutdown_overflow = support.cpython_only(testShutdown)
@support.cpython_only
def _testShutdown_overflow(self):
import _testcapi
self.serv_conn.send(MSG)
# Issue 15989
self.assertRaises(OverflowError, self.serv_conn.shutdown,
_testcapi.INT_MAX + 1)
self.assertRaises(OverflowError, self.serv_conn.shutdown,
2 + (_testcapi.UINT_MAX + 1))
self.serv_conn.shutdown(2)
def testDetach(self):
# Testing detach()
fileno = self.cli_conn.fileno()
f = self.cli_conn.detach()
self.assertEqual(f, fileno)
# cli_conn cannot be used anymore...
self.assertTrue(self.cli_conn._closed)
self.assertRaises(OSError, self.cli_conn.recv, 1024)
self.cli_conn.close()
# ...but we can create another socket using the (still open)
# file descriptor
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM, fileno=f)
self.addCleanup(sock.close)
msg = sock.recv(1024)
self.assertEqual(msg, MSG)
def _testDetach(self):
self.serv_conn.send(MSG)
@unittest.skipUnless(thread, 'Threading required for this test.')
class BasicUDPTest(ThreadedUDPSocketTest):
def __init__(self, methodName='runTest'):
ThreadedUDPSocketTest.__init__(self, methodName=methodName)
def testSendtoAndRecv(self):
# Testing sendto() and recv() over UDP
msg = self.serv.recv(len(MSG))
self.assertEqual(msg, MSG)
def _testSendtoAndRecv(self):
self.cli.sendto(MSG, 0, (HOST, self.port))
def testRecvFrom(self):
# Testing recvfrom() over UDP
msg, addr = self.serv.recvfrom(len(MSG))
self.assertEqual(msg, MSG)
def _testRecvFrom(self):
self.cli.sendto(MSG, 0, (HOST, self.port))
def testRecvFromNegative(self):
# Negative lengths passed to recvfrom should give ValueError.
self.assertRaises(ValueError, self.serv.recvfrom, -1)
def _testRecvFromNegative(self):
self.cli.sendto(MSG, 0, (HOST, self.port))
# Tests for the sendmsg()/recvmsg() interface. Where possible, the
# same test code is used with different families and types of socket
# (e.g. stream, datagram), and tests using recvmsg() are repeated
# using recvmsg_into().
#
# The generic test classes such as SendmsgTests and
# RecvmsgGenericTests inherit from SendrecvmsgBase and expect to be
# supplied with sockets cli_sock and serv_sock representing the
# client's and the server's end of the connection respectively, and
# attributes cli_addr and serv_addr holding their (numeric where
# appropriate) addresses.
#
# The final concrete test classes combine these with subclasses of
# SocketTestBase which set up client and server sockets of a specific
# type, and with subclasses of SendrecvmsgBase such as
# SendrecvmsgDgramBase and SendrecvmsgConnectedBase which map these
# sockets to cli_sock and serv_sock and override the methods and
# attributes of SendrecvmsgBase to fill in destination addresses if
# needed when sending, check for specific flags in msg_flags, etc.
#
# RecvmsgIntoMixin provides a version of doRecvmsg() implemented using
# recvmsg_into().
# XXX: like the other datagram (UDP) tests in this module, the code
# here assumes that datagram delivery on the local machine will be
# reliable.
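# For example, RecvmsgUDPTest near the end of this section combines
# RecvmsgTests with SendrecvmsgUDPTestBase, which in turn stacks
# SendrecvmsgDgramFlagsBase, SendrecvmsgConnectionlessBase,
# ThreadedSocketTestMixin and UDPTestBase to supply the sockets.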
class SendrecvmsgBase(ThreadSafeCleanupTestCase):
# Base class for sendmsg()/recvmsg() tests.
# Time in seconds to wait before considering a test failed, or
# None for no timeout. Not all tests actually set a timeout.
fail_timeout = 3.0
def setUp(self):
self.misc_event = threading.Event()
super().setUp()
def sendToServer(self, msg):
# Send msg to the server.
return self.cli_sock.send(msg)
# Tuple of alternative default arguments for sendmsg() when called
# via sendmsgToServer() (e.g. to include a destination address).
sendmsg_to_server_defaults = ()
def sendmsgToServer(self, *args):
# Call sendmsg() on self.cli_sock with the given arguments,
# filling in any arguments which are not supplied with the
# corresponding items of self.sendmsg_to_server_defaults, if
# any.
return self.cli_sock.sendmsg(
*(args + self.sendmsg_to_server_defaults[len(args):]))
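# Sketch: on a connectionless subclass, where
# sendmsg_to_server_defaults is ([], [], 0, self.serv_addr),
# sendmsgToServer([MSG]) expands to
# cli_sock.sendmsg([MSG], [], 0, self.serv_addr), while
# sendmsgToServer([MSG], ancdata) keeps the caller's ancillary data
# and still fills in the flags and destination address.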
def doRecvmsg(self, sock, bufsize, *args):
# Call recvmsg() on sock with given arguments and return its
# result. Should be used for tests which can use either
# recvmsg() or recvmsg_into() - RecvmsgIntoMixin overrides
# this method with one which emulates it using recvmsg_into(),
# thus allowing the same test to be used for both methods.
result = sock.recvmsg(bufsize, *args)
self.registerRecvmsgResult(result)
return result
def registerRecvmsgResult(self, result):
# Called by doRecvmsg() with the return value of recvmsg() or
# recvmsg_into(). Can be overridden to arrange cleanup based
# on the returned ancillary data, for instance.
pass
def checkRecvmsgAddress(self, addr1, addr2):
# Called to compare the received address with the address of
# the peer.
self.assertEqual(addr1, addr2)
# Flags that are normally unset in msg_flags
msg_flags_common_unset = 0
for name in ("MSG_CTRUNC", "MSG_OOB"):
msg_flags_common_unset |= getattr(socket, name, 0)
# Flags that are normally set
msg_flags_common_set = 0
# Flags set when a complete record has been received (e.g. MSG_EOR
# for SCTP)
msg_flags_eor_indicator = 0
# Flags set when a complete record has not been received
# (e.g. MSG_TRUNC for datagram sockets)
msg_flags_non_eor_indicator = 0
def checkFlags(self, flags, eor=None, checkset=0, checkunset=0, ignore=0):
# Method to check the value of msg_flags returned by recvmsg[_into]().
#
# Checks that all bits in msg_flags_common_set attribute are
# set in "flags" and all bits in msg_flags_common_unset are
# unset.
#
# The "eor" argument specifies whether the flags should
# indicate that a full record (or datagram) has been received.
# If "eor" is None, no checks are done; otherwise, checks
# that:
#
# * if "eor" is true, all bits in msg_flags_eor_indicator are
# set and all bits in msg_flags_non_eor_indicator are unset
#
# * if "eor" is false, all bits in msg_flags_non_eor_indicator
# are set and all bits in msg_flags_eor_indicator are unset
#
# If "checkset" and/or "checkunset" are supplied, they require
# the given bits to be set or unset respectively, overriding
# what the attributes require for those bits.
#
# If any bits are set in "ignore", they will not be checked,
# regardless of the other inputs.
#
# Will raise Exception if the inputs require a bit to be both
# set and unset, and it is not ignored.
defaultset = self.msg_flags_common_set
defaultunset = self.msg_flags_common_unset
if eor:
defaultset |= self.msg_flags_eor_indicator
defaultunset |= self.msg_flags_non_eor_indicator
elif eor is not None:
defaultset |= self.msg_flags_non_eor_indicator
defaultunset |= self.msg_flags_eor_indicator
# Function arguments override defaults
defaultset &= ~checkunset
defaultunset &= ~checkset
# Merge arguments with remaining defaults, and check for conflicts
checkset |= defaultset
checkunset |= defaultunset
inboth = checkset & checkunset & ~ignore
if inboth:
raise Exception("contradictory set, unset requirements for flags "
"{0:#x}".format(inboth))
# Compare with given msg_flags value
mask = (checkset | checkunset) & ~ignore
self.assertEqual(flags & mask, checkset & mask)
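# Typical calls seen below: checkFlags(flags, eor=True) asserts a
# complete record was received (for datagram subclasses this also
# requires MSG_TRUNC unset), and adding checkunset=socket.MSG_CTRUNC
# further asserts that the ancillary data was not truncated.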
class RecvmsgIntoMixin(SendrecvmsgBase):
# Mixin to implement doRecvmsg() using recvmsg_into().
def doRecvmsg(self, sock, bufsize, *args):
buf = bytearray(bufsize)
result = sock.recvmsg_into([buf], *args)
self.registerRecvmsgResult(result)
self.assertGreaterEqual(result[0], 0)
self.assertLessEqual(result[0], bufsize)
return (bytes(buf[:result[0]]),) + result[1:]
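# This adapts the differing return shapes: recvmsg() returns
# (data, ancdata, msg_flags, address) whereas recvmsg_into() returns
# (nbytes, ancdata, msg_flags, address), so the mixin rebuilds the
# bytes from the buffer to keep the generic tests unchanged.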
class SendrecvmsgDgramFlagsBase(SendrecvmsgBase):
# Defines flags to be checked in msg_flags for datagram sockets.
@property
def msg_flags_non_eor_indicator(self):
return super().msg_flags_non_eor_indicator | socket.MSG_TRUNC
class SendrecvmsgSCTPFlagsBase(SendrecvmsgBase):
# Defines flags to be checked in msg_flags for SCTP sockets.
@property
def msg_flags_eor_indicator(self):
return super().msg_flags_eor_indicator | socket.MSG_EOR
class SendrecvmsgConnectionlessBase(SendrecvmsgBase):
# Base class for tests on connectionless-mode sockets. Users must
# supply sockets on attributes cli and serv to be mapped to
# cli_sock and serv_sock respectively.
@property
def serv_sock(self):
return self.serv
@property
def cli_sock(self):
return self.cli
@property
def sendmsg_to_server_defaults(self):
return ([], [], 0, self.serv_addr)
def sendToServer(self, msg):
return self.cli_sock.sendto(msg, self.serv_addr)
class SendrecvmsgConnectedBase(SendrecvmsgBase):
# Base class for tests on connected sockets. Users must supply
# sockets on attributes serv_conn and cli_conn (representing the
# connections *to* the server and the client), to be mapped to
# cli_sock and serv_sock respectively.
@property
def serv_sock(self):
return self.cli_conn
@property
def cli_sock(self):
return self.serv_conn
def checkRecvmsgAddress(self, addr1, addr2):
# Address is currently "unspecified" for a connected socket,
# so we don't examine it
pass
class SendrecvmsgServerTimeoutBase(SendrecvmsgBase):
# Base class to set a timeout on server's socket.
def setUp(self):
super().setUp()
self.serv_sock.settimeout(self.fail_timeout)
class SendmsgTests(SendrecvmsgServerTimeoutBase):
# Tests for sendmsg() which can use any socket type and do not
# involve recvmsg() or recvmsg_into().
def testSendmsg(self):
# Send a simple message with sendmsg().
self.assertEqual(self.serv_sock.recv(len(MSG)), MSG)
def _testSendmsg(self):
self.assertEqual(self.sendmsgToServer([MSG]), len(MSG))
def testSendmsgDataGenerator(self):
# Send from buffer obtained from a generator (not a sequence).
self.assertEqual(self.serv_sock.recv(len(MSG)), MSG)
def _testSendmsgDataGenerator(self):
self.assertEqual(self.sendmsgToServer((o for o in [MSG])),
len(MSG))
def testSendmsgAncillaryGenerator(self):
# Gather (empty) ancillary data from a generator.
self.assertEqual(self.serv_sock.recv(len(MSG)), MSG)
def _testSendmsgAncillaryGenerator(self):
self.assertEqual(self.sendmsgToServer([MSG], (o for o in [])),
len(MSG))
def testSendmsgArray(self):
# Send data from an array instead of the usual bytes object.
self.assertEqual(self.serv_sock.recv(len(MSG)), MSG)
def _testSendmsgArray(self):
self.assertEqual(self.sendmsgToServer([array.array("B", MSG)]),
len(MSG))
def testSendmsgGather(self):
# Send message data from more than one buffer (gather write).
self.assertEqual(self.serv_sock.recv(len(MSG)), MSG)
def _testSendmsgGather(self):
self.assertEqual(self.sendmsgToServer([MSG[:3], MSG[3:]]), len(MSG))
def testSendmsgBadArgs(self):
# Check that sendmsg() rejects invalid arguments.
self.assertEqual(self.serv_sock.recv(1000), b"done")
def _testSendmsgBadArgs(self):
self.assertRaises(TypeError, self.cli_sock.sendmsg)
self.assertRaises(TypeError, self.sendmsgToServer,
b"not in an iterable")
self.assertRaises(TypeError, self.sendmsgToServer,
object())
self.assertRaises(TypeError, self.sendmsgToServer,
[object()])
self.assertRaises(TypeError, self.sendmsgToServer,
[MSG, object()])
self.assertRaises(TypeError, self.sendmsgToServer,
[MSG], object())
self.assertRaises(TypeError, self.sendmsgToServer,
[MSG], [], object())
self.assertRaises(TypeError, self.sendmsgToServer,
[MSG], [], 0, object())
self.sendToServer(b"done")
def testSendmsgBadCmsg(self):
# Check that invalid ancillary data items are rejected.
self.assertEqual(self.serv_sock.recv(1000), b"done")
def _testSendmsgBadCmsg(self):
self.assertRaises(TypeError, self.sendmsgToServer,
[MSG], [object()])
self.assertRaises(TypeError, self.sendmsgToServer,
[MSG], [(object(), 0, b"data")])
self.assertRaises(TypeError, self.sendmsgToServer,
[MSG], [(0, object(), b"data")])
self.assertRaises(TypeError, self.sendmsgToServer,
[MSG], [(0, 0, object())])
self.assertRaises(TypeError, self.sendmsgToServer,
[MSG], [(0, 0)])
self.assertRaises(TypeError, self.sendmsgToServer,
[MSG], [(0, 0, b"data", 42)])
self.sendToServer(b"done")
@requireAttrs(socket, "CMSG_SPACE")
def testSendmsgBadMultiCmsg(self):
# Check that invalid ancillary data items are rejected when
# more than one item is present.
self.assertEqual(self.serv_sock.recv(1000), b"done")
@testSendmsgBadMultiCmsg.client_skip
def _testSendmsgBadMultiCmsg(self):
self.assertRaises(TypeError, self.sendmsgToServer,
[MSG], [0, 0, b""])
self.assertRaises(TypeError, self.sendmsgToServer,
[MSG], [(0, 0, b""), object()])
self.sendToServer(b"done")
def testSendmsgExcessCmsgReject(self):
# Check that sendmsg() rejects excess ancillary data items
# when the number that can be sent is limited.
self.assertEqual(self.serv_sock.recv(1000), b"done")
def _testSendmsgExcessCmsgReject(self):
if not hasattr(socket, "CMSG_SPACE"):
# Can only send one item
with self.assertRaises(OSError) as cm:
self.sendmsgToServer([MSG], [(0, 0, b""), (0, 0, b"")])
self.assertIsNone(cm.exception.errno)
self.sendToServer(b"done")
def testSendmsgAfterClose(self):
# Check that sendmsg() fails on a closed socket.
pass
def _testSendmsgAfterClose(self):
self.cli_sock.close()
self.assertRaises(OSError, self.sendmsgToServer, [MSG])
class SendmsgStreamTests(SendmsgTests):
# Tests for sendmsg() which require a stream socket and do not
# involve recvmsg() or recvmsg_into().
def testSendmsgExplicitNoneAddr(self):
# Check that peer address can be specified as None.
self.assertEqual(self.serv_sock.recv(len(MSG)), MSG)
def _testSendmsgExplicitNoneAddr(self):
self.assertEqual(self.sendmsgToServer([MSG], [], 0, None), len(MSG))
def testSendmsgTimeout(self):
# Check that timeout works with sendmsg().
self.assertEqual(self.serv_sock.recv(512), b"a"*512)
self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))
def _testSendmsgTimeout(self):
try:
self.cli_sock.settimeout(0.03)
with self.assertRaises(socket.timeout):
while True:
self.sendmsgToServer([b"a"*512])
finally:
self.misc_event.set()
# XXX: would be nice to have more tests for sendmsg flags argument.
# Linux supports MSG_DONTWAIT when sending, but in general, it
# only works when receiving. Could add other platforms if they
# support it too.
@skipWithClientIf(sys.platform not in {"linux"},
"MSG_DONTWAIT not known to work on this platform when "
"sending")
def testSendmsgDontWait(self):
# Check that MSG_DONTWAIT in flags causes non-blocking behaviour.
self.assertEqual(self.serv_sock.recv(512), b"a"*512)
self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))
@testSendmsgDontWait.client_skip
def _testSendmsgDontWait(self):
try:
with self.assertRaises(OSError) as cm:
while True:
self.sendmsgToServer([b"a"*512], [], socket.MSG_DONTWAIT)
self.assertIn(cm.exception.errno,
(errno.EAGAIN, errno.EWOULDBLOCK))
finally:
self.misc_event.set()
class SendmsgConnectionlessTests(SendmsgTests):
# Tests for sendmsg() which require a connectionless-mode
# (e.g. datagram) socket, and do not involve recvmsg() or
# recvmsg_into().
def testSendmsgNoDestAddr(self):
# Check that sendmsg() fails when no destination address is
# given for unconnected socket.
pass
def _testSendmsgNoDestAddr(self):
self.assertRaises(OSError, self.cli_sock.sendmsg,
[MSG])
self.assertRaises(OSError, self.cli_sock.sendmsg,
[MSG], [], 0, None)
class RecvmsgGenericTests(SendrecvmsgBase):
# Tests for recvmsg() which can also be emulated using
# recvmsg_into(), and can use any socket type.
def testRecvmsg(self):
# Receive a simple message with recvmsg[_into]().
msg, ancdata, flags, addr = self.doRecvmsg(self.serv_sock, len(MSG))
self.assertEqual(msg, MSG)
self.checkRecvmsgAddress(addr, self.cli_addr)
self.assertEqual(ancdata, [])
self.checkFlags(flags, eor=True)
def _testRecvmsg(self):
self.sendToServer(MSG)
def testRecvmsgExplicitDefaults(self):
# Test recvmsg[_into]() with default arguments provided explicitly.
msg, ancdata, flags, addr = self.doRecvmsg(self.serv_sock,
len(MSG), 0, 0)
self.assertEqual(msg, MSG)
self.checkRecvmsgAddress(addr, self.cli_addr)
self.assertEqual(ancdata, [])
self.checkFlags(flags, eor=True)
def _testRecvmsgExplicitDefaults(self):
self.sendToServer(MSG)
def testRecvmsgShorter(self):
# Receive a message smaller than buffer.
msg, ancdata, flags, addr = self.doRecvmsg(self.serv_sock,
len(MSG) + 42)
self.assertEqual(msg, MSG)
self.checkRecvmsgAddress(addr, self.cli_addr)
self.assertEqual(ancdata, [])
self.checkFlags(flags, eor=True)
def _testRecvmsgShorter(self):
self.sendToServer(MSG)
# FreeBSD < 8 doesn't always set the MSG_TRUNC flag when a truncated
# datagram is received (issue #13001).
@support.requires_freebsd_version(8)
def testRecvmsgTrunc(self):
# Receive part of message, check for truncation indicators.
msg, ancdata, flags, addr = self.doRecvmsg(self.serv_sock,
len(MSG) - 3)
self.assertEqual(msg, MSG[:-3])
self.checkRecvmsgAddress(addr, self.cli_addr)
self.assertEqual(ancdata, [])
self.checkFlags(flags, eor=False)
@support.requires_freebsd_version(8)
def _testRecvmsgTrunc(self):
self.sendToServer(MSG)
def testRecvmsgShortAncillaryBuf(self):
# Test ancillary data buffer too small to hold any ancillary data.
msg, ancdata, flags, addr = self.doRecvmsg(self.serv_sock,
len(MSG), 1)
self.assertEqual(msg, MSG)
self.checkRecvmsgAddress(addr, self.cli_addr)
self.assertEqual(ancdata, [])
self.checkFlags(flags, eor=True)
def _testRecvmsgShortAncillaryBuf(self):
self.sendToServer(MSG)
def testRecvmsgLongAncillaryBuf(self):
# Test large ancillary data buffer.
msg, ancdata, flags, addr = self.doRecvmsg(self.serv_sock,
len(MSG), 10240)
self.assertEqual(msg, MSG)
self.checkRecvmsgAddress(addr, self.cli_addr)
self.assertEqual(ancdata, [])
self.checkFlags(flags, eor=True)
def _testRecvmsgLongAncillaryBuf(self):
self.sendToServer(MSG)
def testRecvmsgAfterClose(self):
# Check that recvmsg[_into]() fails on a closed socket.
self.serv_sock.close()
self.assertRaises(OSError, self.doRecvmsg, self.serv_sock, 1024)
def _testRecvmsgAfterClose(self):
pass
def testRecvmsgTimeout(self):
# Check that timeout works.
try:
self.serv_sock.settimeout(0.03)
self.assertRaises(socket.timeout,
self.doRecvmsg, self.serv_sock, len(MSG))
finally:
self.misc_event.set()
def _testRecvmsgTimeout(self):
self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))
@requireAttrs(socket, "MSG_PEEK")
def testRecvmsgPeek(self):
# Check that MSG_PEEK in flags enables examination of pending
# data without consuming it.
# Receive part of data with MSG_PEEK.
msg, ancdata, flags, addr = self.doRecvmsg(self.serv_sock,
len(MSG) - 3, 0,
socket.MSG_PEEK)
self.assertEqual(msg, MSG[:-3])
self.checkRecvmsgAddress(addr, self.cli_addr)
self.assertEqual(ancdata, [])
# Ignoring MSG_TRUNC here (so this test is the same for stream
# and datagram sockets). Some wording in POSIX seems to
# suggest that it needn't be set when peeking, but that may
# just be a slip.
self.checkFlags(flags, eor=False,
ignore=getattr(socket, "MSG_TRUNC", 0))
# Receive all data with MSG_PEEK.
msg, ancdata, flags, addr = self.doRecvmsg(self.serv_sock,
len(MSG), 0,
socket.MSG_PEEK)
self.assertEqual(msg, MSG)
self.checkRecvmsgAddress(addr, self.cli_addr)
self.assertEqual(ancdata, [])
self.checkFlags(flags, eor=True)
# Check that the same data can still be received normally.
msg, ancdata, flags, addr = self.doRecvmsg(self.serv_sock, len(MSG))
self.assertEqual(msg, MSG)
self.checkRecvmsgAddress(addr, self.cli_addr)
self.assertEqual(ancdata, [])
self.checkFlags(flags, eor=True)
@testRecvmsgPeek.client_skip
def _testRecvmsgPeek(self):
self.sendToServer(MSG)
@requireAttrs(socket.socket, "sendmsg")
def testRecvmsgFromSendmsg(self):
# Test receiving with recvmsg[_into]() when message is sent
# using sendmsg().
self.serv_sock.settimeout(self.fail_timeout)
msg, ancdata, flags, addr = self.doRecvmsg(self.serv_sock, len(MSG))
self.assertEqual(msg, MSG)
self.checkRecvmsgAddress(addr, self.cli_addr)
self.assertEqual(ancdata, [])
self.checkFlags(flags, eor=True)
@testRecvmsgFromSendmsg.client_skip
def _testRecvmsgFromSendmsg(self):
self.assertEqual(self.sendmsgToServer([MSG[:3], MSG[3:]]), len(MSG))
class RecvmsgGenericStreamTests(RecvmsgGenericTests):
# Tests which require a stream socket and can use either recvmsg()
# or recvmsg_into().
def testRecvmsgEOF(self):
# Receive end-of-stream indicator (b"", peer socket closed).
msg, ancdata, flags, addr = self.doRecvmsg(self.serv_sock, 1024)
self.assertEqual(msg, b"")
self.checkRecvmsgAddress(addr, self.cli_addr)
self.assertEqual(ancdata, [])
self.checkFlags(flags, eor=None) # Might not have end-of-record marker
def _testRecvmsgEOF(self):
self.cli_sock.close()
def testRecvmsgOverflow(self):
# Receive a message in more than one chunk.
seg1, ancdata, flags, addr = self.doRecvmsg(self.serv_sock,
len(MSG) - 3)
self.checkRecvmsgAddress(addr, self.cli_addr)
self.assertEqual(ancdata, [])
self.checkFlags(flags, eor=False)
seg2, ancdata, flags, addr = self.doRecvmsg(self.serv_sock, 1024)
self.checkRecvmsgAddress(addr, self.cli_addr)
self.assertEqual(ancdata, [])
self.checkFlags(flags, eor=True)
msg = seg1 + seg2
self.assertEqual(msg, MSG)
def _testRecvmsgOverflow(self):
self.sendToServer(MSG)
class RecvmsgTests(RecvmsgGenericTests):
# Tests for recvmsg() which can use any socket type.
def testRecvmsgBadArgs(self):
# Check that recvmsg() rejects invalid arguments.
self.assertRaises(TypeError, self.serv_sock.recvmsg)
self.assertRaises(ValueError, self.serv_sock.recvmsg,
-1, 0, 0)
self.assertRaises(ValueError, self.serv_sock.recvmsg,
len(MSG), -1, 0)
self.assertRaises(TypeError, self.serv_sock.recvmsg,
[bytearray(10)], 0, 0)
self.assertRaises(TypeError, self.serv_sock.recvmsg,
object(), 0, 0)
self.assertRaises(TypeError, self.serv_sock.recvmsg,
len(MSG), object(), 0)
self.assertRaises(TypeError, self.serv_sock.recvmsg,
len(MSG), 0, object())
msg, ancdata, flags, addr = self.serv_sock.recvmsg(len(MSG), 0, 0)
self.assertEqual(msg, MSG)
self.checkRecvmsgAddress(addr, self.cli_addr)
self.assertEqual(ancdata, [])
self.checkFlags(flags, eor=True)
def _testRecvmsgBadArgs(self):
self.sendToServer(MSG)
class RecvmsgIntoTests(RecvmsgIntoMixin, RecvmsgGenericTests):
# Tests for recvmsg_into() which can use any socket type.
def testRecvmsgIntoBadArgs(self):
# Check that recvmsg_into() rejects invalid arguments.
buf = bytearray(len(MSG))
self.assertRaises(TypeError, self.serv_sock.recvmsg_into)
self.assertRaises(TypeError, self.serv_sock.recvmsg_into,
len(MSG), 0, 0)
self.assertRaises(TypeError, self.serv_sock.recvmsg_into,
buf, 0, 0)
self.assertRaises(TypeError, self.serv_sock.recvmsg_into,
[object()], 0, 0)
self.assertRaises(TypeError, self.serv_sock.recvmsg_into,
[b"I'm not writable"], 0, 0)
self.assertRaises(TypeError, self.serv_sock.recvmsg_into,
[buf, object()], 0, 0)
self.assertRaises(ValueError, self.serv_sock.recvmsg_into,
[buf], -1, 0)
self.assertRaises(TypeError, self.serv_sock.recvmsg_into,
[buf], object(), 0)
self.assertRaises(TypeError, self.serv_sock.recvmsg_into,
[buf], 0, object())
nbytes, ancdata, flags, addr = self.serv_sock.recvmsg_into([buf], 0, 0)
self.assertEqual(nbytes, len(MSG))
self.assertEqual(buf, bytearray(MSG))
self.checkRecvmsgAddress(addr, self.cli_addr)
self.assertEqual(ancdata, [])
self.checkFlags(flags, eor=True)
def _testRecvmsgIntoBadArgs(self):
self.sendToServer(MSG)
def testRecvmsgIntoGenerator(self):
# Receive into buffer obtained from a generator (not a sequence).
buf = bytearray(len(MSG))
nbytes, ancdata, flags, addr = self.serv_sock.recvmsg_into(
(o for o in [buf]))
self.assertEqual(nbytes, len(MSG))
self.assertEqual(buf, bytearray(MSG))
self.checkRecvmsgAddress(addr, self.cli_addr)
self.assertEqual(ancdata, [])
self.checkFlags(flags, eor=True)
def _testRecvmsgIntoGenerator(self):
self.sendToServer(MSG)
def testRecvmsgIntoArray(self):
# Receive into an array rather than the usual bytearray.
buf = array.array("B", [0] * len(MSG))
nbytes, ancdata, flags, addr = self.serv_sock.recvmsg_into([buf])
self.assertEqual(nbytes, len(MSG))
self.assertEqual(buf.tobytes(), MSG)
self.checkRecvmsgAddress(addr, self.cli_addr)
self.assertEqual(ancdata, [])
self.checkFlags(flags, eor=True)
def _testRecvmsgIntoArray(self):
self.sendToServer(MSG)
def testRecvmsgIntoScatter(self):
# Receive into multiple buffers (scatter write).
b1 = bytearray(b"----")
b2 = bytearray(b"0123456789")
b3 = bytearray(b"--------------")
nbytes, ancdata, flags, addr = self.serv_sock.recvmsg_into(
[b1, memoryview(b2)[2:9], b3])
self.assertEqual(nbytes, len(b"Mary had a little lamb"))
self.assertEqual(b1, bytearray(b"Mary"))
self.assertEqual(b2, bytearray(b"01 had a 9"))
self.assertEqual(b3, bytearray(b"little lamb---"))
self.checkRecvmsgAddress(addr, self.cli_addr)
self.assertEqual(ancdata, [])
self.checkFlags(flags, eor=True)
def _testRecvmsgIntoScatter(self):
self.sendToServer(b"Mary had a little lamb")
class CmsgMacroTests(unittest.TestCase):
# Test the functions CMSG_LEN() and CMSG_SPACE(). Tests
# assumptions used by sendmsg() and recvmsg[_into](), which share
# code with these functions.
# Match the definition in socketmodule.c
try:
import _testcapi
except ImportError:
socklen_t_limit = 0x7fffffff
else:
socklen_t_limit = min(0x7fffffff, _testcapi.INT_MAX)
@requireAttrs(socket, "CMSG_LEN")
def testCMSG_LEN(self):
# Test CMSG_LEN() with various valid and invalid values,
# checking the assumptions used by recvmsg() and sendmsg().
toobig = self.socklen_t_limit - socket.CMSG_LEN(0) + 1
values = list(range(257)) + list(range(toobig - 257, toobig))
# struct cmsghdr has at least three members, two of which are ints
self.assertGreater(socket.CMSG_LEN(0), array.array("i").itemsize * 2)
for n in values:
ret = socket.CMSG_LEN(n)
# This is how recvmsg() calculates the data size
self.assertEqual(ret - socket.CMSG_LEN(0), n)
self.assertLessEqual(ret, self.socklen_t_limit)
self.assertRaises(OverflowError, socket.CMSG_LEN, -1)
# sendmsg() shares code with these functions, and requires
# that it reject values over the limit.
self.assertRaises(OverflowError, socket.CMSG_LEN, toobig)
self.assertRaises(OverflowError, socket.CMSG_LEN, sys.maxsize)
@requireAttrs(socket, "CMSG_SPACE")
def testCMSG_SPACE(self):
# Test CMSG_SPACE() with various valid and invalid values,
# checking the assumptions used by sendmsg().
toobig = self.socklen_t_limit - socket.CMSG_SPACE(1) + 1
values = list(range(257)) + list(range(toobig - 257, toobig))
last = socket.CMSG_SPACE(0)
# struct cmsghdr has at least three members, two of which are ints
self.assertGreater(last, array.array("i").itemsize * 2)
for n in values:
ret = socket.CMSG_SPACE(n)
self.assertGreaterEqual(ret, last)
self.assertGreaterEqual(ret, socket.CMSG_LEN(n))
self.assertGreaterEqual(ret, n + socket.CMSG_LEN(0))
self.assertLessEqual(ret, self.socklen_t_limit)
last = ret
self.assertRaises(OverflowError, socket.CMSG_SPACE, -1)
# sendmsg() shares code with these functions, and requires
# that it reject values over the limit.
self.assertRaises(OverflowError, socket.CMSG_SPACE, toobig)
self.assertRaises(OverflowError, socket.CMSG_SPACE, sys.maxsize)
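# Illustrative values (platform-dependent; e.g. 64-bit Linux, where
# sizeof(struct cmsghdr) == 16): CMSG_LEN(4) == 20 is the header plus
# payload with no trailing padding, while CMSG_SPACE(4) == 24 rounds
# the payload up to 8-byte alignment.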
class SCMRightsTest(SendrecvmsgServerTimeoutBase):
# Tests for file descriptor passing on Unix-domain sockets.
# Invalid file descriptor value that's unlikely to evaluate to a
# real FD even if one of its bytes is replaced with a different
# value (which shouldn't actually happen).
badfd = -0x5555
def newFDs(self, n):
# Return a list of n file descriptors for newly-created files
# containing their list indices as ASCII numbers.
fds = []
for i in range(n):
fd, path = tempfile.mkstemp()
self.addCleanup(os.unlink, path)
self.addCleanup(os.close, fd)
os.write(fd, str(i).encode())
fds.append(fd)
return fds
def checkFDs(self, fds):
# Check that the file descriptors in the given list contain
# their correct list indices as ASCII numbers.
for n, fd in enumerate(fds):
os.lseek(fd, 0, os.SEEK_SET)
self.assertEqual(os.read(fd, 1024), str(n).encode())
def registerRecvmsgResult(self, result):
self.addCleanup(self.closeRecvmsgFDs, result)
def closeRecvmsgFDs(self, recvmsg_result):
# Close all file descriptors specified in the ancillary data
# of the given return value from recvmsg() or recvmsg_into().
for cmsg_level, cmsg_type, cmsg_data in recvmsg_result[1]:
if (cmsg_level == socket.SOL_SOCKET and
cmsg_type == socket.SCM_RIGHTS):
fds = array.array("i")
fds.frombytes(cmsg_data[:
len(cmsg_data) - (len(cmsg_data) % fds.itemsize)])
for fd in fds:
os.close(fd)
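# The slice keeps only whole native ints: array.frombytes() raises
# ValueError if the buffer length is not a multiple of itemsize, and
# a truncating receive can legitimately cut cmsg_data mid-integer.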
def createAndSendFDs(self, n):
# Send n new file descriptors created by newFDs() to the
# server, with the constant MSG as the non-ancillary data.
self.assertEqual(
self.sendmsgToServer([MSG],
[(socket.SOL_SOCKET,
socket.SCM_RIGHTS,
array.array("i", self.newFDs(n)))]),
len(MSG))
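# Wire format sketch: the descriptors travel as one SCM_RIGHTS
# control message whose cmsg_data is the native int array, e.g.
# array.array("i", [fd0, fd1]).tobytes() for two FDs; the kernel
# installs duplicates of each descriptor in the receiving process.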
def checkRecvmsgFDs(self, numfds, result, maxcmsgs=1, ignoreflags=0):
# Check that constant MSG was received with numfds file
# descriptors in a maximum of maxcmsgs control messages (which
# must contain only complete integers). By default, check
# that MSG_CTRUNC is unset, but ignore any flags in
# ignoreflags.
msg, ancdata, flags, addr = result
self.assertEqual(msg, MSG)
self.checkRecvmsgAddress(addr, self.cli_addr)
self.checkFlags(flags, eor=True, checkunset=socket.MSG_CTRUNC,
ignore=ignoreflags)
self.assertIsInstance(ancdata, list)
self.assertLessEqual(len(ancdata), maxcmsgs)
fds = array.array("i")
for item in ancdata:
self.assertIsInstance(item, tuple)
cmsg_level, cmsg_type, cmsg_data = item
self.assertEqual(cmsg_level, socket.SOL_SOCKET)
self.assertEqual(cmsg_type, socket.SCM_RIGHTS)
self.assertIsInstance(cmsg_data, bytes)
self.assertEqual(len(cmsg_data) % SIZEOF_INT, 0)
fds.frombytes(cmsg_data)
self.assertEqual(len(fds), numfds)
self.checkFDs(fds)
def testFDPassSimple(self):
# Pass a single FD (array read from bytes object).
self.checkRecvmsgFDs(1, self.doRecvmsg(self.serv_sock,
len(MSG), 10240))
def _testFDPassSimple(self):
self.assertEqual(
self.sendmsgToServer(
[MSG],
[(socket.SOL_SOCKET,
socket.SCM_RIGHTS,
array.array("i", self.newFDs(1)).tobytes())]),
len(MSG))
def testMultipleFDPass(self):
# Pass multiple FDs in a single array.
self.checkRecvmsgFDs(4, self.doRecvmsg(self.serv_sock,
len(MSG), 10240))
def _testMultipleFDPass(self):
self.createAndSendFDs(4)
@requireAttrs(socket, "CMSG_SPACE")
def testFDPassCMSG_SPACE(self):
# Test using CMSG_SPACE() to calculate ancillary buffer size.
self.checkRecvmsgFDs(
4, self.doRecvmsg(self.serv_sock, len(MSG),
socket.CMSG_SPACE(4 * SIZEOF_INT)))
@testFDPassCMSG_SPACE.client_skip
def _testFDPassCMSG_SPACE(self):
self.createAndSendFDs(4)
def testFDPassCMSG_LEN(self):
# Test using CMSG_LEN() to calculate ancillary buffer size.
self.checkRecvmsgFDs(1,
self.doRecvmsg(self.serv_sock, len(MSG),
socket.CMSG_LEN(4 * SIZEOF_INT)),
# RFC 3542 says implementations may set
# MSG_CTRUNC if there isn't enough space
# for trailing padding.
ignoreflags=socket.MSG_CTRUNC)
def _testFDPassCMSG_LEN(self):
self.createAndSendFDs(1)
@unittest.skipIf(sys.platform == "darwin", "skipping, see issue #12958")
@requireAttrs(socket, "CMSG_SPACE")
def testFDPassSeparate(self):
# Pass two FDs in two separate arrays. Arrays may be combined
# into a single control message by the OS.
self.checkRecvmsgFDs(2,
self.doRecvmsg(self.serv_sock, len(MSG), 10240),
maxcmsgs=2)
@testFDPassSeparate.client_skip
@unittest.skipIf(sys.platform == "darwin", "skipping, see issue #12958")
def _testFDPassSeparate(self):
fd0, fd1 = self.newFDs(2)
self.assertEqual(
self.sendmsgToServer([MSG], [(socket.SOL_SOCKET,
socket.SCM_RIGHTS,
array.array("i", [fd0])),
(socket.SOL_SOCKET,
socket.SCM_RIGHTS,
array.array("i", [fd1]))]),
len(MSG))
@unittest.skipIf(sys.platform == "darwin", "skipping, see issue #12958")
@requireAttrs(socket, "CMSG_SPACE")
def testFDPassSeparateMinSpace(self):
# Pass two FDs in two separate arrays, receiving them into the
# minimum space for two arrays.
self.checkRecvmsgFDs(2,
self.doRecvmsg(self.serv_sock, len(MSG),
socket.CMSG_SPACE(SIZEOF_INT) +
socket.CMSG_LEN(SIZEOF_INT)),
maxcmsgs=2, ignoreflags=socket.MSG_CTRUNC)
@testFDPassSeparateMinSpace.client_skip
@unittest.skipIf(sys.platform == "darwin", "skipping, see issue #12958")
def _testFDPassSeparateMinSpace(self):
fd0, fd1 = self.newFDs(2)
self.assertEqual(
self.sendmsgToServer([MSG], [(socket.SOL_SOCKET,
socket.SCM_RIGHTS,
array.array("i", [fd0])),
(socket.SOL_SOCKET,
socket.SCM_RIGHTS,
array.array("i", [fd1]))]),
len(MSG))
def sendAncillaryIfPossible(self, msg, ancdata):
# Try to send msg and ancdata to server, but if the system
# call fails, just send msg with no ancillary data.
try:
nbytes = self.sendmsgToServer([msg], ancdata)
except OSError as e:
# Check that it was the system call that failed
self.assertIsInstance(e.errno, int)
nbytes = self.sendmsgToServer([msg])
self.assertEqual(nbytes, len(msg))
def testFDPassEmpty(self):
# Try to pass an empty FD array. Can receive either no array
# or an empty array.
self.checkRecvmsgFDs(0, self.doRecvmsg(self.serv_sock,
len(MSG), 10240),
ignoreflags=socket.MSG_CTRUNC)
def _testFDPassEmpty(self):
self.sendAncillaryIfPossible(MSG, [(socket.SOL_SOCKET,
socket.SCM_RIGHTS,
b"")])
def testFDPassPartialInt(self):
# Try to pass a truncated FD array.
msg, ancdata, flags, addr = self.doRecvmsg(self.serv_sock,
len(MSG), 10240)
self.assertEqual(msg, MSG)
self.checkRecvmsgAddress(addr, self.cli_addr)
self.checkFlags(flags, eor=True, ignore=socket.MSG_CTRUNC)
self.assertLessEqual(len(ancdata), 1)
for cmsg_level, cmsg_type, cmsg_data in ancdata:
self.assertEqual(cmsg_level, socket.SOL_SOCKET)
self.assertEqual(cmsg_type, socket.SCM_RIGHTS)
self.assertLess(len(cmsg_data), SIZEOF_INT)
def _testFDPassPartialInt(self):
self.sendAncillaryIfPossible(
MSG,
[(socket.SOL_SOCKET,
socket.SCM_RIGHTS,
array.array("i", [self.badfd]).tobytes()[:-1])])
@requireAttrs(socket, "CMSG_SPACE")
def testFDPassPartialIntInMiddle(self):
# Try to pass two FD arrays, the first of which is truncated.
msg, ancdata, flags, addr = self.doRecvmsg(self.serv_sock,
len(MSG), 10240)
self.assertEqual(msg, MSG)
self.checkRecvmsgAddress(addr, self.cli_addr)
self.checkFlags(flags, eor=True, ignore=socket.MSG_CTRUNC)
self.assertLessEqual(len(ancdata), 2)
fds = array.array("i")
# Arrays may have been combined in a single control message
for cmsg_level, cmsg_type, cmsg_data in ancdata:
self.assertEqual(cmsg_level, socket.SOL_SOCKET)
self.assertEqual(cmsg_type, socket.SCM_RIGHTS)
fds.frombytes(cmsg_data[:
len(cmsg_data) - (len(cmsg_data) % fds.itemsize)])
self.assertLessEqual(len(fds), 2)
self.checkFDs(fds)
@testFDPassPartialIntInMiddle.client_skip
def _testFDPassPartialIntInMiddle(self):
fd0, fd1 = self.newFDs(2)
self.sendAncillaryIfPossible(
MSG,
[(socket.SOL_SOCKET,
socket.SCM_RIGHTS,
array.array("i", [fd0, self.badfd]).tobytes()[:-1]),
(socket.SOL_SOCKET,
socket.SCM_RIGHTS,
array.array("i", [fd1]))])
def checkTruncatedHeader(self, result, ignoreflags=0):
# Check that no ancillary data items are returned when data is
# truncated inside the cmsghdr structure.
msg, ancdata, flags, addr = result
self.assertEqual(msg, MSG)
self.checkRecvmsgAddress(addr, self.cli_addr)
self.assertEqual(ancdata, [])
self.checkFlags(flags, eor=True, checkset=socket.MSG_CTRUNC,
ignore=ignoreflags)
def testCmsgTruncNoBufSize(self):
# Check that no ancillary data is received when no buffer size
# is specified.
self.checkTruncatedHeader(self.doRecvmsg(self.serv_sock, len(MSG)),
# BSD seems to set MSG_CTRUNC only
# if an item has been partially
# received.
ignoreflags=socket.MSG_CTRUNC)
def _testCmsgTruncNoBufSize(self):
self.createAndSendFDs(1)
def testCmsgTrunc0(self):
# Check that no ancillary data is received when buffer size is 0.
self.checkTruncatedHeader(self.doRecvmsg(self.serv_sock, len(MSG), 0),
ignoreflags=socket.MSG_CTRUNC)
def _testCmsgTrunc0(self):
self.createAndSendFDs(1)
# Check that no ancillary data is returned for various non-zero
# (but still too small) buffer sizes.
def testCmsgTrunc1(self):
self.checkTruncatedHeader(self.doRecvmsg(self.serv_sock, len(MSG), 1))
def _testCmsgTrunc1(self):
self.createAndSendFDs(1)
def testCmsgTrunc2Int(self):
# The cmsghdr structure has at least three members, two of
# which are ints, so we still shouldn't see any ancillary
# data.
self.checkTruncatedHeader(self.doRecvmsg(self.serv_sock, len(MSG),
SIZEOF_INT * 2))
def _testCmsgTrunc2Int(self):
self.createAndSendFDs(1)
def testCmsgTruncLen0Minus1(self):
self.checkTruncatedHeader(self.doRecvmsg(self.serv_sock, len(MSG),
socket.CMSG_LEN(0) - 1))
def _testCmsgTruncLen0Minus1(self):
self.createAndSendFDs(1)
# The following tests try to truncate the control message in the
# middle of the FD array.
def checkTruncatedArray(self, ancbuf, maxdata, mindata=0):
# Check that file descriptor data is truncated to between
# mindata and maxdata bytes when received with buffer size
# ancbuf, and that any complete file descriptor numbers are
# valid.
msg, ancdata, flags, addr = self.doRecvmsg(self.serv_sock,
len(MSG), ancbuf)
self.assertEqual(msg, MSG)
self.checkRecvmsgAddress(addr, self.cli_addr)
self.checkFlags(flags, eor=True, checkset=socket.MSG_CTRUNC)
if mindata == 0 and ancdata == []:
return
self.assertEqual(len(ancdata), 1)
cmsg_level, cmsg_type, cmsg_data = ancdata[0]
self.assertEqual(cmsg_level, socket.SOL_SOCKET)
self.assertEqual(cmsg_type, socket.SCM_RIGHTS)
self.assertGreaterEqual(len(cmsg_data), mindata)
self.assertLessEqual(len(cmsg_data), maxdata)
fds = array.array("i")
fds.frombytes(cmsg_data[:
len(cmsg_data) - (len(cmsg_data) % fds.itemsize)])
self.checkFDs(fds)
def testCmsgTruncLen0(self):
self.checkTruncatedArray(ancbuf=socket.CMSG_LEN(0), maxdata=0)
def _testCmsgTruncLen0(self):
self.createAndSendFDs(1)
def testCmsgTruncLen0Plus1(self):
self.checkTruncatedArray(ancbuf=socket.CMSG_LEN(0) + 1, maxdata=1)
def _testCmsgTruncLen0Plus1(self):
self.createAndSendFDs(2)
def testCmsgTruncLen1(self):
self.checkTruncatedArray(ancbuf=socket.CMSG_LEN(SIZEOF_INT),
maxdata=SIZEOF_INT)
def _testCmsgTruncLen1(self):
self.createAndSendFDs(2)
def testCmsgTruncLen2Minus1(self):
self.checkTruncatedArray(ancbuf=socket.CMSG_LEN(2 * SIZEOF_INT) - 1,
maxdata=(2 * SIZEOF_INT) - 1)
def _testCmsgTruncLen2Minus1(self):
self.createAndSendFDs(2)
class RFC3542AncillaryTest(SendrecvmsgServerTimeoutBase):
# Test sendmsg() and recvmsg[_into]() using the ancillary data
# features of the RFC 3542 Advanced Sockets API for IPv6.
# Currently we can only handle certain data items (e.g. traffic
# class, hop limit, MTU discovery and fragmentation settings)
# without resorting to unportable means such as the struct module,
# but the tests here are aimed at testing the ancillary data
# handling in sendmsg() and recvmsg() rather than the IPv6 API
# itself.
# Test value to use when setting hop limit of packet
hop_limit = 2
# Test value to use when setting traffic class of packet.
# -1 means "use kernel default".
traffic_class = -1
def ancillaryMapping(self, ancdata):
# Given ancillary data list ancdata, return a mapping from
# pairs (cmsg_level, cmsg_type) to corresponding cmsg_data.
# Check that no (level, type) pair appears more than once.
d = {}
for cmsg_level, cmsg_type, cmsg_data in ancdata:
self.assertNotIn((cmsg_level, cmsg_type), d)
d[(cmsg_level, cmsg_type)] = cmsg_data
return d
def checkHopLimit(self, ancbufsize, maxhop=255, ignoreflags=0):
# Receive hop limit into ancbufsize bytes of ancillary data
# space. Check that data is MSG, ancillary data is not
# truncated (but ignore any flags in ignoreflags), and hop
# limit is between 0 and maxhop inclusive.
self.serv_sock.setsockopt(socket.IPPROTO_IPV6,
socket.IPV6_RECVHOPLIMIT, 1)
self.misc_event.set()
msg, ancdata, flags, addr = self.doRecvmsg(self.serv_sock,
len(MSG), ancbufsize)
self.assertEqual(msg, MSG)
self.checkRecvmsgAddress(addr, self.cli_addr)
self.checkFlags(flags, eor=True, checkunset=socket.MSG_CTRUNC,
ignore=ignoreflags)
self.assertEqual(len(ancdata), 1)
self.assertIsInstance(ancdata[0], tuple)
cmsg_level, cmsg_type, cmsg_data = ancdata[0]
self.assertEqual(cmsg_level, socket.IPPROTO_IPV6)
self.assertEqual(cmsg_type, socket.IPV6_HOPLIMIT)
self.assertIsInstance(cmsg_data, bytes)
self.assertEqual(len(cmsg_data), SIZEOF_INT)
a = array.array("i")
a.frombytes(cmsg_data)
self.assertGreaterEqual(a[0], 0)
self.assertLessEqual(a[0], maxhop)
@requireAttrs(socket, "IPV6_RECVHOPLIMIT", "IPV6_HOPLIMIT")
def testRecvHopLimit(self):
# Test receiving the packet hop limit as ancillary data.
self.checkHopLimit(ancbufsize=10240)
@testRecvHopLimit.client_skip
def _testRecvHopLimit(self):
# Need to wait until server has asked to receive ancillary
# data, as implementations are not required to buffer it
# otherwise.
self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))
self.sendToServer(MSG)
@requireAttrs(socket, "CMSG_SPACE", "IPV6_RECVHOPLIMIT", "IPV6_HOPLIMIT")
def testRecvHopLimitCMSG_SPACE(self):
# Test receiving hop limit, using CMSG_SPACE to calculate buffer size.
self.checkHopLimit(ancbufsize=socket.CMSG_SPACE(SIZEOF_INT))
@testRecvHopLimitCMSG_SPACE.client_skip
def _testRecvHopLimitCMSG_SPACE(self):
self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))
self.sendToServer(MSG)
# Could test receiving into buffer sized using CMSG_LEN, but RFC
# 3542 says portable applications must provide space for trailing
# padding. Implementations may set MSG_CTRUNC if there isn't
# enough space for the padding.
@requireAttrs(socket.socket, "sendmsg")
@requireAttrs(socket, "IPV6_RECVHOPLIMIT", "IPV6_HOPLIMIT")
def testSetHopLimit(self):
# Test setting hop limit on outgoing packet and receiving it
# at the other end.
self.checkHopLimit(ancbufsize=10240, maxhop=self.hop_limit)
@testSetHopLimit.client_skip
def _testSetHopLimit(self):
self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))
self.assertEqual(
self.sendmsgToServer([MSG],
[(socket.IPPROTO_IPV6, socket.IPV6_HOPLIMIT,
array.array("i", [self.hop_limit]))]),
len(MSG))
def checkTrafficClassAndHopLimit(self, ancbufsize, maxhop=255,
ignoreflags=0):
# Receive traffic class and hop limit into ancbufsize bytes of
# ancillary data space. Check that data is MSG, ancillary
# data is not truncated (but ignore any flags in ignoreflags),
# and traffic class and hop limit are in range (hop limit no
# more than maxhop).
self.serv_sock.setsockopt(socket.IPPROTO_IPV6,
socket.IPV6_RECVHOPLIMIT, 1)
self.serv_sock.setsockopt(socket.IPPROTO_IPV6,
socket.IPV6_RECVTCLASS, 1)
self.misc_event.set()
msg, ancdata, flags, addr = self.doRecvmsg(self.serv_sock,
len(MSG), ancbufsize)
self.assertEqual(msg, MSG)
self.checkRecvmsgAddress(addr, self.cli_addr)
self.checkFlags(flags, eor=True, checkunset=socket.MSG_CTRUNC,
ignore=ignoreflags)
self.assertEqual(len(ancdata), 2)
ancmap = self.ancillaryMapping(ancdata)
tcdata = ancmap[(socket.IPPROTO_IPV6, socket.IPV6_TCLASS)]
self.assertEqual(len(tcdata), SIZEOF_INT)
a = array.array("i")
a.frombytes(tcdata)
self.assertGreaterEqual(a[0], 0)
self.assertLessEqual(a[0], 255)
hldata = ancmap[(socket.IPPROTO_IPV6, socket.IPV6_HOPLIMIT)]
self.assertEqual(len(hldata), SIZEOF_INT)
a = array.array("i")
a.frombytes(hldata)
self.assertGreaterEqual(a[0], 0)
self.assertLessEqual(a[0], maxhop)
@requireAttrs(socket, "IPV6_RECVHOPLIMIT", "IPV6_HOPLIMIT",
"IPV6_RECVTCLASS", "IPV6_TCLASS")
def testRecvTrafficClassAndHopLimit(self):
# Test receiving traffic class and hop limit as ancillary data.
self.checkTrafficClassAndHopLimit(ancbufsize=10240)
@testRecvTrafficClassAndHopLimit.client_skip
def _testRecvTrafficClassAndHopLimit(self):
self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))
self.sendToServer(MSG)
@requireAttrs(socket, "CMSG_SPACE", "IPV6_RECVHOPLIMIT", "IPV6_HOPLIMIT",
"IPV6_RECVTCLASS", "IPV6_TCLASS")
def testRecvTrafficClassAndHopLimitCMSG_SPACE(self):
# Test receiving traffic class and hop limit, using
# CMSG_SPACE() to calculate buffer size.
self.checkTrafficClassAndHopLimit(
ancbufsize=socket.CMSG_SPACE(SIZEOF_INT) * 2)
@testRecvTrafficClassAndHopLimitCMSG_SPACE.client_skip
def _testRecvTrafficClassAndHopLimitCMSG_SPACE(self):
self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))
self.sendToServer(MSG)
@requireAttrs(socket.socket, "sendmsg")
@requireAttrs(socket, "CMSG_SPACE", "IPV6_RECVHOPLIMIT", "IPV6_HOPLIMIT",
"IPV6_RECVTCLASS", "IPV6_TCLASS")
def testSetTrafficClassAndHopLimit(self):
# Test setting traffic class and hop limit on outgoing packet,
# and receiving them at the other end.
self.checkTrafficClassAndHopLimit(ancbufsize=10240,
maxhop=self.hop_limit)
@testSetTrafficClassAndHopLimit.client_skip
def _testSetTrafficClassAndHopLimit(self):
self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))
self.assertEqual(
self.sendmsgToServer([MSG],
[(socket.IPPROTO_IPV6, socket.IPV6_TCLASS,
array.array("i", [self.traffic_class])),
(socket.IPPROTO_IPV6, socket.IPV6_HOPLIMIT,
array.array("i", [self.hop_limit]))]),
len(MSG))
@requireAttrs(socket.socket, "sendmsg")
@requireAttrs(socket, "CMSG_SPACE", "IPV6_RECVHOPLIMIT", "IPV6_HOPLIMIT",
"IPV6_RECVTCLASS", "IPV6_TCLASS")
def testOddCmsgSize(self):
# Try to send ancillary data with first item one byte too
# long. Fall back to sending with correct size if this fails,
# and check that second item was handled correctly.
self.checkTrafficClassAndHopLimit(ancbufsize=10240,
maxhop=self.hop_limit)
@testOddCmsgSize.client_skip
def _testOddCmsgSize(self):
self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))
try:
nbytes = self.sendmsgToServer(
[MSG],
[(socket.IPPROTO_IPV6, socket.IPV6_TCLASS,
array.array("i", [self.traffic_class]).tobytes() + b"\x00"),
(socket.IPPROTO_IPV6, socket.IPV6_HOPLIMIT,
array.array("i", [self.hop_limit]))])
except OSError as e:
self.assertIsInstance(e.errno, int)
nbytes = self.sendmsgToServer(
[MSG],
[(socket.IPPROTO_IPV6, socket.IPV6_TCLASS,
array.array("i", [self.traffic_class])),
(socket.IPPROTO_IPV6, socket.IPV6_HOPLIMIT,
array.array("i", [self.hop_limit]))])
self.assertEqual(nbytes, len(MSG))
# Tests for proper handling of truncated ancillary data
def checkHopLimitTruncatedHeader(self, ancbufsize, ignoreflags=0):
# Receive hop limit into ancbufsize bytes of ancillary data
# space, which should be too small to contain the ancillary
# data header (if ancbufsize is None, pass no second argument
# to recvmsg()). Check that data is MSG, MSG_CTRUNC is set
# (unless included in ignoreflags), and no ancillary data is
# returned.
self.serv_sock.setsockopt(socket.IPPROTO_IPV6,
socket.IPV6_RECVHOPLIMIT, 1)
self.misc_event.set()
args = () if ancbufsize is None else (ancbufsize,)
msg, ancdata, flags, addr = self.doRecvmsg(self.serv_sock,
len(MSG), *args)
self.assertEqual(msg, MSG)
self.checkRecvmsgAddress(addr, self.cli_addr)
self.assertEqual(ancdata, [])
self.checkFlags(flags, eor=True, checkset=socket.MSG_CTRUNC,
ignore=ignoreflags)
@requireAttrs(socket, "IPV6_RECVHOPLIMIT", "IPV6_HOPLIMIT")
def testCmsgTruncNoBufSize(self):
# Check that no ancillary data is received when no ancillary
# buffer size is provided.
self.checkHopLimitTruncatedHeader(ancbufsize=None,
# BSD seems to set
# MSG_CTRUNC only if an item
# has been partially
# received.
ignoreflags=socket.MSG_CTRUNC)
@testCmsgTruncNoBufSize.client_skip
def _testCmsgTruncNoBufSize(self):
self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))
self.sendToServer(MSG)
@requireAttrs(socket, "IPV6_RECVHOPLIMIT", "IPV6_HOPLIMIT")
def testSingleCmsgTrunc0(self):
# Check that no ancillary data is received when ancillary
# buffer size is zero.
self.checkHopLimitTruncatedHeader(ancbufsize=0,
ignoreflags=socket.MSG_CTRUNC)
@testSingleCmsgTrunc0.client_skip
def _testSingleCmsgTrunc0(self):
self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))
self.sendToServer(MSG)
# Check that no ancillary data is returned for various non-zero
# (but still too small) buffer sizes.
@requireAttrs(socket, "IPV6_RECVHOPLIMIT", "IPV6_HOPLIMIT")
def testSingleCmsgTrunc1(self):
self.checkHopLimitTruncatedHeader(ancbufsize=1)
@testSingleCmsgTrunc1.client_skip
def _testSingleCmsgTrunc1(self):
self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))
self.sendToServer(MSG)
@requireAttrs(socket, "IPV6_RECVHOPLIMIT", "IPV6_HOPLIMIT")
def testSingleCmsgTrunc2Int(self):
self.checkHopLimitTruncatedHeader(ancbufsize=2 * SIZEOF_INT)
@testSingleCmsgTrunc2Int.client_skip
def _testSingleCmsgTrunc2Int(self):
self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))
self.sendToServer(MSG)
@requireAttrs(socket, "IPV6_RECVHOPLIMIT", "IPV6_HOPLIMIT")
def testSingleCmsgTruncLen0Minus1(self):
self.checkHopLimitTruncatedHeader(ancbufsize=socket.CMSG_LEN(0) - 1)
@testSingleCmsgTruncLen0Minus1.client_skip
def _testSingleCmsgTruncLen0Minus1(self):
self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))
self.sendToServer(MSG)
@requireAttrs(socket, "IPV6_RECVHOPLIMIT", "IPV6_HOPLIMIT")
def testSingleCmsgTruncInData(self):
# Test truncation of a control message inside its associated
# data. The message may be returned with its data truncated,
# or not returned at all.
self.serv_sock.setsockopt(socket.IPPROTO_IPV6,
socket.IPV6_RECVHOPLIMIT, 1)
self.misc_event.set()
msg, ancdata, flags, addr = self.doRecvmsg(
self.serv_sock, len(MSG), socket.CMSG_LEN(SIZEOF_INT) - 1)
self.assertEqual(msg, MSG)
self.checkRecvmsgAddress(addr, self.cli_addr)
self.checkFlags(flags, eor=True, checkset=socket.MSG_CTRUNC)
self.assertLessEqual(len(ancdata), 1)
if ancdata:
cmsg_level, cmsg_type, cmsg_data = ancdata[0]
self.assertEqual(cmsg_level, socket.IPPROTO_IPV6)
self.assertEqual(cmsg_type, socket.IPV6_HOPLIMIT)
self.assertLess(len(cmsg_data), SIZEOF_INT)
@testSingleCmsgTruncInData.client_skip
def _testSingleCmsgTruncInData(self):
self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))
self.sendToServer(MSG)
def checkTruncatedSecondHeader(self, ancbufsize, ignoreflags=0):
# Receive traffic class and hop limit into ancbufsize bytes of
# ancillary data space, which should be large enough to
# contain the first item, but too small to contain the header
# of the second. Check that data is MSG, MSG_CTRUNC is set
# (unless included in ignoreflags), and only one ancillary
# data item is returned.
self.serv_sock.setsockopt(socket.IPPROTO_IPV6,
socket.IPV6_RECVHOPLIMIT, 1)
self.serv_sock.setsockopt(socket.IPPROTO_IPV6,
socket.IPV6_RECVTCLASS, 1)
self.misc_event.set()
msg, ancdata, flags, addr = self.doRecvmsg(self.serv_sock,
len(MSG), ancbufsize)
self.assertEqual(msg, MSG)
self.checkRecvmsgAddress(addr, self.cli_addr)
self.checkFlags(flags, eor=True, checkset=socket.MSG_CTRUNC,
ignore=ignoreflags)
self.assertEqual(len(ancdata), 1)
cmsg_level, cmsg_type, cmsg_data = ancdata[0]
self.assertEqual(cmsg_level, socket.IPPROTO_IPV6)
self.assertIn(cmsg_type, {socket.IPV6_TCLASS, socket.IPV6_HOPLIMIT})
self.assertEqual(len(cmsg_data), SIZEOF_INT)
a = array.array("i")
a.frombytes(cmsg_data)
self.assertGreaterEqual(a[0], 0)
self.assertLessEqual(a[0], 255)
# Try the above test with various buffer sizes.
@requireAttrs(socket, "CMSG_SPACE", "IPV6_RECVHOPLIMIT", "IPV6_HOPLIMIT",
"IPV6_RECVTCLASS", "IPV6_TCLASS")
def testSecondCmsgTrunc0(self):
self.checkTruncatedSecondHeader(socket.CMSG_SPACE(SIZEOF_INT),
ignoreflags=socket.MSG_CTRUNC)
@testSecondCmsgTrunc0.client_skip
def _testSecondCmsgTrunc0(self):
self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))
self.sendToServer(MSG)
@requireAttrs(socket, "CMSG_SPACE", "IPV6_RECVHOPLIMIT", "IPV6_HOPLIMIT",
"IPV6_RECVTCLASS", "IPV6_TCLASS")
def testSecondCmsgTrunc1(self):
self.checkTruncatedSecondHeader(socket.CMSG_SPACE(SIZEOF_INT) + 1)
@testSecondCmsgTrunc1.client_skip
def _testSecondCmsgTrunc1(self):
self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))
self.sendToServer(MSG)
@requireAttrs(socket, "CMSG_SPACE", "IPV6_RECVHOPLIMIT", "IPV6_HOPLIMIT",
"IPV6_RECVTCLASS", "IPV6_TCLASS")
def testSecondCmsgTrunc2Int(self):
self.checkTruncatedSecondHeader(socket.CMSG_SPACE(SIZEOF_INT) +
2 * SIZEOF_INT)
@testSecondCmsgTrunc2Int.client_skip
def _testSecondCmsgTrunc2Int(self):
self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))
self.sendToServer(MSG)
@requireAttrs(socket, "CMSG_SPACE", "IPV6_RECVHOPLIMIT", "IPV6_HOPLIMIT",
"IPV6_RECVTCLASS", "IPV6_TCLASS")
def testSecondCmsgTruncLen0Minus1(self):
self.checkTruncatedSecondHeader(socket.CMSG_SPACE(SIZEOF_INT) +
socket.CMSG_LEN(0) - 1)
@testSecondCmsgTruncLen0Minus1.client_skip
def _testSecondCmsgTruncLen0Minus1(self):
self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))
self.sendToServer(MSG)
@requireAttrs(socket, "CMSG_SPACE", "IPV6_RECVHOPLIMIT", "IPV6_HOPLIMIT",
"IPV6_RECVTCLASS", "IPV6_TCLASS")
    def testSecondCmsgTruncInData(self):
# Test truncation of the second of two control messages inside
# its associated data.
self.serv_sock.setsockopt(socket.IPPROTO_IPV6,
socket.IPV6_RECVHOPLIMIT, 1)
self.serv_sock.setsockopt(socket.IPPROTO_IPV6,
socket.IPV6_RECVTCLASS, 1)
self.misc_event.set()
msg, ancdata, flags, addr = self.doRecvmsg(
self.serv_sock, len(MSG),
socket.CMSG_SPACE(SIZEOF_INT) + socket.CMSG_LEN(SIZEOF_INT) - 1)
self.assertEqual(msg, MSG)
self.checkRecvmsgAddress(addr, self.cli_addr)
self.checkFlags(flags, eor=True, checkset=socket.MSG_CTRUNC)
cmsg_types = {socket.IPV6_TCLASS, socket.IPV6_HOPLIMIT}
cmsg_level, cmsg_type, cmsg_data = ancdata.pop(0)
self.assertEqual(cmsg_level, socket.IPPROTO_IPV6)
cmsg_types.remove(cmsg_type)
self.assertEqual(len(cmsg_data), SIZEOF_INT)
a = array.array("i")
a.frombytes(cmsg_data)
self.assertGreaterEqual(a[0], 0)
self.assertLessEqual(a[0], 255)
if ancdata:
cmsg_level, cmsg_type, cmsg_data = ancdata.pop(0)
self.assertEqual(cmsg_level, socket.IPPROTO_IPV6)
cmsg_types.remove(cmsg_type)
self.assertLess(len(cmsg_data), SIZEOF_INT)
self.assertEqual(ancdata, [])
    @testSecondCmsgTruncInData.client_skip
    def _testSecondCmsgTruncInData(self):
self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))
self.sendToServer(MSG)
# Derive concrete test classes for different socket types.
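# Each concrete class below combines a mixin that supplies the test
# methods (e.g. RecvmsgTests), a mixin that wires up the threaded
# client/server pair, and a transport base class (UDP, TCP, SCTP or Unix)
# that creates the sockets.  The @requireAttrs/@requireSocket decorators
# skip a whole class when the platform lacks the required API.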
class SendrecvmsgUDPTestBase(SendrecvmsgDgramFlagsBase,
SendrecvmsgConnectionlessBase,
ThreadedSocketTestMixin, UDPTestBase):
pass
@requireAttrs(socket.socket, "sendmsg")
@unittest.skipUnless(thread, 'Threading required for this test.')
class SendmsgUDPTest(SendmsgConnectionlessTests, SendrecvmsgUDPTestBase):
pass
@requireAttrs(socket.socket, "recvmsg")
@unittest.skipUnless(thread, 'Threading required for this test.')
class RecvmsgUDPTest(RecvmsgTests, SendrecvmsgUDPTestBase):
pass
@requireAttrs(socket.socket, "recvmsg_into")
@unittest.skipUnless(thread, 'Threading required for this test.')
class RecvmsgIntoUDPTest(RecvmsgIntoTests, SendrecvmsgUDPTestBase):
pass
class SendrecvmsgUDP6TestBase(SendrecvmsgDgramFlagsBase,
SendrecvmsgConnectionlessBase,
ThreadedSocketTestMixin, UDP6TestBase):
def checkRecvmsgAddress(self, addr1, addr2):
# Called to compare the received address with the address of
# the peer, ignoring scope ID
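        # (IPv6 sockaddrs are (host, port, flowinfo, scopeid) 4-tuples,
        # so addr[:-1] drops the scope ID, which can legitimately differ
        # between the two ends, e.g. for link-local addresses.)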
self.assertEqual(addr1[:-1], addr2[:-1])
@requireAttrs(socket.socket, "sendmsg")
@unittest.skipUnless(support.IPV6_ENABLED, 'IPv6 required for this test.')
@requireSocket("AF_INET6", "SOCK_DGRAM")
@unittest.skipUnless(thread, 'Threading required for this test.')
class SendmsgUDP6Test(SendmsgConnectionlessTests, SendrecvmsgUDP6TestBase):
pass
@requireAttrs(socket.socket, "recvmsg")
@unittest.skipUnless(support.IPV6_ENABLED, 'IPv6 required for this test.')
@requireSocket("AF_INET6", "SOCK_DGRAM")
@unittest.skipUnless(thread, 'Threading required for this test.')
class RecvmsgUDP6Test(RecvmsgTests, SendrecvmsgUDP6TestBase):
pass
@requireAttrs(socket.socket, "recvmsg_into")
@unittest.skipUnless(support.IPV6_ENABLED, 'IPv6 required for this test.')
@requireSocket("AF_INET6", "SOCK_DGRAM")
@unittest.skipUnless(thread, 'Threading required for this test.')
class RecvmsgIntoUDP6Test(RecvmsgIntoTests, SendrecvmsgUDP6TestBase):
pass
@requireAttrs(socket.socket, "recvmsg")
@unittest.skipUnless(support.IPV6_ENABLED, 'IPv6 required for this test.')
@requireAttrs(socket, "IPPROTO_IPV6")
@requireSocket("AF_INET6", "SOCK_DGRAM")
@unittest.skipUnless(thread, 'Threading required for this test.')
class RecvmsgRFC3542AncillaryUDP6Test(RFC3542AncillaryTest,
SendrecvmsgUDP6TestBase):
pass
@requireAttrs(socket.socket, "recvmsg_into")
@unittest.skipUnless(support.IPV6_ENABLED, 'IPv6 required for this test.')
@requireAttrs(socket, "IPPROTO_IPV6")
@requireSocket("AF_INET6", "SOCK_DGRAM")
@unittest.skipUnless(thread, 'Threading required for this test.')
class RecvmsgIntoRFC3542AncillaryUDP6Test(RecvmsgIntoMixin,
RFC3542AncillaryTest,
SendrecvmsgUDP6TestBase):
pass
class SendrecvmsgTCPTestBase(SendrecvmsgConnectedBase,
ConnectedStreamTestMixin, TCPTestBase):
pass
@requireAttrs(socket.socket, "sendmsg")
@unittest.skipUnless(thread, 'Threading required for this test.')
class SendmsgTCPTest(SendmsgStreamTests, SendrecvmsgTCPTestBase):
pass
@requireAttrs(socket.socket, "recvmsg")
@unittest.skipUnless(thread, 'Threading required for this test.')
class RecvmsgTCPTest(RecvmsgTests, RecvmsgGenericStreamTests,
SendrecvmsgTCPTestBase):
pass
@requireAttrs(socket.socket, "recvmsg_into")
@unittest.skipUnless(thread, 'Threading required for this test.')
class RecvmsgIntoTCPTest(RecvmsgIntoTests, RecvmsgGenericStreamTests,
SendrecvmsgTCPTestBase):
pass
class SendrecvmsgSCTPStreamTestBase(SendrecvmsgSCTPFlagsBase,
SendrecvmsgConnectedBase,
ConnectedStreamTestMixin, SCTPStreamBase):
pass
@requireAttrs(socket.socket, "sendmsg")
@requireSocket("AF_INET", "SOCK_STREAM", "IPPROTO_SCTP")
@unittest.skipUnless(thread, 'Threading required for this test.')
class SendmsgSCTPStreamTest(SendmsgStreamTests, SendrecvmsgSCTPStreamTestBase):
pass
@requireAttrs(socket.socket, "recvmsg")
@requireSocket("AF_INET", "SOCK_STREAM", "IPPROTO_SCTP")
@unittest.skipUnless(thread, 'Threading required for this test.')
class RecvmsgSCTPStreamTest(RecvmsgTests, RecvmsgGenericStreamTests,
SendrecvmsgSCTPStreamTestBase):
def testRecvmsgEOF(self):
try:
super(RecvmsgSCTPStreamTest, self).testRecvmsgEOF()
except OSError as e:
if e.errno != errno.ENOTCONN:
raise
self.skipTest("sporadic ENOTCONN (kernel issue?) - see issue #13876")
@requireAttrs(socket.socket, "recvmsg_into")
@requireSocket("AF_INET", "SOCK_STREAM", "IPPROTO_SCTP")
@unittest.skipUnless(thread, 'Threading required for this test.')
class RecvmsgIntoSCTPStreamTest(RecvmsgIntoTests, RecvmsgGenericStreamTests,
SendrecvmsgSCTPStreamTestBase):
def testRecvmsgEOF(self):
try:
super(RecvmsgIntoSCTPStreamTest, self).testRecvmsgEOF()
except OSError as e:
if e.errno != errno.ENOTCONN:
raise
self.skipTest("sporadic ENOTCONN (kernel issue?) - see issue #13876")
class SendrecvmsgUnixStreamTestBase(SendrecvmsgConnectedBase,
ConnectedStreamTestMixin, UnixStreamBase):
pass
@requireAttrs(socket.socket, "sendmsg")
@requireAttrs(socket, "AF_UNIX")
@unittest.skipUnless(thread, 'Threading required for this test.')
class SendmsgUnixStreamTest(SendmsgStreamTests, SendrecvmsgUnixStreamTestBase):
pass
@requireAttrs(socket.socket, "recvmsg")
@requireAttrs(socket, "AF_UNIX")
@unittest.skipUnless(thread, 'Threading required for this test.')
class RecvmsgUnixStreamTest(RecvmsgTests, RecvmsgGenericStreamTests,
SendrecvmsgUnixStreamTestBase):
pass
@requireAttrs(socket.socket, "recvmsg_into")
@requireAttrs(socket, "AF_UNIX")
@unittest.skipUnless(thread, 'Threading required for this test.')
class RecvmsgIntoUnixStreamTest(RecvmsgIntoTests, RecvmsgGenericStreamTests,
SendrecvmsgUnixStreamTestBase):
pass
@requireAttrs(socket.socket, "sendmsg", "recvmsg")
@requireAttrs(socket, "AF_UNIX", "SOL_SOCKET", "SCM_RIGHTS")
@unittest.skipUnless(thread, 'Threading required for this test.')
class RecvmsgSCMRightsStreamTest(SCMRightsTest, SendrecvmsgUnixStreamTestBase):
pass
@requireAttrs(socket.socket, "sendmsg", "recvmsg_into")
@requireAttrs(socket, "AF_UNIX", "SOL_SOCKET", "SCM_RIGHTS")
@unittest.skipUnless(thread, 'Threading required for this test.')
class RecvmsgIntoSCMRightsStreamTest(RecvmsgIntoMixin, SCMRightsTest,
SendrecvmsgUnixStreamTestBase):
pass
# Test interrupting the interruptible send/receive methods with a
# signal when a timeout is set. These tests avoid having multiple
# threads alive during the test so that the OS cannot deliver the
# signal to the wrong one.
class InterruptedTimeoutBase(unittest.TestCase):
# Base class for interrupted send/receive tests. Installs an
# empty handler for SIGALRM and removes it on teardown, along with
# any scheduled alarms.
def setUp(self):
super().setUp()
orig_alrm_handler = signal.signal(signal.SIGALRM,
lambda signum, frame: None)
self.addCleanup(signal.signal, signal.SIGALRM, orig_alrm_handler)
self.addCleanup(self.setAlarm, 0)
# Timeout for socket operations
timeout = 4.0
# Provide setAlarm() method to schedule delivery of SIGALRM after
# given number of seconds, or cancel it if zero, and an
# appropriate time value to use. Use setitimer() if available.
if hasattr(signal, "setitimer"):
alarm_time = 0.05
def setAlarm(self, seconds):
signal.setitimer(signal.ITIMER_REAL, seconds)
else:
# Old systems may deliver the alarm up to one second early
alarm_time = 2
def setAlarm(self, seconds):
signal.alarm(seconds)
# Require siginterrupt() in order to ensure that system calls are
# interrupted by default.
@requireAttrs(signal, "siginterrupt")
@unittest.skipUnless(hasattr(signal, "alarm") or hasattr(signal, "setitimer"),
"Don't have signal.alarm or signal.setitimer")
class InterruptedRecvTimeoutTest(InterruptedTimeoutBase, UDPTestBase):
# Test interrupting the recv*() methods with signals when a
# timeout is set.
def setUp(self):
super().setUp()
self.serv.settimeout(self.timeout)
def checkInterruptedRecv(self, func, *args, **kwargs):
# Check that func(*args, **kwargs) raises OSError with an
# errno of EINTR when interrupted by a signal.
self.setAlarm(self.alarm_time)
with self.assertRaises(OSError) as cm:
func(*args, **kwargs)
self.assertNotIsInstance(cm.exception, socket.timeout)
self.assertEqual(cm.exception.errno, errno.EINTR)
def testInterruptedRecvTimeout(self):
self.checkInterruptedRecv(self.serv.recv, 1024)
def testInterruptedRecvIntoTimeout(self):
self.checkInterruptedRecv(self.serv.recv_into, bytearray(1024))
def testInterruptedRecvfromTimeout(self):
self.checkInterruptedRecv(self.serv.recvfrom, 1024)
def testInterruptedRecvfromIntoTimeout(self):
self.checkInterruptedRecv(self.serv.recvfrom_into, bytearray(1024))
@requireAttrs(socket.socket, "recvmsg")
def testInterruptedRecvmsgTimeout(self):
self.checkInterruptedRecv(self.serv.recvmsg, 1024)
@requireAttrs(socket.socket, "recvmsg_into")
def testInterruptedRecvmsgIntoTimeout(self):
self.checkInterruptedRecv(self.serv.recvmsg_into, [bytearray(1024)])
# Require siginterrupt() in order to ensure that system calls are
# interrupted by default.
@requireAttrs(signal, "siginterrupt")
@unittest.skipUnless(hasattr(signal, "alarm") or hasattr(signal, "setitimer"),
"Don't have signal.alarm or signal.setitimer")
@unittest.skipUnless(thread, 'Threading required for this test.')
class InterruptedSendTimeoutTest(InterruptedTimeoutBase,
ThreadSafeCleanupTestCase,
SocketListeningTestMixin, TCPTestBase):
# Test interrupting the interruptible send*() methods with signals
# when a timeout is set.
def setUp(self):
super().setUp()
self.serv_conn = self.newSocket()
self.addCleanup(self.serv_conn.close)
# Use a thread to complete the connection, but wait for it to
# terminate before running the test, so that there is only one
# thread to accept the signal.
cli_thread = threading.Thread(target=self.doConnect)
cli_thread.start()
self.cli_conn, addr = self.serv.accept()
self.addCleanup(self.cli_conn.close)
cli_thread.join()
self.serv_conn.settimeout(self.timeout)
def doConnect(self):
self.serv_conn.connect(self.serv_addr)
def checkInterruptedSend(self, func, *args, **kwargs):
# Check that func(*args, **kwargs), run in a loop, raises
# OSError with an errno of EINTR when interrupted by a
# signal.
with self.assertRaises(OSError) as cm:
while True:
self.setAlarm(self.alarm_time)
func(*args, **kwargs)
self.assertNotIsInstance(cm.exception, socket.timeout)
self.assertEqual(cm.exception.errno, errno.EINTR)
# Issue #12958: The following tests have problems on OS X prior to 10.7
@support.requires_mac_ver(10, 7)
def testInterruptedSendTimeout(self):
self.checkInterruptedSend(self.serv_conn.send, b"a"*512)
@support.requires_mac_ver(10, 7)
def testInterruptedSendtoTimeout(self):
# Passing an actual address here as Python's wrapper for
# sendto() doesn't allow passing a zero-length one; POSIX
# requires that the address is ignored since the socket is
# connection-mode, however.
self.checkInterruptedSend(self.serv_conn.sendto, b"a"*512,
self.serv_addr)
@support.requires_mac_ver(10, 7)
@requireAttrs(socket.socket, "sendmsg")
def testInterruptedSendmsgTimeout(self):
self.checkInterruptedSend(self.serv_conn.sendmsg, [b"a"*512])
@unittest.skipUnless(thread, 'Threading required for this test.')
class TCPCloserTest(ThreadedTCPSocketTest):
def testClose(self):
conn, addr = self.serv.accept()
conn.close()
sd = self.cli
read, write, err = select.select([sd], [], [], 1.0)
self.assertEqual(read, [sd])
self.assertEqual(sd.recv(1), b'')
# Calling close() many times should be safe.
conn.close()
conn.close()
def _testClose(self):
self.cli.connect((HOST, self.port))
time.sleep(1.0)
@unittest.skipUnless(thread, 'Threading required for this test.')
class BasicSocketPairTest(SocketPairTest):
def __init__(self, methodName='runTest'):
SocketPairTest.__init__(self, methodName=methodName)
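    # socketpair() should return a pair of connected stream sockets:
    # AF_UNIX where the platform provides it, AF_INET otherwise.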
def _check_defaults(self, sock):
self.assertIsInstance(sock, socket.socket)
if hasattr(socket, 'AF_UNIX'):
self.assertEqual(sock.family, socket.AF_UNIX)
else:
self.assertEqual(sock.family, socket.AF_INET)
self.assertEqual(sock.type, socket.SOCK_STREAM)
self.assertEqual(sock.proto, 0)
def _testDefaults(self):
self._check_defaults(self.cli)
def testDefaults(self):
self._check_defaults(self.serv)
def testRecv(self):
msg = self.serv.recv(1024)
self.assertEqual(msg, MSG)
def _testRecv(self):
self.cli.send(MSG)
def testSend(self):
self.serv.send(MSG)
def _testSend(self):
msg = self.cli.recv(1024)
self.assertEqual(msg, MSG)
@unittest.skipUnless(thread, 'Threading required for this test.')
class NonBlockingTCPTests(ThreadedTCPSocketTest):
def __init__(self, methodName='runTest'):
ThreadedTCPSocketTest.__init__(self, methodName=methodName)
def testSetBlocking(self):
# Testing whether set blocking works
self.serv.setblocking(True)
self.assertIsNone(self.serv.gettimeout())
self.serv.setblocking(False)
self.assertEqual(self.serv.gettimeout(), 0.0)
start = time.time()
try:
self.serv.accept()
except OSError:
pass
end = time.time()
self.assertTrue((end - start) < 1.0, "Error setting non-blocking mode.")
def _testSetBlocking(self):
pass
@support.cpython_only
def testSetBlocking_overflow(self):
# Issue 15989
import _testcapi
if _testcapi.UINT_MAX >= _testcapi.ULONG_MAX:
self.skipTest('needs UINT_MAX < ULONG_MAX')
self.serv.setblocking(False)
self.assertEqual(self.serv.gettimeout(), 0.0)
self.serv.setblocking(_testcapi.UINT_MAX + 1)
self.assertIsNone(self.serv.gettimeout())
_testSetBlocking_overflow = support.cpython_only(_testSetBlocking)
@unittest.skipUnless(hasattr(socket, 'SOCK_NONBLOCK'),
'test needs socket.SOCK_NONBLOCK')
@support.requires_linux_version(2, 6, 28)
def testInitNonBlocking(self):
# reinit server socket
self.serv.close()
self.serv = socket.socket(socket.AF_INET, socket.SOCK_STREAM |
socket.SOCK_NONBLOCK)
self.port = support.bind_port(self.serv)
self.serv.listen()
# actual testing
start = time.time()
try:
self.serv.accept()
except OSError:
pass
end = time.time()
self.assertTrue((end - start) < 1.0, "Error creating with non-blocking mode.")
def _testInitNonBlocking(self):
pass
def testInheritFlags(self):
# Issue #7995: when calling accept() on a listening socket with a
# timeout, the resulting socket should not be non-blocking.
self.serv.settimeout(10)
try:
conn, addr = self.serv.accept()
message = conn.recv(len(MSG))
finally:
conn.close()
self.serv.settimeout(None)
def _testInheritFlags(self):
time.sleep(0.1)
self.cli.connect((HOST, self.port))
time.sleep(0.5)
self.cli.send(MSG)
def testAccept(self):
# Testing non-blocking accept
self.serv.setblocking(0)
try:
conn, addr = self.serv.accept()
except OSError:
pass
else:
self.fail("Error trying to do non-blocking accept.")
read, write, err = select.select([self.serv], [], [])
if self.serv in read:
conn, addr = self.serv.accept()
conn.close()
else:
self.fail("Error trying to do accept after select.")
def _testAccept(self):
time.sleep(0.1)
self.cli.connect((HOST, self.port))
def testConnect(self):
# Testing non-blocking connect
conn, addr = self.serv.accept()
conn.close()
def _testConnect(self):
self.cli.settimeout(10)
self.cli.connect((HOST, self.port))
def testRecv(self):
# Testing non-blocking recv
conn, addr = self.serv.accept()
conn.setblocking(0)
try:
msg = conn.recv(len(MSG))
except OSError:
pass
else:
self.fail("Error trying to do non-blocking recv.")
read, write, err = select.select([conn], [], [])
if conn in read:
msg = conn.recv(len(MSG))
conn.close()
self.assertEqual(msg, MSG)
else:
self.fail("Error during select call to non-blocking socket.")
def _testRecv(self):
self.cli.connect((HOST, self.port))
time.sleep(0.1)
self.cli.send(MSG)
@unittest.skipUnless(thread, 'Threading required for this test.')
class FileObjectClassTestCase(SocketConnectedTest):
"""Unit tests for the object returned by socket.makefile()
self.read_file is the io object returned by makefile() on
the client connection. You can read from this file to
get output from the server.
self.write_file is the io object returned by makefile() on the
server connection. You can write to this file to send output
to the client.
"""
bufsize = -1 # Use default buffer size
encoding = 'utf-8'
errors = 'strict'
newline = None
read_mode = 'rb'
read_msg = MSG
write_mode = 'wb'
write_msg = MSG
def __init__(self, methodName='runTest'):
SocketConnectedTest.__init__(self, methodName=methodName)
def setUp(self):
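        # Events used to synchronize the server and client threads,
        # mainly by the non-blocking tests in subclasses (see
        # UnbufferedFileObjectClassTestCase below).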
self.evt1, self.evt2, self.serv_finished, self.cli_finished = [
threading.Event() for i in range(4)]
SocketConnectedTest.setUp(self)
self.read_file = self.cli_conn.makefile(
self.read_mode, self.bufsize,
encoding = self.encoding,
errors = self.errors,
newline = self.newline)
def tearDown(self):
self.serv_finished.set()
self.read_file.close()
self.assertTrue(self.read_file.closed)
self.read_file = None
SocketConnectedTest.tearDown(self)
def clientSetUp(self):
SocketConnectedTest.clientSetUp(self)
self.write_file = self.serv_conn.makefile(
self.write_mode, self.bufsize,
encoding = self.encoding,
errors = self.errors,
newline = self.newline)
def clientTearDown(self):
self.cli_finished.set()
self.write_file.close()
self.assertTrue(self.write_file.closed)
self.write_file = None
SocketConnectedTest.clientTearDown(self)
def testReadAfterTimeout(self):
# Issue #7322: A file object must disallow further reads
# after a timeout has occurred.
self.cli_conn.settimeout(1)
self.read_file.read(3)
# First read raises a timeout
self.assertRaises(socket.timeout, self.read_file.read, 1)
# Second read is disallowed
with self.assertRaises(OSError) as ctx:
self.read_file.read(1)
self.assertIn("cannot read from timed out object", str(ctx.exception))
def _testReadAfterTimeout(self):
self.write_file.write(self.write_msg[0:3])
self.write_file.flush()
self.serv_finished.wait()
def testSmallRead(self):
# Performing small file read test
first_seg = self.read_file.read(len(self.read_msg)-3)
second_seg = self.read_file.read(3)
msg = first_seg + second_seg
self.assertEqual(msg, self.read_msg)
def _testSmallRead(self):
self.write_file.write(self.write_msg)
self.write_file.flush()
def testFullRead(self):
# read until EOF
msg = self.read_file.read()
self.assertEqual(msg, self.read_msg)
def _testFullRead(self):
self.write_file.write(self.write_msg)
self.write_file.close()
def testUnbufferedRead(self):
# Performing unbuffered file read test
buf = type(self.read_msg)()
while 1:
char = self.read_file.read(1)
if not char:
break
buf += char
self.assertEqual(buf, self.read_msg)
def _testUnbufferedRead(self):
self.write_file.write(self.write_msg)
self.write_file.flush()
def testReadline(self):
# Performing file readline test
line = self.read_file.readline()
self.assertEqual(line, self.read_msg)
def _testReadline(self):
self.write_file.write(self.write_msg)
self.write_file.flush()
def testCloseAfterMakefile(self):
# The file returned by makefile should keep the socket open.
self.cli_conn.close()
# read until EOF
msg = self.read_file.read()
self.assertEqual(msg, self.read_msg)
def _testCloseAfterMakefile(self):
self.write_file.write(self.write_msg)
self.write_file.flush()
def testMakefileAfterMakefileClose(self):
self.read_file.close()
msg = self.cli_conn.recv(len(MSG))
if isinstance(self.read_msg, str):
msg = msg.decode()
self.assertEqual(msg, self.read_msg)
def _testMakefileAfterMakefileClose(self):
self.write_file.write(self.write_msg)
self.write_file.flush()
def testClosedAttr(self):
self.assertTrue(not self.read_file.closed)
def _testClosedAttr(self):
self.assertTrue(not self.write_file.closed)
def testAttributes(self):
self.assertEqual(self.read_file.mode, self.read_mode)
self.assertEqual(self.read_file.name, self.cli_conn.fileno())
def _testAttributes(self):
self.assertEqual(self.write_file.mode, self.write_mode)
self.assertEqual(self.write_file.name, self.serv_conn.fileno())
def testRealClose(self):
self.read_file.close()
self.assertRaises(ValueError, self.read_file.fileno)
self.cli_conn.close()
self.assertRaises(OSError, self.cli_conn.getsockname)
def _testRealClose(self):
pass
class FileObjectInterruptedTestCase(unittest.TestCase):
"""Test that the file object correctly handles EINTR internally."""
class MockSocket(object):
def __init__(self, recv_funcs=()):
# A generator that returns callables that we'll call for each
# call to recv().
self._recv_step = iter(recv_funcs)
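        # recv_into() pops the next callable and copies its result into
        # the caller's buffer; a callable may instead raise, e.g. to
        # simulate EINTR.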
def recv_into(self, buffer):
data = next(self._recv_step)()
assert len(buffer) >= len(data)
buffer[:len(data)] = data
return len(data)
def _decref_socketios(self):
pass
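        # Build the same io stack that socket.makefile() would: SocketIO,
        # then BufferedReader and TextIOWrapper (with buffering == 0 the
        # raw SocketIO is returned), so EINTR handling can be checked at
        # each layer.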
def _textiowrap_for_test(self, buffering=-1):
raw = socket.SocketIO(self, "r")
if buffering < 0:
buffering = io.DEFAULT_BUFFER_SIZE
if buffering == 0:
return raw
buffer = io.BufferedReader(raw, buffering)
text = io.TextIOWrapper(buffer, None, None)
text.mode = "rb"
return text
@staticmethod
def _raise_eintr():
raise OSError(errno.EINTR, "interrupted")
def _test_readline(self, size=-1, buffering=-1):
mock_sock = self.MockSocket(recv_funcs=[
lambda : b"This is the first line\nAnd the sec",
self._raise_eintr,
lambda : b"ond line is here\n",
lambda : b"",
lambda : b"", # XXX(gps): io library does an extra EOF read
])
fo = mock_sock._textiowrap_for_test(buffering=buffering)
self.assertEqual(fo.readline(size), "This is the first line\n")
self.assertEqual(fo.readline(size), "And the second line is here\n")
def _test_read(self, size=-1, buffering=-1):
mock_sock = self.MockSocket(recv_funcs=[
lambda : b"This is the first line\nAnd the sec",
self._raise_eintr,
lambda : b"ond line is here\n",
lambda : b"",
lambda : b"", # XXX(gps): io library does an extra EOF read
])
expecting = (b"This is the first line\n"
b"And the second line is here\n")
fo = mock_sock._textiowrap_for_test(buffering=buffering)
if buffering == 0:
data = b''
else:
data = ''
expecting = expecting.decode('utf-8')
while len(data) != len(expecting):
part = fo.read(size)
if not part:
break
data += part
self.assertEqual(data, expecting)
def test_default(self):
self._test_readline()
self._test_readline(size=100)
self._test_read()
self._test_read(size=100)
def test_with_1k_buffer(self):
self._test_readline(buffering=1024)
self._test_readline(size=100, buffering=1024)
self._test_read(buffering=1024)
self._test_read(size=100, buffering=1024)
def _test_readline_no_buffer(self, size=-1):
mock_sock = self.MockSocket(recv_funcs=[
lambda : b"a",
lambda : b"\n",
lambda : b"B",
self._raise_eintr,
lambda : b"b",
lambda : b"",
])
fo = mock_sock._textiowrap_for_test(buffering=0)
self.assertEqual(fo.readline(size), b"a\n")
self.assertEqual(fo.readline(size), b"Bb")
def test_no_buffer(self):
self._test_readline_no_buffer()
self._test_readline_no_buffer(size=4)
self._test_read(buffering=0)
self._test_read(size=100, buffering=0)
class UnbufferedFileObjectClassTestCase(FileObjectClassTestCase):
"""Repeat the tests from FileObjectClassTestCase with bufsize==0.
In this case (and in this case only), it should be possible to
create a file object, read a line from it, create another file
object, read another line from it, without loss of data in the
first file object's buffer. Note that http.client relies on this
when reading multiple requests from the same socket."""
bufsize = 0 # Use unbuffered mode
def testUnbufferedReadline(self):
# Read a line, create a new file object, read another line with it
line = self.read_file.readline() # first line
self.assertEqual(line, b"A. " + self.write_msg) # first line
self.read_file = self.cli_conn.makefile('rb', 0)
line = self.read_file.readline() # second line
self.assertEqual(line, b"B. " + self.write_msg) # second line
def _testUnbufferedReadline(self):
self.write_file.write(b"A. " + self.write_msg)
self.write_file.write(b"B. " + self.write_msg)
self.write_file.flush()
def testMakefileClose(self):
# The file returned by makefile should keep the socket open...
self.cli_conn.close()
msg = self.cli_conn.recv(1024)
self.assertEqual(msg, self.read_msg)
# ...until the file is itself closed
self.read_file.close()
self.assertRaises(OSError, self.cli_conn.recv, 1024)
def _testMakefileClose(self):
self.write_file.write(self.write_msg)
self.write_file.flush()
def testMakefileCloseSocketDestroy(self):
refcount_before = sys.getrefcount(self.cli_conn)
self.read_file.close()
refcount_after = sys.getrefcount(self.cli_conn)
self.assertEqual(refcount_before - 1, refcount_after)
def _testMakefileCloseSocketDestroy(self):
pass
# Non-blocking ops
# NOTE: to set `read_file` as non-blocking, we must call
# `cli_conn.setblocking` and vice-versa (see setUp / clientSetUp).
def testSmallReadNonBlocking(self):
self.cli_conn.setblocking(False)
self.assertEqual(self.read_file.readinto(bytearray(10)), None)
self.assertEqual(self.read_file.read(len(self.read_msg) - 3), None)
self.evt1.set()
self.evt2.wait(1.0)
first_seg = self.read_file.read(len(self.read_msg) - 3)
if first_seg is None:
            # Data hasn't arrived yet (can happen under Windows); wait a bit
time.sleep(0.5)
first_seg = self.read_file.read(len(self.read_msg) - 3)
buf = bytearray(10)
n = self.read_file.readinto(buf)
self.assertEqual(n, 3)
msg = first_seg + buf[:n]
self.assertEqual(msg, self.read_msg)
self.assertEqual(self.read_file.readinto(bytearray(16)), None)
self.assertEqual(self.read_file.read(1), None)
def _testSmallReadNonBlocking(self):
self.evt1.wait(1.0)
self.write_file.write(self.write_msg)
self.write_file.flush()
self.evt2.set()
        # Avoid closing the socket before the server test has finished,
# otherwise system recv() will return 0 instead of EWOULDBLOCK.
self.serv_finished.wait(5.0)
def testWriteNonBlocking(self):
self.cli_finished.wait(5.0)
# The client thread can't skip directly - the SkipTest exception
# would appear as a failure.
if self.serv_skipped:
self.skipTest(self.serv_skipped)
def _testWriteNonBlocking(self):
self.serv_skipped = None
self.serv_conn.setblocking(False)
# Try to saturate the socket buffer pipe with repeated large writes.
BIG = b"x" * support.SOCK_MAX_SIZE
LIMIT = 10
# The first write() succeeds since a chunk of data can be buffered
n = self.write_file.write(BIG)
self.assertGreater(n, 0)
for i in range(LIMIT):
n = self.write_file.write(BIG)
if n is None:
# Succeeded
break
self.assertGreater(n, 0)
else:
# Let us know that this test didn't manage to establish
# the expected conditions. This is not a failure in itself but,
# if it happens repeatedly, the test should be fixed.
self.serv_skipped = "failed to saturate the socket buffer"
class LineBufferedFileObjectClassTestCase(FileObjectClassTestCase):
bufsize = 1 # Default-buffered for reading; line-buffered for writing
class SmallBufferedFileObjectClassTestCase(FileObjectClassTestCase):
bufsize = 2 # Exercise the buffering code
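# The next three classes exercise socket.makefile() in text mode: text on
# the reading side, text on the writing side, and text on both sides.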
class UnicodeReadFileObjectClassTestCase(FileObjectClassTestCase):
"""Tests for socket.makefile() in text mode (rather than binary)"""
read_mode = 'r'
read_msg = MSG.decode('utf-8')
write_mode = 'wb'
write_msg = MSG
newline = ''
class UnicodeWriteFileObjectClassTestCase(FileObjectClassTestCase):
"""Tests for socket.makefile() in text mode (rather than binary)"""
read_mode = 'rb'
read_msg = MSG
write_mode = 'w'
write_msg = MSG.decode('utf-8')
newline = ''
class UnicodeReadWriteFileObjectClassTestCase(FileObjectClassTestCase):
"""Tests for socket.makefile() in text mode (rather than binary)"""
read_mode = 'r'
read_msg = MSG.decode('utf-8')
write_mode = 'w'
write_msg = MSG.decode('utf-8')
newline = ''
class NetworkConnectionTest(object):
"""Prove network connection."""
def clientSetUp(self):
# We're inherited below by BasicTCPTest2, which also inherits
# BasicTCPTest, which defines self.port referenced below.
self.cli = socket.create_connection((HOST, self.port))
self.serv_conn = self.cli
class BasicTCPTest2(NetworkConnectionTest, BasicTCPTest):
"""Tests that NetworkConnection does not break existing TCP functionality.
"""
class NetworkConnectionNoServer(unittest.TestCase):
class MockSocket(socket.socket):
def connect(self, *args):
raise socket.timeout('timed out')
@contextlib.contextmanager
def mocked_socket_module(self):
"""Return a socket which times out on connect"""
old_socket = socket.socket
socket.socket = self.MockSocket
try:
yield
finally:
socket.socket = old_socket
def test_connect(self):
port = support.find_unused_port()
cli = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.addCleanup(cli.close)
with self.assertRaises(OSError) as cm:
cli.connect((HOST, port))
self.assertEqual(cm.exception.errno, errno.ECONNREFUSED)
def test_create_connection(self):
# Issue #9792: errors raised by create_connection() should have
# a proper errno attribute.
port = support.find_unused_port()
with self.assertRaises(OSError) as cm:
socket.create_connection((HOST, port))
# Issue #16257: create_connection() calls getaddrinfo() against
# 'localhost'. This may result in an IPV6 addr being returned
# as well as an IPV4 one:
# >>> socket.getaddrinfo('localhost', port, 0, SOCK_STREAM)
# >>> [(2, 2, 0, '', ('127.0.0.1', 41230)),
# (26, 2, 0, '', ('::1', 41230, 0, 0))]
#
# create_connection() enumerates through all the addresses returned
# and if it doesn't successfully bind to any of them, it propagates
# the last exception it encountered.
#
# On Solaris, ENETUNREACH is returned in this circumstance instead
# of ECONNREFUSED. So, if that errno exists, add it to our list of
# expected errnos.
expected_errnos = [ errno.ECONNREFUSED, ]
if hasattr(errno, 'ENETUNREACH'):
expected_errnos.append(errno.ENETUNREACH)
self.assertIn(cm.exception.errno, expected_errnos)
def test_create_connection_timeout(self):
# Issue #9792: create_connection() should not recast timeout errors
# as generic socket errors.
with self.mocked_socket_module():
with self.assertRaises(socket.timeout):
socket.create_connection((HOST, 1234))
@unittest.skipUnless(thread, 'Threading required for this test.')
class NetworkConnectionAttributesTest(SocketTCPTest, ThreadableTest):
def __init__(self, methodName='runTest'):
SocketTCPTest.__init__(self, methodName=methodName)
ThreadableTest.__init__(self)
def clientSetUp(self):
self.source_port = support.find_unused_port()
def clientTearDown(self):
self.cli.close()
self.cli = None
ThreadableTest.clientTearDown(self)
def _justAccept(self):
conn, addr = self.serv.accept()
conn.close()
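    # For the tests below the server side only needs to accept (and close)
    # a connection; the assertions all run in the client-side _test*()
    # methods, so each server-side test is just an alias for _justAccept().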
testFamily = _justAccept
def _testFamily(self):
self.cli = socket.create_connection((HOST, self.port), timeout=30)
self.addCleanup(self.cli.close)
self.assertEqual(self.cli.family, 2)
testSourceAddress = _justAccept
def _testSourceAddress(self):
self.cli = socket.create_connection((HOST, self.port), timeout=30,
source_address=('', self.source_port))
self.addCleanup(self.cli.close)
self.assertEqual(self.cli.getsockname()[1], self.source_port)
# The port number being used is sufficient to show that the bind()
# call happened.
testTimeoutDefault = _justAccept
def _testTimeoutDefault(self):
# passing no explicit timeout uses socket's global default
self.assertTrue(socket.getdefaulttimeout() is None)
socket.setdefaulttimeout(42)
try:
self.cli = socket.create_connection((HOST, self.port))
self.addCleanup(self.cli.close)
finally:
socket.setdefaulttimeout(None)
self.assertEqual(self.cli.gettimeout(), 42)
testTimeoutNone = _justAccept
def _testTimeoutNone(self):
# None timeout means the same as sock.settimeout(None)
self.assertTrue(socket.getdefaulttimeout() is None)
socket.setdefaulttimeout(30)
try:
self.cli = socket.create_connection((HOST, self.port), timeout=None)
self.addCleanup(self.cli.close)
finally:
socket.setdefaulttimeout(None)
self.assertEqual(self.cli.gettimeout(), None)
testTimeoutValueNamed = _justAccept
def _testTimeoutValueNamed(self):
self.cli = socket.create_connection((HOST, self.port), timeout=30)
self.assertEqual(self.cli.gettimeout(), 30)
testTimeoutValueNonamed = _justAccept
def _testTimeoutValueNonamed(self):
self.cli = socket.create_connection((HOST, self.port), 30)
self.addCleanup(self.cli.close)
self.assertEqual(self.cli.gettimeout(), 30)
@unittest.skipUnless(thread, 'Threading required for this test.')
class NetworkConnectionBehaviourTest(SocketTCPTest, ThreadableTest):
def __init__(self, methodName='runTest'):
SocketTCPTest.__init__(self, methodName=methodName)
ThreadableTest.__init__(self)
def clientSetUp(self):
pass
def clientTearDown(self):
self.cli.close()
self.cli = None
ThreadableTest.clientTearDown(self)
def testInsideTimeout(self):
conn, addr = self.serv.accept()
self.addCleanup(conn.close)
time.sleep(3)
conn.send(b"done!")
testOutsideTimeout = testInsideTimeout
def _testInsideTimeout(self):
self.cli = sock = socket.create_connection((HOST, self.port))
data = sock.recv(5)
self.assertEqual(data, b"done!")
def _testOutsideTimeout(self):
self.cli = sock = socket.create_connection((HOST, self.port), timeout=1)
self.assertRaises(socket.timeout, lambda: sock.recv(5))
class TCPTimeoutTest(SocketTCPTest):
def testTCPTimeout(self):
def raise_timeout(*args, **kwargs):
self.serv.settimeout(1.0)
self.serv.accept()
self.assertRaises(socket.timeout, raise_timeout,
"Error generating a timeout exception (TCP)")
def testTimeoutZero(self):
ok = False
try:
self.serv.settimeout(0.0)
foo = self.serv.accept()
except socket.timeout:
self.fail("caught timeout instead of error (TCP)")
except OSError:
ok = True
except:
self.fail("caught unexpected exception (TCP)")
if not ok:
self.fail("accept() returned success when we did not expect it")
@unittest.skipUnless(hasattr(signal, 'alarm'),
'test needs signal.alarm()')
def testInterruptedTimeout(self):
# XXX I don't know how to do this test on MSWindows or any other
        # platform that doesn't support signal.alarm() or os.kill(), though
# the bug should have existed on all platforms.
self.serv.settimeout(5.0) # must be longer than alarm
class Alarm(Exception):
pass
def alarm_handler(signal, frame):
raise Alarm
old_alarm = signal.signal(signal.SIGALRM, alarm_handler)
try:
signal.alarm(2) # POSIX allows alarm to be up to 1 second early
try:
foo = self.serv.accept()
except socket.timeout:
self.fail("caught timeout instead of Alarm")
except Alarm:
pass
except:
self.fail("caught other exception instead of Alarm:"
" %s(%s):\n%s" %
(sys.exc_info()[:2] + (traceback.format_exc(),)))
else:
self.fail("nothing caught")
finally:
signal.alarm(0) # shut off alarm
except Alarm:
self.fail("got Alarm in wrong place")
finally:
# no alarm can be pending. Safe to restore old handler.
signal.signal(signal.SIGALRM, old_alarm)
class UDPTimeoutTest(SocketUDPTest):
def testUDPTimeout(self):
def raise_timeout(*args, **kwargs):
self.serv.settimeout(1.0)
self.serv.recv(1024)
self.assertRaises(socket.timeout, raise_timeout,
"Error generating a timeout exception (UDP)")
def testTimeoutZero(self):
ok = False
try:
self.serv.settimeout(0.0)
foo = self.serv.recv(1024)
except socket.timeout:
self.fail("caught timeout instead of error (UDP)")
except OSError:
ok = True
except:
self.fail("caught unexpected exception (UDP)")
if not ok:
self.fail("recv() returned success when we did not expect it")
class TestExceptions(unittest.TestCase):
def testExceptionTree(self):
self.assertTrue(issubclass(OSError, Exception))
self.assertTrue(issubclass(socket.herror, OSError))
self.assertTrue(issubclass(socket.gaierror, OSError))
self.assertTrue(issubclass(socket.timeout, OSError))
@unittest.skipUnless(sys.platform == 'linux', 'Linux specific test')
class TestLinuxAbstractNamespace(unittest.TestCase):
UNIX_PATH_MAX = 108
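    # 108 == sizeof(sockaddr_un.sun_path) on Linux.  Abstract addresses
    # start with a NUL byte and may use the whole buffer with no trailing
    # NUL.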
def testLinuxAbstractNamespace(self):
address = b"\x00python-test-hello\x00\xff"
with socket.socket(socket.AF_UNIX, socket.SOCK_STREAM) as s1:
s1.bind(address)
s1.listen()
with socket.socket(socket.AF_UNIX, socket.SOCK_STREAM) as s2:
s2.connect(s1.getsockname())
with s1.accept()[0] as s3:
self.assertEqual(s1.getsockname(), address)
self.assertEqual(s2.getpeername(), address)
def testMaxName(self):
address = b"\x00" + b"h" * (self.UNIX_PATH_MAX - 1)
with socket.socket(socket.AF_UNIX, socket.SOCK_STREAM) as s:
s.bind(address)
self.assertEqual(s.getsockname(), address)
def testNameOverflow(self):
address = "\x00" + "h" * self.UNIX_PATH_MAX
with socket.socket(socket.AF_UNIX, socket.SOCK_STREAM) as s:
self.assertRaises(OSError, s.bind, address)
def testStrName(self):
# Check that an abstract name can be passed as a string.
s = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
try:
s.bind("\x00python\x00test\x00")
self.assertEqual(s.getsockname(), b"\x00python\x00test\x00")
finally:
s.close()
@unittest.skipUnless(hasattr(socket, 'AF_UNIX'), 'test needs socket.AF_UNIX')
class TestUnixDomain(unittest.TestCase):
def setUp(self):
self.sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
def tearDown(self):
self.sock.close()
def encoded(self, path):
# Return the given path encoded in the file system encoding,
# or skip the test if this is not possible.
try:
return os.fsencode(path)
except UnicodeEncodeError:
self.skipTest(
"Pathname {0!a} cannot be represented in file "
"system encoding {1!r}".format(
path, sys.getfilesystemencoding()))
def bind(self, sock, path):
# Bind the socket
try:
sock.bind(path)
except OSError as e:
if str(e) == "AF_UNIX path too long":
self.skipTest(
"Pathname {0!a} is too long to serve as a AF_UNIX path"
.format(path))
else:
raise
def testStrAddr(self):
# Test binding to and retrieving a normal string pathname.
path = os.path.abspath(support.TESTFN)
self.bind(self.sock, path)
self.addCleanup(support.unlink, path)
self.assertEqual(self.sock.getsockname(), path)
def testBytesAddr(self):
# Test binding to a bytes pathname.
path = os.path.abspath(support.TESTFN)
self.bind(self.sock, self.encoded(path))
self.addCleanup(support.unlink, path)
self.assertEqual(self.sock.getsockname(), path)
def testSurrogateescapeBind(self):
# Test binding to a valid non-ASCII pathname, with the
# non-ASCII bytes supplied using surrogateescape encoding.
path = os.path.abspath(support.TESTFN_UNICODE)
b = self.encoded(path)
self.bind(self.sock, b.decode("ascii", "surrogateescape"))
self.addCleanup(support.unlink, path)
self.assertEqual(self.sock.getsockname(), path)
def testUnencodableAddr(self):
# Test binding to a pathname that cannot be encoded in the
# file system encoding.
if support.TESTFN_UNENCODABLE is None:
self.skipTest("No unencodable filename available")
path = os.path.abspath(support.TESTFN_UNENCODABLE)
self.bind(self.sock, path)
self.addCleanup(support.unlink, path)
self.assertEqual(self.sock.getsockname(), path)
@unittest.skipUnless(thread, 'Threading required for this test.')
class BufferIOTest(SocketConnectedTest):
"""
Test the buffer versions of socket.recv() and socket.send().
"""
def __init__(self, methodName='runTest'):
SocketConnectedTest.__init__(self, methodName=methodName)
def testRecvIntoArray(self):
buf = bytearray(1024)
nbytes = self.cli_conn.recv_into(buf)
self.assertEqual(nbytes, len(MSG))
msg = buf[:len(MSG)]
self.assertEqual(msg, MSG)
def _testRecvIntoArray(self):
buf = bytes(MSG)
self.serv_conn.send(buf)
def testRecvIntoBytearray(self):
buf = bytearray(1024)
nbytes = self.cli_conn.recv_into(buf)
self.assertEqual(nbytes, len(MSG))
msg = buf[:len(MSG)]
self.assertEqual(msg, MSG)
_testRecvIntoBytearray = _testRecvIntoArray
def testRecvIntoMemoryview(self):
buf = bytearray(1024)
nbytes = self.cli_conn.recv_into(memoryview(buf))
self.assertEqual(nbytes, len(MSG))
msg = buf[:len(MSG)]
self.assertEqual(msg, MSG)
_testRecvIntoMemoryview = _testRecvIntoArray
def testRecvFromIntoArray(self):
buf = bytearray(1024)
nbytes, addr = self.cli_conn.recvfrom_into(buf)
self.assertEqual(nbytes, len(MSG))
msg = buf[:len(MSG)]
self.assertEqual(msg, MSG)
def _testRecvFromIntoArray(self):
buf = bytes(MSG)
self.serv_conn.send(buf)
def testRecvFromIntoBytearray(self):
buf = bytearray(1024)
nbytes, addr = self.cli_conn.recvfrom_into(buf)
self.assertEqual(nbytes, len(MSG))
msg = buf[:len(MSG)]
self.assertEqual(msg, MSG)
_testRecvFromIntoBytearray = _testRecvFromIntoArray
def testRecvFromIntoMemoryview(self):
buf = bytearray(1024)
nbytes, addr = self.cli_conn.recvfrom_into(memoryview(buf))
self.assertEqual(nbytes, len(MSG))
msg = buf[:len(MSG)]
self.assertEqual(msg, MSG)
_testRecvFromIntoMemoryview = _testRecvFromIntoArray
def testRecvFromIntoSmallBuffer(self):
# See issue #20246.
buf = bytearray(8)
self.assertRaises(ValueError, self.cli_conn.recvfrom_into, buf, 1024)
def _testRecvFromIntoSmallBuffer(self):
self.serv_conn.send(MSG)
def testRecvFromIntoEmptyBuffer(self):
buf = bytearray()
self.cli_conn.recvfrom_into(buf)
self.cli_conn.recvfrom_into(buf, 0)
_testRecvFromIntoEmptyBuffer = _testRecvFromIntoArray
TIPC_STYPE = 2000
TIPC_LOWER = 200
TIPC_UPPER = 210
def isTipcAvailable():
"""Check if the TIPC module is loaded
The TIPC module is not loaded automatically on Ubuntu and probably
other Linux distros.
"""
if not hasattr(socket, "AF_TIPC"):
return False
if not os.path.isfile("/proc/modules"):
return False
with open("/proc/modules") as f:
for line in f:
if line.startswith("tipc "):
return True
return False
@unittest.skipUnless(isTipcAvailable(),
"TIPC module is not loaded, please 'sudo modprobe tipc'")
class TIPCTest(unittest.TestCase):
def testRDM(self):
srv = socket.socket(socket.AF_TIPC, socket.SOCK_RDM)
cli = socket.socket(socket.AF_TIPC, socket.SOCK_RDM)
self.addCleanup(srv.close)
self.addCleanup(cli.close)
srv.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
srvaddr = (socket.TIPC_ADDR_NAMESEQ, TIPC_STYPE,
TIPC_LOWER, TIPC_UPPER)
srv.bind(srvaddr)
sendaddr = (socket.TIPC_ADDR_NAME, TIPC_STYPE,
TIPC_LOWER + int((TIPC_UPPER - TIPC_LOWER) / 2), 0)
cli.sendto(MSG, sendaddr)
msg, recvaddr = srv.recvfrom(1024)
self.assertEqual(cli.getsockname(), recvaddr)
self.assertEqual(msg, MSG)
@unittest.skipUnless(isTipcAvailable(),
"TIPC module is not loaded, please 'sudo modprobe tipc'")
class TIPCThreadableTest(unittest.TestCase, ThreadableTest):
def __init__(self, methodName = 'runTest'):
unittest.TestCase.__init__(self, methodName = methodName)
ThreadableTest.__init__(self)
def setUp(self):
self.srv = socket.socket(socket.AF_TIPC, socket.SOCK_STREAM)
self.addCleanup(self.srv.close)
self.srv.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
srvaddr = (socket.TIPC_ADDR_NAMESEQ, TIPC_STYPE,
TIPC_LOWER, TIPC_UPPER)
self.srv.bind(srvaddr)
self.srv.listen()
self.serverExplicitReady()
self.conn, self.connaddr = self.srv.accept()
self.addCleanup(self.conn.close)
def clientSetUp(self):
        # There is a hittable race between serverExplicitReady() and the
# accept() call; sleep a little while to avoid it, otherwise
# we could get an exception
time.sleep(0.1)
self.cli = socket.socket(socket.AF_TIPC, socket.SOCK_STREAM)
self.addCleanup(self.cli.close)
addr = (socket.TIPC_ADDR_NAME, TIPC_STYPE,
TIPC_LOWER + int((TIPC_UPPER - TIPC_LOWER) / 2), 0)
self.cli.connect(addr)
self.cliaddr = self.cli.getsockname()
def testStream(self):
msg = self.conn.recv(1024)
self.assertEqual(msg, MSG)
self.assertEqual(self.cliaddr, self.connaddr)
def _testStream(self):
self.cli.send(MSG)
self.cli.close()
@unittest.skipUnless(thread, 'Threading required for this test.')
class ContextManagersTest(ThreadedTCPSocketTest):
def _testSocketClass(self):
# base test
with socket.socket() as sock:
self.assertFalse(sock._closed)
self.assertTrue(sock._closed)
# close inside with block
with socket.socket() as sock:
sock.close()
self.assertTrue(sock._closed)
# exception inside with block
with socket.socket() as sock:
self.assertRaises(OSError, sock.sendall, b'foo')
self.assertTrue(sock._closed)
def testCreateConnectionBase(self):
conn, addr = self.serv.accept()
self.addCleanup(conn.close)
data = conn.recv(1024)
conn.sendall(data)
def _testCreateConnectionBase(self):
address = self.serv.getsockname()
with socket.create_connection(address) as sock:
self.assertFalse(sock._closed)
sock.sendall(b'foo')
self.assertEqual(sock.recv(1024), b'foo')
self.assertTrue(sock._closed)
def testCreateConnectionClose(self):
conn, addr = self.serv.accept()
self.addCleanup(conn.close)
data = conn.recv(1024)
conn.sendall(data)
def _testCreateConnectionClose(self):
address = self.serv.getsockname()
with socket.create_connection(address) as sock:
sock.close()
self.assertTrue(sock._closed)
self.assertRaises(OSError, sock.sendall, b'foo')
class InheritanceTest(unittest.TestCase):
@unittest.skipUnless(hasattr(socket, "SOCK_CLOEXEC"),
"SOCK_CLOEXEC not defined")
@support.requires_linux_version(2, 6, 28)
def test_SOCK_CLOEXEC(self):
with socket.socket(socket.AF_INET,
socket.SOCK_STREAM | socket.SOCK_CLOEXEC) as s:
self.assertTrue(s.type & socket.SOCK_CLOEXEC)
self.assertFalse(s.get_inheritable())
def test_default_inheritable(self):
sock = socket.socket()
with sock:
self.assertEqual(sock.get_inheritable(), False)
def test_dup(self):
sock = socket.socket()
with sock:
newsock = sock.dup()
sock.close()
with newsock:
self.assertEqual(newsock.get_inheritable(), False)
def test_set_inheritable(self):
sock = socket.socket()
with sock:
sock.set_inheritable(True)
self.assertEqual(sock.get_inheritable(), True)
sock.set_inheritable(False)
self.assertEqual(sock.get_inheritable(), False)
@unittest.skipIf(fcntl is None, "need fcntl")
def test_get_inheritable_cloexec(self):
sock = socket.socket()
with sock:
fd = sock.fileno()
self.assertEqual(sock.get_inheritable(), False)
# clear FD_CLOEXEC flag
flags = fcntl.fcntl(fd, fcntl.F_GETFD)
flags &= ~fcntl.FD_CLOEXEC
fcntl.fcntl(fd, fcntl.F_SETFD, flags)
self.assertEqual(sock.get_inheritable(), True)
@unittest.skipIf(fcntl is None, "need fcntl")
def test_set_inheritable_cloexec(self):
sock = socket.socket()
with sock:
fd = sock.fileno()
self.assertEqual(fcntl.fcntl(fd, fcntl.F_GETFD) & fcntl.FD_CLOEXEC,
fcntl.FD_CLOEXEC)
sock.set_inheritable(True)
self.assertEqual(fcntl.fcntl(fd, fcntl.F_GETFD) & fcntl.FD_CLOEXEC,
0)
@unittest.skipUnless(hasattr(socket, "socketpair"),
"need socket.socketpair()")
def test_socketpair(self):
s1, s2 = socket.socketpair()
self.addCleanup(s1.close)
self.addCleanup(s2.close)
self.assertEqual(s1.get_inheritable(), False)
self.assertEqual(s2.get_inheritable(), False)
@unittest.skipUnless(hasattr(socket, "SOCK_NONBLOCK"),
"SOCK_NONBLOCK not defined")
class NonblockConstantTest(unittest.TestCase):
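    # Helper: check that the SOCK_NONBLOCK bit reported in s.type agrees
    # with the timeout reported by s.gettimeout().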
def checkNonblock(self, s, nonblock=True, timeout=0.0):
if nonblock:
self.assertTrue(s.type & socket.SOCK_NONBLOCK)
self.assertEqual(s.gettimeout(), timeout)
else:
self.assertFalse(s.type & socket.SOCK_NONBLOCK)
self.assertEqual(s.gettimeout(), None)
@support.requires_linux_version(2, 6, 28)
def test_SOCK_NONBLOCK(self):
# a lot of it seems silly and redundant, but I wanted to test that
# changing back and forth worked ok
with socket.socket(socket.AF_INET,
socket.SOCK_STREAM | socket.SOCK_NONBLOCK) as s:
self.checkNonblock(s)
s.setblocking(1)
self.checkNonblock(s, False)
s.setblocking(0)
self.checkNonblock(s)
s.settimeout(None)
self.checkNonblock(s, False)
s.settimeout(2.0)
self.checkNonblock(s, timeout=2.0)
s.setblocking(1)
self.checkNonblock(s, False)
# defaulttimeout
t = socket.getdefaulttimeout()
socket.setdefaulttimeout(0.0)
with socket.socket() as s:
self.checkNonblock(s)
socket.setdefaulttimeout(None)
with socket.socket() as s:
self.checkNonblock(s, False)
socket.setdefaulttimeout(2.0)
with socket.socket() as s:
self.checkNonblock(s, timeout=2.0)
socket.setdefaulttimeout(None)
with socket.socket() as s:
self.checkNonblock(s, False)
socket.setdefaulttimeout(t)
@unittest.skipUnless(os.name == "nt", "Windows specific")
@unittest.skipUnless(multiprocessing, "need multiprocessing")
class TestSocketSharing(SocketTCPTest):
    # This must be a classmethod and not a staticmethod or multiprocessing
    # won't be able to bootstrap it.
@classmethod
def remoteProcessServer(cls, q):
# Recreate socket from shared data
sdata = q.get()
message = q.get()
s = socket.fromshare(sdata)
s2, c = s.accept()
# Send the message
s2.sendall(message)
s2.close()
s.close()
def testShare(self):
# Transfer the listening server socket to another process
# and service it from there.
# Create process:
q = multiprocessing.Queue()
p = multiprocessing.Process(target=self.remoteProcessServer, args=(q,))
p.start()
# Get the shared socket data
data = self.serv.share(p.pid)
# Pass the shared socket to the other process
addr = self.serv.getsockname()
self.serv.close()
q.put(data)
# The data that the server will send us
message = b"slapmahfro"
q.put(message)
# Connect
s = socket.create_connection(addr)
# listen for the data
m = []
while True:
data = s.recv(100)
if not data:
break
m.append(data)
s.close()
received = b"".join(m)
self.assertEqual(received, message)
p.join()
def testShareLength(self):
data = self.serv.share(os.getpid())
self.assertRaises(ValueError, socket.fromshare, data[:-1])
self.assertRaises(ValueError, socket.fromshare, data+b"foo")
def compareSockets(self, org, other):
        # Socket sharing is expected to work only for blocking sockets
        # since the internal Python timeout value isn't transferred.
self.assertEqual(org.gettimeout(), None)
self.assertEqual(org.gettimeout(), other.gettimeout())
self.assertEqual(org.family, other.family)
self.assertEqual(org.type, other.type)
# If the user specified "0" for proto, then
# internally windows will have picked the correct value.
# Python introspection on the socket however will still return
# 0. For the shared socket, the python value is recreated
# from the actual value, so it may not compare correctly.
if org.proto != 0:
self.assertEqual(org.proto, other.proto)
def testShareLocal(self):
data = self.serv.share(os.getpid())
s = socket.fromshare(data)
try:
self.compareSockets(self.serv, s)
finally:
s.close()
def testTypes(self):
families = [socket.AF_INET, socket.AF_INET6]
types = [socket.SOCK_STREAM, socket.SOCK_DGRAM]
for f in families:
for t in types:
try:
source = socket.socket(f, t)
except OSError:
continue # This combination is not supported
try:
data = source.share(os.getpid())
shared = socket.fromshare(data)
try:
self.compareSockets(source, shared)
finally:
shared.close()
finally:
source.close()
@unittest.skipUnless(thread, 'Threading required for this test.')
class SendfileUsingSendTest(ThreadedTCPSocketTest):
"""
Test the send() implementation of socket.sendfile().
"""
FILESIZE = (10 * 1024 * 1024) # 10MB
BUFSIZE = 8192
FILEDATA = b""
TIMEOUT = 2
@classmethod
def setUpClass(cls):
def chunks(total, step):
assert total >= step
while total > step:
yield step
total -= step
if total:
yield total
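        # Fill the test file with FILESIZE bytes built from one random
        # BUFSIZE-sized chunk, then read it back so the tests can compare
        # received data byte-for-byte.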
chunk = b"".join([random.choice(string.ascii_letters).encode()
for i in range(cls.BUFSIZE)])
with open(support.TESTFN, 'wb') as f:
for csize in chunks(cls.FILESIZE, cls.BUFSIZE):
f.write(chunk)
with open(support.TESTFN, 'rb') as f:
cls.FILEDATA = f.read()
assert len(cls.FILEDATA) == cls.FILESIZE
@classmethod
def tearDownClass(cls):
support.unlink(support.TESTFN)
def accept_conn(self):
self.serv.settimeout(self.TIMEOUT)
conn, addr = self.serv.accept()
conn.settimeout(self.TIMEOUT)
self.addCleanup(conn.close)
return conn
def recv_data(self, conn):
received = []
while True:
chunk = conn.recv(self.BUFSIZE)
if not chunk:
break
received.append(chunk)
return b''.join(received)
def meth_from_sock(self, sock):
# Depending on the mixin class being run return either send()
# or sendfile() method implementation.
return getattr(sock, "_sendfile_use_send")
# regular file
def _testRegularFile(self):
address = self.serv.getsockname()
file = open(support.TESTFN, 'rb')
with socket.create_connection(address) as sock, file as file:
meth = self.meth_from_sock(sock)
sent = meth(file)
self.assertEqual(sent, self.FILESIZE)
self.assertEqual(file.tell(), self.FILESIZE)
def testRegularFile(self):
conn = self.accept_conn()
data = self.recv_data(conn)
self.assertEqual(len(data), self.FILESIZE)
self.assertEqual(data, self.FILEDATA)
# non regular file
def _testNonRegularFile(self):
address = self.serv.getsockname()
file = io.BytesIO(self.FILEDATA)
with socket.create_connection(address) as sock, file as file:
sent = sock.sendfile(file)
self.assertEqual(sent, self.FILESIZE)
self.assertEqual(file.tell(), self.FILESIZE)
self.assertRaises(socket._GiveupOnSendfile,
sock._sendfile_use_sendfile, file)
def testNonRegularFile(self):
conn = self.accept_conn()
data = self.recv_data(conn)
self.assertEqual(len(data), self.FILESIZE)
self.assertEqual(data, self.FILEDATA)
# empty file
def _testEmptyFileSend(self):
address = self.serv.getsockname()
filename = support.TESTFN + "2"
with open(filename, 'wb'):
self.addCleanup(support.unlink, filename)
file = open(filename, 'rb')
with socket.create_connection(address) as sock, file as file:
meth = self.meth_from_sock(sock)
sent = meth(file)
self.assertEqual(sent, 0)
self.assertEqual(file.tell(), 0)
def testEmptyFileSend(self):
conn = self.accept_conn()
data = self.recv_data(conn)
self.assertEqual(data, b"")
# offset
def _testOffset(self):
address = self.serv.getsockname()
file = open(support.TESTFN, 'rb')
with socket.create_connection(address) as sock, file as file:
meth = self.meth_from_sock(sock)
sent = meth(file, offset=5000)
self.assertEqual(sent, self.FILESIZE - 5000)
self.assertEqual(file.tell(), self.FILESIZE)
def testOffset(self):
conn = self.accept_conn()
data = self.recv_data(conn)
self.assertEqual(len(data), self.FILESIZE - 5000)
self.assertEqual(data, self.FILEDATA[5000:])
# count
def _testCount(self):
address = self.serv.getsockname()
file = open(support.TESTFN, 'rb')
with socket.create_connection(address, timeout=2) as sock, file as file:
count = 5000007
meth = self.meth_from_sock(sock)
sent = meth(file, count=count)
self.assertEqual(sent, count)
self.assertEqual(file.tell(), count)
def testCount(self):
count = 5000007
conn = self.accept_conn()
data = self.recv_data(conn)
self.assertEqual(len(data), count)
self.assertEqual(data, self.FILEDATA[:count])
# count small
def _testCountSmall(self):
address = self.serv.getsockname()
file = open(support.TESTFN, 'rb')
with socket.create_connection(address, timeout=2) as sock, file as file:
count = 1
meth = self.meth_from_sock(sock)
sent = meth(file, count=count)
self.assertEqual(sent, count)
self.assertEqual(file.tell(), count)
def testCountSmall(self):
count = 1
conn = self.accept_conn()
data = self.recv_data(conn)
self.assertEqual(len(data), count)
self.assertEqual(data, self.FILEDATA[:count])
# count + offset
def _testCountWithOffset(self):
address = self.serv.getsockname()
file = open(support.TESTFN, 'rb')
with socket.create_connection(address, timeout=2) as sock, file as file:
count = 100007
meth = self.meth_from_sock(sock)
sent = meth(file, offset=2007, count=count)
self.assertEqual(sent, count)
self.assertEqual(file.tell(), count + 2007)
def testCountWithOffset(self):
count = 100007
conn = self.accept_conn()
data = self.recv_data(conn)
self.assertEqual(len(data), count)
self.assertEqual(data, self.FILEDATA[2007:count+2007])
# non blocking sockets are not supposed to work
def _testNonBlocking(self):
address = self.serv.getsockname()
file = open(support.TESTFN, 'rb')
with socket.create_connection(address) as sock, file as file:
sock.setblocking(False)
meth = self.meth_from_sock(sock)
self.assertRaises(ValueError, meth, file)
self.assertRaises(ValueError, sock.sendfile, file)
def testNonBlocking(self):
conn = self.accept_conn()
if conn.recv(8192):
self.fail('was not supposed to receive any data')
# timeout (non-triggered)
def _testWithTimeout(self):
address = self.serv.getsockname()
file = open(support.TESTFN, 'rb')
with socket.create_connection(address, timeout=2) as sock, file as file:
meth = self.meth_from_sock(sock)
sent = meth(file)
self.assertEqual(sent, self.FILESIZE)
def testWithTimeout(self):
conn = self.accept_conn()
data = self.recv_data(conn)
self.assertEqual(len(data), self.FILESIZE)
self.assertEqual(data, self.FILEDATA)
# timeout (triggered)
def _testWithTimeoutTriggeredSend(self):
address = self.serv.getsockname()
file = open(support.TESTFN, 'rb')
with socket.create_connection(address, timeout=0.01) as sock, \
file as file:
meth = self.meth_from_sock(sock)
self.assertRaises(socket.timeout, meth, file)
def testWithTimeoutTriggeredSend(self):
conn = self.accept_conn()
conn.recv(88192)
# errors
def _test_errors(self):
pass
def test_errors(self):
with open(support.TESTFN, 'rb') as file:
with socket.socket(type=socket.SOCK_DGRAM) as s:
meth = self.meth_from_sock(s)
self.assertRaisesRegex(
ValueError, "SOCK_STREAM", meth, file)
with open(support.TESTFN, 'rt') as file:
with socket.socket() as s:
meth = self.meth_from_sock(s)
self.assertRaisesRegex(
ValueError, "binary mode", meth, file)
with open(support.TESTFN, 'rb') as file:
with socket.socket() as s:
meth = self.meth_from_sock(s)
self.assertRaisesRegex(TypeError, "positive integer",
meth, file, count='2')
self.assertRaisesRegex(TypeError, "positive integer",
meth, file, count=0.1)
self.assertRaisesRegex(ValueError, "positive integer",
meth, file, count=0)
self.assertRaisesRegex(ValueError, "positive integer",
meth, file, count=-1)
@unittest.skipUnless(thread, 'Threading required for this test.')
@unittest.skipUnless(hasattr(os, "sendfile"),
'os.sendfile() required for this test.')
class SendfileUsingSendfileTest(SendfileUsingSendTest):
"""
Test the sendfile() implementation of socket.sendfile().
"""
def meth_from_sock(self, sock):
return getattr(sock, "_sendfile_use_sendfile")
def test_main():
tests = [GeneralModuleTests, BasicTCPTest, TCPCloserTest, TCPTimeoutTest,
TestExceptions, BufferIOTest, BasicTCPTest2, BasicUDPTest, UDPTimeoutTest ]
tests.extend([
NonBlockingTCPTests,
FileObjectClassTestCase,
FileObjectInterruptedTestCase,
UnbufferedFileObjectClassTestCase,
LineBufferedFileObjectClassTestCase,
SmallBufferedFileObjectClassTestCase,
UnicodeReadFileObjectClassTestCase,
UnicodeWriteFileObjectClassTestCase,
UnicodeReadWriteFileObjectClassTestCase,
NetworkConnectionNoServer,
NetworkConnectionAttributesTest,
NetworkConnectionBehaviourTest,
ContextManagersTest,
InheritanceTest,
NonblockConstantTest
])
tests.append(BasicSocketPairTest)
tests.append(TestUnixDomain)
tests.append(TestLinuxAbstractNamespace)
tests.extend([TIPCTest, TIPCThreadableTest])
tests.extend([BasicCANTest, CANTest])
tests.extend([BasicRDSTest, RDSTest])
tests.extend([
CmsgMacroTests,
SendmsgUDPTest,
RecvmsgUDPTest,
RecvmsgIntoUDPTest,
SendmsgUDP6Test,
RecvmsgUDP6Test,
RecvmsgRFC3542AncillaryUDP6Test,
RecvmsgIntoRFC3542AncillaryUDP6Test,
RecvmsgIntoUDP6Test,
SendmsgTCPTest,
RecvmsgTCPTest,
RecvmsgIntoTCPTest,
SendmsgSCTPStreamTest,
RecvmsgSCTPStreamTest,
RecvmsgIntoSCTPStreamTest,
SendmsgUnixStreamTest,
RecvmsgUnixStreamTest,
RecvmsgIntoUnixStreamTest,
RecvmsgSCMRightsStreamTest,
RecvmsgIntoSCMRightsStreamTest,
# These are slow when setitimer() is not available
InterruptedRecvTimeoutTest,
InterruptedSendTimeoutTest,
TestSocketSharing,
SendfileUsingSendTest,
SendfileUsingSendfileTest,
])
thread_info = support.threading_setup()
support.run_unittest(*tests)
support.threading_cleanup(*thread_info)
if __name__ == "__main__":
test_main()
|
file_server.py
|
#-----------------------------------------------------------------------------
# Copyright (c) 2012 - 2021, Anaconda, Inc., and Bokeh Contributors.
# All rights reserved.
#
# The full license is in the file LICENSE.txt, distributed with this software.
#-----------------------------------------------------------------------------
''' Define a simple web server for testing purposes.
Used to serve the testing HTML pages that are needed by the webdriver unit
tests.
'''
#-----------------------------------------------------------------------------
# Boilerplate
#-----------------------------------------------------------------------------
from __future__ import annotations
import logging # isort:skip
log = logging.getLogger(__name__)
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
# Standard library imports
import os
import threading
from http.server import BaseHTTPRequestHandler, HTTPServer
from typing import Any
from urllib.request import URLopener
# External imports
import pytest
#-----------------------------------------------------------------------------
# Globals and constants
#-----------------------------------------------------------------------------
DEFAULT_HOST = "127.0.0.1"
DEFAULT_PORT = 8000
HTML_ROOT = os.path.dirname(__file__)
WEBDRIVER = os.environ.get('WEBDRIVER', "<undefined>")
__all__ = (
'file_server',
'HtmlOnlyHandler',
'SimpleWebServer',
)
#-----------------------------------------------------------------------------
# General API
#-----------------------------------------------------------------------------
class HtmlOnlyHandler(BaseHTTPRequestHandler):
"""Http handler."""
def do_GET(self) -> None:
"""GET method handler."""
path = self.path[1:].split("?")[0]
try:
with open(os.path.join(HTML_ROOT, path), mode="rb") as f: # lgtm [py/path-injection]
self.send_response(200)
self.send_header("Content-type", "text/html")
self.end_headers()
self.wfile.write(f.read())
except OSError:
self.send_error(404, f"File Not Found: {path}")
def log_message(self, format: str, *args: Any) -> None:
"""Override default to avoid trashing stderr"""
pass
class SimpleWebServer:
"""A very basic web server."""
def __init__(self, host: str = DEFAULT_HOST, port: int = DEFAULT_PORT) -> None:
self.stop_serving = False
while True:
try:
self.server = HTTPServer((host, port), HtmlOnlyHandler)
self.host = host
self.port = port
break
except OSError:
log.debug(f"port {port} is in use, trying to next one")
port += 1
self.thread = threading.Thread(target=self._run_web_server)
def _run_web_server(self) -> None:
"""Runs the server loop."""
log.debug("web server started")
while not self.stop_serving:
self.server.handle_request()
self.server.server_close()
def start(self) -> None:
"""Starts the server."""
self.thread.start()
def stop(self) -> None:
"""Stops the server."""
self.stop_serving = True
try:
# This is to force stop the server loop
URLopener().open(f"http://{self.host}:{self.port}")
except OSError:
pass
log.info("Shutting down the webserver")
self.thread.join()
def where_is(self, path: str) -> str:
return f"http://{self.host}:{self.port}/{path}"
@pytest.fixture(scope='session')
def file_server(request: pytest.FixtureRequest) -> SimpleWebServer:
server = SimpleWebServer()
server.start()
request.addfinalizer(server.stop)
return server
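# A minimal usage sketch outside pytest (the page name "example.html" is
# hypothetical and would need to exist under HTML_ROOT):
#
#     server = SimpleWebServer()
#     server.start()
#     url = server.where_is("example.html")  # http://127.0.0.1:<port>/example.html
#     # ... drive a webdriver against `url` ...
#     server.stop()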
#-----------------------------------------------------------------------------
# Dev API
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Private API
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Code
#-----------------------------------------------------------------------------
_html_root_error_message = "Can't find 'common_web' directory, try setting WEBDRIVER environment variable WEBDRIVER:" + WEBDRIVER + " HTML_ROOT:" + HTML_ROOT
if not os.path.isdir(HTML_ROOT):
log.error(_html_root_error_message)
assert 0, _html_root_error_message
# Licensed to the Software Freedom Conservancy (SFC) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The SFC licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# Taken from
# https://github.com/SeleniumHQ/selenium/blob/52e9d6407248bce5de2b6a73103a50bb0e670c1f/py/test/selenium/webdriver/common/webserver.py
# with small modifications
|
FaceDetection.py
|
import time
import cv2
import numpy as np
import os, sys
import pickle
import nep
import threading
import sharo
show_image = 1
try:
    print(sys.argv[1])
    show_image = int(sys.argv[1])
    print("Show image: " + str(show_image))
except (IndexError, ValueError):
    pass
node = nep.node('face_detection')
sub_image = node.new_sub('robot_image', 'image')
pub_position = node.new_pub('face_positions', 'json')
perception_face = sharo.BooleanPerception(node, "human_detected", "value", 1, 3) # --------------- Sharo ------------------
frame = ""
bounding_boxes = ""
def thread_function(name):
global sub_image, frame
while True:
s, img = sub_image.listen()
if s:
frame = cv2.resize(img, (640,480), interpolation = cv2.INTER_AREA)
face_positions = threading.Thread(target=thread_function, args=(1,))
face_positions.start()
font = cv2.FONT_HERSHEY_COMPLEX_SMALL
model_filename = 'models/haarcascade_frontalface_default.xml'
faceCascade = cv2.CascadeClassifier(model_filename)
time.sleep(1)
people_detected_msg_sended = False
people_non_detected_msg_sended = False
n_detected = 0
non_detected = 0
start_detection = time.time()
start_alone = time.time()
print('Start!')
def draw_bounding_box(face_coordinates, image_array, color):
x, y, w, h = face_coordinates
cv2.rectangle(image_array, (x, y), (x + w, y + h), color, 2)
main_face = [0,0]
nb_faces = 0
timer_faces_yes = 0
timer_faces_no = 0
while True:
newImage = frame.copy()
close = 0
gray = cv2.cvtColor(newImage, cv2.COLOR_BGR2GRAY)
    matrix = faceCascade.detectMultiScale(gray, scaleFactor=1.1, minNeighbors=5, minSize=(30, 30))
    # n holds the number of faces detected
    try:
        n, m = matrix.shape
    except (AttributeError, ValueError):
        n = 0
if n > 0:
#boxes = [[int(matrix[0,0]), int(matrix[0,1]), int(matrix[0,0]) + int(matrix[0,2]), int(matrix[0,1]) + int(matrix[0,3])]]
# If only 1 face that is the main face
if n == 1:
x_m = int(matrix[close,0]) + int(matrix[close,2]/2)
y_m = int(matrix[close,1]) + int(matrix[close,3]/2)
main_face = [x_m, y_m] # Center of face
# If more than 1 face track the closest one to the main face
else:
dist = 10000000
close = 0 # Index of closest face
for i in range(n):
x_m = int(matrix[i,0]) + int(matrix[i,2]/2)
y_m = int(matrix[i,1]) + int(matrix[i,3]/2)
dist_temp = ((main_face[0] - x_m)**2 + (main_face[1] - y_m)**2)**.5
if dist_temp < dist:
dist = dist_temp
close = i
x_m = int(matrix[close,0]) + int(matrix[close,2]/2)
y_m = int(matrix[close,1]) + int(matrix[close,3]/2)
main_face = [x_m, y_m] # Center of face
boxes = []
for i in range(n):
boxes.append([int(matrix[i,0]), int(matrix[i,1]), int(matrix[i,2]), int(matrix[i,3])])
#Send only main face
bounding_boxes = np.array(boxes)
end = time.time()
# Only send one face
center = {"x":boxes[close][0], "y":boxes[close][1]}
size = {"w":boxes[close][2] , "h": boxes[close][3]}
box = {"x1" : int(matrix[close,0]), "x2" : int(matrix[close,1]),"y1" : int(matrix[close,0] + matrix[i,2]), "y2" : int(matrix[close,1] + matrix[i,3])}
pub_position.publish({"face": {"center":center, "size":size,"box":box}})
nb_faces = bounding_boxes.shape[0]
if(nb_faces > 0):
timer_faces_yes = timer_faces_yes + 1
if (timer_faces_yes > 10):
timer_faces_yes = 0
perception_face.primitive_detected() # --------------- Sharo ------------------
timer_faces_no = 0
        else:
            pub_position.publish({"positions": []})
            timer_faces_no = timer_faces_no + 1
            if (timer_faces_no > 10):
                timer_faces_no = 0
                perception_face.primitive_non_detected() # --------------- Sharo ------------------
                timer_faces_yes = 0
else:
timer_faces_no = timer_faces_no + 1
timer_faces_yes = 0
if (timer_faces_no > 10):
timer_faces_no = 0
perception_face.primitive_non_detected() # --------------- Sharo ------------------
nb_faces = 0
if show_image == 1:
if nb_faces > 0:
for i in range(nb_faces):
if i == close:
draw_bounding_box(bounding_boxes[i], newImage, (0, 255, 0))
else:
draw_bounding_box(bounding_boxes[i], newImage, (0, 0, 200))
cv2.imshow("Face Detection", newImage)
key = cv2.waitKey(1) & 0xFF
if key == ord('q'):
break
|
data_processing.py
|
# -*- coding: utf-8 -*-
import numpy as np
import re
import random
import json
import collections
import util.parameters as params
from tqdm import tqdm
import nltk
from nltk.corpus import wordnet as wn
import os
from util.data_annotation import POS_dict_spacy, PADDING
import pickle
import multiprocessing
from nltk.tag import StanfordNERTagger
from nltk.tag import StanfordPOSTagger
import pdb
FIXED_PARAMETERS, config = params.load_parameters()
LABEL_MAP = {
"entailment": 0,
"neutral": 1,
"contradiction": 2,
"hidden": -1
}
base_path = os.getcwd()
nltk_data_path = base_path + "/../TF/nltk_data"
nltk.data.path.append(nltk_data_path)
stemmer = nltk.SnowballStemmer('english')
tt = nltk.tokenize.treebank.TreebankWordTokenizer()
def load_nli_data(path, snli=False, shuffle = True):
"""
Load MultiNLI or SNLI data.
If the "snli" parameter is set to True, a genre label of snli will be assigned to the data.
"""
data = []
with open(path, encoding='utf-8') as f:
for line in tqdm(f):
loaded_example = json.loads(line)
if loaded_example["gold_label"] not in LABEL_MAP:
continue
loaded_example["label"] = LABEL_MAP[loaded_example["gold_label"]]
if snli:
loaded_example["genre"] = "snli"
data.append(loaded_example)
if shuffle:
random.seed(1)
random.shuffle(data)
return data
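# Typical usage (the file paths below are illustrative; any MultiNLI/SNLI
# jsonl split with a "gold_label" field works):
#
#     train_mnli = load_nli_data("multinli_1.0/multinli_1.0_train.jsonl")
#     dev_snli = load_nli_data("snli_1.0/snli_1.0_dev.jsonl", snli=True)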
def load_quora_data(path, shuffle = True):
"""
Load quora question pair data.
The format of qqp is label\tsent1\tsent2.
"""
data = []
with open(path, encoding='utf8') as f:
for line in tqdm(f):
ls = line.split("\t")
loaded_example = {}
loaded_example["label"] = ls[0]
loaded_example["sentence1"] = int(ls[1])
loaded_example["sentence2"] = int(ls[2])
data.append(loaded_example)
if shuffle:
random.seed(1)
random.shuffle(data)
return data
def load_nli_data_genre(path, genre, snli=True, shuffle = True):
"""
Load a specific genre's examples from MultiNLI, or load SNLI data and assign a "snli" genre to the examples.
If the "snli" parameter is set to True, a genre label of snli will be assigned to the data. If set to true, it will overwrite the genre label for MultiNLI data.
"""
data = []
j = 0
with open(path) as f:
for line in f:
loaded_example = json.loads(line)
if loaded_example["gold_label"] not in LABEL_MAP:
continue
loaded_example["label"] = LABEL_MAP[loaded_example["gold_label"]]
if snli:
loaded_example["genre"] = "snli"
if loaded_example["genre"] == genre:
data.append(loaded_example)
if shuffle:
random.seed(1)
random.shuffle(data)
return data
def is_exact_match(token1, token2):
token1 = token1.lower()
token2 = token2.lower()
token1_stem = stemmer.stem(token1)
if token1 == token2:
return True
for synsets in wn.synsets(token2):
for lemma in synsets.lemma_names():
if token1_stem == stemmer.stem(lemma):
return True
if token1 == "n't" and token2 == "not":
return True
elif token1 == "not" and token2 == "n't":
return True
elif token1_stem == stemmer.stem(token2):
return True
return False
def is_antonyms(token1, token2):
token1 = token1.lower()
token2 = token2.lower()
token1_stem = stemmer.stem(token1)
antonym_lists_for_token2 = []
for synsets in wn.synsets(token2):
for lemma_synsets in [wn.synsets(l) for l in synsets.lemma_names()]:
for lemma_syn in lemma_synsets:
for lemma in lemma_syn.lemmas():
for antonym in lemma.antonyms():
antonym_lists_for_token2.append(antonym.name())
# if token1_stem == stemmer.stem(antonym.name()):
# return True
antonym_lists_for_token2 = list(set(antonym_lists_for_token2))
for atnm in antonym_lists_for_token2:
if token1_stem == stemmer.stem(atnm):
return True
return False
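# Quick sanity checks for the two matchers above (these assume the NLTK
# WordNet corpus is available; exact results can vary with the WordNet version):
#
#     >>> is_exact_match("cats", "cat")   # same Snowball stem
#     True
#     >>> is_exact_match("n't", "not")    # special-cased negation pair
#     True
#     >>> is_antonyms("hot", "cold")      # antonym reached via WordNet lemmas
#     True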
def worker(shared_content, dataset):
def tokenize(string):
string = re.sub(r'\(|\)', '', string)
return string.split()
for example in tqdm(dataset):
s1_tokenize = tokenize(example['sentence1_binary_parse'])
s2_tokenize = tokenize(example['sentence2_binary_parse'])
s1_token_exact_match = [0] * len(s1_tokenize)
s2_token_exact_match = [0] * len(s2_tokenize)
s1_token_antonym = [0] * len(s1_tokenize)
s2_token_antonym = [0] * len(s2_tokenize)
for i, word in enumerate(s1_tokenize):
matched = False
for j, w2 in enumerate(s2_tokenize):
matched = is_exact_match(word, w2)
if matched:
s1_token_exact_match[i] = 1
s2_token_exact_match[j] = 1
content = {}
content['sentence1_token_exact_match_with_s2'] = s1_token_exact_match
content['sentence2_token_exact_match_with_s1'] = s2_token_exact_match
shared_content[example["pairID"]] = content
# print(shared_content[example["pairID"]])
# print(shared_content)
def load_shared_content(fh, shared_content):
for line in fh:
row = line.rstrip().split("\t")
key = row[0]
value = json.loads(row[1])
shared_content[key] = value
def load_mnli_shared_content():
shared_file_exist = False
# shared_path = config.datapath + "/shared_2D_EM.json"
# shared_path = config.datapath + "/shared_anto.json"
# shared_path = config.datapath + "/shared_NER.json"
shared_path = config.datapath + "/shared.jsonl"
# shared_path = "../shared.json"
print(shared_path)
if os.path.isfile(shared_path):
shared_file_exist = True
# shared_content = {}
assert shared_file_exist
# if not shared_file_exist and config.use_exact_match_feature:
# with open(shared_path, 'w') as f:
# json.dump(dict(reconvert_shared_content), f)
# elif config.use_exact_match_feature:
with open(shared_path) as f:
shared_content = {}
load_shared_content(f, shared_content)
# shared_content = json.load(f)
return shared_content
def sentences_to_padded_index_sequences(datasets, indices_to_words=None, word_indices=None, indices_to_char=None, char_indices=None):
"""
Annotate datasets with feature vectors. Adding right-sided padding.
"""
# Extract vocabulary
word_counter = collections.Counter()
char_counter = collections.Counter()
# mgr = multiprocessing.Manager()
# shared_content = mgr.dict()
# process_num = config.num_process_prepro
# process_num = 1
    ## If the vocabulary doesn't exist, collect tokens and build it
if not word_indices or not char_indices:
for i, dataset in enumerate(datasets):
# if not shared_file_exist:
# num_per_share = len(dataset) / process_num + 1
# jobs = [ multiprocessing.Process(target=worker, args=(shared_content, dataset[i * num_per_share : (i + 1) * num_per_share] )) for i in range(process_num)]
# for j in jobs:
# j.start()
# for j in jobs:
# j.join()
for example in tqdm(dataset):
#get the tokens
s1_tokenize = example['sentence1_tokens']
s2_tokenize = example['sentence2_tokens']
word_counter.update(s1_tokenize)
word_counter.update(s2_tokenize)
for i, word in enumerate(s1_tokenize):
char_counter.update([c for c in word])
for word in s2_tokenize:
char_counter.update([c for c in word])
# shared_content = {k:v for k, v in shared_content.items()}
#Create vocabulary for both word and char
vocabulary = set([word for word in word_counter])
vocabulary = list(vocabulary)
if config.embedding_replacing_rare_word_with_UNK:
vocabulary = [PADDING, "<UNK>"] + vocabulary
else:
vocabulary = [PADDING] + vocabulary
# print(char_counter)
word_indices = dict(zip(vocabulary, range(len(vocabulary))))
indices_to_words = {v: k for k, v in word_indices.items()}
char_vocab = set([char for char in char_counter])
char_vocab = list(char_vocab)
char_vocab = [PADDING] + char_vocab
char_indices = dict(zip(char_vocab, range(len(char_vocab))))
indices_to_char = {v: k for k, v in char_indices.items()}
####Pad sentence and words and fill in inverse term frequency for words
for i, dataset in enumerate(datasets):
for example in tqdm(dataset):
for sentence in ['sentence1', 'sentence2']:
example[sentence + '_index_sequence'] = np.zeros((FIXED_PARAMETERS["seq_length"]), dtype=np.int32)
example[sentence + '_inverse_term_frequency'] = np.zeros((FIXED_PARAMETERS["seq_length"]), dtype=np.float32)
token_sequence = example[sentence+'_tokens']
padding = FIXED_PARAMETERS["seq_length"] - len(token_sequence)
for i in range(FIXED_PARAMETERS["seq_length"]):
if i >= len(token_sequence):
index = word_indices[PADDING]
itf = 0
else:
if config.embedding_replacing_rare_word_with_UNK:
index = word_indices[token_sequence[i]] if word_counter[token_sequence[i]] >= config.UNK_threshold else word_indices["<UNK>"]
else:
index = word_indices[token_sequence[i]]
itf = 1 / (word_counter[token_sequence[i]] + 1)
example[sentence + '_index_sequence'][i] = index
example[sentence + '_inverse_term_frequency'][i] = itf
example[sentence + '_char_index'] = np.zeros((FIXED_PARAMETERS["seq_length"], config.char_in_word_size), dtype=np.int32)
for i in range(FIXED_PARAMETERS["seq_length"]):
if i >= len(token_sequence):
continue
else:
chars = [c for c in token_sequence[i]]
for j in range(config.char_in_word_size):
if j >= (len(chars)):
break
else:
index = char_indices[chars[j]]
example[sentence + '_char_index'][i,j] = index
return indices_to_words, word_indices, char_indices, indices_to_char
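# Typical usage: pass all splits at once so they share one vocabulary
# (variable names here are illustrative):
#
#     i2w, w2i, c2i, i2c = sentences_to_padded_index_sequences([train, dev, test])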
def get_subword_list(token):
token = token.lower()
token = "<" + token + ">"
subword_list = []
for i in [3,4,5,6]:
for j in range(len(token) - i + 1):
subword_list.append(token[j : j + i])
return subword_list
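# For example, get_subword_list("cat") wraps the token as "<cat>" and emits
# every character n-gram of length 3 to 6:
#
#     >>> get_subword_list("cat")
#     ['<ca', 'cat', 'at>', '<cat', 'cat>', '<cat>']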
def load_subword_list(sentences, rand = False):
list_of_vectors = []
for sentence in sentences:
sentence_vector = []
for i in range(config.seq_length):
if i < len(sentence):
                idx = list(range(len(sentence[i])))
                if rand:
                    random.shuffle(idx)
token_subword_feature_list = [sentence[i][index] for index in idx][:config.subword_feature_len]
if len(token_subword_feature_list) < config.subword_feature_len:
token_subword_feature_list += [0] * (config.subword_feature_len - len(token_subword_feature_list))
sentence_vector.append(token_subword_feature_list)
else:
sentence_vector.append([0] * config.subword_feature_len)
list_of_vectors.append(sentence_vector)
return np.array(list_of_vectors)
def parse_to_pos_vector(pos_vector, left_padding_and_cropping_pair = (0,0)): # ONE HOT
left_padding, left_cropping = left_padding_and_cropping_pair
vector = np.zeros((FIXED_PARAMETERS["seq_length"],len(POS_dict_spacy)))
assert left_padding == 0 or left_cropping == 0
for i in range(FIXED_PARAMETERS["seq_length"]):
if i < len(pos_vector):
vector[i + left_padding, pos_vector[i + left_cropping]] = 1
else:
break
return vector
def generate_pos_feature_tensor(pos_vecs, left_padding_and_cropping_pairs):
pos = [[(idx, posid) for idx, posid in enumerate(pos_vec)] for pos_vec in pos_vecs]
return construct_one_hot_feature_tensor(pos, left_padding_and_cropping_pairs, 2, column_size=len(POS_dict_spacy))
def generate_quora_pos_feature_tensor(pos, left_padding_and_cropping_pairs):
pos = [(idx, posid) for idx, posid in enumerate(pos)]
return construct_one_hot_feature_tensor(pos, left_padding_and_cropping_pairs, 2, column_size=len(POS_dict_spacy))
def generate_crop_pad_pairs(sequences):
seq_len = FIXED_PARAMETERS["seq_length"]
list_of_pairs = []
for sequence in sequences:
left_padding = 0
left_cropping = 0
if len(sequence) < seq_len:
left_padding = int(random.uniform(0,1) * (seq_len - len(sequence)))
elif len(sequence) > seq_len:
left_cropping = int(random.uniform(0,1) * (len(sequence) - seq_len))
list_of_pairs.append((left_padding, left_cropping))
return list_of_pairs
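# For example, with seq_length = 10 a 7-token sequence gets left_cropping = 0
# and a random left_padding drawn from {0, 1, 2}, while a 13-token sequence
# gets left_padding = 0 and a random left_cropping drawn from {0, 1, 2}.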
def fill_feature_vector_with_cropping_or_padding(sequences, left_padding_and_cropping_pairs, dim, column_size=None, dtype=np.int32):
if dim == 1:
list_of_vectors = []
for sequence, pad_crop_pair in zip(sequences, left_padding_and_cropping_pairs):
vec = np.zeros((config.seq_length))
left_padding, left_cropping = pad_crop_pair
for i in range(config.seq_length):
if i + left_padding < config.seq_length and i - left_cropping < len(sequence):
vec[i + left_padding] = sequence[i + left_cropping]
else:
break
list_of_vectors.append(vec)
return np.array(list_of_vectors, dtype=dtype)
elif dim == 2:
assert column_size
tensor_list = []
for sequence, pad_crop_pair in zip(sequences, left_padding_and_cropping_pairs):
left_padding, left_cropping = pad_crop_pair
mtrx = np.zeros((config.seq_length, column_size))
for row_idx in range(config.seq_length):
if row_idx + left_padding < config.seq_length and row_idx < len(sequence) + left_cropping:
for col_idx, content in enumerate(sequence[row_idx + left_cropping]):
mtrx[row_idx + left_padding, col_idx] = content
else:
break
tensor_list.append(mtrx)
return np.array(tensor_list, dtype=dtype)
else:
raise NotImplementedError
def construct_one_hot_feature_tensor(sequences, left_padding_and_cropping_pairs, dim, column_size=None, dtype=np.int32):
"""
sequences: [[(idx, val)... ()]...[]]
left_padding_and_cropping_pairs: [[(0,0)...] ... []]
"""
tensor_list = []
for sequence, pad_crop_pair in zip(sequences, left_padding_and_cropping_pairs):
left_padding, left_cropping = pad_crop_pair
if dim == 1:
vec = np.zeros((config.seq_length))
for num in sequence:
if num + left_padding - left_cropping < config.seq_length and num + left_padding - left_cropping >= 0:
vec[num + left_padding - left_cropping] = 1
tensor_list.append(vec)
elif dim == 2:
assert column_size
mtrx = np.zeros((config.seq_length, column_size))
for row, col in sequence:
if row + left_padding - left_cropping < config.seq_length and row + left_padding - left_cropping >= 0 and col < column_size:
mtrx[row + left_padding - left_cropping, col] = 1
tensor_list.append(mtrx)
else:
raise NotImplementedError
return np.array(tensor_list, dtype=dtype)
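# Worked example for dim=2 (assuming config.seq_length >= 3): the sequence
# [(0, 1), (2, 0)] with pad/crop pair (0, 0) and column_size=2 yields a
# (seq_length x 2) matrix with a 1 at [0, 1] and a 1 at [2, 0]; every other
# entry stays 0.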
def generate_manual_sample_minibatch(s1_tokenize, s2_tokenize, word_indices, char_indices):
nst = StanfordNERTagger('/home/users/yichen.gong/Stanford/stanford-ner-2014-08-27/classifiers/english.muc.7class.distsim.crf.ser.gz', '//home/users/yichen.gong/Stanford/stanford-ner-2014-08-27/stanford-ner.jar',encoding='utf-8')
pst = StanfordPOSTagger('/home/users/yichen.gong/Stanford/stanford-postagger-2014-08-27/models/english-bidirectional-distsim.tagger', \
'/home/users/yichen.gong/Stanford/stanford-postagger-2014-08-27/stanford-postagger.jar')
premise_vectors = np.zeros((1, config.seq_length))
hypothesis_vectors = np.zeros((1, config.seq_length))
premise_char_vectors = np.zeros((1, config.seq_length, config.char_in_word_size))
hypothesis_char_vectors = np.zeros((1, config.seq_length, config.char_in_word_size))
premise_exact_match = np.zeros((1, config.seq_length))
hypothesis_exact_match = np.zeros((1, config.seq_length))
for idx, w1 in enumerate(s1_tokenize):
premise_vectors[0, idx] = word_indices.get(w1, 0)
for ci, c in enumerate(w1):
premise_char_vectors[0, idx, ci] = char_indices.get(c, 0)
for s2idx, w2 in enumerate(s2_tokenize):
if is_exact_match(w1, w2):
premise_exact_match[0, idx] = 1
hypothesis_exact_match[0, s2idx] = 1
for idx, w2 in enumerate(s2_tokenize):
hypothesis_vectors[0, idx] = word_indices.get(w2, 0)
for ci, c in enumerate(w2):
hypothesis_char_vectors[0, idx, ci] = char_indices.get(c, 0)
premise_pos_vectors = np.zeros((1, config.seq_length, len(POS_dict_spacy.keys())))
hypothesis_pos_vectors = np.zeros((1, config.seq_length, len(POS_dict_spacy.keys())))
s1_pos = pst.tag(s1_tokenize)
s2_pos = pst.tag(s2_tokenize)
for idx, pair in enumerate(s1_pos):
word, tag = pair
premise_pos_vectors[0, idx, POS_dict_spacy[tag]] = 1
for idx, pair in enumerate(s2_pos):
word, tag = pair
hypothesis_pos_vectors[0, idx, POS_dict_spacy[tag]] = 1
# s1_ner = nst.tag(s1_tokenize)
# s2_ner = nst.tag(s2_tokenize)
# not used
labels = np.zeros((1))
genres = np.zeros((1))
pairIDs = np.zeros((1))
premise_inverse_term_frequency = np.zeros((1, config.seq_length, 1), dtype=np.float32)
hypothesis_inverse_term_frequency = np.zeros((1, config.seq_length, 1), dtype=np.float32)
premise_antonym_feature = np.zeros((1, config.seq_length))
hypothesis_antonym_feature = np.zeros((1, config.seq_length))
premise_NER_feature = np.zeros((1, config.seq_length, 7))
hypothesis_NER_feature = np.zeros((1, config.seq_length, 7))
premise_exact_match = np.expand_dims(premise_exact_match, 2)
hypothesis_exact_match = np.expand_dims(hypothesis_exact_match, 2)
premise_antonym_feature = np.expand_dims(premise_antonym_feature, 2)
hypothesis_antonym_feature = np.expand_dims(hypothesis_antonym_feature, 2)
return premise_vectors, hypothesis_vectors, labels, genres, premise_pos_vectors, \
hypothesis_pos_vectors, pairIDs, premise_char_vectors, hypothesis_char_vectors, \
premise_exact_match, hypothesis_exact_match, premise_inverse_term_frequency, hypothesis_inverse_term_frequency, \
premise_antonym_feature, hypothesis_antonym_feature, premise_NER_feature, hypothesis_NER_feature
def loadEmbedding_zeros(path, word_indices):
"""
Load GloVe embeddings. Initializng OOV words to vector of zeros.
"""
emb = np.zeros((len(word_indices), FIXED_PARAMETERS["word_embedding_dim"]), dtype='float32')
with open(path, 'r') as f:
for i, line in enumerate(f):
if FIXED_PARAMETERS["embeddings_to_load"] != None:
if i >= FIXED_PARAMETERS["embeddings_to_load"]:
break
s = line.split()
if s[0] in word_indices:
emb[word_indices[s[0]], :] = np.asarray(s[1:])
return emb
def loadEmbedding_fully_rand(path, word_indices, divident = 1.0):
n = len(word_indices)
m = FIXED_PARAMETERS["word_embedding_dim"]
emb = np.empty((n, m), dtype=np.float32)
emb[:,:] = np.random.normal(size=(n,m)) / divident
# Explicitly assign embedding of <PAD> to be zeros.
emb[0, :] = np.zeros((1,m), dtype="float32")
return emb
def loadEmbedding_rand(path, word_indices, divident = 1.0): # TODO double embedding
"""
Load GloVe embeddings. Doing a random normal initialization for OOV words.
"""
j = 0
n = len(word_indices)
m = FIXED_PARAMETERS["word_embedding_dim"]
emb = np.empty((n, m), dtype=np.float32)
emb[:,:] = np.random.normal(size=(n,m)) / divident
# Explicitly assign embedding of <PAD> to be zeros.
emb[0, :] = np.zeros((1,m), dtype="float32")
with open(path, 'r', encoding='utf-8') as f:
for i, line in enumerate(f):
if FIXED_PARAMETERS["embeddings_to_load"] != None:
if i >= FIXED_PARAMETERS["embeddings_to_load"]:
break
s = line.split()
if s[0] in word_indices:
try:
emb[word_indices[s[0]], :] = np.asarray(s[1:])
except ValueError:
print(s[0])
continue
return emb
def all_lemmas(token):
t = token.lower()
lemmas = []
for synsets in wn.synsets(t):
for lemma in synsets.lemma_names():
lemmas.append(lemma)
return list(set(lemmas))
def loadEmbedding_with_lemma(path, word_indices):
j = 0
n = len(word_indices)
m = FIXED_PARAMETERS["word_embedding_dim"]
emb = np.empty((n, m), dtype=np.float32)
emb[:,:] = np.random.normal(size=(n,m))
# Explicitly assign embedding of <PAD> to be zeros.
emb[0, :] = np.zeros((1,m), dtype="float32")
records = np.zeros((n))
indices_to_words = [""] * n
for key, val in word_indices.items():
indices_to_words[val] = key
print("OOV words: {}".format(n - np.sum(records) - 1))
print("Loading embedding for first round")
with open(path, 'r') as f:
for i, line in tqdm(enumerate(f)):
if FIXED_PARAMETERS["embeddings_to_load"] != None:
if i >= FIXED_PARAMETERS["embeddings_to_load"]:
break
s = line.split()
if s[0] in word_indices:
try:
emb[word_indices[s[0]], :] = np.asarray(s[1:])
records[word_indices[s[0]]] = 1
except ValueError:
print(s[0])
continue
print("OOV words: {}".format(n - np.sum(records) - 1))
print("Building OOV lemma sets")
OOV_word_indices = {}
for i in range(n):
if records[i] == 0:
for lemma in all_lemmas(indices_to_words[i]):
try:
OOV_word_indices[lemma].append(i)
                except KeyError:
OOV_word_indices[lemma] = [i]
print("Loading embedding for second round")
with open(path, 'r') as f:
for i, line in tqdm(enumerate(f)):
if FIXED_PARAMETERS["embeddings_to_load"] != None:
if i >= FIXED_PARAMETERS["embeddings_to_load"]:
break
s = line.split()
if s[0] in OOV_word_indices:
for idx in OOV_word_indices[s[0]]:
if records[idx] == 0:
try:
emb[idx, :] = np.asarray(s[1:])
records[idx] = 1
except ValueError:
print(s[0])
continue
print("OOV words: {}".format(n - np.sum(records) - 1))
return emb
def save_submission(path, ids, pred_ids):
assert(ids.shape[0] == pred_ids.shape[0])
reverse_label_map = {str(value): key for key, value in LABEL_MAP.items()}
f = open(path, 'w')
f.write("pairID,gold_label\n")
for i in range(ids.shape[0]):
pred = pred_ids[i]
f.write("{},{}\n".format(str(ids[i]), reverse_label_map[str(pred)]))
# f.write("{},{}\n".format(str(ids[i]), str(pred)))
f.close()
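# The resulting file is a two-column CSV in the usual Kaggle submission
# format; the pairIDs below are made up for illustration:
#
#     pairID,gold_label
#     163997,entailment
#     163998,neutral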
|
DashboardServiceServer.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import datetime
import json
import os
import random as _random
import sys
import traceback
from getopt import getopt, GetoptError
from multiprocessing import Process
from os import environ
from wsgiref.simple_server import make_server
import requests as _requests
from jsonrpcbase import JSONRPCService, InvalidParamsError, KeywordError, \
JSONRPCError, InvalidRequestError
from jsonrpcbase import ServerError as JSONServerError
from biokbase import log
from DashboardService.authclient import KBaseAuth as _KBaseAuth
try:
from ConfigParser import ConfigParser
except ImportError:
from configparser import ConfigParser
DEPLOY = 'KB_DEPLOYMENT_CONFIG'
SERVICE = 'KB_SERVICE_NAME'
AUTH = 'auth-service-url'
# Note that the error fields do not match the 2.0 JSONRPC spec
def get_config_file():
return environ.get(DEPLOY, None)
def get_service_name():
return environ.get(SERVICE, None)
def get_config():
if not get_config_file():
return None
retconfig = {}
config = ConfigParser()
config.read(get_config_file())
for nameval in config.items(get_service_name() or 'DashboardService'):
retconfig[nameval[0]] = nameval[1]
return retconfig
config = get_config()
from DashboardService.DashboardServiceImpl import DashboardService # noqa @IgnorePep8
impl_DashboardService = DashboardService(config)
class JSONObjectEncoder(json.JSONEncoder):
def default(self, obj):
if isinstance(obj, set):
return list(obj)
if isinstance(obj, frozenset):
return list(obj)
if hasattr(obj, 'toJSONable'):
return obj.toJSONable()
return json.JSONEncoder.default(self, obj)
class JSONRPCServiceCustom(JSONRPCService):
def call(self, ctx, jsondata):
"""
Calls jsonrpc service's method and returns its return value in a JSON
string or None if there is none.
Arguments:
jsondata -- remote method call in jsonrpc format
"""
result = self.call_py(ctx, jsondata)
if result is not None:
return json.dumps(result, cls=JSONObjectEncoder)
return None
def _call_method(self, ctx, request):
"""Calls given method with given params and returns it value."""
method = self.method_data[request['method']]['method']
params = request['params']
result = None
try:
if isinstance(params, list):
# Does it have enough arguments?
if len(params) < self._man_args(method) - 1:
raise InvalidParamsError('not enough arguments')
# Does it have too many arguments?
if(not self._vargs(method) and len(params) >
self._max_args(method) - 1):
raise InvalidParamsError('too many arguments')
result = method(ctx, *params)
elif isinstance(params, dict):
# Do not accept keyword arguments if the jsonrpc version is
# not >=1.1.
if request['jsonrpc'] < 11:
raise KeywordError
result = method(ctx, **params)
else: # No params
result = method(ctx)
except JSONRPCError:
raise
except Exception as e:
# log.exception('method %s threw an exception' % request['method'])
# Exception was raised inside the method.
newerr = JSONServerError()
newerr.trace = traceback.format_exc()
if len(e.args) == 1:
newerr.data = repr(e.args[0])
else:
newerr.data = repr(e.args)
raise newerr
return result
def call_py(self, ctx, jsondata):
"""
Calls jsonrpc service's method and returns its return value in python
object format or None if there is none.
This method is same as call() except the return value is a python
object instead of JSON string. This method is mainly only useful for
debugging purposes.
"""
rdata = jsondata
# we already deserialize the json string earlier in the server code, no
# need to do it again
# try:
# rdata = json.loads(jsondata)
# except ValueError:
# raise ParseError
# set some default values for error handling
request = self._get_default_vals()
if isinstance(rdata, dict) and rdata:
# It's a single request.
self._fill_request(request, rdata)
respond = self._handle_request(ctx, request)
# Don't respond to notifications
if respond is None:
return None
return respond
elif isinstance(rdata, list) and rdata:
# It's a batch.
requests = []
responds = []
for rdata_ in rdata:
# set some default values for error handling
request_ = self._get_default_vals()
self._fill_request(request_, rdata_)
requests.append(request_)
for request_ in requests:
respond = self._handle_request(ctx, request_)
# Don't respond to notifications
if respond is not None:
responds.append(respond)
if responds:
return responds
# Nothing to respond.
return None
else:
# empty dict, list or wrong type
raise InvalidRequestError
def _handle_request(self, ctx, request):
"""Handles given request and returns its response."""
if 'types' in self.method_data[request['method']]:
self._validate_params_types(request['method'], request['params'])
result = self._call_method(ctx, request)
# Do not respond to notifications.
if request['id'] is None:
return None
respond = {}
self._fill_ver(request['jsonrpc'], respond)
respond['result'] = result
respond['id'] = request['id']
return respond
class MethodContext(dict):
def __init__(self, logger):
self['client_ip'] = None
self['user_id'] = None
self['authenticated'] = None
self['token'] = None
self['module'] = None
self['method'] = None
self['call_id'] = None
self['rpc_context'] = None
self['provenance'] = None
self._debug_levels = set([7, 8, 9, 'DEBUG', 'DEBUG2', 'DEBUG3'])
self._logger = logger
def log_err(self, message):
self._log(log.ERR, message)
def log_info(self, message):
self._log(log.INFO, message)
def log_debug(self, message, level=1):
if level in self._debug_levels:
pass
else:
level = int(level)
if level < 1 or level > 3:
raise ValueError("Illegal log level: " + str(level))
level = level + 6
self._log(level, message)
def set_log_level(self, level):
self._logger.set_log_level(level)
def get_log_level(self):
return self._logger.get_log_level()
def clear_log_level(self):
self._logger.clear_user_log_level()
def _log(self, level, message):
self._logger.log_message(level, message, self['client_ip'],
self['user_id'], self['module'],
self['method'], self['call_id'])
def provenance(self):
callbackURL = os.environ.get('SDK_CALLBACK_URL')
if callbackURL:
# OK, there's a callback server from which we can get provenance
arg_hash = {'method': 'CallbackServer.get_provenance',
'params': [],
'version': '1.1',
'id': str(_random.random())[2:]
}
body = json.dumps(arg_hash)
response = _requests.post(callbackURL, data=body,
timeout=60)
response.encoding = 'utf-8'
if response.status_code == 500:
if ('content-type' in response.headers and
response.headers['content-type'] ==
'application/json'):
err = response.json()
if 'error' in err:
raise ServerError(**err['error'])
else:
raise ServerError('Unknown', 0, response.text)
else:
raise ServerError('Unknown', 0, response.text)
if not response.ok:
response.raise_for_status()
resp = response.json()
if 'result' not in resp:
raise ServerError('Unknown', 0,
'An unknown server error occurred')
return resp['result'][0]
else:
return self.get('provenance')
class ServerError(Exception):
'''
The call returned an error. Fields:
name - the name of the error.
code - the error code.
message - a human readable error message.
data - the server side stacktrace.
'''
def __init__(self, name, code, message, data=None, error=None):
super(Exception, self).__init__(message)
self.name = name
self.code = code
self.message = message if message else ''
self.data = data or error or ''
# data = JSON RPC 2.0, error = 1.1
def __str__(self):
return self.name + ': ' + str(self.code) + '. ' + self.message + \
'\n' + self.data
def getIPAddress(environ):
xFF = environ.get('HTTP_X_FORWARDED_FOR')
realIP = environ.get('HTTP_X_REAL_IP')
trustXHeaders = config is None or \
config.get('dont_trust_x_ip_headers') != 'true'
if (trustXHeaders):
if (xFF):
return xFF.split(',')[0].strip()
if (realIP):
return realIP.strip()
return environ.get('REMOTE_ADDR')
class Application(object):
# Wrap the wsgi handler in a class definition so that we can
# do some initialization and avoid regenerating stuff over
# and over
def logcallback(self):
self.serverlog.set_log_file(self.userlog.get_log_file())
def log(self, level, context, message):
self.serverlog.log_message(level, message, context['client_ip'],
context['user_id'], context['module'],
context['method'], context['call_id'])
def __init__(self):
submod = get_service_name() or 'DashboardService'
self.userlog = log.log(
submod, ip_address=True, authuser=True, module=True, method=True,
call_id=True, changecallback=self.logcallback,
config=get_config_file())
self.serverlog = log.log(
submod, ip_address=True, authuser=True, module=True, method=True,
call_id=True, logfile=self.userlog.get_log_file())
self.serverlog.set_log_level(6)
self.rpc_service = JSONRPCServiceCustom()
self.method_authentication = dict()
self.rpc_service.add(impl_DashboardService.list_all_narratives,
name='DashboardService.list_all_narratives',
types=[dict])
self.method_authentication['DashboardService.list_all_narratives'] = 'optional' # noqa
self.rpc_service.add(impl_DashboardService.create_narrative,
name='DashboardService.create_narrative',
types=[dict])
self.method_authentication['DashboardService.create_narrative'] = 'required' # noqa
self.rpc_service.add(impl_DashboardService.delete_narrative,
name='DashboardService.delete_narrative',
types=[dict])
self.method_authentication['DashboardService.delete_narrative'] = 'required' # noqa
self.rpc_service.add(impl_DashboardService.share_narrative,
name='DashboardService.share_narrative',
types=[dict])
self.method_authentication['DashboardService.share_narrative'] = 'required' # noqa
self.rpc_service.add(impl_DashboardService.unshare_narrative,
name='DashboardService.unshare_narrative',
types=[dict])
self.method_authentication['DashboardService.unshare_narrative'] = 'required' # noqa
self.rpc_service.add(impl_DashboardService.share_narrative_global,
name='DashboardService.share_narrative_global',
types=[dict])
self.method_authentication['DashboardService.share_narrative_global'] = 'required' # noqa
self.rpc_service.add(impl_DashboardService.unshare_narrative_global,
name='DashboardService.unshare_narrative_global',
types=[dict])
self.method_authentication['DashboardService.unshare_narrative_global'] = 'required' # noqa
self.rpc_service.add(impl_DashboardService.status,
name='DashboardService.status',
types=[dict])
authurl = config.get(AUTH) if config else None
self.auth_client = _KBaseAuth(authurl)
def __call__(self, environ, start_response):
# Context object, equivalent to the perl impl CallContext
ctx = MethodContext(self.userlog)
ctx['client_ip'] = getIPAddress(environ)
status = '500 Internal Server Error'
try:
body_size = int(environ.get('CONTENT_LENGTH', 0))
        except ValueError:
body_size = 0
if environ['REQUEST_METHOD'] == 'OPTIONS':
# we basically do nothing and just return headers
status = '200 OK'
rpc_result = ""
else:
request_body = environ['wsgi.input'].read(body_size)
try:
req = json.loads(request_body)
except ValueError as ve:
err = {'error': {'code': -32700,
'name': "Parse error",
'message': str(ve),
}
}
rpc_result = self.process_error(err, ctx, {'version': '1.1'})
else:
ctx['module'], ctx['method'] = req['method'].split('.')
ctx['call_id'] = req['id']
ctx['rpc_context'] = {
'call_stack': [{'time': self.now_in_utc(),
'method': req['method']}
]
}
prov_action = {'service': ctx['module'],
'method': ctx['method'],
'method_params': req['params']
}
ctx['provenance'] = [prov_action]
try:
token = environ.get('HTTP_AUTHORIZATION')
# parse out the method being requested and check if it
# has an authentication requirement
method_name = req['method']
auth_req = self.method_authentication.get(
method_name, 'none')
if auth_req != 'none':
if token is None and auth_req == 'required':
err = JSONServerError()
err.data = (
'Authentication required for ' +
'DashboardService ' +
'but no authentication header was passed')
raise err
elif token is None and auth_req == 'optional':
pass
else:
try:
user = self.auth_client.get_user(token)
ctx['user_id'] = user
ctx['authenticated'] = 1
ctx['token'] = token
except Exception as e:
if auth_req == 'required':
err = JSONServerError()
err.data = \
"Token validation failed: %s" % e
raise err
if (environ.get('HTTP_X_FORWARDED_FOR')):
self.log(log.INFO, ctx, 'X-Forwarded-For: ' +
environ.get('HTTP_X_FORWARDED_FOR'))
self.log(log.INFO, ctx, 'start method')
rpc_result = self.rpc_service.call(ctx, req)
self.log(log.INFO, ctx, 'end method')
status = '200 OK'
except JSONRPCError as jre:
err = {'error': {'code': jre.code,
'name': jre.message,
'message': jre.data
}
}
trace = jre.trace if hasattr(jre, 'trace') else None
rpc_result = self.process_error(err, ctx, req, trace)
except Exception:
err = {'error': {'code': 0,
'name': 'Unexpected Server Error',
'message': 'An unexpected server error ' +
'occurred',
}
}
rpc_result = self.process_error(err, ctx, req,
traceback.format_exc())
# print('Request method was %s\n' % environ['REQUEST_METHOD'])
# print('Environment dictionary is:\n%s\n' % pprint.pformat(environ))
# print('Request body was: %s' % request_body)
# print('Result from the method call is:\n%s\n' % \
# pprint.pformat(rpc_result))
if rpc_result:
response_body = rpc_result
else:
response_body = ''
response_headers = [
('Access-Control-Allow-Origin', '*'),
('Access-Control-Allow-Headers', environ.get(
'HTTP_ACCESS_CONTROL_REQUEST_HEADERS', 'authorization')),
('content-type', 'application/json'),
('content-length', str(len(response_body)))]
start_response(status, response_headers)
return [response_body.encode('utf8')]
def process_error(self, error, context, request, trace=None):
if trace:
self.log(log.ERR, context, trace.split('\n')[0:-1])
if 'id' in request:
error['id'] = request['id']
if 'version' in request:
error['version'] = request['version']
e = error['error'].get('error')
if not e:
error['error']['error'] = trace
elif 'jsonrpc' in request:
error['jsonrpc'] = request['jsonrpc']
error['error']['data'] = trace
else:
error['version'] = '1.0'
error['error']['error'] = trace
return json.dumps(error)
def now_in_utc(self):
# noqa Taken from http://stackoverflow.com/questions/3401428/how-to-get-an-isoformat-datetime-string-including-the-default-timezone @IgnorePep8
dtnow = datetime.datetime.now()
dtutcnow = datetime.datetime.utcnow()
delta = dtnow - dtutcnow
hh, mm = divmod((delta.days * 24 * 60 * 60 + delta.seconds + 30) // 60,
60)
return "%s%+02d:%02d" % (dtnow.isoformat(), hh, mm)
application = Application()
# This is the uwsgi application dictionary. On startup uwsgi will look
# for this dict and pull its configuration from here.
# This simply lists where to "mount" the application in the URL path
#
# This uwsgi module "magically" appears when running the app within
# uwsgi and is not available otherwise, so wrap an exception handler
# around it
#
# To run this server in uwsgi with 4 workers listening on port 9999 use:
# uwsgi -M -p 4 --http :9999 --wsgi-file _this_file_
# To run a using the single threaded python BaseHTTP service
# listening on port 9999 by default execute this file
#
try:
import uwsgi
# Before we do anything with the application, see if the
# configs specify patching all std routines to be asynch
# *ONLY* use this if you are going to wrap the service in
# a wsgi container that has enabled gevent, such as
# uwsgi with the --gevent option
if config is not None and config.get('gevent_monkeypatch_all', False):
print("Monkeypatching std libraries for async")
from gevent import monkey
monkey.patch_all()
uwsgi.applications = {'': application}
except ImportError:
# Not available outside of wsgi, ignore
pass
_proc = None
def start_server(host='localhost', port=0, newprocess=False):
    '''
    By default, will start the server on localhost on a system-assigned port
    in the main thread. Execution of the main thread will stay in the server
    main loop until interrupted. To run the server in a separate process, and
    thus allow the stop_server method to be called, set newprocess = True. This
    also allows the port number to be returned.'''
global _proc
if _proc:
raise RuntimeError('server is already running')
httpd = make_server(host, port, application)
port = httpd.server_address[1]
print("Listening on port %s" % port)
if newprocess:
_proc = Process(target=httpd.serve_forever)
_proc.daemon = True
_proc.start()
else:
httpd.serve_forever()
return port
def stop_server():
global _proc
_proc.terminate()
_proc = None
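# Example: run the service on an OS-assigned port in a child process so it
# can be stopped programmatically (a sketch, not part of the service API):
#
#     port = start_server(newprocess=True)
#     # ... issue JSON-RPC POST requests against http://localhost:<port> ...
#     stop_server()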
def process_async_cli(input_file_path, output_file_path, token):
exit_code = 0
with open(input_file_path) as data_file:
req = json.load(data_file)
if 'version' not in req:
req['version'] = '1.1'
if 'id' not in req:
req['id'] = str(_random.random())[2:]
ctx = MethodContext(application.userlog)
if token:
user = application.auth_client.get_user(token)
ctx['user_id'] = user
ctx['authenticated'] = 1
ctx['token'] = token
if 'context' in req:
ctx['rpc_context'] = req['context']
ctx['CLI'] = 1
ctx['module'], ctx['method'] = req['method'].split('.')
prov_action = {'service': ctx['module'], 'method': ctx['method'],
'method_params': req['params']}
ctx['provenance'] = [prov_action]
resp = None
try:
resp = application.rpc_service.call_py(ctx, req)
except JSONRPCError as jre:
trace = jre.trace if hasattr(jre, 'trace') else None
resp = {'id': req['id'],
'version': req['version'],
'error': {'code': jre.code,
'name': jre.message,
'message': jre.data,
'error': trace}
}
except Exception:
trace = traceback.format_exc()
resp = {'id': req['id'],
'version': req['version'],
'error': {'code': 0,
'name': 'Unexpected Server Error',
'message': 'An unexpected server error occurred',
'error': trace}
}
if 'error' in resp:
exit_code = 500
with open(output_file_path, "w") as f:
f.write(json.dumps(resp, cls=JSONObjectEncoder))
return exit_code
if __name__ == "__main__":
if (len(sys.argv) >= 3 and len(sys.argv) <= 4 and
os.path.isfile(sys.argv[1])):
token = None
if len(sys.argv) == 4:
if os.path.isfile(sys.argv[3]):
with open(sys.argv[3]) as token_file:
token = token_file.read()
else:
token = sys.argv[3]
sys.exit(process_async_cli(sys.argv[1], sys.argv[2], token))
try:
opts, args = getopt(sys.argv[1:], "", ["port=", "host="])
except GetoptError as err:
# print help information and exit:
print(str(err)) # will print something like "option -a not recognized"
sys.exit(2)
port = 9999
host = 'localhost'
for o, a in opts:
if o == '--port':
port = int(a)
elif o == '--host':
host = a
print("Host set to %s" % host)
else:
assert False, "unhandled option"
start_server(host=host, port=port)
# print("Listening on port %s" % port)
# httpd = make_server( host, port, application)
#
# httpd.serve_forever()
|
bot.py
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
from sending_schelude import job_sending
from longpool import job_longpool
from threading import Thread
print('Bot started...')
th_1 = Thread(target = job_longpool)
th_2 = Thread(target = job_sending)
# start the bot's worker threads
if __name__ == '__main__':
th_1.start()
th_2.start()
|
kombu_server.py
|
# Copyright 2015 - Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import amqp
import socket
import threading
import time
import kombu
from oslo_config import cfg
from oslo_log import log as logging
from stevedore import driver
from mistral import context as auth_ctx
from mistral import exceptions as exc
from mistral.rpc import base as rpc_base
from mistral.rpc.kombu import base as kombu_base
from mistral.rpc.kombu import kombu_hosts
LOG = logging.getLogger(__name__)
CONF = cfg.CONF
_pool_opts = [
cfg.IntOpt(
'executor_thread_pool_size',
default=64,
deprecated_name="rpc_thread_pool_size",
help='Size of executor thread pool when'
' executor is threading or eventlet.'
),
]
class KombuRPCServer(rpc_base.RPCServer, kombu_base.Base):
def __init__(self, conf):
super(KombuRPCServer, self).__init__(conf)
CONF.register_opts(_pool_opts)
kombu_base.set_transport_options()
self._register_mistral_serialization()
self.topic = conf.topic
self.server_id = conf.host
self._hosts = kombu_hosts.KombuHosts(CONF)
self._executor_threads = CONF.executor_thread_pool_size
self.exchange = CONF.control_exchange
# TODO(rakhmerov): We shouldn't rely on any properties related
# to oslo.messaging. Only "transport_url" should matter.
self.virtual_host = CONF.oslo_messaging_rabbit.rabbit_virtual_host
self.durable_queue = CONF.oslo_messaging_rabbit.amqp_durable_queues
self.auto_delete = CONF.oslo_messaging_rabbit.amqp_auto_delete
self.routing_key = self.topic
self.channel = None
self.conn = None
self._running = threading.Event()
self._stopped = threading.Event()
self.endpoints = []
self._worker = None
self._thread = None
# TODO(ddeja): Those 2 options should be gathered from config.
self._sleep_time = 1
self._max_sleep_time = 10
@property
def is_running(self):
"""Return whether server is running."""
return self._running.is_set()
def run(self, executor='blocking'):
if self._thread is None:
self._thread = threading.Thread(target=self._run, args=(executor,))
self._thread.daemon = True
self._thread.start()
def _run(self, executor):
"""Start the server."""
self._prepare_worker(executor)
while True:
try:
_retry_connection = False
host = self._hosts.get_host()
self.conn = self._make_connection(
host.hostname,
host.port,
host.username,
host.password,
self.virtual_host,
)
conn = kombu.connections[self.conn].acquire(block=True)
exchange = self._make_exchange(
self.exchange,
durable=self.durable_queue,
auto_delete=self.auto_delete
)
queue = self._make_queue(
self.topic,
exchange,
routing_key=self.routing_key,
durable=self.durable_queue,
auto_delete=self.auto_delete
)
with conn.Consumer(
queues=queue,
callbacks=[self._process_message],
) as consumer:
consumer.qos(prefetch_count=1)
self._running.set()
self._stopped.clear()
LOG.info(
"Connected to AMQP at %s:%s",
host.hostname,
host.port
)
self._sleep_time = 1
while self.is_running:
try:
conn.drain_events(timeout=1)
except socket.timeout:
pass
except KeyboardInterrupt:
self.stop()
LOG.info(
"Server with id='%w' stopped.",
self.server_id
)
return
except (socket.error, amqp.exceptions.ConnectionForced) as e:
LOG.debug("Broker connection failed: %s", e)
_retry_connection = True
finally:
self._stopped.set()
if _retry_connection:
LOG.debug(
"Sleeping for %s seconds, then retrying "
"connection",
self._sleep_time
)
time.sleep(self._sleep_time)
self._sleep_time = min(
self._sleep_time * 2,
self._max_sleep_time
)
def stop(self, graceful=False):
self._running.clear()
if graceful:
self.wait()
def wait(self):
self._stopped.wait()
try:
self._worker.shutdown(wait=True)
except AttributeError as e:
LOG.warning("Cannot stop worker in graceful way: %s", e)
def _get_rpc_method(self, method_name):
for endpoint in self.endpoints:
if hasattr(endpoint, method_name):
return getattr(endpoint, method_name)
return None
@staticmethod
def _set_auth_ctx(ctx):
if not isinstance(ctx, dict):
return
context = auth_ctx.MistralContext.from_dict(ctx)
auth_ctx.set_ctx(context)
return context
def publish_message(self, body, reply_to, corr_id, res_type='response'):
if res_type != 'error':
body = self._serialize_message({'body': body})
with kombu.producers[self.conn].acquire(block=True) as producer:
producer.publish(
body=body,
exchange=self.exchange,
routing_key=reply_to,
correlation_id=corr_id,
serializer='pickle' if res_type == 'error' else 'json',
type=res_type
)
def _on_message_safe(self, request, message):
try:
return self._on_message(request, message)
except Exception as e:
            LOG.warning(
                "Got exception while consuming message. The exception will "
                "be sent back to the caller."
            )
            LOG.debug("Exception: %s", str(e))
# Wrap exception into another exception for compatibility
# with oslo.
self.publish_message(
exc.KombuException(e),
message.properties['reply_to'],
message.properties['correlation_id'],
res_type='error'
)
finally:
message.ack()
def _on_message(self, request, message):
LOG.debug('Received message %s', request)
is_async = request.get('async', False)
rpc_ctx = request.get('rpc_ctx')
redelivered = message.delivery_info.get('redelivered')
rpc_method_name = request.get('rpc_method')
arguments = self._deserialize_message(request.get('arguments'))
correlation_id = message.properties['correlation_id']
reply_to = message.properties['reply_to']
if redelivered is not None:
rpc_ctx['redelivered'] = redelivered
rpc_context = self._set_auth_ctx(rpc_ctx)
rpc_method = self._get_rpc_method(rpc_method_name)
if not rpc_method:
raise exc.MistralException("No such method: %s" % rpc_method_name)
response = rpc_method(rpc_ctx=rpc_context, **arguments)
if not is_async:
            LOG.debug(
                "RPC server sent a reply [reply_to = %s, correlation_id = %s]",
                reply_to,
                correlation_id
            )
self.publish_message(
response,
reply_to,
correlation_id
)
def register_endpoint(self, endpoint):
self.endpoints.append(endpoint)
def _process_message(self, request, message):
self._worker.submit(self._on_message_safe, request, message)
def _prepare_worker(self, executor='blocking'):
mgr = driver.DriverManager('kombu_driver.executors', executor)
executor_opts = {}
if executor == 'threading':
executor_opts['max_workers'] = self._executor_threads
self._worker = mgr.driver(**executor_opts)
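# A minimal, self-contained sketch of the capped exponential backoff used by
# the reconnect loop in _run() above (standard library only; the operation
# passed in and its failure mode are hypothetical):
def _retry_with_backoff(operation, sleep_time=1, max_sleep_time=10, attempts=5):
    for _ in range(attempts):
        try:
            return operation()
        except OSError:
            # Sleep, then double the delay up to the configured cap.
            time.sleep(sleep_time)
            sleep_time = min(sleep_time * 2, max_sleep_time)
    raise OSError("gave up after %d attempts" % attempts)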
|
srxdbepinexinstallerui.pyw
|
from tkinter import *
from tkinter import messagebox
from tkinter.filedialog import askdirectory
from tkinter import ttk
import threading
import sys
from os import path
from modules.gui import GuiUtils, PrintLogger
from modules.steamutils import SteamUtils
from modules.github import GitHubUtils
from modules.installer import Installer
class GUIWindow:
def __init__(self, win : Tk):
self.win = win
# Define UI Variables
self.paddingInt = 3
self.canInstall = False
self.initConsole()
# Init Modules and VersionVar
print(f"Initialising...")
self.steamutils = SteamUtils()
print(f"Found Game Path: {self.steamutils.gameDirectory}")
threading.Thread(target=self.initLongModules, daemon=True).start()
self.initTheme()
self.initUI()
def initConsole(self):
self.consoleOutputText = Text(self.win, bg="black", fg="white", border=0, state=DISABLED)
self.consoleOutputText.pack(expand=1, fill=BOTH)
self.consoleOutputText.grid(row=0, column=0, columnspan=4, sticky=NSEW,)
pl = PrintLogger(self.consoleOutputText)
sys.stdout = pl
print(GuiUtils().asciiArt)
def initLongModules(self):
self.bepinutils = GitHubUtils()
if (len(self.bepinutils.downloadURLs) != 0):
print(f"Got BepInEx Metadata from: {self.bepinutils.baseUrl}")
self.canInstall = True
self.initDropDown()
print(f"Initialisation Finished.")
def initTheme(self):
# Theme
style = ttk.Style(self.win)
style.theme_names()
if(hasattr(sys, "_MEIPASS")):
baseStylePath = sys._MEIPASS
else:
baseStylePath = path.dirname(__file__)
        try:
            self.win.tk.call('source', path.join(baseStylePath, "assets", "themes", "azure-dark.tcl"))
            style.theme_use('azure-dark')
        except Exception as e:
            # Theme could not be sourced (e.g. missing asset); keep the default theme.
            print(f"Could not load theme: {e}")
def initUI(self):
self.selectedVersion = StringVar()
        # Softening grid: let the rows and the two middle columns expand
        n_rows = 1
        for i in range(n_rows):
            self.win.grid_rowconfigure(i, weight=1)
        self.win.grid_columnconfigure(1, weight=1)
        self.win.grid_columnconfigure(2, weight=1)
self.initDirectorySelector()
self.initDropDown()
self.initButtons()
def initDirectorySelector(self):
# Directory
self.DirectoryInput = ttk.Entry(self.win)
self.DirectoryInput.grid(row=1, column=1, sticky=S+E+W, columnspan=2, pady=self.paddingInt, padx=self.paddingInt)
self.DirectoryInput.insert(END, self.steamutils.gameDirectory)
# Selector
self.DirectoryInputButton = ttk.Button(self.win, text=f'Select')
self.DirectoryInputButton.grid(row=1, column=0, sticky=S+E+W, pady=self.paddingInt, padx=self.paddingInt)
self.DirectoryInputButton.bind("<Button>", self.choose)
def choose(self, args):
selectorGameDirectory = askdirectory()
if(len(selectorGameDirectory) != 0):
self.DirectoryInput.delete(0, END)
self.DirectoryInput.insert(0, selectorGameDirectory)
def initDropDown(self):
self.VersionDropDown = ttk.Combobox(self.win, textvariable=self.selectedVersion, width=5)
self.VersionDropDown.grid(row=1, column=3, sticky=S+E+W, pady=self.paddingInt, padx=self.paddingInt)
try:
self.VersionDropDown['values'] = self.bepinutils.downloadVersions
self.VersionDropDown.set(self.bepinutils.downloadVersions[0])
except:
self.VersionDropDown.set("Loading...")
def initButtons(self):
# Install Button
self.installButton = ttk.Button(self.win, text=f'Install', command=lambda isUninstall=False, : self.install(isUninstall))
self.installButton.grid(row=2, column=0, sticky=E+W, columnspan = 3, pady=self.paddingInt, padx=self.paddingInt)
# Uninstall Button
self.uninstallButton = ttk.Button(self.win, text=f'Uninstall', command=lambda isUninstall=True, : self.install(isUninstall))
self.uninstallButton.grid(row=2, column=3, sticky=E+W, columnspan = 1, pady=self.paddingInt, padx=self.paddingInt)
def install(self, isUninstall):
self.steamutils.gameDirectory = self.DirectoryInput.get()
        if path.exists(self.steamutils.gameDirectory) and self.steamutils.gameDirectory != "":
installerInstance = Installer(self.steamutils.gameDirectory)
installthread = None
if (not isUninstall and self.canInstall):
installUnityLibs = False
downloadUrl = self.bepinutils.downloadURLs[self.bepinutils.downloadVersions.index(self.selectedVersion.get())]
if (self.selectedVersion.get().isnumeric() and int(self.selectedVersion.get()) < 378):
installUnityLibs = True
                installthread = threading.Thread(target=installerInstance.install, args=(downloadUrl, installUnityLibs), daemon=True)
                installthread.start()
elif (isUninstall):
preservePlugins = bool(messagebox.askyesno('Preserve Backup Files', 'Would you still like to keep the "plugins" and "config" folders inside "BepInEx"?'))
                installthread = threading.Thread(target=installerInstance.uninstall, args=(preservePlugins,), daemon=True)
                installthread.start()
else:
print("Installer may not have initialised properly yet.")
else:
print("Please Enter a Valid Path.")
try:
window=Tk()
mywin=GUIWindow(window)
window.title('SRXDBepInExInstaller')
window.geometry("520x405")
window.mainloop()
except Exception as e:
    messagebox.showerror(title=type(e).__name__, message=str(e))
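# Sketch of the PrintLogger idea used above: a file-like object that appends
# everything written to sys.stdout into a Tkinter Text widget. This is an
# illustrative stand-in, not the actual modules.gui.PrintLogger implementation.
class TextRedirector:
    def __init__(self, widget):
        self.widget = widget
    def write(self, text):
        # The console widget is kept DISABLED so users can't type into it;
        # temporarily enable it to append output, then lock it again.
        self.widget.configure(state=NORMAL)
        self.widget.insert(END, text)
        self.widget.configure(state=DISABLED)
        self.widget.see(END)  # keep the latest output visible
    def flush(self):
        # sys.stdout replacements must provide flush(); nothing is buffered here.
        pass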
|
main.py
|
import os
import json
import urllib.request
import urllib.parse
import zipfile
import tarfile
import io
import sys
import platform
import time
import threading
import subprocess
import secrets
from config import PORT, TLS_DOMAIN, AUTHTOKEN
from http.server import HTTPServer, BaseHTTPRequestHandler
class GETHandler(BaseHTTPRequestHandler):
def do_GET(self):
self.send_response(200)
self.send_header("Content-type", "text/html")
self.end_headers()
self.wfile.write(tls_link.encode("utf-8"))
def stay_alive():
server_address = ("0.0.0.0", 8080)
http_server = HTTPServer(server_address, GETHandler)
http_server_thread = threading.Thread(target=http_server.serve_forever)
http_server_thread.start()
def get_ngrok():
    if uname.system != "Windows":
        os.system("killall ngrok > /dev/null 2>&1")
    else:
        os.system("taskkill /f /im ngrok.exe 2> nul")
ngrok_file = open(
"ngrok.exe" if uname.system == "Windows" else "ngrok",
"wb",
)
link = ""
if uname.system == "Windows":
if uname.machine == "x86_64" or uname.machine == "AMD64":
link = "https://bin.equinox.io/c/4VmDzA7iaHb/ngrok-stable-windows-amd64.zip"
elif uname.system == "Linux":
if uname.machine == "aarch64" or uname.machine == "arm64":
link = "https://bin.equinox.io/c/4VmDzA7iaHb/ngrok-stable-linux-arm64.tgz"
if uname.machine == "x86_64":
link = "https://bin.equinox.io/c/4VmDzA7iaHb/ngrok-stable-linux-amd64.zip"
if "armv" in uname.machine:
link = "https://bin.equinox.io/c/4VmDzA7iaHb/ngrok-stable-linux-arm.zip"
elif uname.system == "Darwin":
if uname.machine == "x86_64":
link = "https://bin.equinox.io/c/4VmDzA7iaHb/ngrok-stable-darwin-amd64.zip"
elif uname.machine == "arm64":
pass
# ios or m1
else:
sys.stderr.write("Machine not supported")
sys.exit()
elif uname.system == "FreeBSD":
pass
else:
sys.stderr.write("Machine not supported")
sys.exit()
if link:
ngrok = urllib.request.urlopen(link).read()
if link.endswith(".tgz"):
tar_file = io.BytesIO(ngrok)
binary = gzip.GzipFile(fileobj=tar_file)
ngrok_file.write(binary.read())
if link.endswith(".zip"):
with zipfile.ZipFile(io.BytesIO(ngrok)) as zipped:
for zipinfo in zipped.infolist():
with zipped.open(zipinfo) as binary:
ngrok_file.write(binary.read())
ngrok_file.close()
os.system("chmod +x ngrok") if uname.system != "Windows" else None
def expose_server():
token_cmd = f"./ngrok authtoken {AUTHTOKEN} > /dev/null"
ngrok_cmd = f"./ngrok tcp {PORT} --log=stdout > ngrok.log &"
if uname.system == "Windows":
ngrok_path = os.path.join(os.getcwd(), "ngrok.exe")
token_cmd = f'"{ngrok_path}" authtoken {AUTHTOKEN} > nul'
ngrok_cmd = f'"{ngrok_path}" tcp {PORT} --log=stdout > ngrok.log &'
tries = 5
os.system(token_cmd)
ngrok_thread = threading.Thread(target=os.system, args=[ngrok_cmd])
ngrok_thread.start()
while tries != 0:
try:
url = json.loads(
urllib.request.urlopen("http://localhost:4040/api/tunnels")
.read()
.decode("utf-8")
)["tunnels"][0]["public_url"]
url = url.replace("tcp://", "")
return url.split(":")[0], url.split(":")[1]
except Exception:
time.sleep(1)
tries -= 1
raise Exception("Timeout")
if __name__ == "__main__":
uname = platform.uname()
python_exec = sys.executable
if platform.system() == "Windows":
pass
file_dir = os.path.dirname(__file__)
os.chdir(file_dir if file_dir != '' else '.')
if not os.path.exists("ngrok"):
get_ngrok()
url, port = expose_server()
secret = secrets.token_hex(nbytes=16)
open("secret", "w").write(secret)
tls_secret = "ee" + secret + TLS_DOMAIN.encode().hex()
params = {"server": url, "port": port, "secret": tls_secret}
    params_encoded = urllib.parse.urlencode(params, safe=":")
    tls_link = "tg://proxy?{}".format(params_encoded)
print(tls_link)
if os.getenv("REPL_ID") or os.getenv("RUN_HTTP"):
stay_alive()
    if uname.system != "Windows":
        os.system(f"{sys.executable} mtprotoproxy.py > /dev/null 2>&1")
    else:
        subprocess.run(f"\"{python_exec}\" \"{file_dir + '/' + 'mtprotoproxy.py'}\" > nul 2>&1", shell=True)
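# Sketch of the polling pattern expose_server() uses: repeatedly query a local
# JSON endpoint until it answers or the retry budget runs out. The URL below
# is ngrok's real local API; the helper itself is a generic illustration.
def poll_json(api_url="http://localhost:4040/api/tunnels", tries=5, delay=1):
    for _ in range(tries):
        try:
            return json.loads(urllib.request.urlopen(api_url).read().decode("utf-8"))
        except Exception:
            # The API may not be up yet; wait and retry.
            time.sleep(delay)
    raise Exception("Timeout")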
|
TCController.py
|
import logging
import os
import threading
import time
from Utility.util import get_current_unix
from TrafficController.BWEstimator import BWEstimator
LOGGING_LEVEL = logging.INFO
handler = logging.StreamHandler()
handler.setLevel(LOGGING_LEVEL)
formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
handler.setFormatter(formatter)
logger = logging.getLogger(__name__)
logger.setLevel(LOGGING_LEVEL)
logger.addHandler(handler)
class TCController:
"""
Used to control the interaction between the python script and the tc shell scripts
"""
def __init__(self,
policy=None,
network_interface='wlp4s0',
pw=None,
BW_Estimator_Rate=2.0,
logging=True):
self.pw = pw
self.dev_interface = network_interface
self.policy = policy
self.logging_path = 'tc_dataframe_' + policy.name
self.tc_process = None
self.run_calculation = False
self.tc_path = '/sbin/tc'
self.BW_Estimator = BWEstimator(BW_Estimator_Rate=BW_Estimator_Rate, network_interface=network_interface)
self.logging_file = None
self.logging = logging
def start_BW_Estimator(self):
self.BW_Estimator.start()
def stop_BW_Estimator(self):
self.BW_Estimator.stop()
def obtain_BW_estimate(self):
return self.BW_Estimator.obtain_estimate()
def start_throttle_thread(self):
"""
        Starts a thread which recalculates the bandwidth every 3 seconds
"""
if not self.run_calculation:
self.run_calculation = True
self.init_throttle()
self.throttle_thread = threading.Thread(target=self.throttle_routine)
self.throttle_thread.daemon = True
self.throttle_thread.start()
    def stop(self):
        """
        Stops the thread
        """
        logger.info('Stopping TC Thread')
if self.run_calculation:
self.run_calculation = False
self.throttle_thread.join()
def init_throttle(self):
"""
Init the traffic control
https://serverfault.com/questions/350023/tc-ingress-policing-and-ifb-mirroring
:return:
"""
"""
init_throttle = ['sudo modprobe ifb',
'sudo ip link set dev ifb0 up',
'sudo %s qdisc add dev %s ingress' % (self.tc_path, self.dev_interface),
'sudo %s filter add dev %s parent ffff: protocol ip u32 match u32 0 0 flowid 1:1 action mirred egress redirect dev ifb0' % (
self.tc_path, self.dev_interface),
'sudo %s qdisc add dev ifb0 root tbf rate 1mbit latency 50ms burst 1540' % self.tc_path]
"""
        init_throttle = [
            'sudo modprobe ifb numifbs=1',
            # --------- Add ingress qdisc on the physical interface
            'sudo %s qdisc add dev %s handle ffff: ingress' % (self.tc_path, self.dev_interface),
# ----------- enable the ifb interfaces:
'sudo ifconfig ifb0 up',
# -------- And redirect ingress traffic from the physical interfaces to corresponding ifb interface. For wlp4s0 -> ifb0:
'sudo %s filter add dev %s parent ffff: protocol all u32 match u32 0 0 action mirred egress redirect dev ifb0' % (
self.tc_path, self.dev_interface),
# -------------- Limit Speed
'sudo %s qdisc add dev ifb0 root tbf rate 1mbit latency 50ms burst 1540' % self.tc_path
]
for cmd in init_throttle:
logger.debug('Spawning %s ' % cmd)
# os.popen("sudo -S %s" % (cmd), 'w').write(self.pw)
os.system('echo %s|sudo -S %s' % (self.pw, cmd))
def set_logging_path(self, path_name):
self.logging_path = path_name
def throttle_routine(self):
"""
Contains the routine which continously samples from the policy
:return:
"""
while self.run_calculation:
time_sleep, bw = self.policy.sample()
self.throttle(bandwidth=bw, duration=time_sleep)
self.stop_throttle()
def throttle(self, bandwidth, duration):
"""
:param bandwidth: bandwidth in mbit to which we want to restrict the download speed
:param duration: duration of the limitation
:return:
"""
if self.logging_file is None and self.logging:
self.logging_file = open(self.logging_path, 'w')
throttle_cmd = 'sudo %s qdisc change dev ifb0 root tbf rate %.5fmbit latency 50ms burst 1540'
cmd = throttle_cmd % (self.tc_path, bandwidth)
logger.debug('Spawning %s' % cmd)
# os.popen("sudo -S %s" % (cmd), 'w').write(self.pw)
os.system('echo %s | sudo -S %s' % (self.pw, cmd))
        time.sleep(duration)
        if self.logging_file is not None:
            logging_output = '%.3f\t%.3f\n' % (get_current_unix(), bandwidth)
            self.logging_file.write(logging_output)
            logger.debug('Writing %s' % logging_output)
def stop_throttle(self):
if self.logging_file is not None:
self.logging_file.close()
self.logging_file = None
cleanup_cmd = ['sudo %s qdisc del dev %s ingress' % (self.tc_path, str(self.dev_interface)),
'sudo %s qdisc del dev ifb0 root' % self.tc_path]
        # ------------------------------ Better safe than sorry: delete all rules imposed by tc
for cmd in cleanup_cmd:
logger.debug(cmd)
#os.popen("sudo -S %s" % (cmd), 'w').write(self.pw)
os.system('echo %s|sudo -S %s' % (self.pw, cmd))
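# A safer alternative (sketch) to the "echo <pw> | sudo -S <cmd>" pattern used
# above: feed the password to sudo on stdin via subprocess, instead of placing
# it on a shell command line where other users could see it in `ps` output.
import subprocess
def run_as_root(cmd, password):
    """Run `cmd` under sudo, supplying `password` on stdin (illustrative)."""
    return subprocess.run(
        ['sudo', '-S'] + cmd.split(),
        input=password + '\n',
        capture_output=True,
        text=True,
    )
# e.g. run_as_root('tc qdisc del dev ifb0 root', pw)  # `pw` as passed to TCController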
|
Synchronize_3_Robots.py
|
# Type help("robolink") or help("robodk") for more information
# Press F5 to run the script
# Documentation: https://robodk.com/doc/en/RoboDK-API.html
# Reference: https://robodk.com/doc/en/PythonAPI/index.html
#
# This example shows to synchronize multiple robots at the same time
from robodk.robolink import * # API to communicate with RoboDK for offline/online programming
from robodk.robomath import * # Robot toolbox
import threading
import queue
import time
#----------------------------------------------
# Function definitions and global variable declarations
# Global variables used to synchronize the robot movements
# These variables are managed by SyncSet() and SyncWait()
SYNC_COUNT = 0
SYNC_TOTAL = 0
SYNC_ID = 0
lock = threading.Lock()
def SyncSet(total_sync):
"""SyncSet will set the number of total robot programs (threads) that must be synchronized togeter.
Every time SyncSet is called SYNC_ID is increased by one."""
global SYNC_COUNT
global SYNC_TOTAL
global SYNC_ID
with lock:
SYNC_COUNT = 0
SYNC_TOTAL = total_sync
SYNC_ID = SYNC_ID + 1
#print('SyncSet')
def SyncWait():
"""SyncWait will block the robot movements for a robot when necessary, synchronizing the movements sequentially.
Use SyncSet(nrobots) to define how many robots must be synchronized together."""
global SYNC_COUNT
# Save a local variable with the sync event id
sync_id = SYNC_ID
with lock:
# Increase the number of threads that are synchronized
SYNC_COUNT += 1
# Move to the next sync event if all threads reached the SyncWait (SYNC_COUNT = SYNC_TOTAL)
if SYNC_COUNT >= SYNC_TOTAL:
SyncSet(SYNC_TOTAL)
return
    # Wait for a SyncSet to move forward
while sync_id >= SYNC_ID:
time.sleep(0.0001)
# Main procedure to move each robot
def DoWeld(q, robotname):
# Any interaction with RoboDK must be done through Robolink()
# Each robot movement requires a new Robolink() object (new link of communication).
# Two robots can't be moved by the same communication link.
rdk = Robolink()
# get the robot item:
robot = rdk.Item(robotname)
# get the home joints target
home = robot.JointsHome()
# get the reference welding target:
target = rdk.Item('Target')
# get the reference frame and set it to the robot
reference = target.Parent()
robot.setPoseFrame(reference)
# get the pose of the target (4x4 matrix):
poseref = target.Pose()
pose_approach = poseref * transl(0, 0, -100)
# move the robot to home, then to the center:
robot.MoveJ(home)
robot.MoveJ(pose_approach)
SyncWait()
robot.MoveL(target)
    # make a hexagon around the center:
for i in range(7):
ang = i * 2 * pi / 6 #angle: 0, 60, 120, ...
posei = poseref * rotz(ang) * transl(200, 0, 0) * rotz(-ang)
SyncWait()
robot.MoveL(posei)
# move back to the center, then home:
SyncWait()
robot.MoveL(target)
robot.MoveL(pose_approach)
robot.MoveJ(home)
q.put('Robot %s finished' % robotname)
#----------------------------------------
# Python program start
# retrieve all available robots in the RoboDK station (as a list of names)
RDK = Robolink()
robots = RDK.ItemList(ITEM_TYPE_ROBOT)
print(robots)
# retrieve the number of robots to synchronize together
nrobots = len(robots)
SyncSet(nrobots)
# the queue allows sharing messages between threads
q = queue.Queue()
# Start the DoWeld program for all robots. Each robot will run on a separate thread.
threads = []
for i in range(nrobots):
robotname = robots[i]
t = threading.Thread(target=DoWeld, args=(q, robotname))
t.daemon = True
t.start()
threads.append(t)
# wait for every thread to finish
for x in threads:
x.join()
while not q.empty():
    print(q.get())
print('Main program finished')
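# The SyncSet/SyncWait pair above is a hand-rolled reusable barrier; the
# standard library offers the same rendezvous via threading.Barrier. A
# minimal self-contained sketch (independent of RoboDK):
def _barrier_demo(nworkers=3, nsteps=2):
    barrier = threading.Barrier(nworkers)
    def worker(name):
        for step in range(nsteps):
            barrier.wait()  # block here until all nworkers threads arrive
            print('%s passed step %d' % (name, step))
    workers = [threading.Thread(target=worker, args=('robot %d' % i,)) for i in range(nworkers)]
    for w in workers:
        w.start()
    for w in workers:
        w.join()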
|
audio_reader.py
|
import fnmatch
import os
import random
import re
import threading
import librosa
import numpy as np
import tensorflow as tf
FILE_PATTERN = r'p([0-9]+)_([0-9]+)\.wav'
def get_category_cardinality(files):
id_reg_expression = re.compile(FILE_PATTERN)
min_id = None
max_id = None
for filename in files:
matches = id_reg_expression.findall(filename)[0]
id, recording_id = [int(id_) for id_ in matches]
if min_id is None or id < min_id:
min_id = id
if max_id is None or id > max_id:
max_id = id
return min_id, max_id
def randomize_files(files):
for file in files:
file_index = random.randint(0, (len(files) - 1))
yield files[file_index]
def find_files(directory, pattern='*.wav'):
'''Recursively finds all files matching the pattern.'''
files = []
for root, dirnames, filenames in os.walk(directory):
for filename in fnmatch.filter(filenames, pattern):
files.append(os.path.join(root, filename))
return files
def load_generic_audio(directory, sample_rate):
'''Generator that yields audio waveforms from the directory.'''
files = find_files(directory)
id_reg_exp = re.compile(FILE_PATTERN)
print("files length: {}".format(len(files)))
randomized_files = randomize_files(files)
for filename in randomized_files:
ids = id_reg_exp.findall(filename)
if not ids:
# The file name does not match the pattern containing ids, so
# there is no id.
category_id = None
else:
# The file name matches the pattern for containing ids.
category_id = int(ids[0][0])
audio, _ = librosa.load(filename, sr=sample_rate, mono=True)
audio = audio.reshape(-1, 1)
yield audio, filename, category_id
def trim_silence(audio, threshold, frame_length=2048):
'''Removes silence at the beginning and end of a sample.'''
if audio.size < frame_length:
frame_length = audio.size
energy = librosa.feature.rmse(audio, frame_length=frame_length)
frames = np.nonzero(energy > threshold)
indices = librosa.core.frames_to_samples(frames)[1]
# Note: indices can be an empty array, if the whole audio was silence.
return audio[indices[0]:indices[-1]] if indices.size else audio[0:0]
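# For reference, a dependency-free sketch of the same idea with plain NumPy
# (frame-wise RMS energy compared against a threshold). Illustrative only;
# librosa handles hop sizes and edge padding more carefully.
def trim_silence_np(audio, threshold, frame_length=2048):
    if audio.size == 0:
        return audio
    frame_length = min(frame_length, audio.size)
    n_frames = audio.size // frame_length
    frames = audio[:n_frames * frame_length].reshape(n_frames, frame_length)
    energy = np.sqrt((frames ** 2).mean(axis=1))
    loud = np.nonzero(energy > threshold)[0]
    if loud.size == 0:
        return audio[0:0]
    return audio[loud[0] * frame_length:(loud[-1] + 1) * frame_length]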
def not_all_have_id(files):
''' Return true iff any of the filenames does not conform to the pattern
we require for determining the category id.'''
id_reg_exp = re.compile(FILE_PATTERN)
for file in files:
ids = id_reg_exp.findall(file)
if not ids:
return True
return False
class AudioReader(object):
'''Generic background audio reader that preprocesses audio files
and enqueues them into a TensorFlow queue.'''
def __init__(self,
audio_dir,
coord,
sample_rate,
gc_enabled,
receptive_field,
sample_size=None,
silence_threshold=None,
queue_size=32):
self.audio_dir = audio_dir
self.sample_rate = sample_rate
self.coord = coord
self.sample_size = sample_size
self.receptive_field = receptive_field
self.silence_threshold = silence_threshold
self.gc_enabled = gc_enabled
self.threads = []
self.sample_placeholder = tf.placeholder(dtype=tf.float32, shape=None)
self.queue = tf.PaddingFIFOQueue(queue_size,
['float32'],
shapes=[(None, 1)])
self.enqueue = self.queue.enqueue([self.sample_placeholder])
if self.gc_enabled:
self.id_placeholder = tf.placeholder(dtype=tf.int32, shape=())
self.gc_queue = tf.PaddingFIFOQueue(queue_size, ['int32'],
shapes=[()])
self.gc_enqueue = self.gc_queue.enqueue([self.id_placeholder])
# TODO Find a better way to check this.
# Checking inside the AudioReader's thread makes it hard to terminate
# the execution of the script, so we do it in the constructor for now.
files = find_files(audio_dir)
if not files:
raise ValueError("No audio files found in '{}'.".format(audio_dir))
if self.gc_enabled and not_all_have_id(files):
raise ValueError("Global conditioning is enabled, but file names "
"do not conform to pattern having id.")
        # Determine the number of mutually-exclusive categories we will
        # accommodate in our embedding table.
if self.gc_enabled:
_, self.gc_category_cardinality = get_category_cardinality(files)
# Add one to the largest index to get the number of categories,
# since tf.nn.embedding_lookup expects zero-indexing. This
# means one or more at the bottom correspond to unused entries
# in the embedding lookup table. But that's a small waste of memory
            # to keep the code simpler, and preserves correspondence between
# the id one specifies when generating, and the ids in the
# file names.
self.gc_category_cardinality += 1
print("Detected --gc_cardinality={}".format(
self.gc_category_cardinality))
else:
self.gc_category_cardinality = None
def dequeue(self, num_elements):
output = self.queue.dequeue_many(num_elements)
return output
def dequeue_gc(self, num_elements):
return self.gc_queue.dequeue_many(num_elements)
def thread_main(self, sess):
stop = False
# Go through the dataset multiple times
while not stop:
iterator = load_generic_audio(self.audio_dir, self.sample_rate)
for audio, filename, category_id in iterator:
if self.coord.should_stop():
stop = True
break
if self.silence_threshold is not None:
# Remove silence
audio = trim_silence(audio[:, 0], self.silence_threshold)
audio = audio.reshape(-1, 1)
                    if audio.size == 0:
                        print("Warning: {} was ignored as it contains only "
                              "silence. Consider decreasing trim_silence "
                              "threshold, or adjust volume of the audio."
                              .format(filename))
                        continue
audio = np.pad(audio, [[self.receptive_field, 0], [0, 0]],
'constant')
if self.sample_size:
# Cut samples into pieces of size receptive_field +
# sample_size with receptive_field overlap
while len(audio) > self.receptive_field:
piece = audio[:(self.receptive_field +
self.sample_size), :]
sess.run(self.enqueue,
feed_dict={self.sample_placeholder: piece})
audio = audio[self.sample_size:, :]
if self.gc_enabled:
sess.run(self.gc_enqueue, feed_dict={
self.id_placeholder: category_id})
else:
sess.run(self.enqueue,
feed_dict={self.sample_placeholder: audio})
if self.gc_enabled:
sess.run(self.gc_enqueue,
feed_dict={self.id_placeholder: category_id})
def start_threads(self, sess, n_threads=1):
for _ in range(n_threads):
thread = threading.Thread(target=self.thread_main, args=(sess,))
thread.daemon = True # Thread will close when parent quits.
thread.start()
self.threads.append(thread)
return self.threads
|
framework.py
|
#!/usr/bin/env python
from __future__ import print_function
import gc
import sys
import os
import select
import unittest
import tempfile
import time
import resource
import faulthandler
import random
from collections import deque
from threading import Thread, Event
from inspect import getdoc, isclass
from traceback import format_exception
from logging import FileHandler, DEBUG, Formatter
from scapy.packet import Raw
from hook import StepHook, PollHook
from vpp_pg_interface import VppPGInterface
from vpp_sub_interface import VppSubInterface
from vpp_lo_interface import VppLoInterface
from vpp_papi_provider import VppPapiProvider
from log import *
from vpp_object import VppObjectRegistry
if os.name == 'posix' and sys.version_info[0] < 3:
# using subprocess32 is recommended by python official documentation
# @ https://docs.python.org/2/library/subprocess.html
import subprocess32 as subprocess
else:
import subprocess
"""
Test framework module.
The module provides a set of tools for constructing and running tests and
representing the results.
"""
class _PacketInfo(object):
"""Private class to create packet info object.
Help process information about the next packet.
Set variables to default values.
"""
#: Store the index of the packet.
index = -1
#: Store the index of the source packet generator interface of the packet.
src = -1
#: Store the index of the destination packet generator interface
#: of the packet.
dst = -1
#: Store expected ip version
ip = -1
#: Store expected upper protocol
proto = -1
#: Store the copy of the former packet.
data = None
def __eq__(self, other):
index = self.index == other.index
src = self.src == other.src
dst = self.dst == other.dst
data = self.data == other.data
return index and src and dst and data
def pump_output(testclass):
""" pump output from vpp stdout/stderr to proper queues """
stdout_fragment = ""
stderr_fragment = ""
while not testclass.pump_thread_stop_flag.wait(0):
readable = select.select([testclass.vpp.stdout.fileno(),
testclass.vpp.stderr.fileno(),
testclass.pump_thread_wakeup_pipe[0]],
[], [])[0]
if testclass.vpp.stdout.fileno() in readable:
read = os.read(testclass.vpp.stdout.fileno(), 102400)
if len(read) > 0:
split = read.splitlines(True)
if len(stdout_fragment) > 0:
split[0] = "%s%s" % (stdout_fragment, split[0])
if len(split) > 0 and split[-1].endswith("\n"):
limit = None
else:
limit = -1
stdout_fragment = split[-1]
testclass.vpp_stdout_deque.extend(split[:limit])
if not testclass.cache_vpp_output:
for line in split[:limit]:
testclass.logger.debug(
"VPP STDOUT: %s" % line.rstrip("\n"))
if testclass.vpp.stderr.fileno() in readable:
read = os.read(testclass.vpp.stderr.fileno(), 102400)
if len(read) > 0:
split = read.splitlines(True)
if len(stderr_fragment) > 0:
split[0] = "%s%s" % (stderr_fragment, split[0])
if len(split) > 0 and split[-1].endswith("\n"):
limit = None
else:
limit = -1
stderr_fragment = split[-1]
testclass.vpp_stderr_deque.extend(split[:limit])
if not testclass.cache_vpp_output:
for line in split[:limit]:
testclass.logger.debug(
"VPP STDERR: %s" % line.rstrip("\n"))
# ignoring the dummy pipe here intentionally - the flag will take care
# of properly terminating the loop
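# Sketch of the fragment-stitching idea pump_output() relies on: a trailing
# chunk without "\n" is carried over and prepended to the next read so that
# only whole lines are logged. Self-contained illustration:
def _stitch_lines(chunks):
    fragment = ""
    for chunk in chunks:
        lines = (fragment + chunk).splitlines(True)
        if lines and not lines[-1].endswith("\n"):
            fragment = lines.pop()  # keep the partial line for the next read
        else:
            fragment = ""
        for line in lines:
            yield line
    if fragment:
        yield fragment
# list(_stitch_lines(["ab", "c\nde", "f\n"])) == ["abc\n", "def\n"]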
def running_extended_tests():
    s = os.getenv("EXTENDED_TESTS", "")
    return s.lower() in ("y", "yes", "1")
def running_on_centos():
    os_id = os.getenv("OS_ID", "")
    return "centos" in os_id.lower()
class KeepAliveReporter(object):
"""
Singleton object which reports test start to parent process
"""
_shared_state = {}
def __init__(self):
self.__dict__ = self._shared_state
@property
def pipe(self):
return self._pipe
@pipe.setter
def pipe(self, pipe):
if hasattr(self, '_pipe'):
raise Exception("Internal error - pipe should only be set once.")
self._pipe = pipe
def send_keep_alive(self, test):
"""
Write current test tmpdir & desc to keep-alive pipe to signal liveness
"""
if self.pipe is None:
# if not running forked..
return
if isclass(test):
desc = test.__name__
else:
desc = test.shortDescription()
if not desc:
desc = str(test)
self.pipe.send((desc, test.vpp_bin, test.tempdir, test.vpp.pid))
class VppTestCase(unittest.TestCase):
"""This subclass is a base class for VPP test cases that are implemented as
classes. It provides methods to create and run test case.
"""
@property
def packet_infos(self):
"""List of packet infos"""
return self._packet_infos
@classmethod
def get_packet_count_for_if_idx(cls, dst_if_index):
"""Get the number of packet info for specified destination if index"""
if dst_if_index in cls._packet_count_for_dst_if_idx:
return cls._packet_count_for_dst_if_idx[dst_if_index]
else:
return 0
@classmethod
def instance(cls):
"""Return the instance of this testcase"""
return cls.test_instance
@classmethod
def set_debug_flags(cls, d):
cls.debug_core = False
cls.debug_gdb = False
cls.debug_gdbserver = False
if d is None:
return
dl = d.lower()
if dl == "core":
cls.debug_core = True
elif dl == "gdb":
cls.debug_gdb = True
elif dl == "gdbserver":
cls.debug_gdbserver = True
else:
raise Exception("Unrecognized DEBUG option: '%s'" % d)
@classmethod
def setUpConstants(cls):
""" Set-up the test case class based on environment variables """
try:
s = os.getenv("STEP")
cls.step = True if s.lower() in ("y", "yes", "1") else False
except:
cls.step = False
try:
d = os.getenv("DEBUG")
except:
d = None
try:
c = os.getenv("CACHE_OUTPUT", "1")
cls.cache_vpp_output = \
False if c.lower() in ("n", "no", "0") else True
except:
cls.cache_vpp_output = True
cls.set_debug_flags(d)
cls.vpp_bin = os.getenv('VPP_TEST_BIN', "vpp")
cls.plugin_path = os.getenv('VPP_TEST_PLUGIN_PATH')
cls.extern_plugin_path = os.getenv('EXTERN_PLUGINS')
plugin_path = None
if cls.plugin_path is not None:
if cls.extern_plugin_path is not None:
plugin_path = "%s:%s" % (
cls.plugin_path, cls.extern_plugin_path)
else:
plugin_path = cls.plugin_path
elif cls.extern_plugin_path is not None:
plugin_path = cls.extern_plugin_path
debug_cli = ""
if cls.step or cls.debug_gdb or cls.debug_gdbserver:
debug_cli = "cli-listen localhost:5002"
coredump_size = None
try:
size = os.getenv("COREDUMP_SIZE")
if size is not None:
coredump_size = "coredump-size %s" % size
except:
pass
if coredump_size is None:
coredump_size = "coredump-size unlimited"
cls.vpp_cmdline = [cls.vpp_bin, "unix",
"{", "nodaemon", debug_cli, "full-coredump",
coredump_size, "}", "api-trace", "{", "on", "}",
"api-segment", "{", "prefix", cls.shm_prefix, "}",
"plugins", "{", "plugin", "dpdk_plugin.so", "{",
"disable", "}", "}"]
if plugin_path is not None:
cls.vpp_cmdline.extend(["plugin_path", plugin_path])
cls.logger.info("vpp_cmdline: %s" % cls.vpp_cmdline)
@classmethod
def wait_for_enter(cls):
if cls.debug_gdbserver:
print(double_line_delim)
print("Spawned GDB server with PID: %d" % cls.vpp.pid)
elif cls.debug_gdb:
print(double_line_delim)
print("Spawned VPP with PID: %d" % cls.vpp.pid)
else:
cls.logger.debug("Spawned VPP with PID: %d" % cls.vpp.pid)
return
print(single_line_delim)
print("You can debug the VPP using e.g.:")
if cls.debug_gdbserver:
print("gdb " + cls.vpp_bin + " -ex 'target remote localhost:7777'")
print("Now is the time to attach a gdb by running the above "
"command, set up breakpoints etc. and then resume VPP from "
"within gdb by issuing the 'continue' command")
elif cls.debug_gdb:
print("gdb " + cls.vpp_bin + " -ex 'attach %s'" % cls.vpp.pid)
print("Now is the time to attach a gdb by running the above "
"command and set up breakpoints etc.")
print(single_line_delim)
raw_input("Press ENTER to continue running the testcase...")
@classmethod
def run_vpp(cls):
cmdline = cls.vpp_cmdline
if cls.debug_gdbserver:
gdbserver = '/usr/bin/gdbserver'
if not os.path.isfile(gdbserver) or \
not os.access(gdbserver, os.X_OK):
raise Exception("gdbserver binary '%s' does not exist or is "
"not executable" % gdbserver)
cmdline = [gdbserver, 'localhost:7777'] + cls.vpp_cmdline
cls.logger.info("Gdbserver cmdline is %s", " ".join(cmdline))
try:
cls.vpp = subprocess.Popen(cmdline,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
bufsize=1)
except Exception as e:
cls.logger.critical("Couldn't start vpp: %s" % e)
raise
cls.wait_for_enter()
@classmethod
def setUpClass(cls):
"""
Perform class setup before running the testcase
Remove shared memory files, start vpp and connect the vpp-api
"""
gc.collect() # run garbage collection first
random.seed()
cls.logger = getLogger(cls.__name__)
cls.tempdir = tempfile.mkdtemp(
prefix='vpp-unittest-%s-' % cls.__name__)
cls.file_handler = FileHandler("%s/log.txt" % cls.tempdir)
cls.file_handler.setFormatter(
Formatter(fmt='%(asctime)s,%(msecs)03d %(message)s',
datefmt="%H:%M:%S"))
cls.file_handler.setLevel(DEBUG)
cls.logger.addHandler(cls.file_handler)
cls.shm_prefix = cls.tempdir.split("/")[-1]
os.chdir(cls.tempdir)
cls.logger.info("Temporary dir is %s, shm prefix is %s",
cls.tempdir, cls.shm_prefix)
cls.setUpConstants()
cls.reset_packet_infos()
cls._captures = []
cls._zombie_captures = []
cls.verbose = 0
cls.vpp_dead = False
cls.registry = VppObjectRegistry()
cls.vpp_startup_failed = False
cls.reporter = KeepAliveReporter()
# need to catch exceptions here because if we raise, then the cleanup
# doesn't get called and we might end with a zombie vpp
try:
cls.run_vpp()
cls.reporter.send_keep_alive(cls)
cls.vpp_stdout_deque = deque()
cls.vpp_stderr_deque = deque()
cls.pump_thread_stop_flag = Event()
cls.pump_thread_wakeup_pipe = os.pipe()
cls.pump_thread = Thread(target=pump_output, args=(cls,))
cls.pump_thread.daemon = True
cls.pump_thread.start()
cls.vapi = VppPapiProvider(cls.shm_prefix, cls.shm_prefix, cls)
if cls.step:
hook = StepHook(cls)
else:
hook = PollHook(cls)
cls.vapi.register_hook(hook)
cls.sleep(0.1, "after vpp startup, before initial poll")
try:
hook.poll_vpp()
except:
cls.vpp_startup_failed = True
cls.logger.critical(
"VPP died shortly after startup, check the"
" output to standard error for possible cause")
raise
try:
cls.vapi.connect()
except:
if cls.debug_gdbserver:
print(colorize("You're running VPP inside gdbserver but "
"VPP-API connection failed, did you forget "
"to 'continue' VPP from within gdb?", RED))
raise
except:
t, v, tb = sys.exc_info()
try:
cls.quit()
except:
pass
raise t, v, tb
@classmethod
def quit(cls):
"""
Disconnect vpp-api, kill vpp and cleanup shared memory files
"""
if (cls.debug_gdbserver or cls.debug_gdb) and hasattr(cls, 'vpp'):
cls.vpp.poll()
if cls.vpp.returncode is None:
print(double_line_delim)
print("VPP or GDB server is still running")
print(single_line_delim)
raw_input("When done debugging, press ENTER to kill the "
"process and finish running the testcase...")
os.write(cls.pump_thread_wakeup_pipe[1], 'ding dong wake up')
cls.pump_thread_stop_flag.set()
if hasattr(cls, 'pump_thread'):
cls.logger.debug("Waiting for pump thread to stop")
cls.pump_thread.join()
if hasattr(cls, 'vpp_stderr_reader_thread'):
cls.logger.debug("Waiting for stdderr pump to stop")
cls.vpp_stderr_reader_thread.join()
if hasattr(cls, 'vpp'):
if hasattr(cls, 'vapi'):
cls.vapi.disconnect()
del cls.vapi
cls.vpp.poll()
if cls.vpp.returncode is None:
cls.logger.debug("Sending TERM to vpp")
cls.vpp.terminate()
cls.logger.debug("Waiting for vpp to die")
cls.vpp.communicate()
del cls.vpp
if cls.vpp_startup_failed:
stdout_log = cls.logger.info
stderr_log = cls.logger.critical
else:
stdout_log = cls.logger.info
stderr_log = cls.logger.info
if hasattr(cls, 'vpp_stdout_deque'):
stdout_log(single_line_delim)
stdout_log('VPP output to stdout while running %s:', cls.__name__)
stdout_log(single_line_delim)
vpp_output = "".join(cls.vpp_stdout_deque)
with open(cls.tempdir + '/vpp_stdout.txt', 'w') as f:
f.write(vpp_output)
stdout_log('\n%s', vpp_output)
stdout_log(single_line_delim)
if hasattr(cls, 'vpp_stderr_deque'):
stderr_log(single_line_delim)
stderr_log('VPP output to stderr while running %s:', cls.__name__)
stderr_log(single_line_delim)
vpp_output = "".join(cls.vpp_stderr_deque)
with open(cls.tempdir + '/vpp_stderr.txt', 'w') as f:
f.write(vpp_output)
stderr_log('\n%s', vpp_output)
stderr_log(single_line_delim)
@classmethod
def tearDownClass(cls):
""" Perform final cleanup after running all tests in this test-case """
cls.quit()
cls.file_handler.close()
def tearDown(self):
""" Show various debug prints after each test """
self.logger.debug("--- tearDown() for %s.%s(%s) called ---" %
(self.__class__.__name__, self._testMethodName,
self._testMethodDoc))
if not self.vpp_dead:
self.logger.debug(self.vapi.cli("show trace"))
self.logger.info(self.vapi.ppcli("show interface"))
self.logger.info(self.vapi.ppcli("show hardware"))
self.logger.info(self.vapi.ppcli("show error"))
self.logger.info(self.vapi.ppcli("show run"))
self.registry.remove_vpp_config(self.logger)
# Save/Dump VPP api trace log
api_trace = "vpp_api_trace.%s.log" % self._testMethodName
tmp_api_trace = "/tmp/%s" % api_trace
vpp_api_trace_log = "%s/%s" % (self.tempdir, api_trace)
self.logger.info(self.vapi.ppcli("api trace save %s" % api_trace))
self.logger.info("Moving %s to %s\n" % (tmp_api_trace,
vpp_api_trace_log))
os.rename(tmp_api_trace, vpp_api_trace_log)
self.logger.info(self.vapi.ppcli("api trace dump %s" %
vpp_api_trace_log))
else:
self.registry.unregister_all(self.logger)
def setUp(self):
""" Clear trace before running each test"""
self.reporter.send_keep_alive(self)
self.logger.debug("--- setUp() for %s.%s(%s) called ---" %
(self.__class__.__name__, self._testMethodName,
self._testMethodDoc))
if self.vpp_dead:
raise Exception("VPP is dead when setting up the test")
self.sleep(.1, "during setUp")
self.vpp_stdout_deque.append(
"--- test setUp() for %s.%s(%s) starts here ---\n" %
(self.__class__.__name__, self._testMethodName,
self._testMethodDoc))
self.vpp_stderr_deque.append(
"--- test setUp() for %s.%s(%s) starts here ---\n" %
(self.__class__.__name__, self._testMethodName,
self._testMethodDoc))
self.vapi.cli("clear trace")
# store the test instance inside the test class - so that objects
# holding the class can access instance methods (like assertEqual)
type(self).test_instance = self
@classmethod
def pg_enable_capture(cls, interfaces):
"""
Enable capture on packet-generator interfaces
:param interfaces: iterable interface indexes
"""
for i in interfaces:
i.enable_capture()
@classmethod
def register_capture(cls, cap_name):
""" Register a capture in the testclass """
# add to the list of captures with current timestamp
cls._captures.append((time.time(), cap_name))
# filter out from zombies
cls._zombie_captures = [(stamp, name)
for (stamp, name) in cls._zombie_captures
if name != cap_name]
@classmethod
def pg_start(cls):
""" Remove any zombie captures and enable the packet generator """
# how long before capture is allowed to be deleted - otherwise vpp
# crashes - 100ms seems enough (this shouldn't be needed at all)
capture_ttl = 0.1
now = time.time()
for stamp, cap_name in cls._zombie_captures:
wait = stamp + capture_ttl - now
if wait > 0:
cls.sleep(wait, "before deleting capture %s" % cap_name)
now = time.time()
cls.logger.debug("Removing zombie capture %s" % cap_name)
cls.vapi.cli('packet-generator delete %s' % cap_name)
cls.vapi.cli("trace add pg-input 50") # 50 is maximum
cls.vapi.cli('packet-generator enable')
cls._zombie_captures = cls._captures
cls._captures = []
@classmethod
def create_pg_interfaces(cls, interfaces):
"""
Create packet-generator interfaces.
:param interfaces: iterable indexes of the interfaces.
:returns: List of created interfaces.
"""
result = []
for i in interfaces:
intf = VppPGInterface(cls, i)
setattr(cls, intf.name, intf)
result.append(intf)
cls.pg_interfaces = result
return result
@classmethod
def create_loopback_interfaces(cls, interfaces):
"""
Create loopback interfaces.
:param interfaces: iterable indexes of the interfaces.
:returns: List of created interfaces.
"""
result = []
for i in interfaces:
intf = VppLoInterface(cls, i)
setattr(cls, intf.name, intf)
result.append(intf)
cls.lo_interfaces = result
return result
@staticmethod
def extend_packet(packet, size):
"""
Extend packet to given size by padding with spaces
NOTE: Currently works only when Raw layer is present.
:param packet: packet
:param size: target size
"""
packet_len = len(packet) + 4
extend = size - packet_len
if extend > 0:
packet[Raw].load += ' ' * extend
@classmethod
def reset_packet_infos(cls):
""" Reset the list of packet info objects and packet counts to zero """
cls._packet_infos = {}
cls._packet_count_for_dst_if_idx = {}
@classmethod
def create_packet_info(cls, src_if, dst_if):
"""
Create packet info object containing the source and destination indexes
and add it to the testcase's packet info list
:param VppInterface src_if: source interface
:param VppInterface dst_if: destination interface
:returns: _PacketInfo object
"""
info = _PacketInfo()
info.index = len(cls._packet_infos)
info.src = src_if.sw_if_index
info.dst = dst_if.sw_if_index
if isinstance(dst_if, VppSubInterface):
dst_idx = dst_if.parent.sw_if_index
else:
dst_idx = dst_if.sw_if_index
if dst_idx in cls._packet_count_for_dst_if_idx:
cls._packet_count_for_dst_if_idx[dst_idx] += 1
else:
cls._packet_count_for_dst_if_idx[dst_idx] = 1
cls._packet_infos[info.index] = info
return info
@staticmethod
def info_to_payload(info):
"""
Convert _PacketInfo object to packet payload
:param info: _PacketInfo object
:returns: string containing serialized data from packet info
"""
return "%d %d %d %d %d" % (info.index, info.src, info.dst,
info.ip, info.proto)
@staticmethod
def payload_to_info(payload):
"""
Convert packet payload to _PacketInfo object
:param payload: packet payload
:returns: _PacketInfo object containing de-serialized data from payload
"""
numbers = payload.split()
info = _PacketInfo()
info.index = int(numbers[0])
info.src = int(numbers[1])
info.dst = int(numbers[2])
info.ip = int(numbers[3])
info.proto = int(numbers[4])
return info
def get_next_packet_info(self, info):
"""
Iterate over the packet info list stored in the testcase
Start iteration with first element if info is None
Continue based on index in info if info is specified
:param info: info or None
:returns: next info in list or None if no more infos
"""
if info is None:
next_index = 0
else:
next_index = info.index + 1
if next_index == len(self._packet_infos):
return None
else:
return self._packet_infos[next_index]
def get_next_packet_info_for_interface(self, src_index, info):
"""
Search the packet info list for the next packet info with same source
interface index
:param src_index: source interface index to search for
:param info: packet info - where to start the search
:returns: packet info or None
"""
while True:
info = self.get_next_packet_info(info)
if info is None:
return None
if info.src == src_index:
return info
def get_next_packet_info_for_interface2(self, src_index, dst_index, info):
"""
Search the packet info list for the next packet info with same source
and destination interface indexes
:param src_index: source interface index to search for
:param dst_index: destination interface index to search for
:param info: packet info - where to start the search
:returns: packet info or None
"""
while True:
info = self.get_next_packet_info_for_interface(src_index, info)
if info is None:
return None
if info.dst == dst_index:
return info
def assert_equal(self, real_value, expected_value, name_or_class=None):
if name_or_class is None:
self.assertEqual(real_value, expected_value)
return
try:
msg = "Invalid %s: %d('%s') does not match expected value %d('%s')"
msg = msg % (getdoc(name_or_class).strip(),
real_value, str(name_or_class(real_value)),
expected_value, str(name_or_class(expected_value)))
except:
msg = "Invalid %s: %s does not match expected value %s" % (
name_or_class, real_value, expected_value)
self.assertEqual(real_value, expected_value, msg)
def assert_in_range(self,
real_value,
expected_min,
expected_max,
name=None):
if name is None:
msg = None
else:
msg = "Invalid %s: %s out of range <%s,%s>" % (
name, real_value, expected_min, expected_max)
self.assertTrue(expected_min <= real_value <= expected_max, msg)
@classmethod
def sleep(cls, timeout, remark=None):
if hasattr(cls, 'logger'):
cls.logger.debug("Starting sleep for %ss (%s)" % (timeout, remark))
before = time.time()
time.sleep(timeout)
after = time.time()
if after - before > 2 * timeout:
cls.logger.error("unexpected time.sleep() result - "
"slept for %ss instead of ~%ss!" % (
after - before, timeout))
if hasattr(cls, 'logger'):
cls.logger.debug(
"Finished sleep (%s) - slept %ss (wanted %ss)" % (
remark, after - before, timeout))
class TestCasePrinter(object):
_shared_state = {}
def __init__(self):
self.__dict__ = self._shared_state
if not hasattr(self, "_test_case_set"):
self._test_case_set = set()
def print_test_case_heading_if_first_time(self, case):
if case.__class__ not in self._test_case_set:
print(double_line_delim)
print(colorize(getdoc(case.__class__).splitlines()[0], YELLOW))
print(double_line_delim)
self._test_case_set.add(case.__class__)
class VppTestResult(unittest.TestResult):
"""
@property result_string
String variable to store the test case result string.
@property errors
List variable containing 2-tuples of TestCase instances and strings
holding formatted tracebacks. Each tuple represents a test which
raised an unexpected exception.
@property failures
List variable containing 2-tuples of TestCase instances and strings
holding formatted tracebacks. Each tuple represents a test where
a failure was explicitly signalled using the TestCase.assert*()
methods.
"""
def __init__(self, stream, descriptions, verbosity):
"""
:param stream File descriptor to store where to report test results.
Set to the standard error stream by default.
:param descriptions Boolean variable to store information if to use
test case descriptions.
:param verbosity Integer variable to store required verbosity level.
"""
unittest.TestResult.__init__(self, stream, descriptions, verbosity)
self.stream = stream
self.descriptions = descriptions
self.verbosity = verbosity
self.result_string = None
self.printer = TestCasePrinter()
def addSuccess(self, test):
"""
Record a test succeeded result
:param test:
"""
if hasattr(test, 'logger'):
test.logger.debug("--- addSuccess() %s.%s(%s) called"
% (test.__class__.__name__,
test._testMethodName,
test._testMethodDoc))
unittest.TestResult.addSuccess(self, test)
self.result_string = colorize("OK", GREEN)
def addSkip(self, test, reason):
"""
Record a test skipped.
:param test:
:param reason:
"""
if hasattr(test, 'logger'):
test.logger.debug("--- addSkip() %s.%s(%s) called, reason is %s"
% (test.__class__.__name__,
test._testMethodName,
test._testMethodDoc,
reason))
unittest.TestResult.addSkip(self, test, reason)
self.result_string = colorize("SKIP", YELLOW)
def symlink_failed(self, test):
logger = None
if hasattr(test, 'logger'):
logger = test.logger
if hasattr(test, 'tempdir'):
try:
failed_dir = os.getenv('VPP_TEST_FAILED_DIR')
link_path = '%s/%s-FAILED' % (failed_dir,
test.tempdir.split("/")[-1])
if logger:
logger.debug("creating a link to the failed test")
logger.debug("os.symlink(%s, %s)" %
(test.tempdir, link_path))
os.symlink(test.tempdir, link_path)
except Exception as e:
if logger:
logger.error(e)
def send_failure_through_pipe(self, test):
if hasattr(self, 'test_framework_failed_pipe'):
pipe = self.test_framework_failed_pipe
if pipe:
pipe.send(test.__class__)
def addFailure(self, test, err):
"""
Record a test failed result
:param test:
:param err: error message
"""
if hasattr(test, 'logger'):
test.logger.debug("--- addFailure() %s.%s(%s) called, err is %s"
% (test.__class__.__name__,
test._testMethodName,
test._testMethodDoc, err))
test.logger.debug("formatted exception is:\n%s" %
"".join(format_exception(*err)))
unittest.TestResult.addFailure(self, test, err)
if hasattr(test, 'tempdir'):
self.result_string = colorize("FAIL", RED) + \
' [ temp dir used by test case: ' + test.tempdir + ' ]'
self.symlink_failed(test)
else:
self.result_string = colorize("FAIL", RED) + ' [no temp dir]'
self.send_failure_through_pipe(test)
def addError(self, test, err):
"""
Record a test error result
:param test:
:param err: error message
"""
if hasattr(test, 'logger'):
test.logger.debug("--- addError() %s.%s(%s) called, err is %s"
% (test.__class__.__name__,
test._testMethodName,
test._testMethodDoc, err))
test.logger.debug("formatted exception is:\n%s" %
"".join(format_exception(*err)))
unittest.TestResult.addError(self, test, err)
if hasattr(test, 'tempdir'):
self.result_string = colorize("ERROR", RED) + \
' [ temp dir used by test case: ' + test.tempdir + ' ]'
self.symlink_failed(test)
else:
self.result_string = colorize("ERROR", RED) + ' [no temp dir]'
self.send_failure_through_pipe(test)
def getDescription(self, test):
"""
Get test description
:param test:
:returns: test description
"""
        # TODO: if the description is None, print a warning rather than raising
short_description = test.shortDescription()
if self.descriptions and short_description:
return short_description
else:
return str(test)
def startTest(self, test):
"""
Start a test
:param test:
"""
self.printer.print_test_case_heading_if_first_time(test)
unittest.TestResult.startTest(self, test)
if self.verbosity > 0:
self.stream.writeln(
"Starting " + self.getDescription(test) + " ...")
self.stream.writeln(single_line_delim)
def stopTest(self, test):
"""
Stop a test
:param test:
"""
unittest.TestResult.stopTest(self, test)
if self.verbosity > 0:
self.stream.writeln(single_line_delim)
self.stream.writeln("%-73s%s" % (self.getDescription(test),
self.result_string))
self.stream.writeln(single_line_delim)
else:
self.stream.writeln("%-73s%s" % (self.getDescription(test),
self.result_string))
def printErrors(self):
"""
Print errors from running the test case
"""
self.stream.writeln()
self.printErrorList('ERROR', self.errors)
self.printErrorList('FAIL', self.failures)
def printErrorList(self, flavour, errors):
"""
Print error list to the output stream together with error type
and test case description.
:param flavour: error type
:param errors: iterable errors
"""
for test, err in errors:
self.stream.writeln(double_line_delim)
self.stream.writeln("%s: %s" %
(flavour, self.getDescription(test)))
self.stream.writeln(single_line_delim)
self.stream.writeln("%s" % err)
class Filter_by_test_option:
def __init__(self, filter_file_name, filter_class_name, filter_func_name):
self.filter_file_name = filter_file_name
self.filter_class_name = filter_class_name
self.filter_func_name = filter_func_name
def __call__(self, file_name, class_name, func_name):
if self.filter_file_name and file_name != self.filter_file_name:
return False
if self.filter_class_name and class_name != self.filter_class_name:
return False
if self.filter_func_name and func_name != self.filter_func_name:
return False
return True
class VppTestRunner(unittest.TextTestRunner):
"""
A basic test runner implementation which prints results to standard error.
"""
@property
def resultclass(self):
"""Class maintaining the results of the tests"""
return VppTestResult
def __init__(self, keep_alive_pipe=None, failed_pipe=None,
stream=sys.stderr, descriptions=True,
verbosity=1, failfast=False, buffer=False, resultclass=None):
# ignore stream setting here, use hard-coded stdout to be in sync
# with prints from VppTestCase methods ...
super(VppTestRunner, self).__init__(sys.stdout, descriptions,
verbosity, failfast, buffer,
resultclass)
reporter = KeepAliveReporter()
reporter.pipe = keep_alive_pipe
# this is super-ugly, but very simple to implement and works as long
# as we run only one test at the same time
VppTestResult.test_framework_failed_pipe = failed_pipe
test_option = "TEST"
def parse_test_option(self):
try:
f = os.getenv(self.test_option)
except:
f = None
filter_file_name = None
filter_class_name = None
filter_func_name = None
if f:
if '.' in f:
parts = f.split('.')
if len(parts) > 3:
raise Exception("Unrecognized %s option: %s" %
(self.test_option, f))
if len(parts) > 2:
if parts[2] not in ('*', ''):
filter_func_name = parts[2]
if parts[1] not in ('*', ''):
filter_class_name = parts[1]
if parts[0] not in ('*', ''):
if parts[0].startswith('test_'):
filter_file_name = parts[0]
else:
filter_file_name = 'test_%s' % parts[0]
else:
if f.startswith('test_'):
filter_file_name = f
else:
filter_file_name = 'test_%s' % f
return filter_file_name, filter_class_name, filter_func_name
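    # Example: TEST=test_classifier.TestClassifier.test_acl_ip selects one
    # test method; TEST=classifier is normalized to file test_classifier;
    # "*" or an empty segment acts as a wildcard for that part.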
@staticmethod
def filter_tests(tests, filter_cb):
result = unittest.suite.TestSuite()
for t in tests:
if isinstance(t, unittest.suite.TestSuite):
# this is a bunch of tests, recursively filter...
                x = VppTestRunner.filter_tests(t, filter_cb)
if x.countTestCases() > 0:
result.addTest(x)
elif isinstance(t, unittest.TestCase):
# this is a single test
parts = t.id().split('.')
# t.id() for common cases like this:
# test_classifier.TestClassifier.test_acl_ip
# apply filtering only if it is so
if len(parts) == 3:
if not filter_cb(parts[0], parts[1], parts[2]):
continue
result.addTest(t)
else:
# unexpected object, don't touch it
result.addTest(t)
return result
def run(self, test):
"""
Run the tests
:param test:
"""
faulthandler.enable() # emit stack trace to stderr if killed by signal
print("Running tests using custom test runner") # debug message
filter_file, filter_class, filter_func = self.parse_test_option()
print("Active filters: file=%s, class=%s, function=%s" % (
filter_file, filter_class, filter_func))
filter_cb = Filter_by_test_option(
filter_file, filter_class, filter_func)
filtered = self.filter_tests(test, filter_cb)
print("%s out of %s tests match specified filters" % (
filtered.countTestCases(), test.countTestCases()))
if not running_extended_tests():
print("Not running extended tests (some tests will be skipped)")
return super(VppTestRunner, self).run(filtered)
class Worker(Thread):
def __init__(self, args, logger):
self.logger = logger
self.args = args
self.result = None
super(Worker, self).__init__()
def run(self):
executable = self.args[0]
self.logger.debug("Running executable w/args `%s'" % self.args)
env = os.environ.copy()
env["CK_LOG_FILE_NAME"] = "-"
self.process = subprocess.Popen(
self.args, shell=False, env=env, preexec_fn=os.setpgrp,
stdout=subprocess.PIPE, stderr=subprocess.PIPE)
out, err = self.process.communicate()
self.logger.debug("Finished running `%s'" % executable)
self.logger.info("Return code is `%s'" % self.process.returncode)
self.logger.info(single_line_delim)
self.logger.info("Executable `%s' wrote to stdout:" % executable)
self.logger.info(single_line_delim)
self.logger.info(out)
self.logger.info(single_line_delim)
self.logger.info("Executable `%s' wrote to stderr:" % executable)
self.logger.info(single_line_delim)
self.logger.error(err)
self.logger.info(single_line_delim)
self.result = self.process.returncode
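# Both KeepAliveReporter and TestCasePrinter above use the "Borg" pattern:
# every instance shares a single __dict__, so state written through one
# instance is visible through all of them. Minimal sketch:
class _Borg(object):
    _shared_state = {}
    def __init__(self):
        self.__dict__ = self._shared_state
# a = _Borg(); a.value = 42
# b = _Borg(); b.value  # -> 42, state is shared across instances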
|
tests.py
|
# -*- coding: utf-8; -*-
#
# Licensed to CRATE Technology GmbH ("Crate") under one or more contributor
# license agreements. See the NOTICE file distributed with this work for
# additional information regarding copyright ownership. Crate licenses
# this file to you under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License. You may
# obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# However, if you have executed another commercial license agreement
# with Crate these terms will supersede the license and you may use the
# software solely pursuant to the terms of the relevant commercial agreement.
from __future__ import absolute_import
import json
import os
import unittest
import doctest
import re
from pprint import pprint
from datetime import datetime, date
from six.moves.BaseHTTPServer import HTTPServer, BaseHTTPRequestHandler
import ssl
import time
import threading
from .compat import to_bytes
from zope.testing.renormalizing import RENormalizing
from crate.testing.layer import CrateLayer
from crate.testing.tests import crate_path, docs_path
from crate.client import connect
from crate.client.sqlalchemy.dialect import CrateDialect
from . import http
from .test_cursor import CursorTest
from .test_connection import ConnectionTest
from .test_http import (
HttpClientTest,
ThreadSafeHttpClientTest,
KeepAliveClientTest,
ParamsTest,
RetryOnTimeoutServerTest)
from .sqlalchemy.tests import test_suite as sqlalchemy_test_suite
from .sqlalchemy.types import ObjectArray
from .compat import cprint
class ClientMocked(object):
active_servers = ["http://localhost:4200"]
def __init__(self):
self.response = {}
self._server_infos = ("http://localhost:4200", "my server", "0.42.0")
def sql(self, stmt=None, parameters=None, bulk_parameters=None):
return self.response
def server_infos(self, server):
return self._server_infos
def set_next_response(self, response):
self.response = response
def set_next_server_infos(self, server, server_name, version):
self._server_infos = (server, server_name, version)
def setUpMocked(test):
test.globs['connection_client_mocked'] = ClientMocked()
crate_port = 44209
crate_transport_port = 44309
crate_layer = CrateLayer('crate',
crate_home=crate_path(),
port=crate_port,
transport_port=crate_transport_port)
crate_host = "127.0.0.1:{port}".format(port=crate_port)
crate_uri = "http://%s" % crate_host
def setUpWithCrateLayer(test):
test.globs['HttpClient'] = http.Client
test.globs['crate_host'] = crate_host
test.globs['pprint'] = pprint
test.globs['print'] = cprint
conn = connect(crate_host)
cursor = conn.cursor()
def refresh(table):
cursor.execute("refresh table %s" % table)
test.globs["refresh"] = refresh
with open(docs_path('testing/testdata/mappings/locations.sql')) as s:
stmt = s.read()
cursor.execute(stmt)
stmt = ("select count(*) from information_schema.tables "
"where table_name = 'locations'")
cursor.execute(stmt)
assert cursor.fetchall()[0][0] == 1
data_path = docs_path('testing/testdata/data/test_a.json')
# load testing data into crate
cursor.execute("copy locations from ?", (data_path,))
# refresh location table so imported data is visible immediately
refresh("locations")
# create blob table
cursor.execute("create blob table myfiles clustered into 1 shards " +
"with (number_of_replicas=0)")
def setUpCrateLayerAndSqlAlchemy(test):
setUpWithCrateLayer(test)
import sqlalchemy as sa
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import sessionmaker
conn = connect(crate_host)
cursor = conn.cursor()
cursor.execute("""create table characters (
id string primary key,
name string,
quote string,
details object,
more_details array(object),
INDEX name_ft using fulltext(name) with (analyzer = 'english'),
INDEX quote_ft using fulltext(quote) with (analyzer = 'english')
) """)
conn.close()
engine = sa.create_engine('crate://{0}'.format(crate_host))
Base = declarative_base()
class Location(Base):
__tablename__ = 'locations'
name = sa.Column(sa.String, primary_key=True)
kind = sa.Column(sa.String)
date = sa.Column(sa.Date, default=date.today)
datetime = sa.Column(sa.DateTime, default=datetime.utcnow)
nullable_datetime = sa.Column(sa.DateTime)
nullable_date = sa.Column(sa.Date)
flag = sa.Column(sa.Boolean)
details = sa.Column(ObjectArray)
Session = sessionmaker(engine)
session = Session()
test.globs['sa'] = sa
test.globs['engine'] = engine
test.globs['Location'] = Location
test.globs['Base'] = Base
test.globs['session'] = session
test.globs['Session'] = Session
test.globs['CrateDialect'] = CrateDialect
_server = None
class HttpsTestServerLayer(object):
PORT = 65534
HOST = "localhost"
CERT_FILE = os.path.abspath(os.path.join(os.path.dirname(__file__),
"test_https.pem"))
__name__ = "httpsserver"
__bases__ = tuple()
class HttpsServer(HTTPServer):
def get_request(self):
socket, client_address = HTTPServer.get_request(self)
socket = ssl.wrap_socket(socket,
keyfile=HttpsTestServerLayer.CERT_FILE,
certfile=HttpsTestServerLayer.CERT_FILE,
server_side=True)
return socket, client_address
class HttpsHandler(BaseHTTPRequestHandler):
payload = json.dumps({"name": "test", "status": 200, })
def do_GET(self):
self.send_response(200)
self.send_header("Content-Length", len(self.payload))
self.send_header("Content-Type", "application/json; charset=UTF-8")
self.end_headers()
self.wfile.write(to_bytes(self.payload, 'UTF-8'))
return
def __init__(self):
self.server = self.HttpsServer(
(self.HOST, self.PORT),
self.HttpsHandler
)
def setUp(self):
thread = threading.Thread(target=self.serve_forever)
thread.daemon = True # quit interpreter when only thread exists
thread.start()
time.sleep(1)
def serve_forever(self):
print("listening on", self.HOST, self.PORT)
self.server.serve_forever()
print("server stopped.")
def tearDown(self):
self.server.shutdown()
def setUpWithHttps(test):
test.globs['HttpClient'] = http.Client
test.globs['crate_host'] = "https://{0}:{1}".format(
HttpsTestServerLayer.HOST, HttpsTestServerLayer.PORT
)
test.globs['invalid_ca_cert'] = os.path.abspath(
os.path.join(os.path.dirname(__file__), "invalid_ca.pem")
)
test.globs['valid_ca_cert'] = os.path.abspath(
os.path.join(os.path.dirname(__file__), "test_https_ca.pem")
)
test.globs['pprint'] = pprint
test.globs['print'] = cprint
def tearDownWithCrateLayer(test):
# clear testing data
conn = connect(crate_host)
cursor = conn.cursor()
cursor.execute("drop table locations")
cursor.execute("drop blob table myfiles")
try:
cursor.execute("drop table characters")
    except Exception:
        # the characters table only exists when the SQLAlchemy setup ran
        pass
def test_suite():
suite = unittest.TestSuite()
flags = (doctest.NORMALIZE_WHITESPACE | doctest.ELLIPSIS)
checker = RENormalizing([
# python 3 drops the u" prefix on unicode strings
(re.compile(r"u('[^']*')"), r"\1"),
# python 3 includes module name in exceptions
(re.compile(r"crate.client.exceptions.ProgrammingError:"),
"ProgrammingError:"),
(re.compile(r"crate.client.exceptions.ConnectionError:"),
"ConnectionError:"),
(re.compile(r"crate.client.exceptions.DigestNotFoundException:"),
"DigestNotFoundException:"),
(re.compile(r"crate.client.exceptions.BlobsDisabledException:"),
"BlobsDisabledException:"),
(re.compile(r"<type "),
"<class "),
])
s = doctest.DocFileSuite(
'cursor.txt',
'connection.txt',
checker=checker,
setUp=setUpMocked,
optionflags=flags,
encoding='utf-8'
)
suite.addTest(s)
suite.addTest(unittest.makeSuite(CursorTest))
suite.addTest(unittest.makeSuite(HttpClientTest))
suite.addTest(unittest.makeSuite(KeepAliveClientTest))
suite.addTest(unittest.makeSuite(ThreadSafeHttpClientTest))
suite.addTest(unittest.makeSuite(ParamsTest))
suite.addTest(unittest.makeSuite(ConnectionTest))
suite.addTest(unittest.makeSuite(RetryOnTimeoutServerTest))
suite.addTest(sqlalchemy_test_suite())
suite.addTest(doctest.DocTestSuite('crate.client.connection'))
suite.addTest(doctest.DocTestSuite('crate.client.http'))
s = doctest.DocFileSuite(
'../../../docs/https.txt',
checker=checker,
setUp=setUpWithHttps,
optionflags=flags,
encoding='utf-8'
)
s.layer = HttpsTestServerLayer()
suite.addTest(s)
s = doctest.DocFileSuite(
'sqlalchemy/itests.txt',
'sqlalchemy/dialect.txt',
checker=checker,
setUp=setUpCrateLayerAndSqlAlchemy,
tearDown=tearDownWithCrateLayer,
optionflags=flags,
encoding='utf-8'
)
s.layer = crate_layer
suite.addTest(s)
s = doctest.DocFileSuite(
'http.txt',
'blob.txt',
'../../../docs/client.txt',
'../../../docs/advanced_usage.txt',
'../../../docs/blobs.txt',
checker=checker,
setUp=setUpWithCrateLayer,
tearDown=tearDownWithCrateLayer,
optionflags=flags,
encoding='utf-8'
)
s.layer = crate_layer
suite.addTest(s)
s = doctest.DocFileSuite(
'../../../docs/sqlalchemy.txt',
checker=checker,
setUp=setUpCrateLayerAndSqlAlchemy,
tearDown=tearDownWithCrateLayer,
optionflags=flags,
encoding='utf-8'
)
s.layer = crate_layer
suite.addTest(s)
return suite
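
if __name__ == '__main__':
    # Usage sketch (not part of the original module): run the assembled suite
    # with the stdlib text runner. The doctest suites that carry a `layer`
    # attribute expect a zope.testrunner-style runner to start the Crate and
    # HTTPS layers; without one, only the layer-less tests will run cleanly.
    unittest.TextTestRunner(verbosity=2).run(test_suite())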
|
web_server_4.py
|
#!/usr/bin/python3
# -*- coding: utf-8 -*-
# file: multiprocess_web_server.py
# Created by Guang at 19-7-19
# description: multi-process web server
import multiprocessing
import socket
import re
import time
import sys
sys.path.insert(0, "../../")
from mini_web.framework import mini_frame_4
class WSGIServer(object):
def __init__(self, ip, port):
        # 1. create the socket
self.listen_server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.listen_server.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
        # 2. bind the ip and port
self.local_addr = (ip, port)
self.listen_server.bind(self.local_addr)
        # 3. switch from active to passive (start listening)
self.listen_server.listen(128)
def service_client(self, new_socket):
"""为这个客户端返回数据"""
        # 1. receive the request sent by the browser, i.e. the HTTP request
# GET / HTTP/1.1
request = new_socket.recv(1024).decode('utf-8')
# print("-" * 100)
        request_lines = request.splitlines()  # when the client closes first, recv unblocks with an empty string, yielding an empty list here
if not request_lines:
return
print(request_lines[0])
# print(request_lines)
# GET /index.html HTTP/1.1
# GET POST DELETE
file_name = ""
ret = re.match(r'[^/]+(/[^ ]*)', request_lines[0])
if ret:
file_name = ret.group(1)
# print("*" * 50, file_name)
if file_name == "/":
file_name = "/index.html"
        # 2. send back data in HTTP format
        # 2.1 static vs. dynamic resources: assume paths ending in .py are dynamic
if not file_name.endswith(".py"):
try:
f = open("./html" + file_name, 'rb')
except Exception as e:
response = "HTTP/1.1 404 NOT FOUND\r\n"
response += "\r\n"
response += "----------file not found --------"
new_socket.send(response.encode("utf-8"))
else:
html_content = f.read()
f.close()
                # 2.1 prepare the data to send to the browser -- header
response = "HTTP/1.1 200 OK\r\n"
response += "\r\n"
                # 2.2 prepare the data to send to the browser -- body
                # response += "hahaha"
                # send the response header to the browser
new_socket.send(response.encode("utf-8"))
                # send the response body to the browser
new_socket.send(html_content)
else:
            # 2.2 a dynamic resource was requested
# header = "HTTP/1.1 200 OK\r\n"
# header += "\r\n"
# body = "This is a dynamic source web_app \r\n %s" % time.ctime()
# env = dict()
env = {"FILE_PATH": file_name}
body = mini_frame_4.application(env, self.set_response_header)
            # the header is built here
header = "HTTP/1.1 %s\r\n" % self.status
for temp in self.headers:
header += "%s: %s\r\n" % temp
header += "\r\n"
response = header + body
new_socket.send(response.encode("utf-8"))
        # must close again here as well: the underlying file descriptor
new_socket.close()
def set_response_header(self, status, headers):
"""作为引用传递给框架程序"""
self.status = status
# self.headers = headers
self.headers = [("Server", "mini_web v1.0")]
self.headers += headers
def runserver(self):
"""主函数: 整体控制"""
while True:
            # 4. wait for a new client connection
new_socket, client_addr = self.listen_server.accept()
            # 5. serve this client
p = multiprocessing.Process(target=self.service_client, args=(new_socket, ))
p.start()
            # a process-based concurrent server must also close new_socket here; reason: the file descriptor (fd) is duplicated into the child process
new_socket.close()
        # close the listening socket
self.listen_server.close()
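
# For reference, a minimal framework entry point compatible with the
# service_client() dispatch above -- a sketch assuming mini_frame_4.application
# follows this WSGI-like contract; the body below is illustrative, not the
# real framework code.
def example_application(env, start_response):
    status = "200 OK"
    headers = [("Content-Type", "text/html;charset=utf-8")]
    start_response(status, headers)
    return "requested path: %s" % env["FILE_PATH"]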
if __name__ == '__main__':
ip = ''
port = 8888
wsgi_server = WSGIServer(ip, port)
wsgi_server.runserver()
|
test_utils.py
|
# Copyright BigchainDB GmbH and BigchainDB contributors
# SPDX-License-Identifier: (Apache-2.0 AND CC-BY-4.0)
# Code is Apache-2.0 and docs are CC-BY-4.0
import queue
from unittest.mock import patch, call
import pytest
@pytest.fixture
def mock_queue(monkeypatch):
class MockQueue:
items = []
def get(self, timeout=None):
try:
return self.items.pop()
except IndexError:
if timeout:
raise queue.Empty()
raise
def put(self, item):
self.items.append(item)
mockqueue = MockQueue()
monkeypatch.setattr('queue.Queue', lambda: mockqueue)
return mockqueue
def test_empty_pool_is_populated_with_instances(mock_queue):
from bigchaindb import utils
pool = utils.pool(lambda: 'hello', 4)
assert len(mock_queue.items) == 0
with pool() as instance:
assert instance == 'hello'
assert len(mock_queue.items) == 1
with pool() as instance:
assert instance == 'hello'
assert len(mock_queue.items) == 2
with pool() as instance:
assert instance == 'hello'
assert len(mock_queue.items) == 3
with pool() as instance:
assert instance == 'hello'
assert len(mock_queue.items) == 4
with pool() as instance:
assert instance == 'hello'
assert len(mock_queue.items) == 4
def test_pool_blocks_if_no_instances_available(mock_queue):
from bigchaindb import utils
pool = utils.pool(lambda: 'hello', 4)
assert len(mock_queue.items) == 0
# We need to manually trigger the `__enter__` method so the context
# manager will "hang" and not return the resource to the pool
assert pool().__enter__() == 'hello'
assert len(mock_queue.items) == 0
assert pool().__enter__() == 'hello'
assert len(mock_queue.items) == 0
assert pool().__enter__() == 'hello'
assert len(mock_queue.items) == 0
# We need to keep a reference of the last context manager so we can
# manually release the resource
last = pool()
assert last.__enter__() == 'hello'
assert len(mock_queue.items) == 0
    # This would block when using `queue.Queue`, but since we mocked it, it
    # will just raise an IndexError because it's trying to pop from an empty list.
with pytest.raises(IndexError):
assert pool().__enter__() == 'hello'
assert len(mock_queue.items) == 0
# Release the last resource
last.__exit__(None, None, None)
assert len(mock_queue.items) == 1
assert pool().__enter__() == 'hello'
assert len(mock_queue.items) == 0
def test_pool_raises_empty_exception_when_timeout(mock_queue):
from bigchaindb import utils
pool = utils.pool(lambda: 'hello', 1, timeout=1)
assert len(mock_queue.items) == 0
with pool() as instance:
assert instance == 'hello'
assert len(mock_queue.items) == 1
# take the only resource available
assert pool().__enter__() == 'hello'
with pytest.raises(queue.Empty):
with pool() as instance:
assert instance == 'hello'
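
# A contextmanager-based pool consistent with the behaviour exercised above --
# a sketch only; the real `bigchaindb.utils.pool` may differ in detail.
from contextlib import contextmanager
import threading

def pool_sketch(builder, size, timeout=None):
    instances = queue.Queue()
    lock = threading.Lock()
    created = {'count': 0}

    @contextmanager
    def pooled():
        try:
            # reuse an idle instance when one is available
            instance = instances.get(block=False)
        except queue.Empty:
            with lock:
                below_cap = created['count'] < size
                if below_cap:
                    created['count'] += 1
            if below_cap:
                # lazily build a new instance while under the size cap
                instance = builder()
            else:
                # pool exhausted: block (or time out) until one is returned
                instance = instances.get(timeout=timeout)
        try:
            yield instance
        finally:
            instances.put(instance)

    return pooled
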
@patch('multiprocessing.Process')
def test_process_group_instantiates_and_start_processes(mock_process):
from bigchaindb.utils import ProcessGroup
def noop():
pass
concurrency = 10
pg = ProcessGroup(concurrency=concurrency, group='test_group', target=noop)
pg.start()
mock_process.assert_has_calls([call(group='test_group', target=noop,
name=None, args=(), kwargs={},
daemon=None)
for i in range(concurrency)], any_order=True)
for process in pg.processes:
process.start.assert_called_with()
def test_lazy_execution():
from bigchaindb.utils import Lazy
lz = Lazy()
lz.split(',')[1].split(' ').pop(1).strip()
result = lz.run('Like humans, cats tend to favor one paw over another')
assert result == 'cats'
class Cat:
def __init__(self, name):
self.name = name
cat = Cat('Shmui')
lz = Lazy()
lz.name.upper()
result = lz.run(cat)
assert result == 'SHMUI'
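
# A minimal Lazy that records attribute accesses, calls and indexing, then
# replays them in run() -- a sketch consistent with the test above, not the
# actual bigchaindb implementation.
class LazySketch:
    def __init__(self):
        self.stack = []

    def __getattr__(self, name):
        self.stack.append(name)
        return self

    def __call__(self, *args, **kwargs):
        self.stack.append((args, kwargs))
        return self

    def __getitem__(self, key):
        self.stack.append(('__getitem__', key))
        return self

    def run(self, instance):
        # replay the recorded chain against a concrete object
        last = instance
        for item in self.stack:
            if isinstance(item, str):
                last = getattr(last, item)
            elif item[0] == '__getitem__':
                last = last[item[1]]
            else:
                args, kwargs = item
                last = last(*args, **kwargs)
        return last
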
def test_process_set_title():
from uuid import uuid4
from multiprocessing import Queue
from setproctitle import getproctitle
from bigchaindb.utils import Process
queue = Queue()
uuid = str(uuid4())
process = Process(target=lambda: queue.put(getproctitle()),
name=uuid)
process.start()
assert queue.get() == uuid
|
_app.py
|
"""
websocket - WebSocket client library for Python
Copyright (C) 2010 Hiroki Ohtani(liris)
This library is free software; you can redistribute it and/or
modify it under the terms of the GNU Lesser General Public
License as published by the Free Software Foundation; either
version 2.1 of the License, or (at your option) any later version.
This library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public
License along with this library; if not, write to the Free Software
Foundation, Inc., 51 Franklin Street, Fifth Floor,
Boston, MA 02110-1335 USA
"""
"""
WebSocketApp provides higher level APIs.
"""
import inspect
import select
import sys
import threading
import time
import traceback
import six
from ._abnf import ABNF
from ._core import WebSocket, getdefaulttimeout
from ._exceptions import *
from . import _logging
__all__ = ["WebSocketApp"]
class Dispatcher:
def __init__(self, app, ping_timeout):
self.app = app
self.ping_timeout = ping_timeout
def read(self, sock, read_callback, check_callback):
while self.app.keep_running:
r, w, e = select.select(
(self.app.sock.sock, ), (), (), self.ping_timeout)
if r:
if not read_callback():
break
check_callback()
class SSLDispatcher:
def __init__(self, app, ping_timeout):
self.app = app
self.ping_timeout = ping_timeout
def read(self, sock, read_callback, check_callback):
while self.app.keep_running:
r = self.select()
if r:
if not read_callback():
break
check_callback()
            # print('leaving the loop')
def select(self):
sock = self.app.sock.sock
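        # An SSL socket buffers decrypted bytes internally, so data may be
        # readable even when select() reports nothing on the raw fd.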
if sock.pending():
return [sock,]
r, w, e = select.select((sock, ), (), (), self.ping_timeout)
return r
class WebSocketApp(object):
"""
    Higher-level APIs are provided.
    The interface is similar to the JavaScript WebSocket object.
"""
def __init__(self, url, header=None,
on_open=None, on_message=None, on_error=None,
on_close=None, on_ping=None, on_pong=None,
on_cont_message=None,
keep_running=True, get_mask_key=None, cookie=None,
subprotocols=None,
on_data=None):
"""
        url: websocket url.
        header: custom header for the websocket handshake.
        on_open: callable which is called when the websocket is opened.
            It has one argument: this class object.
        on_message: callable which is called when data is received.
            on_message has 2 arguments:
            the 1st argument is this class object,
            the 2nd argument is the utf-8 string received from the server.
        on_error: callable which is called when an error occurs.
            on_error has 2 arguments:
            the 1st argument is this class object,
            the 2nd argument is the exception object.
        on_close: callable which is called when the connection is closed.
            It has one argument: this class object.
        on_ping: callable which is called when a ping frame is received.
        on_pong: callable which is called when a pong frame is received.
        on_cont_message: callback which is called when a continuation
            frame is received.
            on_cont_message has 3 arguments:
            the 1st argument is this class object,
            the 2nd argument is the utf-8 string received from the server,
            the 3rd argument is the continue flag; if 0, the data
            continues in the next frame.
        on_data: callback which is called when a message is received.
            This is called before on_message or on_cont_message,
            and then on_message or on_cont_message is called.
            on_data has 4 arguments:
            the 1st argument is this class object,
            the 2nd argument is the utf-8 string received from the server,
            the 3rd argument is the data type: ABNF.OPCODE_TEXT or
            ABNF.OPCODE_BINARY will be passed,
            the 4th argument is the continue flag; if 0, the data continues.
        keep_running: this parameter is obsolete and ignored.
        get_mask_key: a callable to produce new mask keys;
            see the WebSocket.set_mask_key docstring for more information.
        subprotocols: array of available subprotocols. default is None.
"""
self.url = url
self.header = header if header is not None else []
self.cookie = cookie
self.on_open = on_open
self.on_message = on_message
self.on_data = on_data
self.on_error = on_error
self.on_close = on_close
self.on_ping = on_ping
self.on_pong = on_pong
self.on_cont_message = on_cont_message
self.keep_running = False
self.get_mask_key = get_mask_key
self.sock = None
self.last_ping_tm = 0
self.last_pong_tm = 0
self.subprotocols = subprotocols
def send(self, data, opcode=ABNF.OPCODE_TEXT):
"""
send message.
data: message to send. If you set opcode to OPCODE_TEXT,
data must be utf-8 string or unicode.
opcode: operation code of data. default is OPCODE_TEXT.
"""
if not self.sock or self.sock.send(data, opcode) == 0:
raise WebSocketConnectionClosedException(
"Connection is already closed.")
def close(self, **kwargs):
"""
close websocket connection.
"""
self.keep_running = False
if self.sock:
self.sock.close(**kwargs)
self.sock = None
def _send_ping(self, interval, event):
while not event.wait(interval):
self.last_ping_tm = time.time()
if self.sock:
try:
self.sock.ping()
except Exception as ex:
_logging.warning("send_ping routine terminated: {}".format(ex))
break
def run_forever(self, sockopt=None, sslopt=None,
ping_interval=0, ping_timeout=None,
http_proxy_host=None, http_proxy_port=None,
http_no_proxy=None, http_proxy_auth=None,
skip_utf8_validation=False,
host=None, origin=None, dispatcher=None,
                    suppress_origin=False, proxy_type=None, max_active_time=3600):
"""
        run the event loop for the WebSocket framework.
        This loop is infinite and runs for as long as the websocket
        is available.
        sockopt: values for socket.setsockopt.
            sockopt must be a tuple and each element is an argument
            of sock.setsockopt.
        sslopt: ssl socket optional dict.
        ping_interval: automatically send a "ping" command every
            specified period (in seconds). If set to 0, no ping is
            sent automatically.
        ping_timeout: timeout (in seconds) if the pong message is not
            received.
        http_proxy_host: http proxy host name.
        http_proxy_port: http proxy port. If not set, set to 80.
        http_no_proxy: host names which don't use the proxy.
        skip_utf8_validation: skip utf8 validation.
        host: update host header.
        origin: update origin header.
        dispatcher: customize reading data from socket.
        suppress_origin: suppress outputting the origin header.
        max_active_time: tear the connection down after this many
            seconds (default 3600).
        Returns
        -------
        False if KeyboardInterrupt was caught
        True if another exception was raised during the loop
"""
start_time = time.time()
if ping_timeout is not None and ping_timeout <= 0:
ping_timeout = None
if ping_timeout and ping_interval and ping_interval <= ping_timeout:
raise WebSocketException("Ensure ping_interval > ping_timeout")
if not sockopt:
sockopt = []
if not sslopt:
sslopt = {}
if self.sock:
raise WebSocketException("socket is already opened")
thread = None
self.keep_running = True
self.last_ping_tm = 0
self.last_pong_tm = 0
ping_thread = None
def teardown(close_frame=None):
"""
Tears down the connection.
If close_frame is set, we will invoke the on_close handler with the
statusCode and reason from there.
"""
            if thread and thread.is_alive():
event.set()
thread.join()
self.keep_running = False
if self.sock:
self.sock.close()
close_args = self._get_close_args(
close_frame.data if close_frame else None)
self._callback(self.on_close, *close_args)
self.sock = None
            if ping_thread:
                # print('waiting for ping_thread')
                ping_thread.join()
                # print('done, everything finished')
try:
self.sock = WebSocket(
self.get_mask_key, sockopt=sockopt, sslopt=sslopt,
fire_cont_frame=self.on_cont_message is not None,
skip_utf8_validation=skip_utf8_validation,
enable_multithread=True if ping_interval else False)
self.sock.settimeout(getdefaulttimeout())
self.sock.connect(
self.url, header=self.header, cookie=self.cookie,
http_proxy_host=http_proxy_host,
http_proxy_port=http_proxy_port, http_no_proxy=http_no_proxy,
http_proxy_auth=http_proxy_auth, subprotocols=self.subprotocols,
host=host, origin=origin, suppress_origin=suppress_origin,
proxy_type=proxy_type)
if not dispatcher:
dispatcher = self.create_dispatcher(ping_timeout)
self._callback(self.on_open)
if ping_interval:
event = threading.Event()
thread = threading.Thread(
target=self._send_ping, args=(ping_interval, event))
                thread.daemon = True
thread.start()
ping_thread = thread
def read():
if not self.keep_running or time.time() > start_time + max_active_time:
# print('tearing down')
return teardown()
op_code, frame = self.sock.recv_data_frame(True)
if op_code == ABNF.OPCODE_CLOSE:
return teardown(frame)
elif op_code == ABNF.OPCODE_PING:
self._callback(self.on_ping, frame.data)
elif op_code == ABNF.OPCODE_PONG:
self.last_pong_tm = time.time()
self._callback(self.on_pong, frame.data)
elif op_code == ABNF.OPCODE_CONT and self.on_cont_message:
self._callback(self.on_data, frame.data,
frame.opcode, frame.fin)
self._callback(self.on_cont_message,
frame.data, frame.fin)
else:
data = frame.data
if six.PY3 and op_code == ABNF.OPCODE_TEXT:
data = data.decode("utf-8")
self._callback(self.on_data, data, frame.opcode, True)
self._callback(self.on_message, data)
return True
def check():
if (ping_timeout):
has_timeout_expired = time.time() - self.last_ping_tm > ping_timeout
has_pong_not_arrived_after_last_ping = self.last_pong_tm - self.last_ping_tm < 0
has_pong_arrived_too_late = self.last_pong_tm - self.last_ping_tm > ping_timeout
if (self.last_ping_tm
and has_timeout_expired
and (has_pong_not_arrived_after_last_ping or has_pong_arrived_too_late)):
raise WebSocketTimeoutException("ping/pong timed out")
return True
dispatcher.read(self.sock.sock, read, check)
except (Exception, KeyboardInterrupt, SystemExit) as e:
self._callback(self.on_error, e)
if isinstance(e, SystemExit):
# propagate SystemExit further
raise
teardown()
return not isinstance(e, KeyboardInterrupt)
def create_dispatcher(self, ping_timeout):
timeout = ping_timeout or 10
if self.sock.is_ssl():
return SSLDispatcher(self, timeout)
return Dispatcher(self, timeout)
def _get_close_args(self, data):
""" this functions extracts the code, reason from the close body
if they exists, and if the self.on_close except three arguments """
# if the on_close callback is "old", just return empty list
if sys.version_info < (3, 0):
if not self.on_close or len(inspect.getargspec(self.on_close).args) != 3:
return []
else:
if not self.on_close or len(inspect.getfullargspec(self.on_close).args) != 3:
return []
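        # close frame payload: a 2-byte big-endian status code followed by an
        # optional utf-8 encoded reason string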
if data and len(data) >= 2:
code = 256 * six.byte2int(data[0:1]) + six.byte2int(data[1:2])
reason = data[2:].decode('utf-8')
return [code, reason]
return [None, None]
def _callback(self, callback, *args):
if callback:
try:
if inspect.ismethod(callback):
callback(*args)
else:
callback(self, *args)
except Exception as e:
_logging.error("error from callback {}: {}".format(callback, e))
if _logging.isEnabledForDebug():
_, _, tb = sys.exc_info()
traceback.print_tb(tb)
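
if __name__ == "__main__":
    # Usage sketch (the echo URL is illustrative, not part of the library):
    # open a connection, send one message, print the reply, then close.
    def _on_open(ws):
        ws.send("hello")

    def _on_message(ws, message):
        print("received: %s" % message)
        ws.close()

    app = WebSocketApp("ws://echo.websocket.org/",
                       on_open=_on_open, on_message=_on_message)
    app.run_forever(ping_interval=30, ping_timeout=10)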
|
lisp-core.py
|
# -----------------------------------------------------------------------------
#
# Copyright 2013-2019 lispers.net - Dino Farinacci <farinacci@gmail.com>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# -----------------------------------------------------------------------------
#
# lisp-core.py
#
# This is the core process that is used to demux to the specific LISP
# functional components. The 4342 listen socket is centralized here.
#
#
# +------------- data encapsulation via network --------------+
# | |
# | IPC when mr & ms colocated |
# | +--------------------------------+ |
# | | | |
# | | IPC when mr & ddt colo | |
# | | +------------+ | |
# | | | | | |
# | | | v v v 4341
# +-------------+ +----------+ +----------+ +----------+ +----------+
# | lisp-[ir]tr | | lisp-mr | | lisp-ddt | | lisp-ms | | lisp-etr |
# +-------------+ +----------+ +----------+ +----------+ +----------+
# ^ IPC ^ IPC ^ IPC ^ IPC ^ IPC
# | | | | |
# | | | | |
# | | | | |
# +--------------+--------------+--------------+--------------+
# |
# | for dispatching control messages
# +-----------+
# | lisp-core |
# +-----------+
# | 4342
# |
# via network
#
# -----------------------------------------------------------------------------
from __future__ import division
from future import standard_library
standard_library . install_aliases ( )
from builtins import str
from past . utils import old_div
import lisp
import lispconfig
import multiprocessing
import threading
from subprocess import getoutput
import time
import os
import bottle
import json
import sys
import socket
if 64 - 64: i11iIiiIii
if 65 - 65: O0 / iIii1I11I1II1 % OoooooooOO - i1IIi
if 73 - 73: II111iiii
if 22 - 22: I1IiiI * Oo0Ooo / OoO0O00 . OoOoOO00 . o0oOOo0O0Ooo / I1ii11iIi11i
from cheroot . wsgi import Server as wsgi_server
from cheroot . ssl . builtin import BuiltinSSLAdapter as ssl_adaptor
if 48 - 48: oO0o / OOooOOo / I11i / Ii1I
if 48 - 48: iII111i % IiII + I1Ii111 / ooOoO0o * Ii1I
if 46 - 46: ooOoO0o * I11i - OoooooooOO
if 30 - 30: o0oOOo0O0Ooo - O0 % o0oOOo0O0Ooo - OoooooooOO * O0 * OoooooooOO
if 60 - 60: iIii1I11I1II1 / i1IIi * oO0o - I1ii11iIi11i + o0oOOo0O0Ooo
if 94 - 94: i1IIi % Oo0Ooo
o0oO0 = ""
if 100 - 100: i1IIi
I1Ii11I1Ii1i = None
Ooo = None
o0oOoO00o = None
i1 = [ None , None , None ]
oOOoo00O0O = None
if 15 - 15: I1IiiI
if 90 - 90: IiII * i1IIi / Ii1I . OoO0O00 * oO0o
if 16 - 16: ooOoO0o * IiII % I11i . I1Ii111 / IiII % iII111i
if 27 - 27: IiII . i1IIi * OoOoOO00 % Ii1I / i1IIi
if 3 - 3: IiII / ooOoO0o
if 28 - 28: ooOoO0o + I1Ii111 - ooOoO0o . OoooooooOO
if 97 - 97: OoO0O00 . I11i
if 32 - 32: Oo0Ooo - II111iiii - i11iIiiIii % I1Ii111
@ bottle . route ( '/lisp/api' , method = "get" )
@ bottle . route ( '/lisp/api/<command>' , method = "get" )
@ bottle . route ( '/lisp/api/<command>/<data_structure>' , method = "get" )
def O0OoOoo00o ( command = "" , data_structure = "" ) :
iiiI11 = [ { "?" : [ { "?" : "not-auth" } ] } ]
if 91 - 91: o0oOOo0O0Ooo / II111iiii . I1ii11iIi11i + OOooOOo
if 47 - 47: OoOoOO00 / Ii1I * OoooooooOO
if 9 - 9: I1IiiI - Ii1I % i1IIi % OoooooooOO
if 3 - 3: iII111i + O0
if ( bottle . request . auth != None ) :
I1Ii , o0oOo0Ooo0O = bottle . request . auth
if ( lispconfig . lisp_find_user_account ( I1Ii , o0oOo0Ooo0O ) == False ) :
return ( json . dumps ( iiiI11 ) )
if 81 - 81: I1ii11iIi11i * IiII * I11i - iII111i - o0oOOo0O0Ooo
else :
if ( bottle . request . headers [ "User-Agent" ] . find ( "python" ) != - 1 ) :
return ( json . dumps ( iiiI11 ) )
if 90 - 90: II111iiii + oO0o / o0oOOo0O0Ooo % II111iiii - O0
if ( lispconfig . lisp_validate_user ( ) == False ) :
return ( json . dumps ( iiiI11 ) )
if 29 - 29: o0oOOo0O0Ooo / iIii1I11I1II1
if 24 - 24: O0 % o0oOOo0O0Ooo + i1IIi + I1Ii111 + I1ii11iIi11i
if 70 - 70: Oo0Ooo % Oo0Ooo . IiII % OoO0O00 * o0oOOo0O0Ooo % oO0o
if 23 - 23: i11iIiiIii + I1IiiI
if 68 - 68: OoOoOO00 . oO0o . i11iIiiIii
if 40 - 40: oO0o . OoOoOO00 . Oo0Ooo . i1IIi
if 33 - 33: Ii1I + II111iiii % i11iIiiIii . ooOoO0o - I1IiiI
if ( command == "data" and data_structure != "" ) :
O00oooo0O = bottle . request . body . readline ( )
if ( type ( O00oooo0O ) == bytes ) : O00oooo0O = O00oooo0O . decode ( )
iiiI11 = json . loads ( O00oooo0O ) if O00oooo0O != "" else ""
if ( iiiI11 != "" ) : iiiI11 = list ( iiiI11 . values ( ) ) [ 0 ]
if ( iiiI11 == [ ] ) : iiiI11 = ""
if 22 - 22: OoooooooOO % I11i - iII111i . iIii1I11I1II1 * i11iIiiIii
if ( type ( iiiI11 ) == dict and type ( list ( iiiI11 . values ( ) ) [ 0 ] ) == dict ) :
iiiI11 = list ( iiiI11 . values ( ) ) [ 0 ]
if 32 - 32: Oo0Ooo * O0 % oO0o % Ii1I . IiII
if 61 - 61: ooOoO0o
iiiI11 = oOOO00o ( data_structure , iiiI11 )
return ( iiiI11 )
if 97 - 97: I11i % I11i + II111iiii * iII111i
if 54 - 54: I11i + IiII / iII111i
if 9 - 9: OoOoOO00 / Oo0Ooo - IiII . i1IIi / I1IiiI % IiII
if 71 - 71: I1Ii111 . O0
if 73 - 73: OOooOOo % OoOoOO00 - Ii1I
if ( command != "" ) :
command = "lisp " + command
else :
O00oooo0O = bottle . request . body . readline ( )
if ( type ( O00oooo0O ) == bytes ) : O00oooo0O = O00oooo0O . decode ( )
if ( O00oooo0O == "" ) :
iiiI11 = [ { "?" : [ { "?" : "no-body" } ] } ]
return ( json . dumps ( iiiI11 ) )
if 10 - 10: I1IiiI % I1ii11iIi11i
if 48 - 48: I11i + I11i / II111iiii / iIii1I11I1II1
iiiI11 = json . loads ( O00oooo0O )
command = list ( iiiI11 . keys ( ) ) [ 0 ]
if 20 - 20: o0oOOo0O0Ooo
if 77 - 77: OoOoOO00 / I11i
iiiI11 = lispconfig . lisp_get_clause_for_api ( command )
return ( json . dumps ( iiiI11 ) )
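# Client-side sketch (an assumption, not part of this file): the GET route
# above can be exercised with HTTP basic auth; host, port and credentials
# below are illustrative.
#
#   import requests
#   r = requests.get("http://localhost:8080/lisp/api/data/system",
#                    auth=("admin", "lisp"))
#   print(r.json())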
if 98 - 98: iIii1I11I1II1 / i1IIi / i11iIiiIii / o0oOOo0O0Ooo
if 28 - 28: OOooOOo - IiII . IiII + OoOoOO00 - OoooooooOO + O0
if 95 - 95: OoO0O00 % oO0o . O0
if 15 - 15: ooOoO0o / Ii1I . Ii1I - i1IIi
if 53 - 53: IiII + I1IiiI * oO0o
if 61 - 61: i1IIi * OOooOOo / OoooooooOO . i11iIiiIii . OoOoOO00
if 60 - 60: I11i / I11i
def I1II1III11iii ( ) :
iiiI11 = { }
iiiI11 [ "hostname" ] = socket . gethostname ( )
iiiI11 [ "system-uptime" ] = getoutput ( "uptime" )
iiiI11 [ "lisp-uptime" ] = lisp . lisp_print_elapsed ( lisp . lisp_uptime )
iiiI11 [ "lisp-version" ] = lisp . lisp_version
if 75 - 75: iIii1I11I1II1 / OOooOOo % o0oOOo0O0Ooo * OoOoOO00
iiii11I = "yes" if os . path . exists ( "./logs/lisp-traceback.log" ) else "no"
iiiI11 [ "traceback-log" ] = iiii11I
if 96 - 96: II111iiii % Ii1I . OOooOOo + OoooooooOO * oO0o - OoOoOO00
i11i1 = lisp . lisp_myrlocs [ 0 ]
IIIii1II1II = lisp . lisp_myrlocs [ 1 ]
i11i1 = "none" if ( i11i1 == None ) else i11i1 . print_address_no_iid ( )
IIIii1II1II = "none" if ( IIIii1II1II == None ) else IIIii1II1II . print_address_no_iid ( )
iiiI11 [ "lisp-rlocs" ] = [ i11i1 , IIIii1II1II ]
return ( json . dumps ( iiiI11 ) )
if 42 - 42: Ii1I + oO0o
if 76 - 76: I1Ii111 - OoO0O00
if 70 - 70: ooOoO0o
if 61 - 61: I1ii11iIi11i . I1ii11iIi11i
if 10 - 10: OoOoOO00 * iII111i . I11i + II111iiii - ooOoO0o * i1IIi
if 56 - 56: o0oOOo0O0Ooo * IiII * II111iiii
if 80 - 80: o0oOOo0O0Ooo * II111iiii % II111iiii
if 59 - 59: iIii1I11I1II1 + I1IiiI - o0oOOo0O0Ooo - I1IiiI + OOooOOo / I1ii11iIi11i
if 24 - 24: I11i . iII111i % OOooOOo + ooOoO0o % OoOoOO00
if 4 - 4: IiII - OoO0O00 * OoOoOO00 - I11i
if 41 - 41: OoOoOO00 . I1IiiI * oO0o % IiII
if 86 - 86: I1IiiI + Ii1I % i11iIiiIii * oO0o . ooOoO0o * I11i
if 44 - 44: oO0o
if 88 - 88: I1Ii111 % Ii1I . II111iiii
if 38 - 38: o0oOOo0O0Ooo
if 57 - 57: O0 / oO0o * I1Ii111 / OoOoOO00 . II111iiii
if 26 - 26: iII111i
def oOOO00o ( data_structure , data ) :
OOO = [ "site-cache" , "map-cache" , "system" , "map-resolver" ,
"map-server" , "database-mapping" , "site-cache-summary" ]
if 59 - 59: II111iiii + OoooooooOO * OoOoOO00 + i1IIi
if ( data_structure not in OOO ) : return ( json . dumps ( [ ] ) )
if 58 - 58: II111iiii * OOooOOo * I1ii11iIi11i / OOooOOo
if 75 - 75: oO0o
if 50 - 50: Ii1I / Oo0Ooo - oO0o - I11i % iII111i - oO0o
if 91 - 91: OoO0O00 / I11i - II111iiii . I11i
if ( data_structure == "system" ) : return ( I1II1III11iii ( ) )
if 18 - 18: o0oOOo0O0Ooo
if 98 - 98: iII111i * iII111i / iII111i + I11i
if 34 - 34: ooOoO0o
if 15 - 15: I11i * ooOoO0o * Oo0Ooo % i11iIiiIii % OoOoOO00 - OOooOOo
if ( data != "" ) : data = json . dumps ( data )
O0ooo0O0oo0 = lisp . lisp_api_ipc ( "lisp-core" , data_structure + "%" + data )
if 91 - 91: iIii1I11I1II1 + I1Ii111
if ( data_structure in [ "map-cache" , "map-resolver" ] ) :
if ( lisp . lisp_is_running ( "lisp-rtr" ) ) :
lisp . lisp_ipc_lock . acquire ( )
lisp . lisp_ipc ( O0ooo0O0oo0 , Ooo , "lisp-rtr" )
elif ( lisp . lisp_is_running ( "lisp-itr" ) ) :
lisp . lisp_ipc_lock . acquire ( )
lisp . lisp_ipc ( O0ooo0O0oo0 , Ooo , "lisp-itr" )
else :
return ( json . dumps ( [ ] ) )
if 31 - 31: IiII . OoOoOO00 . OOooOOo
if 75 - 75: I11i + OoO0O00 . OoOoOO00 . ooOoO0o + Oo0Ooo . OoO0O00
if ( data_structure in [ "map-server" , "database-mapping" ] ) :
if ( lisp . lisp_is_running ( "lisp-etr" ) ) :
lisp . lisp_ipc_lock . acquire ( )
lisp . lisp_ipc ( O0ooo0O0oo0 , Ooo , "lisp-etr" )
elif ( lisp . lisp_is_running ( "lisp-itr" ) ) :
lisp . lisp_ipc_lock . acquire ( )
lisp . lisp_ipc ( O0ooo0O0oo0 , Ooo , "lisp-itr" )
else :
return ( json . dumps ( [ ] ) )
if 96 - 96: OOooOOo . ooOoO0o - Oo0Ooo + iIii1I11I1II1 / OoOoOO00 * OOooOOo
if 65 - 65: Ii1I . iIii1I11I1II1 / O0 - Ii1I
if ( data_structure in [ "site-cache" , "site-cache-summary" ] ) :
if ( lisp . lisp_is_running ( "lisp-ms" ) ) :
lisp . lisp_ipc_lock . acquire ( )
lisp . lisp_ipc ( O0ooo0O0oo0 , Ooo , "lisp-ms" )
else :
return ( json . dumps ( [ ] ) )
if 21 - 21: I1IiiI * iIii1I11I1II1
if 91 - 91: IiII
if 15 - 15: II111iiii
lisp . lprint ( "Waiting for api get-data '{}', parmameters: '{}'" . format ( data_structure , data ) )
if 18 - 18: i11iIiiIii . i1IIi % OoooooooOO / O0
if 75 - 75: OoOoOO00 % o0oOOo0O0Ooo % o0oOOo0O0Ooo . I1Ii111
III1iII1I1ii , oOOo0 , oo00O00oO , iIiIIIi = lisp . lisp_receive ( Ooo , True )
lisp . lisp_ipc_lock . release ( )
iIiIIIi = iIiIIIi . decode ( )
return ( iIiIIIi )
if 93 - 93: iII111i
if 10 - 10: I11i
if 82 - 82: I1ii11iIi11i - iIii1I11I1II1 / OOooOOo + Ii1I
if 87 - 87: oO0o * I1ii11iIi11i + OOooOOo / iIii1I11I1II1 / iII111i
if 37 - 37: iII111i - ooOoO0o * oO0o % i11iIiiIii - I1Ii111
if 83 - 83: I11i / I1IiiI
if 34 - 34: IiII
@ bottle . route ( '/lisp/api' , method = "put" )
@ bottle . route ( '/lisp/api/<command>' , method = "put" )
@ bottle . route ( '/lisp/api/<command>' , method = "delete" )
def oOo ( command = "" ) :
iiiI11 = [ { "?" : [ { "?" : "not-auth" } ] } ]
if ( bottle . request . auth == None ) : return ( iiiI11 )
if 75 - 75: I1IiiI + Oo0Ooo
if 73 - 73: O0 - OoooooooOO . OOooOOo - OOooOOo / OoOoOO00
if 45 - 45: iIii1I11I1II1 % OoO0O00
if 29 - 29: OOooOOo + Oo0Ooo . i11iIiiIii - i1IIi / iIii1I11I1II1
if ( bottle . request . auth != None ) :
I1Ii , o0oOo0Ooo0O = bottle . request . auth
if ( lispconfig . lisp_find_user_account ( I1Ii , o0oOo0Ooo0O ) == False ) :
return ( json . dumps ( iiiI11 ) )
if 26 - 26: I11i . OoooooooOO
else :
if ( bottle . request . headers [ "User-Agent" ] . find ( "python" ) != - 1 ) :
return ( json . dumps ( iiiI11 ) )
if 39 - 39: iII111i - O0 % i11iIiiIii * I1Ii111 . IiII
if ( lispconfig . lisp_validate_user ( ) == False ) :
return ( json . dumps ( iiiI11 ) )
if 58 - 58: OoO0O00 % i11iIiiIii . iII111i / oO0o
if 84 - 84: iII111i . I1ii11iIi11i / Oo0Ooo - I1IiiI / OoooooooOO / o0oOOo0O0Ooo
if 12 - 12: I1IiiI * iII111i % i1IIi % iIii1I11I1II1
if 20 - 20: OOooOOo % Ii1I / Ii1I + Ii1I
if 45 - 45: oO0o - IiII - OoooooooOO - OoO0O00 . II111iiii / O0
if 51 - 51: O0 + iII111i
if 8 - 8: oO0o * OoOoOO00 - Ii1I - OoO0O00 * OOooOOo % I1IiiI
if ( command == "user-account" ) :
if ( lispconfig . lisp_is_user_superuser ( I1Ii ) == False ) :
iiiI11 = [ { "user-account" : [ { "?" : "not-auth" } ] } ]
return ( json . dumps ( iiiI11 ) )
if 48 - 48: O0
if 11 - 11: I11i + OoooooooOO - OoO0O00 / o0oOOo0O0Ooo + Oo0Ooo . II111iiii
if 41 - 41: Ii1I - O0 - O0
if 68 - 68: OOooOOo % I1Ii111
if 88 - 88: iIii1I11I1II1 - ooOoO0o + OOooOOo
if 40 - 40: I1IiiI * Ii1I + OOooOOo % iII111i
O00oooo0O = bottle . request . body . readline ( )
if ( type ( O00oooo0O ) == bytes ) : O00oooo0O = O00oooo0O . decode ( )
if ( O00oooo0O == "" ) :
iiiI11 = [ { "?" : [ { "?" : "no-body" } ] } ]
return ( json . dumps ( iiiI11 ) )
if 74 - 74: oO0o - Oo0Ooo + OoooooooOO + I1Ii111 / OoOoOO00
if 23 - 23: O0
iiiI11 = json . loads ( O00oooo0O )
if ( command != "" ) :
command = "lisp " + command
else :
command = list ( iiiI11 [ 0 ] . keys ( ) ) [ 0 ]
if 85 - 85: Ii1I
if 84 - 84: I1IiiI . iIii1I11I1II1 % OoooooooOO + Ii1I % OoooooooOO % OoO0O00
if 42 - 42: OoO0O00 / I11i / o0oOOo0O0Ooo + iII111i / OoOoOO00
if 84 - 84: ooOoO0o * II111iiii + Oo0Ooo
if 53 - 53: iII111i % II111iiii . IiII - iIii1I11I1II1 - IiII * II111iiii
if 77 - 77: iIii1I11I1II1 * OoO0O00
lisp . lisp_ipc_lock . acquire ( )
if ( bottle . request . method == "DELETE" ) :
iiiI11 = lispconfig . lisp_remove_clause_for_api ( iiiI11 )
else :
iiiI11 = lispconfig . lisp_put_clause_for_api ( iiiI11 )
if 95 - 95: I1IiiI + i11iIiiIii
lisp . lisp_ipc_lock . release ( )
return ( json . dumps ( iiiI11 ) )
if 6 - 6: ooOoO0o / i11iIiiIii + iII111i * oO0o
if 80 - 80: II111iiii
if 83 - 83: I11i . i11iIiiIii + II111iiii . o0oOOo0O0Ooo * I11i
if 53 - 53: II111iiii
if 31 - 31: OoO0O00
@ bottle . route ( '/lisp/show/api-doc' , method = "get" )
def o0O ( ) :
if ( os . path . exists ( "lispapi.py" ) ) : os . system ( "pydoc lispapi > lispapi.txt" )
if ( os . path . exists ( "lispapi.txt" ) == False ) :
return ( "lispapi.txt file not found" )
if 2 - 2: iIii1I11I1II1 / oO0o + OoO0O00 / OOooOOo
return ( bottle . static_file ( "lispapi.txt" , root = "./" ) )
if 9 - 9: o0oOOo0O0Ooo . ooOoO0o - Oo0Ooo - oO0o + II111iiii * i11iIiiIii
if 79 - 79: oO0o % I11i % I1IiiI
if 5 - 5: OoooooooOO % OoOoOO00 % oO0o % iII111i
if 7 - 7: II111iiii + OoooooooOO . I1Ii111 . ooOoO0o - o0oOOo0O0Ooo
if 26 - 26: Oo0Ooo / IiII % iIii1I11I1II1 / IiII + I11i
@ bottle . route ( '/lisp/show/command-doc' , method = "get" )
def oOO0O00oO0Ooo ( ) :
return ( bottle . static_file ( "lisp.config.example" , root = "./" ,
mimetype = "text/plain" ) )
if 67 - 67: OoO0O00 - OOooOOo
if 36 - 36: IiII
if 36 - 36: ooOoO0o / O0 * Oo0Ooo - OOooOOo % iIii1I11I1II1 * oO0o
if 79 - 79: O0
if 78 - 78: I1ii11iIi11i + OOooOOo - I1Ii111
if 38 - 38: o0oOOo0O0Ooo - oO0o + iIii1I11I1II1 / OoOoOO00 % Oo0Ooo
if 57 - 57: OoO0O00 / ooOoO0o
@ bottle . route ( '/lisp/show/lisp-xtr' , method = "get" )
def Ii1I1Ii ( ) :
if ( lispconfig . lisp_validate_user ( ) == False ) :
return ( OOoO0 ( ) )
if 86 - 86: oO0o * o0oOOo0O0Ooo % i1IIi . Ii1I . i11iIiiIii
if 56 - 56: I1ii11iIi11i % O0 - I1IiiI
if 100 - 100: Ii1I - O0 % oO0o * OOooOOo + I1IiiI
if 88 - 88: OoooooooOO - OoO0O00 * O0 * OoooooooOO . OoooooooOO
if 33 - 33: I1Ii111 + iII111i * oO0o / iIii1I11I1II1 - I1IiiI
if 54 - 54: I1Ii111 / OOooOOo . oO0o % iII111i
if ( os . path . exists ( "./show-ztr" ) ) :
Oo = open ( "./show-ztr" , "r" ) ; O0OOOOo0O = Oo . read ( ) ; Oo . close ( )
else :
Oo = open ( "./show-xtr" , "r" ) ; O0OOOOo0O = Oo . read ( ) ; Oo . close ( )
if 81 - 81: O0 / OoO0O00 . i1IIi + I1IiiI - I11i
if 74 - 74: iIii1I11I1II1 * I1ii11iIi11i + OoOoOO00 / i1IIi / II111iiii . Oo0Ooo
oooOo0OOOoo0 = ""
O0OOOOo0O = O0OOOOo0O . split ( "\n" )
for OOoO in O0OOOOo0O :
if ( OOoO [ 0 : 4 ] == " " ) : oooOo0OOOoo0 += lisp . lisp_space ( 4 )
if ( OOoO [ 0 : 2 ] == " " ) : oooOo0OOOoo0 += lisp . lisp_space ( 2 )
oooOo0OOOoo0 += OOoO + "<br>"
if 89 - 89: o0oOOo0O0Ooo + OoO0O00 * I11i * Ii1I
oooOo0OOOoo0 = lisp . convert_font ( oooOo0OOOoo0 )
return ( lisp . lisp_print_sans ( oooOo0OOOoo0 ) )
if 37 - 37: OoooooooOO - O0 - o0oOOo0O0Ooo
if 77 - 77: OOooOOo * iIii1I11I1II1
if 98 - 98: I1IiiI % Ii1I * OoooooooOO
if 51 - 51: iIii1I11I1II1 . OoOoOO00 / oO0o + o0oOOo0O0Ooo
if 33 - 33: ooOoO0o . II111iiii % iII111i + o0oOOo0O0Ooo
if 71 - 71: Oo0Ooo % OOooOOo
if 98 - 98: I11i % i11iIiiIii % ooOoO0o + Ii1I
@ bottle . route ( '/lisp/show/<xtr>/keys' , method = "get" )
def OOoOO0o0o0 ( xtr ) :
if ( lispconfig . lisp_validate_user ( ) == False ) :
return ( OOoO0 ( ) )
if 11 - 11: I1IiiI
I1111i = lispconfig . lisp_is_user_superuser ( None )
if 14 - 14: OOooOOo / o0oOOo0O0Ooo
if ( I1111i == False ) :
iIiIIIi = "Permission denied"
return ( lispconfig . lisp_show_wrapper ( lisp . lisp_print_cour ( iIiIIIi ) ) )
if 32 - 32: I1IiiI * Oo0Ooo
if 78 - 78: OOooOOo - OoooooooOO - I1ii11iIi11i / ooOoO0o / II111iiii
if ( xtr not in [ "itr" , "etr" , "rtr" ] ) :
iIiIIIi = "Invalid URL"
return ( lispconfig . lisp_show_wrapper ( lisp . lisp_print_cour ( iIiIIIi ) ) )
if 29 - 29: I1IiiI % I1IiiI
Oo0O0 = "show {}-keys" . format ( xtr )
return ( lispconfig . lisp_process_show_command ( Ooo , Oo0O0 ) )
if 82 - 82: II111iiii % I11i / OoO0O00 + OoOoOO00 / o0oOOo0O0Ooo / I1Ii111
if 70 - 70: oO0o
if 59 - 59: o0oOOo0O0Ooo % oO0o
if 6 - 6: iIii1I11I1II1 % i11iIiiIii % I1ii11iIi11i
if 93 - 93: IiII * OoooooooOO + ooOoO0o
if 33 - 33: O0 * o0oOOo0O0Ooo - I1Ii111 % I1Ii111
if 18 - 18: I1Ii111 / Oo0Ooo * I1Ii111 + I1Ii111 * i11iIiiIii * I1ii11iIi11i
if 11 - 11: ooOoO0o / OoOoOO00 - IiII * OoooooooOO + OoooooooOO . OoOoOO00
@ bottle . route ( '/lisp/geo-map/<geo_prefix>' )
def i1I1i111Ii ( geo_prefix ) :
if ( lispconfig . lisp_validate_user ( ) == False ) :
return ( OOoO0 ( ) )
if 67 - 67: I1IiiI . i1IIi
if 27 - 27: ooOoO0o % I1IiiI
geo_prefix = geo_prefix . split ( "-" )
geo_prefix = "-" . join ( geo_prefix [ 0 : - 1 ] ) + "/" + geo_prefix [ - 1 ]
o0oooOO00 = lisp . lisp_geo ( "" )
o0oooOO00 . parse_geo_string ( geo_prefix )
iiIiii1IIIII , o00o = o0oooOO00 . dms_to_decimal ( )
II = o0oooOO00 . radius * 1000
if 7 - 7: I1ii11iIi11i - I1IiiI . iIii1I11I1II1 - i1IIi
o0OOOoO0 = open ( "./lispers.net-geo.html" , "r" ) ; o0OoOo00o0o = o0OOOoO0 . read ( ) ; o0OOOoO0 . close ( )
o0OoOo00o0o = o0OoOo00o0o . replace ( "$LAT" , str ( iiIiii1IIIII ) )
o0OoOo00o0o = o0OoOo00o0o . replace ( "$LON" , str ( o00o ) )
o0OoOo00o0o = o0OoOo00o0o . replace ( "$RADIUS" , str ( II ) )
return ( o0OoOo00o0o )
if 41 - 41: ooOoO0o % OoO0O00 - Oo0Ooo * I1Ii111 * Oo0Ooo
if 69 - 69: OOooOOo - OoooooooOO + o0oOOo0O0Ooo - I11i
if 23 - 23: i11iIiiIii
if 30 - 30: o0oOOo0O0Ooo - i1IIi % II111iiii + I11i * iIii1I11I1II1
if 81 - 81: IiII % i1IIi . iIii1I11I1II1
if 4 - 4: i11iIiiIii % OoO0O00 % i1IIi / IiII
if 6 - 6: iII111i / I1IiiI % OOooOOo - I1IiiI
@ bottle . route ( '/lisp/login' , method = "get" )
def OOoO0 ( ) :
return ( lispconfig . lisp_login_page ( ) )
if 31 - 31: OOooOOo
if 23 - 23: I1Ii111 . IiII
if 92 - 92: OoOoOO00 + I1Ii111 * Ii1I % I1IiiI
if 42 - 42: Oo0Ooo
if 76 - 76: I1IiiI * iII111i % I1Ii111
if 57 - 57: iIii1I11I1II1 - i1IIi / I1Ii111 - O0 * OoooooooOO % II111iiii
if 68 - 68: OoooooooOO * I11i % OoOoOO00 - IiII
if 34 - 34: I1Ii111 . iIii1I11I1II1 * OoOoOO00 * oO0o / I1Ii111 / I1ii11iIi11i
@ bottle . route ( '/lisp/login' , method = "post" )
def oOoOOo0O ( ) :
if ( lispconfig . lisp_validate_user ( ) ) :
return ( lispconfig . lisp_landing_page ( ) )
if 84 - 84: OoO0O00 + i1IIi - II111iiii . I1ii11iIi11i * OoooooooOO + I1IiiI
return ( OOoO0 ( ) )
if 38 - 38: OOooOOo + II111iiii % ooOoO0o % OoOoOO00 - Ii1I / OoooooooOO
if 73 - 73: o0oOOo0O0Ooo * O0 - i11iIiiIii
if 85 - 85: Ii1I % iII111i + I11i / o0oOOo0O0Ooo . oO0o + OOooOOo
if 62 - 62: i11iIiiIii + i11iIiiIii - o0oOOo0O0Ooo
if 28 - 28: iII111i . iII111i % iIii1I11I1II1 * iIii1I11I1II1 . o0oOOo0O0Ooo / iII111i
if 27 - 27: OoO0O00 + ooOoO0o - i1IIi
if 69 - 69: IiII - O0 % I1ii11iIi11i + i11iIiiIii . OoOoOO00 / OoO0O00
@ bottle . route ( '/lisp' )
def OoOoo00Ooo00 ( ) :
if ( lispconfig . lisp_validate_user ( ) == False ) :
return ( OOoO0 ( ) )
if 57 - 57: I1Ii111
return ( lispconfig . lisp_landing_page ( ) )
if 32 - 32: Ii1I - Oo0Ooo % OoooooooOO . iII111i / IiII + I1IiiI
if 76 - 76: ooOoO0o
if 73 - 73: O0 * iII111i + Ii1I + ooOoO0o
if 40 - 40: II111iiii . OoOoOO00 * I1Ii111 + OOooOOo + OOooOOo
if 9 - 9: I11i % OoooooooOO . oO0o % I11i
if 32 - 32: i11iIiiIii
if 31 - 31: iIii1I11I1II1 / OoO0O00 / I1ii11iIi11i
@ bottle . route ( '/lisp/traceback' )
def iiIiIi ( ) :
if ( lispconfig . lisp_validate_user ( ) == False ) :
return ( OOoO0 ( ) )
if 39 - 39: I1Ii111
if 91 - 91: OoooooooOO - iIii1I11I1II1 + OoOoOO00 / OoO0O00 . OoOoOO00 + O0
iIiii1iI1 = True
if 33 - 33: IiII % iIii1I11I1II1 * I1IiiI
if 95 - 95: ooOoO0o / ooOoO0o
if 30 - 30: I1ii11iIi11i + Oo0Ooo / Oo0Ooo % I1ii11iIi11i . I1ii11iIi11i
if 55 - 55: ooOoO0o - I11i + II111iiii + iII111i % Ii1I
if ( os . path . exists ( "./logs/lisp-traceback.log" ) ) :
iIiIIIi = getoutput ( "cat ./logs/lisp-traceback.log" )
if ( iIiIIIi ) :
iIiIIIi = iIiIIIi . replace ( "----------" , "<b>----------</b>" )
iIiIIIi = iIiIIIi . replace ( "\n" , "<br>" )
iIiii1iI1 = False
if 41 - 41: i1IIi - I11i - Ii1I
if 8 - 8: OoO0O00 + I1Ii111 - o0oOOo0O0Ooo % Oo0Ooo % o0oOOo0O0Ooo * oO0o
if 9 - 9: Oo0Ooo - i11iIiiIii - OOooOOo * Ii1I + ooOoO0o
if 44 - 44: II111iiii
if 52 - 52: I1ii11iIi11i - Oo0Ooo + I1ii11iIi11i % o0oOOo0O0Ooo
if 35 - 35: iIii1I11I1II1
if ( iIiii1iI1 ) :
iIiIIIi = ""
I1i = "egrep --with-filename Traceback ./logs/*.log"
iIII = getoutput ( I1i )
iIII = iIII . split ( "\n" )
for o0o0O in iIII :
if ( o0o0O . find ( ":" ) == - 1 ) : continue
OOoO = o0o0O . split ( ":" )
if ( OOoO [ 1 ] == "0" ) : continue
iIiIIIi += "Found Tracebacks in log file {}<br>" . format ( OOoO [ 0 ] )
iIiii1iI1 = False
if 68 - 68: ooOoO0o
iIiIIIi = iIiIIIi [ 0 : - 4 ]
if 25 - 25: I1ii11iIi11i . ooOoO0o
if 24 - 24: oO0o / i11iIiiIii + oO0o
if ( iIiii1iI1 ) :
iIiIIIi = "No Tracebacks found - a stable system is a happy system"
if 20 - 20: I11i + Ii1I / O0 % iIii1I11I1II1
if 88 - 88: OoOoOO00 / II111iiii
iIiIIIi = lisp . lisp_print_cour ( iIiIIIi )
return ( lispconfig . lisp_show_wrapper ( iIiIIIi ) )
if 87 - 87: I1ii11iIi11i - I1ii11iIi11i - iII111i + oO0o
if 82 - 82: oO0o / iIii1I11I1II1 . I1IiiI . OOooOOo / o0oOOo0O0Ooo
if 42 - 42: Oo0Ooo
if 19 - 19: oO0o % I1ii11iIi11i * iIii1I11I1II1 + I1IiiI
if 46 - 46: Oo0Ooo
if 1 - 1: iII111i
if 97 - 97: OOooOOo + iII111i + O0 + i11iIiiIii
@ bottle . route ( '/lisp/show/not-supported' )
def oOoO0 ( ) :
if ( lispconfig . lisp_validate_user ( ) == False ) :
return ( OOoO0 ( ) )
if 77 - 77: iIii1I11I1II1 . iII111i % iII111i + i11iIiiIii
return ( lispconfig . lisp_not_supported ( ) )
if 72 - 72: iIii1I11I1II1 * Ii1I % ooOoO0o / OoO0O00
if 35 - 35: ooOoO0o + i1IIi % I1ii11iIi11i % I11i + oO0o
if 17 - 17: i1IIi
if 21 - 21: Oo0Ooo
if 29 - 29: I11i / II111iiii / ooOoO0o * OOooOOo
if 10 - 10: I1Ii111 % IiII * IiII . I11i / Ii1I % OOooOOo
if 49 - 49: OoO0O00 / oO0o + O0 * o0oOOo0O0Ooo
@ bottle . route ( '/lisp/show/status' )
def I1ii11 ( ) :
if ( lispconfig . lisp_validate_user ( ) == False ) :
return ( OOoO0 ( ) )
if 74 - 74: Oo0Ooo - o0oOOo0O0Ooo . i1IIi
if 43 - 43: iII111i / I1IiiI
if 58 - 58: I1IiiI + i11iIiiIii % Ii1I . OoOoOO00
if 13 - 13: i11iIiiIii + i1IIi * iIii1I11I1II1 % OoooooooOO - II111iiii * OOooOOo
if 26 - 26: OoooooooOO * I1IiiI + OOooOOo
iIiIIIi = ""
I1111i = lispconfig . lisp_is_user_superuser ( None )
if ( I1111i ) :
IiIii1i111 = lisp . lisp_button ( "show configuration" , "/lisp/show/conf" )
iI = lisp . lisp_button ( "show configuration diff" , "/lisp/show/diff" )
o0o00 = lisp . lisp_button ( "archive configuration" , "/lisp/archive/conf" )
IIi = lisp . lisp_button ( "clear configuration" , "/lisp/clear/conf/verify" )
o0o0O = lisp . lisp_button ( "log flows" , "/lisp/log/flows" )
oOoO00oo0O = lisp . lisp_button ( "install LISP software" , "/lisp/install/image" )
IiiiI = lisp . lisp_button ( "restart LISP subsystem" , "/lisp/restart/verify" )
if 61 - 61: OOooOOo % OOooOOo * o0oOOo0O0Ooo / o0oOOo0O0Ooo
iIiIIIi = "<center>{}{}{}{}{}{}{}</center><hr>" . format ( IiIii1i111 , iI , o0o00 , IIi ,
o0o0O , oOoO00oo0O , IiiiI )
if 75 - 75: IiII . ooOoO0o
if 50 - 50: OoOoOO00
O00o0OO0000oo = getoutput ( "uptime" )
i1OO0oOOoo = getoutput ( "uname -pv" )
oOOO00o000o = lisp . lisp_version . replace ( "+" , "" )
if 9 - 9: oO0o + I11i / I11i
if 12 - 12: OoooooooOO % o0oOOo0O0Ooo * I11i % iIii1I11I1II1 / Ii1I
if 27 - 27: i11iIiiIii % II111iiii % I11i . O0 - Oo0Ooo + OoOoOO00
if 57 - 57: iIii1I11I1II1 / I11i - i1IIi
if 51 - 51: IiII
ii11I1 = multiprocessing . cpu_count ( )
if 75 - 75: OoO0O00 / II111iiii % O0
Ii111iIi1iIi = O00o0OO0000oo . find ( ", load" )
O00o0OO0000oo = O00o0OO0000oo [ 0 : Ii111iIi1iIi ]
IIIII = lisp . lisp_print_elapsed ( lisp . lisp_uptime )
if 78 - 78: Ii1I * i1IIi
iI11 = "Not available"
if 96 - 96: OOooOOo
if 85 - 85: o0oOOo0O0Ooo . OoOoOO00 / ooOoO0o . O0 % I1Ii111
if 90 - 90: Oo0Ooo % O0 * iIii1I11I1II1 . iII111i
if 8 - 8: ooOoO0o + II111iiii / iII111i / I11i
ooo0O = "ps auww" if lisp . lisp_is_macos ( ) else "ps aux"
iII1iii = "egrep 'PID|python lisp|python -O lisp|python3.8 -O lisp'"
iII1iii += "| egrep -v grep"
i11i1iiiII = getoutput ( "{} | {}" . format ( ooo0O , iII1iii ) )
i11i1iiiII = i11i1iiiII . replace ( " " , lisp . space ( 1 ) )
i11i1iiiII = i11i1iiiII . replace ( "\n" , "<br>" )
if 68 - 68: i11iIiiIii * OoO0O00
if 46 - 46: OoOoOO00 / iIii1I11I1II1 % iII111i . iIii1I11I1II1 * iII111i
if 38 - 38: I1ii11iIi11i - iII111i / O0 . I1Ii111
if 45 - 45: I1Ii111
if ( i1OO0oOOoo . find ( "Darwin" ) != - 1 ) :
ii11I1 = old_div ( ii11I1 , 2 )
iI11 = getoutput ( "top -l 1 | head -50" )
iI11 = iI11 . split ( "PID" )
iI11 = iI11 [ 0 ]
if 83 - 83: OoOoOO00 . OoooooooOO
if 58 - 58: i11iIiiIii + OoooooooOO % OoooooooOO / IiII / i11iIiiIii
if 62 - 62: OoO0O00 / I1ii11iIi11i
if 7 - 7: OoooooooOO . IiII
if 53 - 53: Ii1I % Ii1I * o0oOOo0O0Ooo + OoOoOO00
Ii111iIi1iIi = iI11 . find ( "Load Avg" )
Oooo00 = iI11 [ 0 : Ii111iIi1iIi ] . find ( "threads" )
I111iIi1 = iI11 [ 0 : Oooo00 + 7 ]
iI11 = I111iIi1 + "<br>" + iI11 [ Ii111iIi1iIi : : ]
Ii111iIi1iIi = iI11 . find ( "CPU usage" )
iI11 = iI11 [ 0 : Ii111iIi1iIi ] + "<br>" + iI11 [ Ii111iIi1iIi : : ]
Ii111iIi1iIi = iI11 . find ( "SharedLibs:" )
iI11 = iI11 [ 0 : Ii111iIi1iIi ] + "<br>" + iI11 [ Ii111iIi1iIi : : ]
Ii111iIi1iIi = iI11 . find ( "MemRegions" )
iI11 = iI11 [ 0 : Ii111iIi1iIi ] + "<br>" + iI11 [ Ii111iIi1iIi : : ]
Ii111iIi1iIi = iI11 . find ( "PhysMem" )
iI11 = iI11 [ 0 : Ii111iIi1iIi ] + "<br>" + iI11 [ Ii111iIi1iIi : : ]
Ii111iIi1iIi = iI11 . find ( "VM:" )
iI11 = iI11 [ 0 : Ii111iIi1iIi ] + "<br>" + iI11 [ Ii111iIi1iIi : : ]
Ii111iIi1iIi = iI11 . find ( "Networks" )
iI11 = iI11 [ 0 : Ii111iIi1iIi ] + "<br>" + iI11 [ Ii111iIi1iIi : : ]
Ii111iIi1iIi = iI11 . find ( "Disks" )
iI11 = iI11 [ 0 : Ii111iIi1iIi ] + "<br>" + iI11 [ Ii111iIi1iIi : : ]
else :
if 92 - 92: ooOoO0o
if 22 - 22: Oo0Ooo % iII111i * I1ii11iIi11i / OOooOOo % i11iIiiIii * I11i
if 95 - 95: OoooooooOO - IiII * I1IiiI + OoOoOO00
if 10 - 10: o0oOOo0O0Ooo / i11iIiiIii
O0OOOOo0O = getoutput ( "top -b -n 1 | head -50" )
O0OOOOo0O = O0OOOOo0O . split ( "PID" )
O0OOOOo0O [ 1 ] = O0OOOOo0O [ 1 ] . replace ( " " , lisp . space ( 1 ) )
O0OOOOo0O = O0OOOOo0O [ 0 ] + O0OOOOo0O [ 1 ]
iI11 = O0OOOOo0O . replace ( "\n" , "<br>" )
if 92 - 92: I11i . I1Ii111
if 85 - 85: I1ii11iIi11i . I1Ii111
O0O0Ooooo000 = getoutput ( "cat release-notes.txt" )
O0O0Ooooo000 = O0O0Ooooo000 . replace ( "\n" , "<br>" )
if 65 - 65: OOooOOo * I1Ii111
iIiIIIi += '''
<br><table align="center" border="1" cellspacing="3x" cellpadding="5x">
<tr>
<td width="20%"><i>LISP Subsystem Version:<br>
LISP Release {} Build Date:</i></td>
<td width="80%"><font face="Courier New">{}<br>
{}</font></td>
</tr>
<tr>
<td width="20%"><i>LISP Subsystem Uptime:<br>System Uptime:</i></td>
<td width="80%"><font face="Courier New">{}<br>
{}</font></td>
</tr>
<tr>
<td width="20%"><i>System Architecture:<br>
Number of CPUs:<font face="Courier New">{}{}</font></td>
<td width="80%"><font face="Courier New">{}</font></td>
</tr>
<tr>
<td width="20%" valign="top"><i>LISP Process Status:</i></td>
<td width="80%">
<div style="height: 100px; overflow: auto">
<font size="2" face="Courier New">{}</font></div></td>
</tr>
<tr>
<td width="20%" valign="top"><i>System Resource Utilization:</i></td>
<td width="80%">
<div style="height: 200px; overflow: auto">
<font face="Courier New">{}</font></td>
</tr>
<tr>
<td width="20%" valign="top"><i>Release Notes:</i></td>
<td width="80%">
<div style="height: 300px; overflow: auto">
<font size="2" face="Courier New">{}</font></div></td>
</tr>
</table>
''' . format ( oOOO00o000o , lisp . lisp_version , o0oO0 , IIIII ,
O00o0OO0000oo , lisp . lisp_space ( 1 ) , ii11I1 , i1OO0oOOoo , i11i1iiiII , iI11 ,
O0O0Ooooo000 )
if 79 - 79: OoooooooOO - I1IiiI
return ( lispconfig . lisp_show_wrapper ( iIiIIIi ) )
if 69 - 69: I11i
if 95 - 95: ooOoO0o + i11iIiiIii * I1Ii111 - i1IIi * I1Ii111 - iIii1I11I1II1
if 75 - 75: OoooooooOO * IiII
if 9 - 9: IiII - II111iiii + O0 / iIii1I11I1II1 / i11iIiiIii
if 39 - 39: IiII * Oo0Ooo + iIii1I11I1II1 - IiII + OOooOOo
if 69 - 69: O0
if 85 - 85: ooOoO0o / O0
@ bottle . route ( '/lisp/show/conf' )
def iI1iIIIi1i ( ) :
if ( lispconfig . lisp_validate_user ( ) == False ) :
return ( OOoO0 ( ) )
if 89 - 89: iIii1I11I1II1
return ( bottle . static_file ( "lisp.config" , root = "./" , mimetype = "text/plain" ) )
if 21 - 21: I11i % I11i
if 27 - 27: i11iIiiIii / I1ii11iIi11i
if 84 - 84: Oo0Ooo
if 43 - 43: oO0o - OoooooooOO
if 3 - 3: O0 / iII111i
if 31 - 31: OOooOOo + o0oOOo0O0Ooo . OoooooooOO
if 89 - 89: II111iiii + i1IIi + II111iiii
@ bottle . route ( '/lisp/show/diff' )
def IiII1II11I ( ) :
if ( lispconfig . lisp_validate_user ( ) == False ) :
return ( OOoO0 ( ) )
if 54 - 54: IiII + O0 + I11i * I1Ii111 - OOooOOo % oO0o
return ( bottle . static_file ( "lisp.config.diff" , root = "./" ,
mimetype = "text/plain" ) )
if 13 - 13: ooOoO0o / iII111i * OoO0O00 . OoO0O00 * ooOoO0o
if 63 - 63: I1Ii111 / O0 * Oo0Ooo + II111iiii / IiII + Ii1I
if 63 - 63: OoO0O00 + I1ii11iIi11i . I1Ii111 % I1Ii111
if 57 - 57: II111iiii
if 54 - 54: Oo0Ooo + oO0o + i11iIiiIii
if 28 - 28: oO0o
if 70 - 70: IiII
@ bottle . route ( '/lisp/archive/conf' )
def i11i1iiI1i ( ) :
if ( lispconfig . lisp_validate_user ( ) == False ) :
return ( OOoO0 ( ) )
if 87 - 87: ooOoO0o
if 45 - 45: OoO0O00 / OoooooooOO - iII111i / Ii1I % IiII
lisp . lisp_ipc_lock . acquire ( )
os . system ( "cp ./lisp.config ./lisp.config.archive" )
lisp . lisp_ipc_lock . release ( )
if 83 - 83: I1IiiI . iIii1I11I1II1 - IiII * i11iIiiIii
iIiIIIi = "Configuration file saved to "
iIiIIIi = lisp . lisp_print_sans ( iIiIIIi )
iIiIIIi += lisp . lisp_print_cour ( "./lisp.config.archive" )
return ( lispconfig . lisp_show_wrapper ( iIiIIIi ) )
if 20 - 20: i1IIi * I1Ii111 + II111iiii % o0oOOo0O0Ooo % oO0o
if 13 - 13: Oo0Ooo
if 60 - 60: I1ii11iIi11i * I1IiiI
if 17 - 17: OOooOOo % Oo0Ooo / I1ii11iIi11i . IiII * OOooOOo - II111iiii
if 41 - 41: Ii1I
if 77 - 77: I1Ii111
if 65 - 65: II111iiii . I1IiiI % oO0o * OoO0O00
@bottle.route('/lisp/clear/conf')
def iI11I():
    if (lispconfig.lisp_validate_user() == False):
        return(OOoO0())

    os.system("cp ./lisp.config ./lisp.config.before-clear")
    lisp.lisp_ipc_lock.acquire()
    O0O0oOOo0O()
    lisp.lisp_ipc_lock.release()

    output = "Configuration cleared, a backup copy is stored in "
    output = lisp.lisp_print_sans(output)
    output += lisp.lisp_print_cour("./lisp.config.before-clear")
    return(lispconfig.lisp_show_wrapper(output))

@bottle.route('/lisp/clear/conf/verify')
def ii1Ii1IiIIi():
    if (lispconfig.lisp_validate_user() == False):
        return(OOoO0())

    output = "<br>Are you sure you want to clear the configuration?"
    output = lisp.lisp_print_sans(output)

    yes_button = lisp.lisp_button("yes", "/lisp/clear/conf")
    cancel_button = lisp.lisp_button("cancel", "/lisp")
    output += yes_button + cancel_button + "<br>"
    return(lispconfig.lisp_show_wrapper(output))

#
# Find the port number the running lisp-core.pyo process was started with
# by scanning the process table.
#
def Oo0oOooo000OO():
    port = ""

    for p in ["443", "-8080", "8080"]:
        command = 'ps auxww | egrep "lisp-core.pyo {}" | egrep -v grep'.format(p)
        output = getoutput(command)
        if (output == ""): continue

        output = output.split("\n")[0]
        output = output.split(" ")
        if (output[-2] == "lisp-core.pyo" and output[-1] == p): port = p
        break

    return(port)

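#
# Illustration of the parse the helper above performs (the ps line is
# hypothetical):
#
#   line   = "root 1234 0.0 ... python -O lisp-core.pyo 8080"
#   fields = line.split(" ")
#   fields[-2] == "lisp-core.pyo" and fields[-1] == "8080"   # port found
#
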
#
# Restart the LISP subsystem by running the RESTART-LISP script in a
# separate thread.
#
@bottle.route('/lisp/restart')
def OO000o00():
    if (lispconfig.lisp_validate_user() == False):
        return(OOoO0())

    #
    # If /etc/sudoers has "Defaults requiretty", sudo cannot run from this
    # non-tty context, so tell the user and give up.
    #
    sudoers = getoutput("egrep requiretty /etc/sudoers").split(" ")
    if (sudoers[-1] == "requiretty" and sudoers[0] == "Defaults"):
        output = "Need to remove 'requiretty' from /etc/sudoers"
        output = lisp.lisp_print_sans(output)
        return(lispconfig.lisp_show_wrapper(output))

    lisp.lprint(lisp.bold("LISP subsystem restart request received", False))

    #
    # Pass the port lisp-core is currently running on so RESTART-LISP can
    # start the web interface the same way.
    #
    port = Oo0oOooo000OO()

    command = "sleep 1; sudo ./RESTART-LISP {}".format(port)
    threading.Thread(target=Ii1Iii111IiI1, args=[command]).start()

    output = lisp.lisp_print_sans("Restarting LISP subsystem ...")
    return(lispconfig.lisp_show_wrapper(output))

#
# Thread entry point to run a shell command.
#
def Ii1Iii111IiI1(command):
    os.system(command)

@bottle.route('/lisp/restart/verify')
def II1II1():
    if (lispconfig.lisp_validate_user() == False):
        return(OOoO0())

    output = "<br>Are you sure you want to restart the LISP subsystem?"
    output = lisp.lisp_print_sans(output)

    yes_button = lisp.lisp_button("yes", "/lisp/restart")
    cancel_button = lisp.lisp_button("cancel", "/lisp")
    output += yes_button + cancel_button + "<br>"
    return(lispconfig.lisp_show_wrapper(output))

#
# Download and install a lispers.net tarball named by the posted
# "image_url" form field.
#
@bottle.route('/lisp/install', method="post")
def oooo0OOo():
    if (lispconfig.lisp_validate_user() == False):
        return(OOoO0())

    image_url = bottle.request.forms.get("image_url")
    if (image_url.find("lispers.net") == -1 or image_url.find(".tgz") == -1):
        msg = "Invalid install request for file {}".format(image_url)
        lisp.lprint(lisp.bold(msg, False))
        output = lisp.lisp_print_sans("Invalid lispers.net tarball file name")
        return(lispconfig.lisp_show_wrapper(output))

    if (lisp.lisp_is_python2()):
        python = "python -O "
        suffix = "pyo"

    if (lisp.lisp_is_python3()):
        python = "python3.8 -O "
        suffix = "pyc"

    if (lisp.lisp_is_ubuntu()):
        command = "{} lisp-get-bits.{} {} force 2>&1 > /dev/null".format(python, suffix, image_url)
    else:
        command = "{} lisp-get-bits.{} {} force >& /dev/null".format(python, suffix, image_url)

    status = os.system(command)

    filename = image_url.split("/")[-1]

    if (os.path.exists(filename)):
        release = image_url.split("release-")[1]
        release = release.split(".tgz")[0]

        output = "Install completed for release {}".format(release)
        output = lisp.lisp_print_sans(output)

        output += "<br><br>" + lisp.lisp_button("restart LISP subsystem",
            "/lisp/restart/verify") + "<br>"
    else:
        msg = lisp.lisp_print_cour(image_url)
        output = "Install failed for file {}".format(msg)
        output = lisp.lisp_print_sans(output)

    msg = "Install request for file {} {}".format(image_url,
        "succeeded" if (status == 0) else "failed")
    lisp.lprint(lisp.bold(msg, False))
    return(lispconfig.lisp_show_wrapper(output))

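#
# Example of the accepted form value (URL is hypothetical; the shape is
# inferred from the checks above): it must contain "lispers.net" and
# ".tgz", and the release number is the text between "release-" and ".tgz":
#
#   image_url = "https://www.lispers.net/bits/release-0.500.tgz"
#   release   = image_url.split("release-")[1].split(".tgz")[0]   # "0.500"
#
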
@bottle.route('/lisp/install/image')
def ooO00O00oOO():
    if (lispconfig.lisp_validate_user() == False):
        return(OOoO0())

    prompt = lisp.lisp_print_sans("<br>Enter lispers.net tarball URL:")
    output = '''
<form action="/lisp/install" method="post" style="display: inline;">
{}
<input type="text" name="image_url" size="75" required/>
<input type="submit" style="background-color:transparent;border-radius:10px;" value="Submit" />
</form><br>'''.format(prompt)

    return(lispconfig.lisp_show_wrapper(output))

@bottle.route('/lisp/log/flows')
def IIIIIiII1():
    if (lispconfig.lisp_validate_user() == False):
        return(OOoO0())

    os.system("touch ./log-flows")

    output = lisp.lisp_print_sans("Flow data appended to file ")
    link = "<a href='/lisp/show/log/lisp-flow/100'>logs/lisp-flows.log</a>"
    output += lisp.lisp_print_cour(link)
    return(lispconfig.lisp_show_wrapper(output))

#
# Search the tail of a log file for a keyword and return the matches,
# with 10 lines of context on each side, as HTML.
#
@bottle.route('/lisp/search/log/<name>/<num>/<keyword>')
def oo00ooOoo(name="", num="", keyword=""):
    if (lispconfig.lisp_validate_user() == False):
        return(OOoO0())

    command = "tail -n {} logs/{}.log | egrep -B10 -A10 {}".format(num, name,
        keyword)
    output = getoutput(command)

    if (output):
        count = output.count(keyword)
        output = lisp.convert_font(output)
        output = output.replace("--\n--\n", "--\n")
        output = output.replace("\n", "<br>")
        output = output.replace("--<br>", "<hr>")
        output = "Found <b>{}</b> occurrences<hr>".format(count) + output
    else:
        output = "Keyword {} not found".format(keyword)

    #
    # Highlight each keyword occurrence in bold blue; the first replace
    # opens the font tag around the keyword, the second closes it.
    #
    blue = "<font color='blue'><b>{}</b>".format(keyword)
    output = output.replace(keyword, blue)
    output = output.replace(keyword, keyword + "</font>")

    output = lisp.lisp_print_cour(output)
    return(lispconfig.lisp_show_wrapper(output))

@bottle.post('/lisp/search/log/<name>/<num>')
def III1iii1i11iI(name="", num=""):
    if (lispconfig.lisp_validate_user() == False):
        return(OOoO0())

    keyword = bottle.request.forms.get("keyword")
    return(oo00ooOoo(name, num, keyword))

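#
# Sketch of what the two search handlers above run for, say, log name
# "lisp-itr", num 500, keyword "RLOC-probe" (values illustrative):
#
#   tail -n 500 logs/lisp-itr.log | egrep -B10 -A10 RLOC-probe
#
# The matches are then font-converted and returned as HTML.
#
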
@bottle.route('/lisp/show/log/<name>/<num>')
def Ii(name="", num=""):
    if (lispconfig.lisp_validate_user() == False):
        return(OOoO0())

    if (num == ""): num = 100

    search_form = '''
<form action="/lisp/search/log/{}/{}" method="post">
<i>Keyword search:</i>
<input type="text" name="keyword" />
<input style="background-color:transparent;border-radius:10px;" type="submit" value="Submit" />
</form><hr>
'''.format(name, num)

    if (os.path.exists("logs/{}.log".format(name))):
        output = getoutput("tail -n {} logs/{}.log".format(num, name))
        output = lisp.convert_font(output)
        output = output.replace("\n", "<br>")
        output = search_form + lisp.lisp_print_cour(output)
    else:
        f = lisp.lisp_print_sans("File")
        filename = lisp.lisp_print_cour("logs/{}.log".format(name))
        does_not_exist = lisp.lisp_print_sans("does not exist")
        output = "{} {} {}".format(f, filename, does_not_exist)

    return(lispconfig.lisp_show_wrapper(output))

#
# Enable or disable debug options. The URL component is either
# "disable%all" or "<option>%<yes-or-no>"; data-plane-logging and
# flow-logging live in the "lisp xtr-parameters" clause, everything else
# in the "lisp debug" clause.
#
@bottle.route('/lisp/debug/<name>')
def IiI1i111IiIiIi1(name=""):
    if (lispconfig.lisp_validate_user() == False):
        return(OOoO0())

    if (name == "disable%all"):
        clause = lispconfig.lisp_get_clause_for_api("lisp debug")
        if ("lisp debug" in clause[0]):
            new_clause = []
            for entry in clause[0]["lisp debug"]:
                key = list(entry.keys())[0]
                new_clause.append({key: "no"})

            new_clause = {"lisp debug": new_clause}
            lispconfig.lisp_put_clause_for_api(new_clause)

        clause = lispconfig.lisp_get_clause_for_api("lisp xtr-parameters")
        if ("lisp xtr-parameters" in clause[0]):
            new_clause = []
            for entry in clause[0]["lisp xtr-parameters"]:
                key = list(entry.keys())[0]
                if (key in ["data-plane-logging", "flow-logging"]):
                    new_clause.append({key: "no"})
                else:
                    new_clause.append({key: entry[key]})

            new_clause = {"lisp xtr-parameters": new_clause}
            lispconfig.lisp_put_clause_for_api(new_clause)

        return(lispconfig.lisp_landing_page())

    name = name.split("%")
    debug_type = name[0]
    value = name[1]

    xtr_parameters = ["data-plane-logging", "flow-logging"]

    clause_name = "lisp xtr-parameters" if (debug_type in xtr_parameters) else "lisp debug"

    clause = lispconfig.lisp_get_clause_for_api(clause_name)

    if (clause_name in clause[0]):
        new_clause = {}
        for entry in clause[0][clause_name]:
            new_clause[list(entry.keys())[0]] = list(entry.values())[0]
        if (debug_type in new_clause): new_clause[debug_type] = value

        new_clause = {clause_name: new_clause}
        lispconfig.lisp_put_clause_for_api(new_clause)

    return(lispconfig.lisp_landing_page())

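#
# Sketch of the clause rewrite for a URL like /lisp/debug/map-request%yes
# (clause contents are illustrative, not from a real config):
#
#   clause[0]  = {"lisp debug": [{"map-request": "no"}, {"map-reply": "no"}]}
#   new_clause = {"lisp debug": {"map-request": "yes", "map-reply": "no"}}
#
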
#
# Clear the map-cache, referral-cache, or decapsulation stats in the
# process responsible for them. Example paths are listed after this
# handler.
#
@bottle.route('/lisp/clear/<name>')
@bottle.route('/lisp/clear/etr/<etr_name>/<stats_name>')
@bottle.route('/lisp/clear/rtr/<rtr_name>/<stats_name>')
@bottle.route('/lisp/clear/itr/<itr_name>')
@bottle.route('/lisp/clear/rtr/<rtr_name>')
def iiIIi(name="", itr_name='', rtr_name="", etr_name="", stats_name=""):

    if (lispconfig.lisp_validate_user() == False):
        return(OOoO0())

    #
    # Only the superuser is allowed to clear state.
    #
    if (lispconfig.lisp_is_user_superuser(None) == False):
        output = lisp.lisp_print_sans("Not authorized")
        return(lispconfig.lisp_show_wrapper(output))

    command = "clear"
    if (name == "referral"):
        process = "lisp-mr"
        what = "Referral"
    elif (itr_name == "map-cache"):
        process = "lisp-itr"
        what = "ITR <a href='/lisp/show/itr/map-cache'>map-cache</a>"
    elif (rtr_name == "map-cache"):
        process = "lisp-rtr"
        what = "RTR <a href='/lisp/show/rtr/map-cache'>map-cache</a>"
    elif (etr_name == "stats"):
        process = "lisp-etr"
        what = ("ETR '{}' decapsulation <a href='/lisp/show/" + "database'>stats</a>").format(stats_name)

        command += "%" + stats_name
    elif (rtr_name == "stats"):
        process = "lisp-rtr"
        what = ("RTR '{}' decapsulation <a href='/lisp/show/" + "rtr/map-cache'>stats</a>").format(stats_name)

        command += "%" + stats_name
    else:
        output = lisp.lisp_print_sans("Invalid command")
        return(lispconfig.lisp_show_wrapper(output))

    #
    # Send the clear command over IPC to the responsible process.
    #
    command = lisp.lisp_command_ipc(command, "lisp-core")
    lisp.lisp_ipc(command, Ooo, process)

    #
    # If static map-cache entries are configured, touch the config file so
    # it is reread and the entries are reinstalled after the clear.
    #
    exists = getoutput("egrep 'lisp map-cache' ./lisp.config")
    if (exists != ""):
        os.system("touch ./lisp.config")

    output = lisp.lisp_print_sans("{} cleared".format(what))
    return(lispconfig.lisp_show_wrapper(output))

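#
# Paths accepted by the clear handler above, and the process each one is
# IPC'd to:
#
#   /lisp/clear/referral                 lisp-mr
#   /lisp/clear/itr/map-cache            lisp-itr
#   /lisp/clear/rtr/map-cache            lisp-rtr
#   /lisp/clear/etr/stats/<stats-name>   lisp-etr
#   /lisp/clear/rtr/stats/<stats-name>   lisp-rtr
#
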
@bottle.route('/lisp/show/map-server')
def o0o0O00():
    if (lispconfig.lisp_validate_user() == False):
        return(OOoO0())

    return(lispconfig.lisp_process_show_command(Ooo,
        "show map-server"))

@bottle.route('/lisp/show/database')
def OO0o0oO0O000o():
    if (lispconfig.lisp_validate_user() == False):
        return(OOoO0())

    return(lispconfig.lisp_process_show_command(Ooo,
        "show database-mapping"))

@bottle.route('/lisp/show/itr/map-cache')
def oO():
    if (lispconfig.lisp_validate_user() == False):
        return(OOoO0())

    return(lispconfig.lisp_process_show_command(Ooo,
        "show itr-map-cache"))

@bottle.route('/lisp/show/itr/rloc-probing')
def O0oo0O0():
    if (lispconfig.lisp_validate_user() == False):
        return(OOoO0())

    return(lispconfig.lisp_process_show_command(Ooo,
        "show itr-rloc-probing"))

@bottle.post('/lisp/show/itr/map-cache/lookup')
def oOo00Ooo0o0():
    if (lispconfig.lisp_validate_user() == False):
        return(OOoO0())

    eid = bottle.request.forms.get("eid")
    if (lispconfig.lisp_validate_input_address_string(eid) == False):
        output = "Address '{}' has invalid format".format(eid)
        output = lisp.lisp_print_sans(output)
        return(lispconfig.lisp_show_wrapper(output))

    command = "show itr-map-cache" + "%" + eid
    return(lispconfig.lisp_process_show_command(Ooo,
        command))

@bottle.route('/lisp/show/rtr/map-cache')
@bottle.route('/lisp/show/rtr/map-cache/<dns>')
def O0o00o000oO(dns=""):
    if (lispconfig.lisp_validate_user() == False):
        return(OOoO0())

    if (dns == "dns"):
        return(lispconfig.lisp_process_show_command(Ooo,
            "show rtr-map-cache-dns"))
    else:
        return(lispconfig.lisp_process_show_command(Ooo,
            "show rtr-map-cache"))

@bottle.route('/lisp/show/rtr/rloc-probing')
def i1i111Iiiiiii():
    if (lispconfig.lisp_validate_user() == False):
        return(OOoO0())

    return(lispconfig.lisp_process_show_command(Ooo,
        "show rtr-rloc-probing"))

@bottle.post('/lisp/show/rtr/map-cache/lookup')
def oO00():
    if (lispconfig.lisp_validate_user() == False):
        return(OOoO0())

    eid = bottle.request.forms.get("eid")
    if (lispconfig.lisp_validate_input_address_string(eid) == False):
        output = "Address '{}' has invalid format".format(eid)
        output = lisp.lisp_print_sans(output)
        return(lispconfig.lisp_show_wrapper(output))

    command = "show rtr-map-cache" + "%" + eid
    return(lispconfig.lisp_process_show_command(Ooo,
        command))

@bottle.route('/lisp/show/referral')
def i1iiii():
    if (lispconfig.lisp_validate_user() == False):
        return(OOoO0())

    return(lispconfig.lisp_process_show_command(Ooo,
        "show referral-cache"))

@bottle.post('/lisp/show/referral/lookup')
def OoO():
    if (lispconfig.lisp_validate_user() == False):
        return(OOoO0())

    eid = bottle.request.forms.get("eid")
    if (lispconfig.lisp_validate_input_address_string(eid) == False):
        output = "Address '{}' has invalid format".format(eid)
        output = lisp.lisp_print_sans(output)
        return(lispconfig.lisp_show_wrapper(output))

    command = "show referral-cache" + "%" + eid
    return(lispconfig.lisp_process_show_command(Ooo, command))

@bottle.route('/lisp/show/delegations')
def ooo000oOO():
    if (lispconfig.lisp_validate_user() == False):
        return(OOoO0())

    return(lispconfig.lisp_process_show_command(Ooo,
        "show delegations"))

@bottle.post('/lisp/show/delegations/lookup')
def O0O():
    if (lispconfig.lisp_validate_user() == False):
        return(OOoO0())

    eid = bottle.request.forms.get("eid")
    if (lispconfig.lisp_validate_input_address_string(eid) == False):
        output = "Address '{}' has invalid format".format(eid)
        output = lisp.lisp_print_sans(output)
        return(lispconfig.lisp_show_wrapper(output))

    command = "show delegations" + "%" + eid
    return(lispconfig.lisp_process_show_command(Ooo, command))

@bottle.route('/lisp/show/site')
@bottle.route('/lisp/show/site/<eid_prefix>')
def O0o0oo0oOO0oO(eid_prefix=""):
    if (lispconfig.lisp_validate_user() == False):
        return(OOoO0())

    command = "show site"

    if (eid_prefix != ""):
        command = lispconfig.lisp_parse_eid_in_url(command, eid_prefix)

    return(lispconfig.lisp_process_show_command(Ooo, command))

@bottle.route('/lisp/show/itr/dynamic-eid/<eid_prefix>')
def O00O(eid_prefix=""):
    if (lispconfig.lisp_validate_user() == False):
        return(OOoO0())

    command = "show itr-dynamic-eid"

    if (eid_prefix != ""):
        command = lispconfig.lisp_parse_eid_in_url(command, eid_prefix)

    return(lispconfig.lisp_process_show_command(Ooo, command))

@bottle.route('/lisp/show/etr/dynamic-eid/<eid_prefix>')
def Oo0O0oo0o00o0(eid_prefix=""):
    if (lispconfig.lisp_validate_user() == False):
        return(OOoO0())

    command = "show etr-dynamic-eid"

    if (eid_prefix != ""):
        command = lispconfig.lisp_parse_eid_in_url(command, eid_prefix)

    return(lispconfig.lisp_process_show_command(Ooo, command))

@bottle.post('/lisp/show/site/lookup')
def IiII1i1iI():
    if (lispconfig.lisp_validate_user() == False):
        return(OOoO0())

    eid = bottle.request.forms.get("eid")
    if (lispconfig.lisp_validate_input_address_string(eid) == False):
        output = "Address '{}' has invalid format".format(eid)
        output = lisp.lisp_print_sans(output)
        return(lispconfig.lisp_show_wrapper(output))

    command = "show site" + "%" + eid + "@lookup"
    return(lispconfig.lisp_process_show_command(Ooo, command))

#
# Run the lisp-lig tool for an EID lookup requested from the web form and
# return its output as HTML.
#
@bottle.post('/lisp/lig')
def iii():
    if (lispconfig.lisp_validate_user() == False):
        return(OOoO0())

    eid = bottle.request.forms.get("eid")
    mr = bottle.request.forms.get("mr")
    count = bottle.request.forms.get("count")
    no_nat = "no-info" if bottle.request.forms.get("no-nat") == "yes" else ""

    #
    # Default the Map-Resolver to localhost.
    #
    if (mr == ""): mr = "localhost"

    if (eid == ""):
        output = "Need to supply EID address"
        return(lispconfig.lisp_show_wrapper(lisp.lisp_print_cour(output)))

    lig = ""
    if os.path.exists("lisp-lig.pyo"): lig = "python -O lisp-lig.pyo"
    if os.path.exists("lisp-lig.pyc"): lig = "python3.8 -O lisp-lig.pyc"
    if os.path.exists("lisp-lig.py"): lig = "python lisp-lig.py"

    if (lig == ""):
        output = "Cannot find lisp-lig.py, lisp-lig.pyo, or lisp-lig.pyc"
        return(lispconfig.lisp_show_wrapper(lisp.lisp_print_cour(output)))

    if (count != ""): count = "count {}".format(count)

    command = '{} "{}" to {} {} {}'.format(lig, eid, mr, count, no_nat)

    output = getoutput(command)
    output = output.replace("\n", "<br>")
    output = lisp.convert_font(output)

    #
    # Indent some of the lig output fields for readability.
    #
    rloc = lisp.space(2) + "RLOC:"
    output = output.replace("RLOC:", rloc)
    empty = lisp.space(2) + "Empty,"
    output = output.replace("Empty,", empty)
    geo = lisp.space(4) + "geo:"
    output = output.replace("geo:", geo)
    elp = lisp.space(4) + "elp:"
    output = output.replace("elp:", elp)
    rle = lisp.space(4) + "rle:"
    output = output.replace("rle:", rle)
    return(lispconfig.lisp_show_wrapper(lisp.lisp_print_cour(output)))

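#
# Example of the command line the handler above builds (all values
# illustrative): EID "fd00::1", Map-Resolver "10.0.0.1", count "3", and
# the no-nat box checked produce:
#
#   python3.8 -O lisp-lig.pyc "fd00::1" to 10.0.0.1 count 3 no-info
#
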
#
# Run the lisp-rig tool for a DDT lookup requested from the web form and
# return its output as HTML.
#
@bottle.post('/lisp/rig')
def i111I11I():
    if (lispconfig.lisp_validate_user() == False):
        return(OOoO0())

    eid = bottle.request.forms.get("eid")
    ddt = bottle.request.forms.get("ddt")
    follow = "follow-all-referrals" if bottle.request.forms.get("follow") == "yes" else ""

    #
    # Default the DDT-node to localhost.
    #
    if (ddt == ""): ddt = "localhost"

    if (eid == ""):
        output = "Need to supply EID address"
        return(lispconfig.lisp_show_wrapper(lisp.lisp_print_cour(output)))

    rig = ""
    if os.path.exists("lisp-rig.pyo"): rig = "python -O lisp-rig.pyo"
    if os.path.exists("lisp-rig.pyc"): rig = "python3.8 -O lisp-rig.pyc"
    if os.path.exists("lisp-rig.py"): rig = "python lisp-rig.py"

    if (rig == ""):
        output = "Cannot find lisp-rig.py, lisp-rig.pyo, or lisp-rig.pyc"
        return(lispconfig.lisp_show_wrapper(lisp.lisp_print_cour(output)))

    command = '{} "{}" to {} {}'.format(rig, eid, ddt, follow)

    output = getoutput(command)
    output = output.replace("\n", "<br>")
    output = lisp.convert_font(output)

    referrals = lisp.space(2) + "Referrals:"
    output = output.replace("Referrals:", referrals)
    return(lispconfig.lisp_show_wrapper(lisp.lisp_print_cour(output)))

#
# Return the geo-strings for two EIDs. An argument that is already a
# geo-string is returned as-is; otherwise lisp-lig is run against a
# Map-Resolver found in lisp.config and the geo-string is extracted from
# the map-reply, retrying with "no-info" when the first lookup fails.
#
def iiI1iI(eid1, eid2):
    lig = None
    if os.path.exists("lisp-lig.pyo"): lig = "python -O lisp-lig.pyo"
    if os.path.exists("lisp-lig.pyc"): lig = "python3.8 -O lisp-lig.pyc"
    if os.path.exists("lisp-lig.py"): lig = "python lisp-lig.py"
    if (lig == None): return([None, None])

    #
    # Find a Map-Resolver address or dns-name in the config file.
    #
    grep = getoutput("egrep -A 2 'lisp map-resolver {' ./lisp.config")
    mr = None
    for keyword in ["address = ", "dns-name = "]:
        mr = None
        index = grep.find(keyword)
        if (index == -1): continue
        mr = grep[index + len(keyword)::]
        index = mr.find("\n")
        if (index == -1): continue
        mr = mr[0:index]
        break

    if (mr == None): return([None, None])

    addr = lisp.lisp_address(lisp.LISP_AFI_NONE, "", 0, 0)
    geos = []
    for eid in [eid1, eid2]:
        if (addr.is_geo_string(eid)):
            geos.append(eid)
            continue

        command = '{} "{}" to {} count 1'.format(lig, eid, mr)
        for cmd in [command, command + " no-info"]:
            output = getoutput(cmd)
            index = output.find("geo: ")
            if (index == -1):
                if (cmd != command): geos.append(None)
                continue

            output = output[index + len("geo: ")::]
            index = output.find("\n")
            if (index == -1):
                if (cmd != command): geos.append(None)
                continue

            geos.append(output[0:index])
            break

    return(geos)

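#
# Contract sketch for the helper above (geo-string values are
# illustrative, not real lookups): each argument comes back either
# unchanged, resolved via lisp-lig, or None on failure:
#
#   iiI1iI("37-24-60-N-122-9-17-W", "fd00::2")
#       -> ["37-24-60-N-122-9-17-W", "37-25-0-N-122-9-0-W/500"]
#
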
#
# Compute the distance between a geo-point and a geo-prefix and report
# whether the point falls within the prefix's radius. The form fields may
# be geo-strings or EIDs; EIDs are looked up with lisp-lig first.
#
@bottle.post('/lisp/geo')
def o0OO0oooo():
    if (lispconfig.lisp_validate_user() == False):
        return(OOoO0())

    geo_point = bottle.request.forms.get("geo-point")
    geo_prefix = bottle.request.forms.get("geo-prefix")
    output = ""

    addr = lisp.lisp_address(lisp.LISP_AFI_NONE, "", 0, 0)
    point = lisp.lisp_geo("")
    prefix = lisp.lisp_geo("")
    point_string, prefix_string = iiI1iI(geo_point, geo_prefix)

    if (addr.is_geo_string(geo_point)):
        if (point.parse_geo_string(geo_point) == False):
            output = "Could not parse geo-point format"

    elif (point_string == None):
        output = "EID {} lookup could not find geo-point".format(
            lisp.bold(geo_point, True))
    elif (point.parse_geo_string(point_string) == False):
        output = "Could not parse geo-point format returned from lookup"

    if (output == ""):
        if (addr.is_geo_string(geo_prefix)):
            if (prefix.parse_geo_string(geo_prefix) == False):
                output = "Could not parse geo-prefix format"

        elif (prefix_string == None):
            output = "EID-prefix {} lookup could not find geo-prefix".format(lisp.bold(geo_prefix, True))

        elif (prefix.parse_geo_string(prefix_string) == False):
            output = "Could not parse geo-prefix format returned from lookup"

    #
    # Compute the distance between the two geo-points and test whether the
    # point falls inside the geo-prefix's radius.
    #
    if (output == ""):
        geo_point = "" if (geo_point == point_string) else ", EID {}".format(geo_point)
        geo_prefix = "" if (geo_prefix == prefix_string) else ", EID-prefix {}".format(geo_prefix)

        point_url = point.print_geo_url()
        prefix_url = prefix.print_geo_url()
        radius = prefix.radius
        point_dec = point.dms_to_decimal()
        point_dec = (round(point_dec[0], 6), round(point_dec[1], 6))
        prefix_dec = prefix.dms_to_decimal()
        prefix_dec = (round(prefix_dec[0], 6), round(prefix_dec[1], 6))
        distance = round(prefix.get_distance(point), 2)
        where = "inside" if prefix.point_in_circle(point) else "outside"

        indent2 = lisp.space(2)
        indent1 = lisp.space(1)
        indent3 = lisp.space(3)

        output = ("Geo-Point:{}{} {}{}<br>Geo-Prefix:{}{} {}, {} " + "kilometer radius{}<br>").format(indent2, point_url, point_dec, geo_point,
            indent1, prefix_url, prefix_dec, radius, geo_prefix)
        output += "Distance:{}{} kilometers, point is {} of circle".format(indent3,
            distance, lisp.bold(where, True))

    return(lispconfig.lisp_show_wrapper(lisp.lisp_print_cour(output)))

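#
# Worked example of the final computation above (numbers illustrative):
# with a geo-prefix radius of 10 km and get_distance() returning 7.42 km,
# point_in_circle() is True and the page reports the point is "inside"
# the circle.
#
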
#
# Find a NAT info-source either by translated address and port or by a
# cached Map-Request nonce.
#
def O0Oo0O00o0oo0OO(addr_str, port, nonce):
    if (addr_str != None):
        for info_source in list(lisp.lisp_info_sources_by_address.values()):
            info_addr = info_source.address.print_address_no_iid()
            if (info_addr == addr_str and info_source.port == port):
                return(info_source)

        return(None)

    if (nonce != None):
        if (nonce not in lisp.lisp_info_sources_by_nonce): return(None)
        return(lisp.lisp_info_sources_by_nonce[nonce])

    return(None)

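#
# The two lookup modes of the helper above (argument values are
# illustrative):
#
#   O0Oo0O00o0oo0OO("10.1.1.1", 16385, None)   # by translated addr/port
#   O0Oo0O00o0oo0OO(None, 0, 0x12345678)       # by cached Map-Request nonce
#
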
#
# Process a Map-Request that arrived in an ECM from a NAT'd info-source.
# Rewrite the ITR-RLOC list with our own RLOC so the Map-Reply returns
# here and can be nat-proxied back. Returns True when the packet was
# consumed here, False when the caller should process it.
#
def oOiIi(lisp_sockets, info_source, packet):

    #
    # Decode the ECM header and make sure a Map-Request is inside.
    #
    ecm = lisp.lisp_ecm(0)
    packet = ecm.decode(packet)
    if (packet == None):
        lisp.lprint("Could not decode ECM packet")
        return(True)

    header = lisp.lisp_control_header()
    if (header.decode(packet) == None):
        lisp.lprint("Could not decode control header")
        return(True)

    if (header.type != lisp.LISP_MAP_REQUEST):
        lisp.lprint("Received ECM without Map-Request inside")
        return(True)

    #
    # Decode the Map-Request and remember its nonce for the info-source.
    #
    map_request = lisp.lisp_map_request()
    packet = map_request.decode(packet, None, 0)
    nonce = map_request.nonce
    addr_str = info_source.address.print_address_no_iid()

    map_request.print_map_request()

    lisp.lprint("Process {} from info-source {}, port {}, nonce 0x{}".format(lisp.bold("nat-proxy Map-Request", False),
        lisp.red(addr_str, False), info_source.port,
        lisp.lisp_hex_string(nonce)))

    info_source.cache_nonce_for_info_source(nonce)

    #
    # Do not time out an info-source that subscribed.
    #
    info_source.no_timeout = map_request.subscribe_bit

    #
    # If any ITR-RLOC in the Map-Request is local, the Map-Request did not
    # really come from behind a NAT, so let the caller process it.
    #
    for itr_rloc in map_request.itr_rlocs:
        if (itr_rloc.is_local()): return(False)

    #
    # Replace the ITR-RLOC list with our own RLOC so the Map-Reply comes
    # back to this system, which will nat-proxy it to the info-source.
    #
    rloc = lisp.lisp_myrlocs[0]
    map_request.itr_rloc_count = 0
    map_request.itr_rlocs = []
    map_request.itr_rlocs.append(rloc)

    packet = map_request.encode(None, 0)
    map_request.print_map_request()

    eid = map_request.target_eid
    if (eid.is_ipv6()):
        rloc6 = lisp.lisp_myrlocs[1]
        if (rloc6 != None): rloc = rloc6

    #
    # Send the rewritten Map-Request in an ECM toward the target EID.
    #
    to_ms = lisp.lisp_is_running("lisp-ms")
    lisp.lisp_send_ecm(lisp_sockets, packet, eid, lisp.LISP_CTRL_PORT,
        eid, rloc, to_ms=to_ms, ddt=False)
    return(True)

#
# Forward a Map-Reply or Map-Notify to the NAT'd info-source it belongs
# to, using the info-source's translated address and port.
#
def o00O00O0Oo0(lisp_sockets, info_source, packet, mr_or_mn):
    addr_str = info_source.address.print_address_no_iid()
    port = info_source.port
    nonce = info_source.nonce

    mr_or_mn = "Reply" if mr_or_mn else "Notify"
    mr_or_mn = lisp.bold("nat-proxy Map-{}".format(mr_or_mn), False)

    lisp.lprint("Forward {} to info-source {}, port {}, nonce 0x{}".format(mr_or_mn, lisp.red(addr_str, False), port,
        lisp.lisp_hex_string(nonce)))

    dest = lisp.lisp_convert_4to6(addr_str)
    lisp.lisp_send(lisp_sockets, dest, port, packet)

def OO0o0o0oo ( lisp_sockets , source , sport , packet ) :
global Ooo
if 40 - 40: Oo0Ooo
oO0ooOOO = lisp . lisp_control_header ( )
if ( oO0ooOOO . decode ( packet ) == None ) :
lisp . lprint ( "Could not decode control header" )
return
if 47 - 47: OoOoOO00
if 65 - 65: O0 + I1Ii111 % Ii1I * I1IiiI / ooOoO0o / OoOoOO00
if 71 - 71: i11iIiiIii / OoOoOO00 . oO0o
if 33 - 33: oO0o
if 39 - 39: OoO0O00 + O0 + ooOoO0o * II111iiii % O0 - O0
if 41 - 41: IiII % o0oOOo0O0Ooo
if 67 - 67: O0 % I1Ii111
if 35 - 35: I1IiiI . OoOoOO00 + OoooooooOO % Oo0Ooo % OOooOOo
if 39 - 39: Ii1I
if 60 - 60: OOooOOo
    #
    # NAT-traversal Info-Requests are processed locally; pass along the
    # RTR list learned from the map-server.
    #
    if (oO0ooOOO.type == lisp.LISP_NAT_INFO):
        if (oO0ooOOO.info_reply == False):
            lisp.lisp_process_info_request(lisp_sockets, packet, source,
                sport, lisp.lisp_ms_rtr_list)
        return

    IIo0oo0OO = packet
    packet = lisp.lisp_packet_ipc(packet, source, sport)

    #
    # Map-Registers and Map-Notify-Acks go to the lisp-ms process.
    #
    if (oO0ooOOO.type in (lisp.LISP_MAP_REGISTER, lisp.LISP_MAP_NOTIFY_ACK)):
        lisp.lisp_ipc(packet, Ooo, "lisp-ms")
        return

    #
    # Map-Replies are returned to a waiting lig client when the nonce
    # matches an outstanding request, otherwise handed to lisp-itr.
    #
    if (oO0ooOOO.type == lisp.LISP_MAP_REPLY):
        ii11Ii1IiiI1 = lisp.lisp_map_reply()
        ii11Ii1IiiI1.decode(IIo0oo0OO)

        OooO00 = O0Oo0O00o0oo0OO(None, 0, ii11Ii1IiiI1.nonce)
        if (OooO00):
            o00O00O0Oo0(lisp_sockets, OooO00, IIo0oo0OO, True)
        else:
            iIIi1Ii1III = "/tmp/lisp-lig"
            if (os.path.exists(iIIi1Ii1III)):
                lisp.lisp_ipc(packet, Ooo, iIIi1Ii1III)
            else:
                lisp.lisp_ipc(packet, Ooo, "lisp-itr")
        return

    #
    # Map-Notifies are returned to a waiting client when the nonce matches,
    # otherwise handed to a lig, or to whichever of lisp-rtr/lisp-etr runs.
    #
    if (oO0ooOOO.type == lisp.LISP_MAP_NOTIFY):
        IIii1i = lisp.lisp_map_notify(lisp_sockets)
        IIii1i.decode(IIo0oo0OO)

        OooO00 = O0Oo0O00o0oo0OO(None, 0, IIii1i.nonce)
        if (OooO00):
            o00O00O0Oo0(lisp_sockets, OooO00, IIo0oo0OO, False)
        else:
            iIIi1Ii1III = "/tmp/lisp-lig"
            if (os.path.exists(iIIi1Ii1III)):
                lisp.lisp_ipc(packet, Ooo, iIIi1Ii1III)
            else:
                iIii11iI1II = "lisp-rtr" if lisp.lisp_is_running("lisp-rtr") \
                    else "lisp-etr"
                lisp.lisp_ipc(packet, Ooo, iIii11iI1II)
        return

    #
    # Map-Referrals go to a waiting rig client, otherwise to lisp-mr.
    #
    if (oO0ooOOO.type == lisp.LISP_MAP_REFERRAL):
        Ooo000O00 = "/tmp/lisp-rig"
        if (os.path.exists(Ooo000O00)):
            lisp.lisp_ipc(packet, Ooo, Ooo000O00)
        else:
            lisp.lisp_ipc(packet, Ooo, "lisp-mr")
        return

    #
    # SMR-based Map-Requests go to lisp-itr, all others to lisp-etr.
    # RLOC-probes are not forwarded at all.
    #
    if (oO0ooOOO.type == lisp.LISP_MAP_REQUEST):
        iIii11iI1II = "lisp-itr" if (oO0ooOOO.is_smr()) else "lisp-etr"
        if (oO0ooOOO.rloc_probe): return
        lisp.lisp_ipc(packet, Ooo, iIii11iI1II)
        return

    #
    # ECM packets: hand back to a waiting lig client when one matches,
    # otherwise pick the destination process from the ECM flag bits and
    # from which component processes are actually running.
    #
    if (oO0ooOOO.type == lisp.LISP_ECM):
        OooO00 = O0Oo0O00o0oo0OO(source, sport, None)
        if (OooO00):
            if (oOiIi(lisp_sockets, OooO00, IIo0oo0OO)): return

        iIii11iI1II = "lisp-mr"
        if (oO0ooOOO.is_to_etr()):
            iIii11iI1II = "lisp-etr"
        elif (oO0ooOOO.is_to_ms()):
            iIii11iI1II = "lisp-ms"
        elif (oO0ooOOO.is_ddt()):
            if (lisp.lisp_is_running("lisp-ddt")):
                iIii11iI1II = "lisp-ddt"
            elif (lisp.lisp_is_running("lisp-ms")):
                iIii11iI1II = "lisp-ms"
        elif (lisp.lisp_is_running("lisp-mr") == False):
            iIii11iI1II = "lisp-etr"

        lisp.lisp_ipc(packet, Ooo, iIii11iI1II)
        return
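#
# Summary of the control-packet dispatch above (descriptive comment derived
# from the code; the lisp-* names are the lispers.net component processes):
#
#   LISP_NAT_INFO                            -> processed locally
#   LISP_MAP_REGISTER / LISP_MAP_NOTIFY_ACK  -> lisp-ms
#   LISP_MAP_REPLY                           -> waiting lig client, else lisp-itr
#   LISP_MAP_NOTIFY                          -> waiting client/lig, else lisp-rtr or lisp-etr
#   LISP_MAP_REFERRAL                        -> waiting rig client, else lisp-mr
#   LISP_MAP_REQUEST                         -> lisp-itr for SMRs, else lisp-etr
#   LISP_ECM                                 -> lisp-mr/lisp-etr/lisp-ms/lisp-ddt by flags
#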
class iiIiiII1II1ii(bottle.ServerAdapter):
    def run(self, hand):
        i1iI1iiI = "./lisp-cert.pem"

        #
        # If no TLS certificate is present, install a copy of the default.
        #
        if (os.path.exists(i1iI1iiI) == False):
            os.system("cp ./lisp-cert.pem.default {}".format(i1iI1iiI))
            lisp.lprint(("{} does not exist, creating a copy from lisp-" + \
                "cert.pem.default").format(i1iI1iiI))

        iiI1iiIiiiI1I = wsgi_server((self.host, self.port), hand)
        iiI1iiIiiiI1I.ssl_adapter = ssl_adaptor(i1iI1iiI, i1iI1iiI, None)
        try:
            iiI1iiIiiiI1I.start()
        finally:
            iiI1iiIiiiI1I.stop()
def ooO(bottle_port):
    lisp.lisp_set_exception()

    #
    # A negative port means run the web interface without TLS.
    #
    if (bottle_port < 0):
        bottle.run(host="0.0.0.0", port=-bottle_port)
        return

    bottle.server_names["lisp-ssl-server"] = iiIiiII1II1ii

    #
    # Try the TLS server first and fall back to plain HTTP if it fails.
    #
    try:
        bottle.run(host="0.0.0.0", port=bottle_port, server="lisp-ssl-server",
            fast=True)
    except:
        lisp.lprint("Could not startup lisp-ssl-server, running insecurely")
        bottle.run(host="0.0.0.0", port=bottle_port)
    return
def oOO0O0O0OO00oo():
    lisp.lisp_set_exception()
    return
def IiI1I1IIIi1i(lisp_socket):
    lisp.lisp_set_exception()
    i11i1iiiII = {"lisp-itr": False, "lisp-etr": False, "lisp-rtr": False,
        "lisp-mr": False, "lisp-ms": False, "lisp-ddt": False}

    while (True):
        time.sleep(1)
        OO00OoOO = i11i1iiiII
        i11i1iiiII = {}

        for iIii11iI1II in OO00OoOO:
            i11i1iiiII[iIii11iI1II] = lisp.lisp_is_running(iIii11iI1II)
            if (OO00OoOO[iIii11iI1II] == i11i1iiiII[iIii11iI1II]): continue

            lisp.lprint("*** Process '{}' has {} ***".format(iIii11iI1II,
                "come up" if i11i1iiiII[iIii11iI1II] else "gone down"))

            #
            # When a component process comes up, push its configuration to
            # it while holding the IPC lock.
            #
            if (i11i1iiiII[iIii11iI1II] == True):
                lisp.lisp_ipc_lock.acquire()
                lispconfig.lisp_send_commands(lisp_socket, iIii11iI1II)
                lisp.lisp_ipc_lock.release()
    return
def iI1I1ii11IIi1():
    lisp.lisp_set_exception()
    OOo = 60

    while (True):
        time.sleep(OOo)

        o00 = []
        i111iiIiiIiI = lisp.lisp_get_timestamp()

        #
        # Walk the info-source cache and collect entries to remove, along
        # with their nonce-table entries.
        #
        for iI1i in lisp.lisp_info_sources_by_address:
            OooO00 = lisp.lisp_info_sources_by_address[iI1i]
            if (OooO00.no_timeout): continue
            if (OooO00.uptime + OOo < i111iiIiiIiI): continue

            o00.append(iI1i)

            iIi = OooO00.nonce
            if (iIi == None): continue
            if (iIi in lisp.lisp_info_sources_by_nonce):
                lisp.lisp_info_sources_by_nonce.pop(iIi)

        #
        # Now remove the collected entries from the address table.
        #
        for iI1i in o00:
            lisp.lisp_info_sources_by_address.pop(iI1i)
    return
def o0OoOo0O00(lisp_ipc_control_socket, lisp_sockets):
    lisp.lisp_set_exception()
    while (True):
        try: iI1i1iI1iI = lisp_ipc_control_socket.recvfrom(9000)
        except: return(["", "", "", ""])
        iiiI11 = iI1i1iI1iI[0].split(b"@")
        oOOo0 = iI1i1iI1iI[1]

        III1iII1I1ii = iiiI11[0].decode()
        O0o0O0O0O = iiiI11[1].decode()
        oo00O00oO = int(iiiI11[2])
        OOoOoO = iiiI11[3::]

        #
        # If the payload itself contained "@" bytes, the split produced
        # several segments; stitch them back together.
        #
        if (len(OOoOoO) > 1):
            OOoOoO = lisp.lisp_bit_stuff(OOoOoO)
        else:
            OOoOoO = OOoOoO[0]

        if (III1iII1I1ii != "control-packet"):
            lisp.lprint(("lisp_core_control_packet_process() received " + \
                "unexpected control-packet, message ignored"))
            continue

        lisp.lprint(("{} {} bytes from {}, dest/port: {}/{}, control-" + \
            "packet: {}").format(lisp.bold("Receive", False), len(OOoOoO),
            oOOo0, O0o0O0O0O, oo00O00oO, lisp.lisp_format_packet(OOoOoO)))

        #
        # Map-Replies for an outstanding lig request are handed back to the
        # requesting client.
        #
        oO0ooOOO = lisp.lisp_control_header()
        oO0ooOOO.decode(OOoOoO)
        if (oO0ooOOO.type == lisp.LISP_MAP_REPLY):
            ii11Ii1IiiI1 = lisp.lisp_map_reply()
            ii11Ii1IiiI1.decode(OOoOoO)
            if (O0Oo0O00o0oo0OO(None, 0, ii11Ii1IiiI1.nonce)):
                OO0o0o0oo(lisp_sockets, oOOo0, oo00O00oO, OOoOoO)
                continue

        #
        # Map-Notifies originated by the lisp-etr process are relayed to
        # the lisp-itr process.
        #
        if (oO0ooOOO.type == lisp.LISP_MAP_NOTIFY and oOOo0 == "lisp-etr"):
            O0ooo0O0oo0 = lisp.lisp_packet_ipc(OOoOoO, oOOo0, oo00O00oO)
            lisp.lisp_ipc(O0ooo0O0oo0, Ooo, "lisp-itr")
            continue

        #
        # Otherwise send the control packet on the wire. An IPv4 destination
        # string is converted to an IPv4-mapped IPv6 address first.
        #
        IIIIIiI11Ii = lisp.lisp_address(lisp.LISP_AFI_IPV6, "", 128, 0)
        if (IIIIIiI11Ii.is_ipv4_string(O0o0O0O0O)): O0o0O0O0O = "::ffff:" + O0o0O0O0O
        IIIIIiI11Ii.store_address(O0o0O0O0O)

        lisp.lisp_send(lisp_sockets, IIIIIiI11Ii, oo00O00oO, OOoOoO)
    return
def O0O0oOOo0O():
    Oo = open("./lisp.config.example", "r"); O0OOOOo0O = Oo.read(); Oo.close()
    Oo = open("./lisp.config", "w")
    O0OOOOo0O = O0OOOOo0O.split("\n")
    for OOoO in O0OOOOo0O:
        Oo.write(OOoO + "\n")

        #
        # Stop copying after the comment ruler that closes the header
        # block. Slicing is used so empty lines cannot raise IndexError.
        #
        if (OOoO[0:1] == "#" and OOoO[-1:] == "#" and len(OOoO) >= 4):
            i1Ii1i11ii = OOoO[1:-2]
            oO0O0oo = len(i1Ii1i11ii) * "-"
            if (i1Ii1i11ii == oO0O0oo): break

    Oo.close()
    return
def oOO0O00o0O0(bottle_port):
    global o0oO0
    global I1Ii11I1Ii1i
    global Ooo
    global o0oOoO00o
    global i1
    global oOOoo00O0O

    lisp.lisp_i_am("core")
    lisp.lisp_set_exception()
    lisp.lisp_print_banner("core-process starting up")
    lisp.lisp_version = getoutput("cat lisp-version.txt")
    o0oO0 = getoutput("cat lisp-build-date.txt")

    if (lisp.lisp_get_local_addresses() == False): return(False)

    #
    # The IPC lock serializes configuration pushes to component processes.
    #
    lisp.lisp_ipc_lock = multiprocessing.Lock()

    #
    # Flag a source-tree run by appending "+" to the version string.
    #
    if (os.path.exists("lisp.py")): lisp.lisp_version += "+"

    #
    # Open the LISP control socket (UDP 4342). When LISP_ANYCAST_MR is set
    # and an RLOC is known, bind to the first local RLOC instead of the
    # wildcard address.
    #
    OoOo0o0OOoO0 = "0.0.0.0" if lisp.lisp_is_raspbian() else "0::0"
    if (os.getenv("LISP_ANYCAST_MR") == None or lisp.lisp_myrlocs[0] == None):
        I1Ii11I1Ii1i = lisp.lisp_open_listen_socket(OoOo0o0OOoO0,
            str(lisp.LISP_CTRL_PORT))
    else:
        OoOo0o0OOoO0 = lisp.lisp_myrlocs[0].print_address_no_iid()
        I1Ii11I1Ii1i = lisp.lisp_open_listen_socket(OoOo0o0OOoO0,
            str(lisp.LISP_CTRL_PORT))

    lisp.lprint("Listen on {}, port 4342".format(OoOo0o0OOoO0))

    #
    # Open the data socket (UDP 4341) unless an external data-plane is in
    # charge of it.
    #
    if (lisp.lisp_external_data_plane() == False):
        oOOoo00O0O = lisp.lisp_open_listen_socket(OoOo0o0OOoO0,
            str(lisp.LISP_DATA_PORT))
        lisp.lprint("Listen on {}, port 4341".format(OoOo0o0OOoO0))

    Ooo = lisp.lisp_open_send_socket("lisp-core", "")
    Ooo.settimeout(3)

    o0oOoO00o = lisp.lisp_open_listen_socket("", "lisp-core-pkt")

    i1 = [I1Ii11I1Ii1i, I1Ii11I1Ii1i, Ooo]

    #
    # Thread that forwards control packets arriving over IPC.
    #
    threading.Thread(target=o0OoOo0O00, args=[o0oOoO00o, i1]).start()

    #
    # Create ./lisp.config from the example file on first startup.
    #
    if (os.path.exists("./lisp.config") == False):
        lisp.lprint(("./lisp.config does not exist, creating a copy " + \
            "from lisp.config.example"))
        O0O0oOOo0O()

    #
    # Join decentralized-push multicast groups, if configured.
    #
    iIiI1I(I1Ii11I1Ii1i)

    threading.Thread(target=lispconfig.lisp_config_process,
        args=[Ooo]).start()

    #
    # Start the web interface and the housekeeping threads: process
    # monitoring and info-source timeouts.
    #
    threading.Thread(target=ooO, args=[bottle_port]).start()
    threading.Thread(target=oOO0O0O0OO00oo, args=[]).start()

    threading.Thread(target=IiI1I1IIIi1i, args=[Ooo]).start()

    threading.Thread(target=iI1I1ii11IIi1).start()
    return(True)
def I1i11():
    lisp.lisp_close_socket(Ooo, "lisp-core")
    lisp.lisp_close_socket(o0oOoO00o, "lisp-core-pkt")
    lisp.lisp_close_socket(I1Ii11I1Ii1i, "")
    lisp.lisp_close_socket(oOOoo00O0O, "")
    return
def iIiI1I(lisp_socket):
    Oo = open("./lisp.config", "r"); O0OOOOo0O = Oo.read(); Oo.close()
    O0OOOOo0O = O0OOOOo0O.split("\n")

    #
    # First pass: is "decentralized-push-xtr = yes" configured? Stop at
    # the "#-...-#" ruler that closes the configured section.
    #
    O0o0OOo0o0o = False
    for OOoO in O0OOOOo0O:
        if (OOoO[0:2] == "#-" and OOoO[-2:] == "-#"): break
        if (OOoO == "" or OOoO[0] == "#"): continue
        if (OOoO.find("decentralized-push-xtr = yes") == -1): continue
        O0o0OOo0o0o = True
        break

    if (O0o0OOo0o0o == False): return

    #
    # Second pass: collect multicast (224/4) group addresses from
    # "lisp map-server" clauses.
    #
    OO0o0oo = []
    o0oo0oOOOo00 = False
    for OOoO in O0OOOOo0O:
        if (OOoO[0:2] == "#-" and OOoO[-2:] == "-#"): break
        if (OOoO == "" or OOoO[0] == "#"): continue

        if (OOoO.find("lisp map-server") != -1):
            o0oo0oOOOo00 = True
            continue

        if (OOoO[0] == "}"):
            o0oo0oOOOo00 = False
            continue

        if (o0oo0oOOOo00 and OOoO.find("address = ") != -1):
            i1I11 = OOoO.split("address = ")[1]
            OoO00 = int(i1I11.split(".")[0])
            if (OoO00 >= 224 and OoO00 < 240): OO0o0oo.append(i1I11)

    if (OO0o0oo == []): return

    #
    # Find our IPv4 address on eth0 to use as the multicast interface.
    #
    Ii1IIi = getoutput('ifconfig eth0 | egrep "inet "')
    if (Ii1IIi == ""): return
    i1IIII1II = Ii1IIi.split()[1]

    #
    # Join each group on the control socket.
    #
    Ii111iIi1iIi = socket.inet_aton(i1IIII1II)
    for i1I11 in OO0o0oo:
        lisp_socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
        lisp_socket.setsockopt(socket.IPPROTO_IP, socket.IP_MULTICAST_IF,
            Ii111iIi1iIi)
        I1iO00O000oOO0oO = socket.inet_aton(i1I11) + Ii111iIi1iIi
        lisp_socket.setsockopt(socket.IPPROTO_IP, socket.IP_ADD_MEMBERSHIP,
            I1iO00O000oOO0oO)
        lisp.lprint("Setting multicast listen socket for group {}".format(i1I11))
    return
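#
# Illustrative note (not part of the original file): IP_ADD_MEMBERSHIP
# expects a packed ip_mreq structure, i.e. 4 bytes of multicast group
# address followed by 4 bytes of local interface address, which is exactly
# what concatenating the two inet_aton() results above builds:
#
#   mreq = socket.inet_aton("224.1.1.1") + socket.inet_aton("192.0.2.10")
#   sock.setsockopt(socket.IPPROTO_IP, socket.IP_ADD_MEMBERSHIP, mreq)
#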
I11iIiiI = int(sys.argv[1]) if (len(sys.argv) > 1) else 8080

if (oOO0O00o0O0(I11iIiiI) == False):
    lisp.lprint("lisp_core_startup() failed")
    lisp.lisp_print_banner("lisp-core abnormal exit")
    exit(1)

#
# Main loop: receive control packets on the 4342 socket and dispatch them.
#
while (True):
    III1iII1I1ii, oOOo0, oo00O00oO, OOoOoO = lisp.lisp_receive(I1Ii11I1Ii1i, False)

    if (oOOo0 == ""): break

    oOOo0 = lisp.lisp_convert_6to4(oOOo0)
    OO0o0o0oo(i1, oOOo0, oo00O00oO, OOoOoO)

I1i11()
lisp.lisp_print_banner("lisp-core normal exit")
exit(0)
|
train_pg_3.py
|
#Reference:
#1. https://github.com/mabirck/CS294-DeepRL/blob/master/lectures/class-5/REINFORCE.py
#2. https://github.com/JamesChuanggg/pytorch-REINFORCE/blob/master/reinforce_continuous.py
#3. https://github.com/pytorch/examples/blob/master/reinforcement_learning/actor_critic.py
# With help from the implementations above, I was finally able to translate the provided TensorFlow skeleton code into the code below
import numpy as np
import tensorflow as tf
import gym
import logz
import scipy.signal
import os
import time
import inspect
from multiprocessing import Process
import torch
from torch.autograd import Variable
import torch.nn as nn
import torch.nn.functional as F
import torch.utils.data as data_utils
import matplotlib.pyplot as plt
from torch.distributions import Categorical
from torch.distributions.multivariate_normal import MultivariateNormal
import random
#============================================================================================#
# Utilities
#============================================================================================#
class Policy_discrete(nn.Module):
def __init__(self, inputdim, outputdim, n_layers, hiddendim, activation, output_activation):
super(Policy_discrete, self).__init__()
if (output_activation==None):
self.original_output=True
else:
self.original_output=False
self.history_of_log_probs=[]
self.layers=nn.ModuleList()
self.activation=activation
self.output_activation=output_activation
self.n_layers=n_layers+1
if self.n_layers==1:
self.layers.append(nn.Linear(inputdim, outputdim))
else:
for i in range(self.n_layers):
if(i==0):
self.layers.append(nn.Linear(inputdim, hiddendim))
elif(i==(self.n_layers-1)):
self.layers.append(nn.Linear(hiddendim, outputdim))
else:
self.layers.append(nn.Linear(hiddendim, hiddendim))
def forward(self, x):
for i, l in enumerate(self.layers):
if (i<(self.n_layers-1)):
x=l(x)
x=self.activation(x)
else:
x=l(x)
if self.original_output:
return x
else:
x=self.output_activation(x)
return x
    def run(self, x):
        x = Variable(x)
        p = self(x)
        if self.original_output:
            d = Categorical(logits=p)
        else:
            # Suppose output_activation produced probabilities (i.e. a
            # softmax activation). This assumption might be false.
            d = Categorical(probs=p)
        action = d.sample()
        log_prob = d.log_prob(action)
        self.history_of_log_probs.append(log_prob)  # learn() consumes these
        return action, log_prob  # haven't checked the type of action, might be buggy here
    def learn(self, optimizer, history_of_rewards, gamma, reward_to_go):
        total_weighted_reward = Variable(torch.zeros(1, 1))
        gradient = Variable(torch.zeros(1, 1))
        loss = 0
        if not reward_to_go:
            # sum up all the reward along the trajectory
            for i in reversed(range(len(history_of_rewards))):
                total_weighted_reward = gamma * total_weighted_reward + history_of_rewards[i]
                gradient += self.history_of_log_probs[i]
            loss = loss - gradient * total_weighted_reward
            loss = loss / len(history_of_rewards)  # in case the episode terminates early
        else:
            # reward-to-go mode
            for i in reversed(range(len(history_of_rewards))):
                total_weighted_reward = gamma * total_weighted_reward + history_of_rewards[i]
                loss = loss - self.history_of_log_probs[i] * total_weighted_reward
            loss = loss / len(history_of_rewards)  # in case the episode terminates early
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
        self.history_of_log_probs = []
class Policy_continuous_hw(nn.Module): #this policy network only outputs the mean of the Gaussian
def __init__(self, inputdim, outputdim, n_layers, hiddendim, activation, output_activation):
super(Policy_continuous_hw, self).__init__()
if (output_activation==None):
self.original_output=True
else:
self.original_output=False
self.history_of_log_probs=[]
self.activation=activation
self.output_activation=output_activation
self.n_layers=n_layers+1
self.logstd_raw=nn.Parameter(torch.ones(outputdim), requires_grad=True)
self.outputid=Variable(torch.eye(outputdim), requires_grad=False)
self.layers=nn.ModuleList()
if self.n_layers==1:
self.layers.append(nn.Linear(inputdim, outputdim))
else:
for i in range(self.n_layers):
if(i==0):
self.layers.append(nn.Linear(inputdim, hiddendim))
elif(i==(self.n_layers-1)):
self.layers.append(nn.Linear(hiddendim, outputdim))
else:
self.layers.append(nn.Linear(hiddendim, hiddendim))
def forward(self, x):
for i, l in enumerate(self.layers):
if (i<(self.n_layers-1)):
x=l(x)
x=self.activation(x)
else:
x=l(x)
if self.original_output:
return x
else:
x=self.output_activation(x)
return x
    def run(self, x):
        x = Variable(x)
        # the action space is continuous
        u = self(x)
        # logstd_raw holds log-sigma, so the diagonal covariance is exp(2*logstd)
        sigma2 = torch.exp(2 * self.logstd_raw) * self.outputid
        d = MultivariateNormal(u, sigma2)
        action = d.sample()
        log_prob = d.log_prob(action)
        self.history_of_log_probs.append(log_prob)  # learn() consumes these
        return action, log_prob
    def learn(self, optimizer, history_of_rewards, gamma, reward_to_go):
        total_weighted_reward = Variable(torch.zeros(1, 1))
        gradient = Variable(torch.zeros(1, 1))
        loss = 0
        if not reward_to_go:
            # sum up all the reward along the trajectory
            for i in reversed(range(len(history_of_rewards))):
                total_weighted_reward = gamma * total_weighted_reward + history_of_rewards[i]
                gradient += self.history_of_log_probs[i]
            loss = loss - (gradient * total_weighted_reward.expand(gradient.size())).sum()
            loss = loss / len(history_of_rewards)  # in case the episode terminates early
        else:
            # reward-to-go mode
            for i in reversed(range(len(history_of_rewards))):
                total_weighted_reward = gamma * total_weighted_reward + history_of_rewards[i]
                loss = loss - (self.history_of_log_probs[i] * total_weighted_reward.expand(self.history_of_log_probs[i].size())).sum()
            loss = loss / len(history_of_rewards)  # in case the episode terminates early
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
        self.history_of_log_probs = []
class Policy_continuous(nn.Module):
    def __init__(self, inputdim, outputdim, n_layers, hiddendim, activation, output_activation):
        super(Policy_continuous, self).__init__()
        if (output_activation == None):
            self.original_output = True
        else:
            self.original_output = False
        self.activation = activation
        self.output_activation = output_activation  # was never stored before, but forward() uses it
        self.history_of_log_probs = []
        self.n_layers = n_layers + 1
        self.layers = nn.ModuleList()
        if self.n_layers == 1:
            # no hidden layers: the mean/logstd heads read the raw input
            headdim = inputdim
        else:
            for i in range(self.n_layers - 1):
                if (i == 0):
                    self.layers.append(nn.Linear(inputdim, hiddendim))
                else:
                    self.layers.append(nn.Linear(hiddendim, hiddendim))
            headdim = hiddendim
        self.mean = nn.Linear(headdim, outputdim)
        self.logstd_raw = nn.Linear(headdim, outputdim)
        self.outputid = Variable(torch.eye(outputdim), requires_grad=False)
def forward(self, x):
for i, l in enumerate(self.layers):
x=l(x)
x=self.activation(x)
u=self.mean(x)
logstd=self.logstd_raw(x)
if self.original_output:
return u, logstd
else:
u=self.output_activation(u)
logstd=self.output_activation(logstd)
return u, logstd
def run(self, x):
x=Variable(x)
u, logstd=self(x)
sigma2=torch.exp(2*logstd)*self.outputid
d=MultivariateNormal(u, sigma2) #might want to use N Gaussian instead
action=d.sample()
log_prob=d.log_prob(action)
self.history_of_log_probs.append(log_prob)
return action, log_prob
    def learn(self, optimizer, history_of_rewards, gamma, reward_to_go):
        total_weighted_reward = Variable(torch.zeros(1, 1))
        gradient = Variable(torch.zeros(1, 1))
        loss = 0
        if not reward_to_go:
            # sum up all the reward along the trajectory
            for i in reversed(range(len(history_of_rewards))):
                total_weighted_reward = gamma * total_weighted_reward + history_of_rewards[i]
                gradient += self.history_of_log_probs[i]
            loss = loss - (gradient * total_weighted_reward.expand(gradient.size())).sum()
            loss = loss / len(history_of_rewards)  # in case the episode terminates early
        else:
            # reward-to-go mode
            for i in reversed(range(len(history_of_rewards))):
                total_weighted_reward = gamma * total_weighted_reward + history_of_rewards[i]
                loss = loss - (self.history_of_log_probs[i] * total_weighted_reward.expand(self.history_of_log_probs[i].size())).sum()
            loss = loss / len(history_of_rewards)  # in case the episode terminates early
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
        self.history_of_log_probs = []
class Critic(nn.Module): #Critic is always discrete
def __init__(self, inputdim, outputdim, n_layers, hiddendim, activation, output_activation):
super(Critic, self).__init__()
if (output_activation==None):
self.original_output=True
else:
self.original_output=False
self.history_of_values=[]
self.layers=nn.ModuleList()
self.activation=activation
self.output_activation=output_activation
self.n_layers=n_layers+1
if self.n_layers==1:
self.layers.append(nn.Linear(inputdim, outputdim))
else:
for i in range(self.n_layers):
if(i==0):
self.layers.append(nn.Linear(inputdim, hiddendim))
elif(i==(self.n_layers-1)):
self.layers.append(nn.Linear(hiddendim, outputdim))
else:
self.layers.append(nn.Linear(hiddendim, hiddendim))
def forward(self, x):
for i, l in enumerate(self.layers):
if (i<(self.n_layers-1)):
x=l(x)
x=self.activation(x)
else:
x=l(x)
if self.original_output:
return x
else:
x=self.output_activation(x)
return x
    def run(self, x):
        x = Variable(x)
        v = self(x)
        self.history_of_values.append(v)  # learn() consumes these
        return v  # haven't checked the type of value, might be buggy here
    def learn(self, optimizer, history_of_rewards, gamma):
        total_weighted_reward = 0
        loss = 0
        history_of_total_weighted_reward = []
        for i in reversed(range(len(history_of_rewards))):
            total_weighted_reward = gamma * total_weighted_reward + history_of_rewards[i]
            history_of_total_weighted_reward.insert(0, total_weighted_reward)
        history_of_total_weighted_reward = torch.tensor(history_of_total_weighted_reward)
        # rescale the reward targets (we do not want to regress on raw Q values)
        reward_u = history_of_total_weighted_reward.mean()
        reward_std = history_of_total_weighted_reward.std() + 1e-8
        history_of_total_weighted_reward = (history_of_total_weighted_reward - reward_u) / reward_std
        for i in range(len(self.history_of_values)):
            loss += F.mse_loss(self.history_of_values[i], history_of_total_weighted_reward[i])
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
        self.history_of_values = []
def build_mlp(
input_placeholder,
output_size,
scope,
n_layers=2,
size=64,
activation=torch.nn.functional.tanh,
output_activation=None,
discrete=True
):
#========================================================================================#
# ----------SECTION 3----------
# Network building
#
# Your code should make a feedforward neural network (also called a multilayer perceptron)
# with 'n_layers' hidden layers of size 'size' units.
#
# The output layer should have size 'output_size' and activation 'output_activation'.
#
# Hint: use tf.layers.dense
#========================================================================================#
if scope=="nn_baseline":
print("critic activated.")
return Critic(input_placeholder, output_size, n_layers, size, activation, output_activation) #Critic is always discrete
else:
#return an actor
if discrete:
print("discrete-type actor activated.")
return Policy_discrete(input_placeholder, output_size, n_layers, size, activation, output_activation)
else:
print("continuous-type actor activated.")
return Policy_continuous(input_placeholder, output_size, n_layers, size, activation, output_activation)
def pathlength(path):
return len(path["reward"])
def reinforce_loss(log_prob, a, num_path):
return - (log_prob.view(-1, 1) * a).sum() / num_path
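# Illustrative sanity check for reinforce_loss (hypothetical numbers, not part
# of the assignment): with per-step log-probs l and advantages a collected over
# num_path trajectories, the estimator is -(sum_i l_i * a_i) / num_path.
#
#   lp = torch.tensor([-0.1, -0.2, -0.3])
#   adv = torch.tensor([[1.0], [0.5], [2.0]])
#   reinforce_loss(lp, adv, 2)  # -> (0.1*1.0 + 0.2*0.5 + 0.3*2.0) / 2 = 0.4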
#============================================================================================#
# Policy Gradient
#============================================================================================#
def train_PG(exp_name='',
env_name='CartPole-v0',
n_iter=100,
gamma=1.0,
min_timesteps_per_batch=1000,
max_path_length=None,
learning_rate=5e-3,
reward_to_go=True,
animate=True,
logdir=None,
normalize_advantages=True,
nn_baseline=False,
seed=0,
# network arguments
n_layers=1,
size=32,
network_activation='tanh'
):
start = time.time()
# Configure output directory for logging
logz.configure_output_dir(logdir)
# Log experimental parameters
    args = inspect.getfullargspec(train_PG)[0]  # getargspec was removed in Python 3.11
locals_ = locals()
params = {k: locals_[k] if k in locals_ else None for k in args}
logz.save_params(params)
# Set random seeds
torch.manual_seed(seed)
np.random.seed(seed)
# Make the gym environment
env = gym.make(env_name)
# Is this env continuous, or discrete?
discrete = isinstance(env.action_space, gym.spaces.Discrete)
# Maximum length for episodes
max_path_length = max_path_length or env.spec.max_episode_steps
#========================================================================================#
# Notes on notation:
#
# Symbolic variables have the prefix sy_, to distinguish them from the numerical values
# that are computed later in the function
#
# Prefixes and suffixes:
# ob - observation
# ac - action
# _no - this tensor should have shape (batch size /n/, observation dim)
# _na - this tensor should have shape (batch size /n/, action dim)
# _n - this tensor should have shape (batch size /n/)
#
# Note: batch size /n/ is defined at runtime, and until then, the shape for that axis
# is None
#========================================================================================#
# Observation and action sizes
ob_dim = env.observation_space.shape[0]
ac_dim = env.action_space.n if discrete else env.action_space.shape[0]
#activation function for the network
if network_activation=='relu':
activation=torch.nn.functional.relu
elif network_activation=='leaky_relu':
activation=torch.nn.functional.leaky_relu
else:
activation=torch.nn.functional.tanh
#todo: create policy
actor=build_mlp(ob_dim, ac_dim, "actor",\
n_layers=n_layers, size=size, activation=activation, discrete=discrete)
actor_loss=reinforce_loss
actor_optimizer=torch.optim.Adam(actor.parameters(), lr=learning_rate)
#todo: initilize Agent:
#========================================================================================#
# ----------SECTION 5----------
# Optional Baseline
#========================================================================================#
if nn_baseline:
critic=build_mlp(ob_dim,1,"nn_baseline",\
n_layers=n_layers,size=size, discrete=discrete)
critic_loss=nn.MSELoss()
critic_optimizer=torch.optim.Adam(critic.parameters(), lr=learning_rate)
#========================================================================================#
# Training Loop
#========================================================================================#
total_timesteps = 0
for itr in range(n_iter):
print("********** Iteration %i ************"%itr)
# Collect paths until we have enough timesteps
timesteps_this_batch = 0
paths = []
while True:
ob = env.reset()
obs, acs, rewards, log_probs = [], [], [], []
animate_this_episode=(len(paths)==0 and (itr % 10 == 0) and animate)
steps = 0
while True:
if animate_this_episode:
env.render()
time.sleep(0.05)
ob = torch.from_numpy(ob).float().unsqueeze(0)
obs.append(ob)
ac, log_prob = actor.run(ob)
acs.append(ac)
log_probs.append(log_prob)
#format the action from policy
if discrete:
ac = int(ac)
else:
ac = ac.squeeze(0).numpy()
ob, rew, done, _ = env.step(ac)
rewards.append(rew)
steps += 1
if done or steps > max_path_length:
break
path = {"observation" : torch.cat(obs, 0),
"reward" : torch.Tensor(rewards),
"action" : torch.cat(acs, 0),
"log_prob" : torch.cat(log_probs, 0)}
paths.append(path)
timesteps_this_batch += pathlength(path)
if timesteps_this_batch > min_timesteps_per_batch:
break
total_timesteps += timesteps_this_batch
ob_no = torch.cat([path["observation"] for path in paths], 0)
ac_na = torch.cat([path["action"] for path in paths], 0)
#====================================================================================#
# ----------SECTION 4----------
# Computing Q-values
#
# Your code should construct numpy arrays for Q-values which will be used to compute
# advantages (which will in turn be fed to the placeholder you defined above).
#
# Recall that the expression for the policy gradient PG is
#
# PG = E_{tau} [sum_{t=0}^T grad log pi(a_t|s_t) * (Q_t - b_t )]
#
# where
#
# tau=(s_0, a_0, ...) is a trajectory,
# Q_t is the Q-value at time t, Q^{pi}(s_t, a_t),
# and b_t is a baseline which may depend on s_t.
#
# You will write code for two cases, controlled by the flag 'reward_to_go':
#
# Case 1: trajectory-based PG
#
# (reward_to_go = False)
#
# Instead of Q^{pi}(s_t, a_t), we use the total discounted reward summed over
# entire trajectory (regardless of which time step the Q-value should be for).
#
# For this case, the policy gradient estimator is
#
# E_{tau} [sum_{t=0}^T grad log pi(a_t|s_t) * Ret(tau)]
#
# where
#
# Ret(tau) = sum_{t'=0}^T gamma^t' r_{t'}.
#
# Thus, you should compute
#
# Q_t = Ret(tau)
#
# Case 2: reward-to-go PG
#
# (reward_to_go = True)
#
# Here, you estimate Q^{pi}(s_t, a_t) by the discounted sum of rewards starting
# from time step t. Thus, you should compute
#
# Q_t = sum_{t'=t}^T gamma^(t'-t) * r_{t'}
#
#
# Store the Q-values for all timesteps and all trajectories in a variable 'q_n',
# like the 'ob_no' and 'ac_na' above.
#
#====================================================================================#
q_n = []
for path in paths:
rewards = path['reward']
num_steps = pathlength(path)
R=[]
if reward_to_go:
for t in range(num_steps):
R.append((torch.pow(gamma, torch.arange(num_steps-t))*rewards[t:]).sum().view(-1,1))
q_n.append(torch.cat(R))
else:
q_n.append((torch.pow(gamma, torch.arange(num_steps)) * rewards).sum() * torch.ones(num_steps, 1))
q_n = torch.cat(q_n, 0)
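        # Worked example of the two estimators above (illustrative numbers):
        # with gamma = 0.5 and rewards [1, 2, 3],
        #   reward_to_go=True  gives Q = [1 + 0.5*2 + 0.25*3, 2 + 0.5*3, 3]
        #                              = [2.75, 3.5, 3.0]
        #   reward_to_go=False gives Q = [2.75, 2.75, 2.75] (Ret(tau) repeated)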
#====================================================================================#
# ----------SECTION 5----------
# Computing Baselines
#====================================================================================#
if nn_baseline:
# If nn_baseline is True, use your neural network to predict reward-to-go
# at each timestep for each trajectory, and save the result in a variable 'b_n'
# like 'ob_no', 'ac_na', and 'q_n'.
#
# Hint #bl1: rescale the output from the nn_baseline to match the statistics
# (mean and std) of the current or previous batch of Q-values. (Goes with Hint
# #bl2 below.)
b_n = critic(ob_no)
q_n_std = q_n.std()
q_n_mean = q_n.mean()
b_n_scaled = b_n * q_n_std + q_n_mean
adv_n = (q_n - b_n_scaled).detach()
else:
adv_n = q_n
#====================================================================================#
# ----------SECTION 4----------
# Advantage Normalization
#====================================================================================#
if normalize_advantages:
# On the next line, implement a trick which is known empirically to reduce variance
# in policy gradient methods: normalize adv_n to have mean zero and std=1.
# YOUR_CODE_HERE
adv_n = (adv_n - adv_n.mean()) / (adv_n.std() + np.finfo(np.float32).eps.item())
#====================================================================================#
# ----------SECTION 5----------
# Optimizing Neural Network Baseline
#====================================================================================#
if nn_baseline:
# ----------SECTION 5----------
# If a neural network baseline is used, set up the targets and the inputs for the
# baseline.
#
# Fit it to the current batch in order to use for the next iteration. Use the
# baseline_update_op you defined earlier.
#
# Hint #bl2: Instead of trying to target raw Q-values directly, rescale the
# targets to have mean zero and std=1. (Goes with Hint #bl1 above.)
# YOUR_CODE_HERE
target = (q_n - q_n_mean) / (q_n_std + np.finfo(np.float32).eps.item())
critic_optimizer.zero_grad()
c_loss = critic_loss(b_n, target)
c_loss.backward()
critic_optimizer.step()
#====================================================================================#
# ----------SECTION 4----------
# Performing the Policy Update
#====================================================================================#
# Call the update operation necessary to perform the policy gradient update based on
# the current batch of rollouts.
#
# For debug purposes, you may wish to save the value of the loss function before
# and after an update, and then log them below.
# YOUR_CODE_HERE
log_probs = torch.cat([path["log_prob"] for path in paths], 0)
actor_optimizer.zero_grad()
loss = actor_loss(log_probs, adv_n, len(paths))
print(loss)
loss.backward()
actor_optimizer.step()
# Log diagnostics
returns = [path["reward"].sum() for path in paths]
ep_lengths = [pathlength(path) for path in paths]
logz.log_tabular("Time", time.time() - start)
logz.log_tabular("Iteration", itr)
logz.log_tabular("AverageReturn", np.mean(returns))
logz.log_tabular("StdReturn", np.std(returns))
logz.log_tabular("MaxReturn", np.max(returns))
logz.log_tabular("MinReturn", np.min(returns))
logz.log_tabular("EpLenMean", np.mean(ep_lengths))
logz.log_tabular("EpLenStd", np.std(ep_lengths))
logz.log_tabular("TimestepsThisBatch", timesteps_this_batch)
logz.log_tabular("TimestepsSoFar", total_timesteps)
logz.dump_tabular()
logz.pickle_tf_vars()
def main():
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('env_name', type=str)
parser.add_argument('--exp_name', type=str, default='vpg')
parser.add_argument('--render', action='store_true')
parser.add_argument('--discount', type=float, default=1.0)
parser.add_argument('--n_iter', '-n', type=int, default=100)
parser.add_argument('--batch_size', '-b', type=int, default=1000)
parser.add_argument('--ep_len', '-ep', type=float, default=-1.)
parser.add_argument('--learning_rate', '-lr', type=float, default=5e-3)
parser.add_argument('--reward_to_go', '-rtg', action='store_true')
parser.add_argument('--dont_normalize_advantages', '-dna', action='store_true')
parser.add_argument('--nn_baseline', '-bl', action='store_true')
parser.add_argument('--seed', type=int, default=1)
parser.add_argument('--n_experiments', '-e', type=int, default=1)
parser.add_argument('--n_layers', '-l', type=int, default=1)
parser.add_argument('--size', '-s', type=int, default=32)
parser.add_argument('--activation', '-a', type=str, default='tanh')
args = parser.parse_args()
if not(os.path.exists('data')):
os.makedirs('data')
logdir = args.exp_name + '_' + args.env_name + '_' + time.strftime("%d-%m-%Y_%H-%M-%S")
logdir = os.path.join('data', logdir)
if not(os.path.exists(logdir)):
os.makedirs(logdir)
max_path_length = args.ep_len if args.ep_len > 0 else None
for e in range(args.n_experiments):
seed = args.seed + 10*e
print('Running experiment with seed %d'%seed)
def train_func():
train_PG(
exp_name=args.exp_name,
env_name=args.env_name,
n_iter=args.n_iter,
gamma=args.discount,
min_timesteps_per_batch=args.batch_size,
max_path_length=max_path_length,
learning_rate=args.learning_rate,
reward_to_go=args.reward_to_go,
animate=args.render,
logdir=os.path.join(logdir,'%d'%seed),
normalize_advantages=not(args.dont_normalize_advantages),
nn_baseline=args.nn_baseline,
seed=seed,
n_layers=args.n_layers,
size=args.size,
network_activation=args.activation
)
# Awkward hacky process runs, because Tensorflow does not like
# repeatedly calling train_PG in the same thread.
p = Process(target=train_func, args=tuple())
p.start()
p.join()
if __name__ == "__main__":
main()
|
scheduler.py
|
import logging
import os
import signal
import time
import traceback
from datetime import datetime
from multiprocessing import Process
from .job import Job
from .queue import Queue
from .registry import ScheduledJobRegistry
from .utils import current_timestamp, enum
from .logutils import setup_loghandlers
from redis import Redis, SSLConnection
SCHEDULER_KEY_TEMPLATE = 'rq:scheduler:%s'
SCHEDULER_LOCKING_KEY_TEMPLATE = 'rq:scheduler-lock:%s'
logger = logging.getLogger(__name__)
setup_loghandlers(
level=logging.INFO,
name="rq.scheduler",
log_format="%(asctime)s: %(message)s",
date_format="%H:%M:%S"
)
class RQScheduler(object):
# STARTED: scheduler has been started but sleeping
# WORKING: scheduler is in the midst of scheduling jobs
# STOPPED: scheduler is in stopped condition
Status = enum(
'SchedulerStatus',
STARTED='started',
WORKING='working',
STOPPED='stopped'
)
def __init__(self, queues, connection, interval=1):
self._queue_names = set(parse_names(queues))
self._acquired_locks = set()
self._scheduled_job_registries = []
self.lock_acquisition_time = None
self._connection_kwargs = connection.connection_pool.connection_kwargs
self._connection_class = connection.__class__ # client
connection_class = connection.connection_pool.connection_class
if issubclass(connection_class, SSLConnection):
self._connection_kwargs['ssl'] = True
self._connection = None
self.interval = interval
self._stop_requested = False
self._status = self.Status.STOPPED
self._process = None
@property
def connection(self):
if self._connection:
return self._connection
self._connection = self._connection_class(**self._connection_kwargs)
return self._connection
@property
def acquired_locks(self):
return self._acquired_locks
@property
def status(self):
return self._status
@property
def should_reacquire_locks(self):
"""Returns True if lock_acquisition_time is longer than 10 minutes ago"""
if self._queue_names == self.acquired_locks:
return False
if not self.lock_acquisition_time:
return True
return (datetime.now() - self.lock_acquisition_time).total_seconds() > 600
def acquire_locks(self, auto_start=False):
"""Returns names of queue it successfully acquires lock on"""
successful_locks = set()
pid = os.getpid()
logger.info("Trying to acquire locks for %s", ", ".join(self._queue_names))
for name in self._queue_names:
if self.connection.set(self.get_locking_key(name), pid, nx=True, ex=60):
successful_locks.add(name)
# Always reset _scheduled_job_registries when acquiring locks
self._scheduled_job_registries = []
self._acquired_locks = self._acquired_locks.union(successful_locks)
self.lock_acquisition_time = datetime.now()
# If auto_start is requested and scheduler is not started,
# run self.start()
if self._acquired_locks and auto_start:
if not self._process:
self.start()
return successful_locks
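    # Illustrative sketch of the locking primitive used above (hypothetical
    # key and queue names, not part of this module): SET with nx=True only
    # succeeds when the key does not yet exist, and ex=60 lets the lock
    # expire on its own if the scheduler dies without releasing it.
    #
    #   r = Redis()
    #   acquired = r.set('rq:scheduler-lock:default', os.getpid(), nx=True, ex=60)
    #   if acquired:
    #       ...  # this process now schedules jobs for the "default" queue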
def prepare_registries(self, queue_names=None):
"""Prepare scheduled job registries for use"""
self._scheduled_job_registries = []
if not queue_names:
queue_names = self._acquired_locks
for name in queue_names:
self._scheduled_job_registries.append(
ScheduledJobRegistry(name, connection=self.connection)
)
@classmethod
def get_locking_key(cls, name):
"""Returns scheduler key for a given queue name"""
return SCHEDULER_LOCKING_KEY_TEMPLATE % name
def enqueue_scheduled_jobs(self):
"""Enqueue jobs whose timestamp is in the past"""
self._status = self.Status.WORKING
if not self._scheduled_job_registries and self._acquired_locks:
self.prepare_registries()
for registry in self._scheduled_job_registries:
timestamp = current_timestamp()
# TODO: try to use Lua script to make get_jobs_to_schedule()
# and remove_jobs() atomic
job_ids = registry.get_jobs_to_schedule(timestamp)
if not job_ids:
continue
queue = Queue(registry.name, connection=self.connection)
with self.connection.pipeline() as pipeline:
jobs = Job.fetch_many(job_ids, connection=self.connection)
for job in jobs:
if job is not None:
queue.enqueue_job(job, pipeline=pipeline)
registry.remove(job, pipeline=pipeline)
pipeline.execute()
self._status = self.Status.STARTED
def _install_signal_handlers(self):
"""Installs signal handlers for handling SIGINT and SIGTERM
gracefully.
"""
signal.signal(signal.SIGINT, self.request_stop)
signal.signal(signal.SIGTERM, self.request_stop)
def request_stop(self, signum=None, frame=None):
"""Toggle self._stop_requested that's checked on every loop"""
self._stop_requested = True
def heartbeat(self):
"""Updates the TTL on scheduler keys and the locks"""
logger.debug("Scheduler sending heartbeat to %s", ", ".join(self.acquired_locks))
if len(self._queue_names) > 1:
with self.connection.pipeline() as pipeline:
for name in self._queue_names:
key = self.get_locking_key(name)
pipeline.expire(key, self.interval + 5)
pipeline.execute()
else:
key = self.get_locking_key(next(iter(self._queue_names)))
self.connection.expire(key, self.interval + 5)
def stop(self):
logger.info("Scheduler stopping, releasing locks for %s...",
','.join(self._queue_names))
keys = [self.get_locking_key(name) for name in self._queue_names]
self.connection.delete(*keys)
self._status = self.Status.STOPPED
def start(self):
self._status = self.Status.STARTED
# Redis instance can't be pickled across processes so we need to
# clean this up before forking
self._connection = None
self._process = Process(target=run, args=(self,), name='Scheduler')
self._process.start()
return self._process
def work(self):
self._install_signal_handlers()
while True:
if self._stop_requested:
self.stop()
break
if self.should_reacquire_locks:
self.acquire_locks()
self.enqueue_scheduled_jobs()
self.heartbeat()
time.sleep(self.interval)
def run(scheduler):
logger.info("Scheduler for %s started with PID %s",
','.join(scheduler._queue_names), os.getpid())
try:
scheduler.work()
except: # noqa
logger.error(
'Scheduler [PID %s] raised an exception.\n%s',
os.getpid(), traceback.format_exc()
)
raise
logger.info("Scheduler with PID %s has stopped", os.getpid())
def parse_names(queues_or_names):
"""Given a list of strings or queues, returns queue names"""
names = []
for queue_or_name in queues_or_names:
if isinstance(queue_or_name, Queue):
names.append(queue_or_name.name)
else:
names.append(str(queue_or_name))
return names
|
program.py
|
"""
TODO: Preserve stderr
- https://stackoverflow.com/questions/31833897/python-read-from-subprocess-stdout-and-stderr-separately-while-preserving-order
- https://stackoverflow.com/questions/12270645/can-you-make-a-python-subprocess-output-stdout-and-stderr-as-usual-but-also-cap
"""
import io
import logging
import sys
from subprocess import Popen, PIPE, STDOUT
from threading import Thread
from typing import Union
from taro.jobs.execution import ExecutionState, ExecutionError, OutputExecution, ExecutionOutputObserver
USE_SHELL = False # For testing only
log = logging.getLogger(__name__)
class ProgramExecution(OutputExecution):
def __init__(self, args, read_output: bool):
self.args = args
self.read_output: bool = read_output
self._popen: Union[Popen, None] = None
self._status = None
self._stopped: bool = False
self._interrupted: bool = False
self._output_observers = []
@property
def is_async(self) -> bool:
return False
def execute(self) -> ExecutionState:
ret_code = -1
if not self._stopped and not self._interrupted:
stdout = PIPE if self.read_output else None
stderr = STDOUT if self.read_output else None
try:
self._popen = Popen(" ".join(self.args) if USE_SHELL else self.args, stdout=stdout, stderr=stderr,
shell=USE_SHELL)
output_reader = None
if self.read_output:
output_reader = Thread(target=self._read_output, name='Output-Reader', daemon=True)
output_reader.start()
# print(psutil.Process(self.popen.pid).memory_info().rss)
ret_code = self._popen.wait()
if output_reader:
output_reader.join(timeout=1)
if ret_code == 0:
return ExecutionState.COMPLETED
except KeyboardInterrupt:
return ExecutionState.STOPPED
except FileNotFoundError as e:
sys.stderr.write(str(e) + "\n")
raise ExecutionError(str(e), ExecutionState.FAILED) from e
except SystemExit as e:
raise ExecutionError('System exit', ExecutionState.INTERRUPTED) from e
if self._stopped:
return ExecutionState.STOPPED
if self._interrupted:
return ExecutionState.INTERRUPTED
raise ExecutionError("Process returned non-zero code " + str(ret_code), ExecutionState.FAILED)
@property
def status(self):
return self._status
def stop(self):
self._stopped = True
if self._popen:
self._popen.terminate()
def interrupt(self):
self._interrupted = True
if self._popen:
self._popen.terminate()
def add_output_observer(self, observer):
self._output_observers.append(observer)
def remove_output_observer(self, observer):
self._output_observers.remove(observer)
def _read_output(self):
for line in io.TextIOWrapper(self._popen.stdout, encoding="utf-8"):
line_stripped = line.rstrip()
self._status = line_stripped
print(line_stripped)
self._notify_output_observers(line_stripped)
def _notify_output_observers(self, output):
for observer in self._output_observers:
# noinspection PyBroadException
try:
if isinstance(observer, ExecutionOutputObserver):
observer.output_update(output)
elif callable(observer):
observer(output)
else:
log.warning("event=[unsupported_output_observer] observer=[%s]", observer)
except BaseException:
                log.exception("event=[output_observer_exception]")
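# Usage sketch (illustrative; assumes a command available on PATH): run a
# program and stream each output line to an observer callback.
#     def on_line(line):
#         print('captured:', line)
#     execution = ProgramExecution(['echo', 'hello'], read_output=True)
#     execution.add_output_observer(on_line)
#     state = execution.execute()  # ExecutionState.COMPLETED on return code 0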
|
parallel.py
|
from __future__ import absolute_import
from __future__ import unicode_literals
import logging
import operator
import sys
from threading import Semaphore
from threading import Thread
from docker.errors import APIError
from six.moves import _thread as thread
from six.moves.queue import Empty
from six.moves.queue import Queue
from compose.cli.colors import green
from compose.cli.colors import red
from compose.cli.signals import ShutdownException
from compose.errors import HealthCheckFailed
from compose.errors import NoHealthCheckConfigured
from compose.errors import OperationFailedError
from compose.utils import get_output_stream
log = logging.getLogger(__name__)
STOP = object()
def parallel_execute(objects, func, get_name, msg, get_deps=None, limit=None):
"""Runs func on objects in parallel while ensuring that func is
    run on an object only after it has been run on all of its dependencies.
    get_deps called on an object must return a collection of its dependencies.
    get_name called on an object must return its name.
"""
objects = list(objects)
stream = get_output_stream(sys.stderr)
writer = ParallelStreamWriter(stream, msg)
for obj in objects:
writer.add_object(get_name(obj))
writer.write_initial()
events = parallel_execute_iter(objects, func, get_deps, limit)
errors = {}
results = []
error_to_reraise = None
for obj, result, exception in events:
if exception is None:
writer.write(get_name(obj), green('done'))
results.append(result)
elif isinstance(exception, APIError):
errors[get_name(obj)] = exception.explanation
writer.write(get_name(obj), red('error'))
elif isinstance(exception, (OperationFailedError, HealthCheckFailed, NoHealthCheckConfigured)):
errors[get_name(obj)] = exception.msg
writer.write(get_name(obj), red('error'))
elif isinstance(exception, UpstreamError):
writer.write(get_name(obj), red('error'))
else:
errors[get_name(obj)] = exception
error_to_reraise = exception
for obj_name, error in errors.items():
stream.write("\nERROR: for {} {}\n".format(obj_name, error))
if error_to_reraise:
raise error_to_reraise
return results, errors
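# Usage sketch (illustrative; 'containers' is assumed to be a list of objects
# with a .name attribute and a .start() method):
#     results, errors = parallel_execute(
#         containers,
#         operator.methodcaller('start'),
#         operator.attrgetter('name'),
#         'Starting',
#     )
# parallel_operation() near the bottom of this module wraps exactly this
# call pattern.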
def _no_deps(x):
return []
class State(object):
"""
Holds the state of a partially-complete parallel operation.
state.started: objects being processed
state.finished: objects which have been processed
state.failed: objects which either failed or whose dependencies failed
"""
def __init__(self, objects):
self.objects = objects
self.started = set()
self.finished = set()
self.failed = set()
def is_done(self):
return len(self.finished) + len(self.failed) >= len(self.objects)
def pending(self):
return set(self.objects) - self.started - self.finished - self.failed
class NoLimit(object):
def __enter__(self):
pass
def __exit__(self, *ex):
pass
def parallel_execute_iter(objects, func, get_deps, limit):
"""
Runs func on objects in parallel while ensuring that func is
    run on an object only after it has been run on all of its dependencies.
Returns an iterator of tuples which look like:
# if func returned normally when run on object
(object, result, None)
# if func raised an exception when run on object
(object, None, exception)
# if func raised an exception when run on one of object's dependencies
(object, None, UpstreamError())
"""
if get_deps is None:
get_deps = _no_deps
if limit is None:
limiter = NoLimit()
else:
limiter = Semaphore(limit)
results = Queue()
state = State(objects)
while True:
feed_queue(objects, func, get_deps, results, state, limiter)
try:
event = results.get(timeout=0.1)
except Empty:
continue
# See https://github.com/docker/compose/issues/189
except thread.error:
raise ShutdownException()
if event is STOP:
break
obj, _, exception = event
if exception is None:
log.debug('Finished processing: {}'.format(obj))
state.finished.add(obj)
else:
log.debug('Failed: {}'.format(obj))
state.failed.add(obj)
yield event
def producer(obj, func, results, limiter):
"""
The entry point for a producer thread which runs func on a single object.
Places a tuple on the results queue once func has either returned or raised.
"""
with limiter:
try:
result = func(obj)
results.put((obj, result, None))
except Exception as e:
results.put((obj, None, e))
def feed_queue(objects, func, get_deps, results, state, limiter):
"""
Starts producer threads for any objects which are ready to be processed
(i.e. they have no dependencies which haven't been successfully processed).
Shortcuts any objects whose dependencies have failed and places an
(object, None, UpstreamError()) tuple on the results queue.
"""
pending = state.pending()
log.debug('Pending: {}'.format(pending))
for obj in pending:
deps = get_deps(obj)
try:
if any(dep[0] in state.failed for dep in deps):
log.debug('{} has upstream errors - not processing'.format(obj))
results.put((obj, None, UpstreamError()))
state.failed.add(obj)
elif all(
dep not in objects or (
dep in state.finished and (not ready_check or ready_check(dep))
) for dep, ready_check in deps
):
log.debug('Starting producer thread for {}'.format(obj))
t = Thread(target=producer, args=(obj, func, results, limiter))
t.daemon = True
t.start()
state.started.add(obj)
except (HealthCheckFailed, NoHealthCheckConfigured) as e:
log.debug(
'Healthcheck for service(s) upstream of {} failed - '
'not processing'.format(obj)
)
results.put((obj, None, e))
if state.is_done():
results.put(STOP)
class UpstreamError(Exception):
pass
class ParallelStreamWriter(object):
"""Write out messages for operations happening in parallel.
Each operation has its own line, and ANSI code characters are used
to jump to the correct line, and write over the line.
"""
noansi = False
@classmethod
def set_noansi(cls, value=True):
cls.noansi = value
def __init__(self, stream, msg):
self.stream = stream
self.msg = msg
self.lines = []
self.width = 0
def add_object(self, obj_index):
self.lines.append(obj_index)
self.width = max(self.width, len(obj_index))
def write_initial(self):
if self.msg is None:
return
for line in self.lines:
self.stream.write("{} {:<{width}} ... \r\n".format(self.msg, line,
width=self.width))
self.stream.flush()
def _write_ansi(self, obj_index, status):
position = self.lines.index(obj_index)
diff = len(self.lines) - position
# move up
self.stream.write("%c[%dA" % (27, diff))
# erase
self.stream.write("%c[2K\r" % 27)
self.stream.write("{} {:<{width}} ... {}\r".format(self.msg, obj_index,
status, width=self.width))
# move back down
self.stream.write("%c[%dB" % (27, diff))
self.stream.flush()
def _write_noansi(self, obj_index, status):
self.stream.write("{} {:<{width}} ... {}\r\n".format(self.msg, obj_index,
status, width=self.width))
self.stream.flush()
def write(self, obj_index, status):
if self.msg is None:
return
if self.noansi:
self._write_noansi(obj_index, status)
else:
self._write_ansi(obj_index, status)
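# Usage sketch (illustrative): the writer is normally driven by
# parallel_execute() above, but can be exercised directly:
#     writer = ParallelStreamWriter(sys.stderr, 'Starting')
#     writer.add_object('web')
#     writer.write_initial()
#     writer.write('web', 'done')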
def parallel_operation(containers, operation, options, message):
parallel_execute(
containers,
operator.methodcaller(operation, **options),
operator.attrgetter('name'),
message,
)
def parallel_remove(containers, options):
stopped_containers = [c for c in containers if not c.is_running]
parallel_operation(stopped_containers, 'remove', options, 'Removing')
def parallel_pause(containers, options):
parallel_operation(containers, 'pause', options, 'Pausing')
def parallel_unpause(containers, options):
parallel_operation(containers, 'unpause', options, 'Unpausing')
def parallel_kill(containers, options):
parallel_operation(containers, 'kill', options, 'Killing')
|
HiwinRA605_socket_ros_test_20190626113918.py
|
#!/usr/bin/env python3
# license removed for brevity
# Receives commands from the strategy side and forwards them over a socket to the control-side PC
import socket
## multithreading
import threading
import time
##
import sys
import os
import numpy as np
import rospy
import matplotlib as plot
from std_msgs.msg import String
from ROS_Socket.srv import *
from ROS_Socket.msg import *
import HiwinRA605_socket_TCPcmd as TCP
import HiwinRA605_socket_Taskcmd as Taskcmd
import enum
Socket = 0
data = '0' # initial value of the transmitted data
Arm_feedback = 1 # assume the arm is busy
state_feedback = 0
NAME = 'socket_server'
client_response = 0 # initial value of the response counter
point_data_flag = False
arm_mode_flag = False
speed_mode_flag = False
Socket_sent_flag = False
##------------class pos-------
class point():
def __init__(self, x, y, z, pitch, roll, yaw):
self.x = x
self.y = y
self.z = z
self.pitch = pitch
self.roll = roll
self.yaw = yaw
pos = point(0,36.8,11.35,-90,0,0)
##------------class socket_cmd---------
class socket_cmd():
def __init__(self, grip, setvel, ra, delay, setboth, action,Speedmode):
self.grip = grip
self.setvel = setvel
self.ra = ra
self.delay = delay
self.setboth = setboth
self.action = action
self.Speedmode = Speedmode
##-----------switch define------------##
class switch(object):
def __init__(self, value):
self.value = value
self.fall = False
    def __iter__(self):
        """Return the match method once, then stop"""
        yield self.match
        # returning here ends the generator; "raise StopIteration" would
        # raise RuntimeError under PEP 479 (Python 3.7+)
def match(self, *args):
"""Indicate whether or not to enter a case suite"""
if self.fall or not args:
return True
elif self.value in args: # changed for v1.5, see below
self.fall = True
return True
else:
return False
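# Usage sketch for the switch recipe above (illustrative; the handler
# functions are hypothetical):
#     for case in switch(command):
#         if case('start'):
#             handle_start()
#             break
#         if case('stop'):
#             handle_stop()
#             break
#         if case():  # default: matches when called with no arguments
#             handle_unknown(command)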
##-----------client feedback arm state----------
def socket_client_arm_state(Arm_state):
global state_feedback
rospy.wait_for_service('arm_state')
try:
Arm_state_client = rospy.ServiceProxy('arm_state', arm_state)
state_feedback = Arm_state_client(Arm_state)
#pos_feedback_times = pos_feedback.response
return state_feedback
except rospy.ServiceException as e:
print ("Service call failed: %s"%e)
##----------socket sent data flag-------------
def socket_client_sent_flag(Sent_flag):
global sent_feedback
rospy.wait_for_service('sent_flag')
try:
Sent_flag_client = rospy.ServiceProxy('sent_flag', sent_flag)
sent_feedback = Sent_flag_client(Sent_flag)
#pos_feedback_times = pos_feedback.response
return sent_feedback
except rospy.ServiceException as e:
print ("Service call failed: %s"%e)
##-----------client feedback arm state end----------
##------------server 端-------
def point_data(req): ## receive pose data sent from the strategy side
global client_response,point_data_flag
pos.x = '%s'%req.x
pos.y = '%s'%req.y
pos.z = '%s'%req.z
pos.pitch = '%s'%req.pitch
pos.roll = '%s'%req.roll
pos.yaw = '%s'%req.yaw
point_data_flag = True
client_response = client_response + 1
return(client_response)
##----------Arm Mode-------------###
def Arm_Mode(req): ## receive arm-mode data sent from the strategy side
global arm_mode_flag
socket_cmd.action = int('%s'%req.action)
socket_cmd.grip = int('%s'%req.grip)
socket_cmd.ra = int('%s'%req.ra)
socket_cmd.setvel = int('%s'%req.vel)
socket_cmd.setboth = int('%s'%req.both)
arm_mode_flag = True
return(1)
##-------Arm Speed Mode------------###
def Speed_Mode(req): ## receive speed-mode data sent from the strategy side
global speed_mode_flag
socket_cmd.Speedmode = int('%s'%req.Speedmode)
speed_mode_flag = True
return(1)
# def Grip_Mode(req): ## receive gripper action data sent from the strategy side
# socket_cmd.grip = int('%s'%req.grip)
# return(1)
def socket_server(): ## create the server node
rospy.init_node(NAME)
a = rospy.Service('arm_mode',arm_mode, Arm_Mode) ##server arm mode data
s = rospy.Service('arm_pos',arm_data, point_data) ##server arm point data
b = rospy.Service('speed_mode',speed_mode, Speed_Mode) ##server speed mode data
#c = rospy.Service('grip_mode',grip_mode, Grip_Mode) ##server grip mode data
print ("Ready to connect")
rospy.spin() ## spin one
##------------server 端 end-------
##---------- socket packet transmission ----------##
def Socket_command():
for case in switch(socket_cmd.action):
#-------PtP Mode--------
if case(Taskcmd.Action_Type.PtoP):
for case in switch(socket_cmd.setboth):
if case(Taskcmd.Ctrl_Mode.CTRL_POS):
data = TCP.SetPtoP(socket_cmd.grip,Taskcmd.RA.ABS,Taskcmd.Ctrl_Mode.CTRL_POS,pos.x,pos.y,pos.z,pos.pitch,pos.roll,pos.yaw,socket_cmd.setvel)
break
if case(Taskcmd.Ctrl_Mode.CTRL_EULER):
data = TCP.SetPtoP(socket_cmd.grip,Taskcmd.RA.ABS,Taskcmd.Ctrl_Mode.CTRL_EULER,pos.x,pos.y,pos.z,pos.pitch,pos.roll,pos.yaw,socket_cmd.setvel)
break
if case(Taskcmd.Ctrl_Mode.CTRL_BOTH):
data = TCP.SetPtoP(socket_cmd.grip,Taskcmd.RA.ABS,Taskcmd.Ctrl_Mode.CTRL_BOTH,pos.x,pos.y,pos.z,pos.pitch,pos.roll,pos.yaw,socket_cmd.setvel)
break
break
#-------Line Mode--------
if case(Taskcmd.Action_Type.Line):
for case in switch(socket_cmd.setboth):
if case(Taskcmd.Ctrl_Mode.CTRL_POS):
data = TCP.SetLine(socket_cmd.grip,Taskcmd.RA.ABS,Taskcmd.Ctrl_Mode.CTRL_POS,pos.x,pos.y,pos.z,pos.pitch,pos.roll,pos.yaw,socket_cmd.setvel)
break
if case(Taskcmd.Ctrl_Mode.CTRL_EULER):
data = TCP.SetLine(socket_cmd.grip,Taskcmd.RA.ABS,Taskcmd.Ctrl_Mode.CTRL_EULER,pos.x,pos.y,pos.z,pos.pitch,pos.roll,pos.yaw,socket_cmd.setvel )
break
if case(Taskcmd.Ctrl_Mode.CTRL_BOTH):
data = TCP.SetLine(socket_cmd.grip,Taskcmd.RA.ABS,Taskcmd.Ctrl_Mode.CTRL_BOTH,pos.x,pos.y,pos.z,pos.pitch,pos.roll,pos.yaw,socket_cmd.setvel )
break
break
        #------- set the arm speed --------
if case(Taskcmd.Action_Type.SetVel):
data = TCP.SetVel(socket_cmd.grip, socket_cmd.setvel)
break
        #------- set the arm delay time --------
if case(Taskcmd.Action_Type.Delay):
data = TCP.SetDelay(socket_cmd.grip,0)
break
        #------- set the arm fast/safe speed mode --------
if case(Taskcmd.Action_Type.Mode):
data = TCP.Set_SpeedMode(socket_cmd.grip,socket_cmd.Speedmode)
break
    socket_cmd.action = 5 ## reset to the initial mode state
    s.send(data.encode('utf-8')) # send over the socket (str must be encoded to bytes)
##-----------socket client--------
def socket_client():
global Arm_feedback,data,Socket_sent_flag,arm_mode_flag
try:
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.connect(('192.168.0.1', 8080))#iclab 5 & iclab hiwin
#s.connect(('192.168.1.102', 8080))#iclab computerx
except socket.error as msg:
print(msg)
sys.exit(1)
    print('Connection established successfully')
print(s.recv(1024))
    #start_input=int(input('press 1 to start transmitting, 3 to quit: ')) # start-command input
start_input = 1
if start_input==1:
while 1:
            ##--------------- send arm commands over the socket -----------------
#if Arm_feedback == 0:
if arm_mode_flag == True:
arm_mode_flag = False
                #------- select mode --------
for case in switch(socket_cmd.action):
#-------PtP Mode--------
if case(Taskcmd.Action_Type.PtoP):
for case in switch(socket_cmd.setboth):
if case(Taskcmd.Ctrl_Mode.CTRL_POS):
data = TCP.SetPtoP(socket_cmd.grip,Taskcmd.RA.ABS,Taskcmd.Ctrl_Mode.CTRL_POS,pos.x,pos.y,pos.z,pos.pitch,pos.roll,pos.yaw,socket_cmd.setvel)
break
if case(Taskcmd.Ctrl_Mode.CTRL_EULER):
data = TCP.SetPtoP(socket_cmd.grip,Taskcmd.RA.ABS,Taskcmd.Ctrl_Mode.CTRL_EULER,pos.x,pos.y,pos.z,pos.pitch,pos.roll,pos.yaw,socket_cmd.setvel)
break
if case(Taskcmd.Ctrl_Mode.CTRL_BOTH):
data = TCP.SetPtoP(socket_cmd.grip,Taskcmd.RA.ABS,Taskcmd.Ctrl_Mode.CTRL_BOTH,pos.x,pos.y,pos.z,pos.pitch,pos.roll,pos.yaw,socket_cmd.setvel)
break
break
#-------Line Mode--------
if case(Taskcmd.Action_Type.Line):
for case in switch(socket_cmd.setboth):
if case(Taskcmd.Ctrl_Mode.CTRL_POS):
data = TCP.SetLine(socket_cmd.grip,Taskcmd.RA.ABS,Taskcmd.Ctrl_Mode.CTRL_POS,pos.x,pos.y,pos.z,pos.pitch,pos.roll,pos.yaw,socket_cmd.setvel)
break
if case(Taskcmd.Ctrl_Mode.CTRL_EULER):
data = TCP.SetLine(socket_cmd.grip,Taskcmd.RA.ABS,Taskcmd.Ctrl_Mode.CTRL_EULER,pos.x,pos.y,pos.z,pos.pitch,pos.roll,pos.yaw,socket_cmd.setvel )
break
if case(Taskcmd.Ctrl_Mode.CTRL_BOTH):
data = TCP.SetLine(socket_cmd.grip,Taskcmd.RA.ABS,Taskcmd.Ctrl_Mode.CTRL_BOTH,pos.x,pos.y,pos.z,pos.pitch,pos.roll,pos.yaw,socket_cmd.setvel )
break
break
                    #------- set the arm speed --------
if case(Taskcmd.Action_Type.SetVel):
data = TCP.SetVel(socket_cmd.grip, socket_cmd.setvel)
break
                    #------- set the arm delay time --------
if case(Taskcmd.Action_Type.Delay):
data = TCP.SetDelay(socket_cmd.grip,0)
break
                    #------- set the arm fast/safe speed mode --------
if case(Taskcmd.Action_Type.Mode):
data = TCP.Set_SpeedMode(socket_cmd.grip,socket_cmd.Speedmode)
break
                socket_cmd.action = 5 ## reset to the initial mode state
                s.send(data.encode('utf-8')) # send over the socket (str must be encoded to bytes)
feedback_str = s.recv(1024)
                # the arm side reports its state
                if str(feedback_str[2]) == '70':# 'F': the arm is Ready to accept the next motion command
Arm_feedback = 0
socket_client_arm_state(Arm_feedback)
#print("isbusy false")
                if str(feedback_str[2]) == '84':# 'T': the arm is busy and cannot execute the next motion command
Arm_feedback = 1
socket_client_arm_state(Arm_feedback)
#print("isbusy true")
                if str(feedback_str[2]) == '54':# '6': strategy finished
Arm_feedback = 6
socket_client_arm_state(Arm_feedback)
print("shutdown")
                # check the send flag
                if str(feedback_str[4]) == '48':# returned '0': false
Socket_sent_flag = False
socket_client_sent_flag(Socket_sent_flag)
                if str(feedback_str[4]) == '49':# returned '1': true
Socket_sent_flag = True
socket_client_sent_flag(Socket_sent_flag)
            ##--------------- send arm commands over the socket end -----------------
if Arm_feedback == Taskcmd.Arm_feedback_Type.shutdown:
rospy.on_shutdown(myhook)
break
if start_input == 3:
pass
s.close()
##-----------socket client end--------
##------------- socket packet transmission end --------------##
## multithreading
def thread_test():
socket_client()
## multithreading end
def myhook():
print ("shutdown time!")
if __name__ == '__main__':
    socket_cmd.action = 5 ## reset to the initial mode state
t = threading.Thread(target=thread_test)
    t.start() # start the worker thread
socket_server()
t.join()
# Ctrl+K Ctrl+C  Add line comment
# Ctrl+K Ctrl+U  Remove line comment
# Ctrl+] / Ctrl+[  Indent/outdent line
|
thread_event.py
|
import threading
from time import sleep
"""
threading.Event: 'wait' for the same Event run 'set' function
threading.Thread: 'join' will blocking till the thread operation ending
"""
def test(n, event):
print('Thread %s is ready' % n)
event.wait()
print('Thread %s is running' % n)
def main():
event1 = threading.Event()
event2 = threading.Event()
# for i in range(2):
# th = threading.Thread(target=test, args=(i, event))
# th.start()
th1 = threading.Thread(target=test, args=(1, event1))
th2 = threading.Thread(target=test, args=(2, event2))
th1.start()
    # th1.join()  # deadlock: main would wait for th1, which waits for event1, which is set only after this line
th2.start()
print('----- event is set -----')
event1.set()
sleep(3)
print('----- event is clear -----')
event1.clear()
if __name__ == '__main__':
main()
|
worker.py
|
# -*- coding: utf-8 -*-
import abc
import multiprocessing
import logging
import random
import threading
import time
from tinyq.exceptions import JobFailedError
from tinyq.job import Job
logger = logging.getLogger(__name__)
class BaseWorker(metaclass=abc.ABCMeta):
@abc.abstractmethod
def run_once(self):
pass
@abc.abstractmethod
def sleep(self):
pass
class SchedulerWorker(BaseWorker):
def __init__(self, schedule_queue, job_queue, sleep_interval=1):
self.queue = schedule_queue
self.job_queue = job_queue
self.sleep_interval = sleep_interval
def run_once(self):
try:
job = self._get_job()
if job is not None:
logger.debug('Schedule new job: {job}.'.format(job=job))
return self._schedule_job(job)
except:
logger.exception('Raise an exception when schedule job!')
def sleep(self):
time.sleep(self.sleep_interval * (1 + random.SystemRandom().random()))
def _get_job(self):
data = self.queue.dequeue()
if data is None:
return
return Job.loads(data)
def _schedule_job(self, job):
logger.debug('Put a new job({job}) into job queue.'.format(job=job))
return self.job_queue.enqueue(job.dumps())
class JobWorker(BaseWorker):
def __init__(self, job_queue, sleep_interval=1):
self.queue = job_queue
self.sleep_interval = sleep_interval
def run_once(self):
try:
job = self._get_job()
if job is not None:
logger.info('Got a job: {job}'.format(job=job))
result = self.run_job(job)
logger.info('Finish run job {job}'.format(job=job))
return result
except:
logger.exception('Raise an exception when run job!')
def sleep(self):
time.sleep(self.sleep_interval * (1 + random.SystemRandom().random()))
def _get_job(self):
data = self.queue.dequeue()
if data is None:
return
return Job.loads(data)
def run_job(self, job):
logger.debug('Start run a job: {job}'.format(job=job))
try:
result = job.run()
logger.debug('Run job({job!r}) success. Result: {result!r}'.format(
job=job, result=result))
return result
except JobFailedError as e:
logger.exception('Run job {job} failed!'.format(job=job))
class BaseWorkerCreator(metaclass=abc.ABCMeta):
@abc.abstractmethod
def create(self, runnable, name):
pass
@abc.abstractmethod
def set_stop(self):
pass
@abc.abstractmethod
def is_stopped(self):
pass
@abc.abstractmethod
def is_alive(self, process):
pass
class ThreadWorkerCreator(BaseWorkerCreator):
def __init__(self):
self.stop_flag = threading.Event()
def create(self, runnable, name):
thread = threading.Thread(target=runnable, name=name)
return thread
def set_stop(self):
self.stop_flag.set()
def is_stopped(self):
return self.stop_flag.is_set()
def is_alive(self, process):
return process.is_alive()
class ProcessWorkerCreator(BaseWorkerCreator):
def __init__(self):
self.stop_flag = multiprocessing.Event()
def create(self, runnable, name):
process = multiprocessing.Process(target=runnable, name=name)
return process
def set_stop(self):
self.stop_flag.set()
def is_stopped(self):
return self.stop_flag.is_set()
def is_alive(self, process):
return process.is_alive()
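# Driver sketch (illustrative; the queue wiring is an assumption, not part of
# this module): alternate run_once() and sleep() until the stop flag is set.
#     creator = ThreadWorkerCreator()
#     worker = JobWorker(job_queue)  # job_queue must provide dequeue()
#     def runnable():
#         while not creator.is_stopped():
#             worker.run_once()
#             worker.sleep()
#     t = creator.create(runnable, name='job-worker')
#     t.start()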
|
util.py
|
import sys
import time
from math import ceil
import threading
class ProgressBar():
def __init__(self, bar_length=40, slug='#', space='-', countdown=True):
self.bar_length = bar_length
self.slug = slug
self.space = space
self.countdown = countdown
        self.start_time = None
        self.start_percent = 0
    def bar(self, percent, end=1, tail=''):
        percent = percent / end
        if self.countdown:
            progress = percent - self.start_percent
            if self.start_time is None:
                self.start_time = time.perf_counter()
                self.start_percent = percent
                remain = 'Remain --:--:--'
            elif progress == 0:
                remain = 'Remain --:--:--'
            else:
                elapsed_time = time.perf_counter() - self.start_time
                progress = percent - self.start_percent
remain_t = (elapsed_time / progress) * (1 - percent)
remain_t = ceil(remain_t)
h = remain_t // 3600
m = remain_t % 3600 // 60
s = remain_t % 60
remain = 'Remain %02d:%02d:%02d' % (h, m, s)
else:
remain = ''
len_slugs = int(percent * self.bar_length)
slugs = self.slug * len_slugs
spaces = self.space * (self.bar_length - len_slugs)
txt = '\r[{bar}] {percent:.1%} {remain} {tail}'.format(
bar=(slugs + spaces), percent=percent,
remain=remain, tail=tail)
if percent == 1:
txt += '\n'
self.start_time = None
sys.stdout.write(txt)
sys.stdout.flush()
class ProgressBar2(ProgressBar):
    def __init__(self, end, bar_length=40, slug='#', space='-', countdown=True):
        super().__init__(bar_length=bar_length, slug=slug, space=space, countdown=countdown)
self.counter = 0
self.end = end
self.bar()
def bar(self, tail=''):
        super().bar(self.counter, end=self.end, tail=tail)
self.counter += 1
class Propeller:
def __init__(self, charlist=None, sleep=0.1):
        if charlist is None:
            charlist = ['|', '/', '-', '\\']
        self.charlist = charlist
self.sleep = sleep
self.working = True
def progress(self):
N = len(self.charlist)
i = 0
sys.stdout.write(' ')
while self.working:
sys.stdout.write('\b' + self.charlist[i])
sys.stdout.flush()
time.sleep(self.sleep)
i = (i + 1) % N
sys.stdout.write('\b' + 'done\n')
sys.stdout.flush()
def start(self):
self.working = True
self.thread = threading.Thread(target=self.progress)
self.thread.start()
def end(self):
self.working = False
self.thread.join()
if __name__ == '__main__':
N = 100
pg = ProgressBar()
for n in range(N):
pg.bar(n, N)
time.sleep(0.02)
pg.bar(1)
N = 100
pg = ProgressBar2(N)
for n in range(N):
time.sleep(0.02)
pg.bar()
p = Propeller()
p.start()
time.sleep(3)
p.end()
|
pre_inject_for_ycsb.py
|
import sys
import redis
import random
import threading
# [start, end)
def thread_func(r: redis.StrictRedis, start, end):
for i in range(start, end):
key = "key_" + str(i)
val = "val_" + str(random.randint(0,9)) * random.randint(20, 2000)
r.set(name=key, value=val)
def _main():
if len(sys.argv) != 6:
print("argument number not correct, use python3 pre_inject_for_ycsb.py <ip> <port> <db> <key_num> <thread_num>")
return
ip = str(sys.argv[1])
port = str(sys.argv[2])
db = int(sys.argv[3])
key_num = int(sys.argv[4])
thread_num = int(sys.argv[5])
pool = redis.ConnectionPool(host=ip,
port=port,
db=db,
decode_responses=True,
encoding='utf-8',
socket_connect_timeout=2)
r = redis.StrictRedis(connection_pool=pool)
scope = int(key_num/thread_num)
start = 0
threads = []
for tid in range(0, thread_num):
if tid != thread_num - 1:
end = start + scope
else:
end = key_num
thread = threading.Thread(target=thread_func, args=(r, start, end))
thread.start()
threads.append(thread)
start = end
for t in threads:
t.join()
print(f"finish inject for ycsb test, key num = {key_num}")
if __name__ == '__main__':
_main()
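# Usage sketch (illustrative): inject 100000 keys into db 0 of a local Redis
# instance using 8 threads:
#     python3 pre_inject_for_ycsb.py 127.0.0.1 6379 0 100000 8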
|
arduino.py
|
import threading
import serial
import const
from helpers import d
s = None
stop_threads = False
def init_serial():
global s
s = serial.Serial(const.ARDUINO_PORT, const.ARDUINO_BAUDRATE, timeout=const.ARDUINO_TIMEOUT)
def set_default_value(key, val):
if key == 'COLOR_RED':
const.CURRENT_COLOR[0] = val
elif key == 'COLOR_GREEN':
const.CURRENT_COLOR[1] = val
elif key == 'COLOR_BLUE':
const.CURRENT_COLOR[2] = val
elif key == 'MODE':
const.CURRENT_MODE = val
elif key == 'BRIGHTNESS':
const.CURRENT_BRIGHTNESS = val
elif key == 'SPEED':
const.CURRENT_SPEED = val
elif key == 'MICROPHONE_MODE':
const.CURRENT_MICROPHONE_MODE = val
def parseLine(line):
if len(line) and line[0] == "@":
v = line[1:].split(": ")
set_default_value(v[0], int(v[1]))
d(line)
def receive():
global s
line = ""
while True:
if stop_threads:
break
for b in s.read():
if b == b"":
continue
c = chr(b)
if c == "\n":
parseLine(line)
line = ""
elif c != "\r":
line += c
def send(cmd):
d("> %s" % cmd)
s.write((cmd + "\n").encode())
def start_reading_loop():
t = threading.Thread(target=receive)
t.start()
send("v")
def stop():
global stop_threads
stop_threads = True
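# Usage sketch (illustrative; "v" is the value-request command already sent in
# start_reading_loop(), other command strings depend on the firmware):
#     init_serial()
#     start_reading_loop()  # spawns the receive() thread
#     send("v")             # request the current values again
#     stop()                # signal the reader thread to exit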
|
MMA8451ToMSeed.py
|
#!/usr/bin/python3
# Simple demo of fifo mode on the MMA8451.
# Author: Philip Crotwell
import time
import json
import struct
import queue
from threading import Thread
from datetime import datetime, timedelta, timezone
import sys, os, signal
import socket
from pathlib import Path
import asyncio
import traceback
import faulthandler
from multiprocessing import Process, Queue, Pipe
from peakACC import peakAccelerationCalculation, compareSendPeakAccel
faulthandler.enable()
#import logging
#logging.basicConfig(filename='xbee.log',level=logging.DEBUG)
# to generate fake data as PI99 set to True
doFake = False
doReplay = False
if doFake:
import fakeSensor
elif doReplay:
import fakeSensor
import replaySensor
else:
# real thing
import board
import busio
import RPi.GPIO as GPIO
import adafruit_mma8451
import simpleMiniseed
import simpleDali
import dataBuffer
import decimate
verbose = True
#daliHost="129.252.35.36"
daliHost="74.207.233.105"
daliPort=6382
dali=None
#daliUri = "wss://www.seis.sc.edu/dragracews/datalink"
daliUri = "wss://74.207.233.105/datalink"
pin = 18 # GPIO interrupt
#MAX_SAMPLES = 2000
MAX_SAMPLES = -1
#MAX_SAMPLES = 1
doDali = True
doMSeedDali = False
doArchive = True
doFIR = True
doMultiprocess = True
decimationFactor = 1
if doFIR:
decimationFactor = 2
quitOnError = True
if not doFake and not doReplay:
# Initialize I2C bus.
i2c = busio.I2C(board.SCL, board.SDA)
# Initialize MMA8451 module.
sensor = adafruit_mma8451.MMA8451(i2c)
# Optionally change the address if it's not the default:
#sensor = adafruit_mma8451.MMA8451(i2c, address=0x1C)
print("reset sensor")
sensor.reset()
print("remove gpio interrupt pin")
GPIO.setup(pin, GPIO.IN, pull_up_down=GPIO.PUD_UP)
GPIO.remove_event_detect(pin)
else:
if doFake:
sensor = fakeSensor.FakeSensor()
else:
############################################
# values to change:
dataDir="Track_Data"
netGlob = "XX"
staGlob = "XB08"
locGlob = "00"
chanGlob = "HN[XYZ]"
# if need seconds add :%S
startTime = datetime.strptime("2018-10-14T11:00Z", "%Y-%m-%dT%H:%MZ").replace(tzinfo=timezone.utc)
duration = timedelta(hours=2)
staRemap = {
'XB02': 'PI02',
'XB03': 'PI03',
'XB05': 'PI05',
'XB08': 'PI05',
'XB10': 'PI10'
}
        #duration = timedelta(seconds=20)
repeat = True
# end values to change
############################################
replaySta = staRemap[staGlob]
sensor = replaySensor.ReplaySensor(dataDir, netGlob, staGlob, locGlob, chanGlob, startTime, staRemap, duration=None)
sensor.verbose = True
doFIR = False
adafruit_mma8451 = fakeSensor.FakeAdafruit()
# Optionally change the range from its default of +/-4G:
sensor.range = adafruit_mma8451.RANGE_2G # +/- 2G
#sensor.range = adafruit_mma8451.RANGE_4G # +/- 4G (default)
#sensor.range = adafruit_mma8451.RANGE_8G # +/- 8G
# Optionally change the data rate from its default of 800hz:
#sensor.data_rate = adafruit_mma8451.DATARATE_800HZ # 800Hz (default)
sensor.data_rate = adafruit_mma8451.DATARATE_400HZ # 400Hz
#sensor.data_rate = adafruit_mma8451.DATARATE_200HZ # 200Hz
#sensor.data_rate = adafruit_mma8451.DATARATE_100HZ # 100Hz
#sensor.data_rate = adafruit_mma8451.DATARATE_50HZ # 50Hz
#sensor.data_rate = adafruit_mma8451.DATARATE_12_5HZ # 12.5Hz
#sensor.data_rate = adafruit_mma8451.DATARATE_6_25HZ # 6.25Hz
#sensor.data_rate = adafruit_mma8451.DATARATE_1_56HZ # 1.56Hz
sta="UNKNW"
net = "XX"
loc = "00"
chanMap = { "X":"HNX", "Y":"HNY", "Z":"HNZ"}
decimateMap = {}
if doFIR:
if doMultiprocess:
decimateMap = {
"X":decimate.DecimateTwoSubprocess(),
"Y":decimate.DecimateTwoSubprocess(),
"Z":decimate.DecimateTwoSubprocess(),
}
else:
decimateMap = {
"X":decimate.DecimateTwo(),
"Y":decimate.DecimateTwo(),
"Z":decimate.DecimateTwo(),
}
establishedJson = None
maxWindow = timedelta(seconds=0.25)
theta = 0.0
alpha = 0.0
def getSps():
sps = 1
if sensor.data_rate == adafruit_mma8451.DATARATE_800HZ:
sps = 800
elif sensor.data_rate == adafruit_mma8451.DATARATE_400HZ:
sps = 400
elif sensor.data_rate == adafruit_mma8451.DATARATE_200HZ:
sps = 200
elif sensor.data_rate == adafruit_mma8451.DATARATE_100HZ:
sps = 100
elif sensor.data_rate == adafruit_mma8451.DATARATE_50HZ:
sps = 50
return sps
def getGain():
gain = 0
if sensor.range == adafruit_mma8451.RANGE_2G:
gain = 2
elif sensor.range == adafruit_mma8451.RANGE_4G:
gain = 4
elif sensor.range == adafruit_mma8451.RANGE_8G:
gain = 8
return gain
before = time.perf_counter()
after = before
totalSamples = 0
sentSamples = 0
loops = 0
keepGoing = True
curFile = None
curFilename = None
startACQ = False
sps = 0
gain = 0
def dataCallback(now, status, samplesAvail, data):
item = now, status, samplesAvail, data
if samplesAvail < 28:
print("callback {}".format(samplesAvail))
dataQueue.put(item)
def sending_worker():
try:
print("starting worker")
global keepGoing
global startACQ
global dataQueue
global sps
global token
global doDali
my_loop = asyncio.new_event_loop()
asyncio.set_event_loop(my_loop)
my_loop.set_debug(True)
dali = None
if doDali:
print("try to init dali")
dali = initDali(daliUri)
dali.verbose = False
programname="MMA8451ToMseed"
username="me"
processid="0"
architecture="python"
task = my_loop.create_task(dali.id( programname, username, processid, architecture))
my_loop.run_until_complete(task)
r = task.result()
print("id respones {}".format(r))
if token:
authTask = my_loop.create_task(authorize(dali, token))
my_loop.run_until_complete(authTask)
if doMSeedDali:
for key, chan in chanMap.items():
miniseedBuffers[chan].dali = dali
while keepGoing:
try:
item = dataQueue.get(timeout=2)
if item is None or not keepGoing:
print("Worker exiting")
dataQueue.task_done()
break
now, status, samplesAvail, data = item
try:
do_work( now, status, samplesAvail, data, dali)
dataQueue.task_done()
except Exception as err:
# try once more?
#do_work(now, status, samplesAvail, data)
print("Exception sending packet: {}".format(err), file=sys.stderr)
traceback.print_exc()
if dali:
                        # if at first you don't succeed, try again
                        time.sleep(3)
                        task = my_loop.create_task(dali.reconnect())
                        my_loop.run_until_complete(task)
                        r = task.result()
                        print("reconnect response {}".format(r))
                        try:
                            do_work(now, status, samplesAvail, data, dali)
                            dataQueue.task_done()
except Exception as err:
dataQueue.task_done()
print("2nd Exception sending packet: {}".format(err), file=sys.stderr)
traceback.print_exc()
if quitOnError:
keepGoing = False
except queue.Empty:
print("no data in queue??? startACQ={0:b}".format(startACQ))
except Exception as err:
keepGoing = False
print("send thread fail on {}".format(err), file=sys.stderr)
traceback.print_exc()
print("Worker exited")
    if dali is not None:
        closeTask = my_loop.create_task(dali.close())
        my_loop.run_until_complete(closeTask)
cleanUp()
asyncio.get_event_loop().close()
def do_work(now, status, samplesAvail, data, dali):
global startACQ
global totalSamples
global sentSamples
global loops
global before
global pin
global keepGoing
global after
global sps
loops += 1
#print("status: {0:d} {0:08b} samps: {1:d} len(data): {2:d} queue: {3:d}".format(status, samplesAvail, len(data), dataQueue.qsize()))
if status >>7 != 0:
print("overflow at loops={0:d}".format(loops))
if len(data) < samplesAvail:
raise Exception("Not enough samples avail: len={:d}, sampsAvail={:d}".format(len(data), samplesAvail))
sendResult = None
if samplesAvail != 0:
sendResult = sendToMseed(now, status, samplesAvail, data, dali)
totalSamples += samplesAvail
if (MAX_SAMPLES != -1 and totalSamples > MAX_SAMPLES):
after = time.perf_counter()
delta = after-before
print("time take for {0:d} loops is {1:3.2f}, {2:d} samples at {3:3.2f} sps, nomSps={4:d}".format(loops, delta, totalSamples, (totalSamples-1)/delta, getSps()))
sensor.reset()
GPIO.remove_event_detect(pin)
keepGoing = False
return sendResult
def getDali():
global daliUri
global dali, doDali
if (doDali and dali is None):
dali = initDali(daliUri)
return dali
def sendToFile(now, dataPacket):
global curFile
global curFilename
filename = now.strftime("data_%Y-%m-%d_%H")
if curFilename != filename:
if curFile != None:
curFile.close()
curFile = open(filename, "ab")
curFile.write(dataPacket)
return "write to {}".format(filename)
def sendToMseed(last_sample_time, status, samplesAvail, data, dali):
global staString
global sta
global net
global loc
global chanMap
global establishedJson
global maxWindow
global theta
global alpha
global sps
global firDelay
if samplesAvail <= 0:
# no data???
return
dataIdx = 0
start = last_sample_time - timedelta(seconds=1.0*(samplesAvail-1)/sps)
xData, yData, zData = sensor.demux(data)
if doFIR:
start = start - firDelay
if doMultiprocess:
decimateMap["X"].process(xData)
decimateMap["Y"].process(yData)
decimateMap["Z"].process(zData)
xData = decimateMap["X"].results()
yData = decimateMap["Y"].results()
zData = decimateMap["Z"].results()
else:
xData = decimateMap["X"].process(xData)
yData = decimateMap["Y"].process(yData)
zData = decimateMap["Z"].process(zData)
freshJson = peakAccelerationCalculation(xData,yData,zData,theta,alpha,sta,start,last_sample_time,gain)
establishedJson = compareSendPeakAccel(establishedJson, freshJson, dali, maxWindow)
miniseedBuffers[chanMap["Z"]].push(start, zData)
miniseedBuffers[chanMap["Y"]].push(start, yData)
miniseedBuffers[chanMap["X"]].push(start, xData)
if verbose:
print("sendToMseed {} {} {}".format(sta, start, len(xData)))
def initDali(daliUri):
print("Init Dali at {0}".format(daliUri))
#dl = simpleDali.WebSocketDataLink(daliUri)
dl = simpleDali.SocketDataLink(daliHost, daliPort)
return dl
async def authorize(daliUpload, token):
global keepGoing
if token and daliUpload:
authResp = await daliUpload.auth(token)
print("auth: {}".format(authResp))
if authResp.type == 'ERROR':
print("AUTHORIZATION failed, quiting...")
keepGoing = False
raise Exception("AUTHORIZATION failed, {} {}".format(authResp.type, authResp.message))
def getLocalHostname():
if not doFake:
hostname = socket.gethostname().split('.')[0]
else:
hostname = 'PI99'
#with open("/etc/hostname") as hF:
# hostname = hF.read()
return hostname.strip()
def handleSignal(sigNum, stackFrame):
print("############ handleSignal {} ############".format(sigNum))
doQuit()
def doQuit():
global keepGoing
if keepGoing:
keepGoing = False
else:
sys.exit(0)
def cleanUp():
global keepGoing
keepGoing = False
if sensor is not None:
if not doFake:
print("remove gpio interrupt pin")
GPIO.remove_event_detect(pin)
print("reset sensor")
sensor.reset()
for key in miniseedBuffers:
try:
miniseedBuffers[key].close()
except Exception as err:
print(err)
def busyWaitStdInReader():
global keepGoing
while keepGoing:
line = sys.stdin.readline()
if (line.startswith("q")):
keepGoing = False
break
time.sleep(1.0)
async def getConfig():
# FIX... only gets one packet and then stops listening
configDali = initDali(daliUri)
await configDali.match("/ZMAXCFG")
await configDali.positionAfter(simpleDali.utcnowWithTz()-timedelta(seconds=90))
await configDali.stream()
while(True):
print("wait for packets")
peakPacket = await configDali.parseResponse()
print("got a packet: {}".format(peakPacket.streamId,))
if peakPacket.streamId.endswith("ZMAXCFG"):
            config = json.loads(peakPacket.data.decode("UTF-8"))
await configDali.close()
return config
signal.signal(signal.SIGTERM, handleSignal)
signal.signal(signal.SIGINT, handleSignal)
hostname = getLocalHostname()[0:5].upper()
sta = hostname
if doReplay:
hostname = replaySta
print("set station code to {}".format(sta))
sps = getSps()
firDelay = decimate.DecimateTwo().FIR.calcDelay(sps)
gain = getGain()
if doDali:
try:
loop = asyncio.get_event_loop()
print("Try to get station from config via dali")
configTask = loop.create_task(getConfig())
loop.run_until_complete(configTask)
config = configTask.result()
if hostname in config["Loc"] and config["Loc"][hostname] != "NO":
sta = config["Loc"][hostname]
theta = config["LocInfo"][sta]["Angles"]["Theta"]
alpha = config["LocInfo"][sta]["Angles"]["Alpha"]
print("set station code from config to {}".format(sta))
print("from config Theta={}".format(theta))
print("from config Alpha={}".format(alpha))
else:
print("host not in config, keep default name {}".format(sta))
token=None
# load token
## with open("pi_token.jwt") as f:
# token = f.readline().strip()
print("init DataLink at {0}".format(daliUri))
except ValueError as err:
raise Exception("Unable to init DataLink at {0} {2}".format(daliUri, err))
miniseedBuffers = dict()
for key, chan in chanMap.items():
miniseedBuffers[chan] = dataBuffer.DataBuffer(net, sta, loc, chan,
getSps()/decimationFactor, archive=doArchive,
encoding=simpleMiniseed.ENC_SHORT, dali=dali,
continuityFactor=5, verbose=False)
stdinThread = Thread(target = busyWaitStdInReader)
stdinThread.daemon=True
print("stdinThread start")
stdinThread.start()
dataQueue = queue.Queue()
print("before create thead")
sendThread = Thread(target = sending_worker)
sendThread.daemon=True
print("thread start")
sendThread.start()
time.sleep(1)
print("after thread start sleep")
try:
print('task creation started')
before = time.perf_counter()
sensor.enableFifoBuffer(28, pin, dataCallback)
while keepGoing:
time.sleep(0.1)
print("before sendThread.join()")
sendThread.join()
print("after sendThread.join()")
finally:
doQuit()
print("main finally")
#cleanUp()
print("Main thread done")
|
threading_event.py
|
#!/usr/bin/env python3
# -*- coding:utf-8 -*-
import threading
import time
event = threading.Event()
def trafficlight():
count = 0
while True:
if count < 5:
event.set()
print('=======green========')
elif count >= 5 and count < 10:
event.clear()
print('=======red========')
else:
count = 0
continue
time.sleep(1)
count += 1
def car(name):
while True:
if event.is_set():
print('[%s] running' % name)
time.sleep(1)
else:
print('[%s] waiting' % name)
            # wait for the event to be set
event.wait()
if __name__ == '__main__':
light = threading.Thread(target=trafficlight,)
light.start()
c = threading.Thread(target=car, args=('Tesla',))
c.start()
|
rbssh.py
|
#!/usr/bin/env python
#
# rbssh.py -- A custom SSH client for use in Review Board.
#
# This is used as an ssh replacement that can be used across platforms with
# a custom .ssh directory. OpenSSH doesn't respect $HOME, instead reading
# /etc/passwd directly, which causes problems for us. Using rbssh, we can
# work around this.
#
#
# Copyright (c) 2010-2011 Beanbag, Inc.
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
# IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
# CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
# TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
from __future__ import unicode_literals
import getpass
import logging
import os
import select
import sys
import warnings
from optparse import OptionParser
if str('RBSITE_PYTHONPATH') in os.environ:
for path in reversed(os.environ[str('RBSITE_PYTHONPATH')].split(str(':'))):
sys.path.insert(1, path)
import django
import paramiko
from django.utils import six
from reviewboard import initialize, get_version_string
from reviewboard.scmtools.core import SCMTool
from reviewboard.ssh.client import SSHClient
DEBUG = os.getenv('DEBUG_RBSSH')
DEBUG_LOGDIR = os.getenv('RBSSH_LOG_DIR')
SSH_PORT = 22
options = None
class PlatformHandler(object):
"""A generic base class for wrapping platform-specific operations.
This should be subclassed for each major platform.
"""
def __init__(self, channel):
"""Initialize the handler."""
self.channel = channel
if six.PY3:
self.write_stdout = sys.stdout.buffer.write
self.write_stderr = sys.stderr.buffer.write
else:
self.write_stdout = sys.stdout.write
self.write_stderr = sys.stderr.write
def shell(self):
"""Open a shell."""
raise NotImplementedError
def transfer(self):
"""Transfer data over the channel."""
raise NotImplementedError
def process_channel(self, channel):
"""Process the given channel."""
if channel.closed:
return False
logging.debug('!! process_channel\n')
if channel.recv_ready():
data = channel.recv(4096)
if not data:
logging.debug('!! stdout empty\n')
return False
self.write_stdout(data)
sys.stdout.flush()
if channel.recv_stderr_ready():
data = channel.recv_stderr(4096)
if not data:
logging.debug('!! stderr empty\n')
return False
self.write_stderr(data)
sys.stderr.flush()
if channel.exit_status_ready():
logging.debug('!!! exit_status_ready\n')
return False
return True
def process_stdin(self, channel):
"""Read data from stdin and send it over the channel."""
logging.debug('!! process_stdin\n')
try:
buf = os.read(sys.stdin.fileno(), 1)
except OSError:
buf = None
if not buf:
logging.debug('!! stdin empty\n')
return False
channel.send(buf)
return True
class PosixHandler(PlatformHandler):
"""A platform handler for POSIX-type platforms."""
def shell(self):
"""Open a shell."""
import termios
import tty
oldtty = termios.tcgetattr(sys.stdin)
try:
tty.setraw(sys.stdin.fileno())
tty.setcbreak(sys.stdin.fileno())
self.handle_communications()
finally:
termios.tcsetattr(sys.stdin, termios.TCSADRAIN, oldtty)
def transfer(self):
"""Transfer data over the channel."""
import fcntl
fd = sys.stdin.fileno()
fl = fcntl.fcntl(fd, fcntl.F_GETFL)
fcntl.fcntl(fd, fcntl.F_SETFL, fl | os.O_NONBLOCK)
self.handle_communications()
def handle_communications(self):
"""Handle any pending data over the channel or stdin."""
while True:
rl, wl, el = select.select([self.channel, sys.stdin], [], [])
if self.channel in rl:
if not self.process_channel(self.channel):
break
if sys.stdin in rl:
if not self.process_stdin(self.channel):
self.channel.shutdown_write()
break
class WindowsHandler(PlatformHandler):
"""A platform handler for Microsoft Windows platforms."""
def shell(self):
"""Open a shell."""
self.handle_communications()
def transfer(self):
"""Transfer data over the channel."""
self.handle_communications()
def handle_communications(self):
"""Handle any pending data over the channel or stdin."""
import threading
logging.debug('!! begin_windows_transfer\n')
self.channel.setblocking(0)
def writeall(channel):
while self.process_channel(channel):
pass
logging.debug('!! Shutting down reading\n')
channel.shutdown_read()
writer = threading.Thread(target=writeall, args=(self.channel,))
writer.start()
try:
while self.process_stdin(self.channel):
pass
except EOFError:
pass
logging.debug('!! Shutting down writing\n')
self.channel.shutdown_write()
def print_version(option, opt, value, parser):
"""Print the current version and exit."""
parser.print_version()
sys.exit(0)
def parse_options(args):
"""Parse the given arguments into the global ``options`` dictionary."""
global options
hostname = None
parser = OptionParser(usage='%prog [options] [user@]hostname [command]',
version='%prog ' + get_version_string())
parser.disable_interspersed_args()
parser.add_option('-l',
dest='username', metavar='USERNAME', default=None,
help='the user to log in as on the remote machine')
parser.add_option('-p', '--port',
type='int', dest='port', metavar='PORT', default=None,
help='the port to connect to')
parser.add_option('-q', '--quiet',
action='store_true', dest='quiet', default=False,
help='suppress any unnecessary output')
parser.add_option('-s',
dest='subsystem', metavar='SUBSYSTEM', default=None,
nargs=2,
help='the subsystem to use (ssh or sftp)')
parser.add_option('-V',
action='callback', callback=print_version,
help='display the version information and exit')
parser.add_option('--rb-disallow-agent',
action='store_false', dest='allow_agent',
default=os.getenv('RBSSH_ALLOW_AGENT') != '0',
help='disable using the SSH agent for authentication')
parser.add_option('--rb-local-site',
dest='local_site_name', metavar='NAME',
default=os.getenv('RB_LOCAL_SITE'),
help='the local site name containing the SSH keys to '
'use')
(options, args) = parser.parse_args(args)
if options.subsystem:
if len(options.subsystem) != 2:
parser.error('-s requires a hostname and a valid subsystem')
elif options.subsystem[1] not in ('sftp', 'ssh'):
parser.error('Invalid subsystem %s' % options.subsystem[1])
hostname, options.subsystem = options.subsystem
if len(args) == 0 and not hostname:
parser.print_help()
sys.exit(1)
if not hostname:
hostname = args[0]
args = args[1:]
if options.port:
port = options.port
else:
port = SSH_PORT
return hostname, port, args
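# Invocation sketch (illustrative; host, user, and command are placeholders):
#     rbssh -p 2222 -l reviewbot codehost.example.com git upload-pack repo.git
#     rbssh -s codehost.example.com sftp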
def main():
"""Run the application."""
# We don't want any warnings to end up impacting output.
warnings.simplefilter('ignore')
if DEBUG:
pid = os.getpid()
log_filename = 'rbssh-%s.log' % pid
if DEBUG_LOGDIR:
log_path = os.path.join(DEBUG_LOGDIR, log_filename)
else:
log_path = log_filename
logging.basicConfig(level=logging.DEBUG,
format='%(asctime)s %(name)-18s %(levelname)-8s '
'%(message)s',
datefmt='%m-%d %H:%M',
filename=log_path,
filemode='w')
logging.debug('%s' % sys.argv)
logging.debug('PID %s' % pid)
initialize(load_extensions=False,
setup_logging=False,
setup_templates=False)
ch = logging.StreamHandler()
ch.setLevel(logging.INFO)
ch.setFormatter(logging.Formatter('%(message)s'))
ch.addFilter(logging.Filter('root'))
logging.getLogger('').addHandler(ch)
path, port, command = parse_options(sys.argv[1:])
if '://' not in path:
path = 'ssh://' + path
username, hostname = SCMTool.get_auth_from_uri(path, options.username)
if username is None:
username = getpass.getuser()
logging.debug('!!! %s, %s, %s' % (hostname, username, command))
client = SSHClient(namespace=options.local_site_name)
client.set_missing_host_key_policy(paramiko.WarningPolicy())
attempts = 0
password = None
key = client.get_user_key()
while True:
try:
client.connect(hostname, port, username=username,
password=password, pkey=key,
allow_agent=options.allow_agent)
break
except paramiko.AuthenticationException as e:
if attempts == 3 or not sys.stdin.isatty():
logging.error('Too many authentication failures for %s' %
username)
sys.exit(1)
attempts += 1
password = getpass.getpass("%s@%s's password: " %
(username, hostname))
except paramiko.SSHException as e:
logging.error('Error connecting to server: %s' % e)
sys.exit(1)
except Exception as e:
logging.error('Unknown exception during connect: %s (%s)' %
(e, type(e)))
sys.exit(1)
transport = client.get_transport()
channel = transport.open_session()
if sys.platform in ('cygwin', 'win32'):
logging.debug('!!! Using WindowsHandler')
handler = WindowsHandler(channel)
else:
logging.debug('!!! Using PosixHandler')
handler = PosixHandler(channel)
if options.subsystem == 'sftp':
logging.debug('!!! Invoking sftp subsystem')
channel.invoke_subsystem('sftp')
handler.transfer()
elif command:
logging.debug('!!! Sending command %s' % command)
channel.exec_command(' '.join(command))
handler.transfer()
else:
logging.debug('!!! Opening shell')
channel.get_pty()
channel.invoke_shell()
handler.shell()
logging.debug('!!! Done')
status = channel.recv_exit_status()
client.close()
return status
if __name__ == '__main__':
main()
# ... with blackjack, and hookers.
|
test_sync_clients.py
|
# -------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
# --------------------------------------------------------------------------
import pytest
import logging
import threading
import time
import os
import io
import six
from azure.iot.device.iothub import IoTHubDeviceClient, IoTHubModuleClient
from azure.iot.device.iothub.pipeline import IoTHubPipeline, constant
from azure.iot.device.iothub.models import Message, MethodRequest
from azure.iot.device.iothub.sync_inbox import SyncClientInbox, InboxEmpty
from azure.iot.device.iothub.auth import IoTEdgeError
import azure.iot.device.iothub.sync_clients as sync_clients
logging.basicConfig(level=logging.INFO)
################
# SHARED TESTS #
################
class SharedClientInstantiationTests(object):
@pytest.mark.it(
"Stores the IoTHubPipeline from the 'iothub_pipeline' parameter in the '_iothub_pipeline' attribute"
)
def test_iothub_pipeline_attribute(self, client_class, iothub_pipeline):
client = client_class(iothub_pipeline)
assert client._iothub_pipeline is iothub_pipeline
@pytest.mark.it("Sets on_connected handler in the IoTHubPipeline")
def test_sets_on_connected_handler_in_pipeline(self, client_class, iothub_pipeline):
client = client_class(iothub_pipeline)
assert client._iothub_pipeline.on_connected is not None
assert client._iothub_pipeline.on_connected == client._on_connected
@pytest.mark.it("Sets on_disconnected handler in the IoTHubPipeline")
def test_sets_on_disconnected_handler_in_pipeline(self, client_class, iothub_pipeline):
client = client_class(iothub_pipeline)
assert client._iothub_pipeline.on_disconnected is not None
assert client._iothub_pipeline.on_disconnected == client._on_disconnected
@pytest.mark.it("Sets on_method_request_received handler in the IoTHubPipeline")
    def test_sets_on_method_request_received_handler_in_pipeline(
self, client_class, iothub_pipeline
):
client = client_class(iothub_pipeline)
assert client._iothub_pipeline.on_method_request_received is not None
assert (
client._iothub_pipeline.on_method_request_received
== client._inbox_manager.route_method_request
)
class SharedClientCreateFromConnectionStringTests(object):
@pytest.mark.it(
"Uses the connection string and CA certificate combination to create a SymmetricKeyAuthenticationProvider"
)
@pytest.mark.parametrize(
"ca_cert",
[
pytest.param(None, id="No CA certificate"),
pytest.param("some-certificate", id="With CA certificate"),
],
)
def test_auth_provider_creation(self, mocker, client_class, connection_string, ca_cert):
mock_auth_parse = mocker.patch(
"azure.iot.device.iothub.auth.SymmetricKeyAuthenticationProvider"
).parse
args = (connection_string,)
kwargs = {}
if ca_cert:
kwargs["ca_cert"] = ca_cert
client_class.create_from_connection_string(*args, **kwargs)
assert mock_auth_parse.call_count == 1
assert mock_auth_parse.call_args == mocker.call(connection_string)
assert mock_auth_parse.return_value.ca_cert is ca_cert
@pytest.mark.it("Uses the SymmetricKeyAuthenticationProvider to create an IoTHubPipeline")
@pytest.mark.parametrize(
"ca_cert",
[
pytest.param(None, id="No CA certificate"),
pytest.param("some-certificate", id="With CA certificate"),
],
)
def test_pipeline_creation(self, mocker, client_class, connection_string, ca_cert):
mock_auth = mocker.patch(
"azure.iot.device.iothub.auth.SymmetricKeyAuthenticationProvider"
).parse.return_value
mock_pipeline_init = mocker.patch("azure.iot.device.iothub.pipeline.IoTHubPipeline")
args = (connection_string,)
kwargs = {}
if ca_cert:
kwargs["ca_cert"] = ca_cert
client_class.create_from_connection_string(*args, **kwargs)
assert mock_pipeline_init.call_count == 1
assert mock_pipeline_init.call_args == mocker.call(mock_auth)
@pytest.mark.it("Uses the IoTHubPipeline to instantiate the client")
@pytest.mark.parametrize(
"ca_cert",
[
pytest.param(None, id="No CA certificate"),
pytest.param("some-certificate", id="With CA certificate"),
],
)
def test_client_instantiation(self, mocker, client_class, connection_string, ca_cert):
mock_pipeline = mocker.patch("azure.iot.device.iothub.pipeline.IoTHubPipeline").return_value
spy_init = mocker.spy(client_class, "__init__")
args = (connection_string,)
kwargs = {}
if ca_cert:
kwargs["ca_cert"] = ca_cert
client_class.create_from_connection_string(*args, **kwargs)
assert spy_init.call_count == 1
assert spy_init.call_args == mocker.call(mocker.ANY, mock_pipeline)
@pytest.mark.it("Returns the instantiated client")
@pytest.mark.parametrize(
"ca_cert",
[
pytest.param(None, id="No CA certificate"),
pytest.param("some-certificate", id="With CA certificate"),
],
)
def test_returns_client(self, client_class, connection_string, ca_cert):
args = (connection_string,)
kwargs = {}
if ca_cert:
kwargs["ca_cert"] = ca_cert
client = client_class.create_from_connection_string(*args, **kwargs)
assert isinstance(client, client_class)
# TODO: If auth package was refactored to use ConnectionString class, tests from that
# class would increase the coverage here.
@pytest.mark.it("Raises ValueError when given an invalid connection string")
@pytest.mark.parametrize(
"bad_cs",
[
pytest.param("not-a-connection-string", id="Garbage string"),
pytest.param(object(), id="Non-string input"),
pytest.param(
"HostName=Invalid;DeviceId=Invalid;SharedAccessKey=Invalid",
id="Malformed Connection String",
marks=pytest.mark.xfail(reason="Bug in pipeline + need for auth refactor"), # TODO
),
],
)
def test_raises_value_error_on_bad_connection_string(self, client_class, bad_cs):
with pytest.raises(ValueError):
client_class.create_from_connection_string(bad_cs)
class SharedClientCreateFromSharedAccessSignature(object):
@pytest.mark.it("Uses the SAS token to create a SharedAccessSignatureAuthenticationProvider")
def test_auth_provider_creation(self, mocker, client_class, sas_token_string):
mock_auth_parse = mocker.patch(
"azure.iot.device.iothub.auth.SharedAccessSignatureAuthenticationProvider"
).parse
client_class.create_from_shared_access_signature(sas_token_string)
assert mock_auth_parse.call_count == 1
assert mock_auth_parse.call_args == mocker.call(sas_token_string)
@pytest.mark.it(
"Uses the SharedAccessSignatureAuthenticationProvider to create an IoTHubPipeline"
)
def test_pipeline_creation(self, mocker, client_class, sas_token_string):
mock_auth = mocker.patch(
"azure.iot.device.iothub.auth.SharedAccessSignatureAuthenticationProvider"
).parse.return_value
mock_pipeline_init = mocker.patch("azure.iot.device.iothub.pipeline.IoTHubPipeline")
client_class.create_from_shared_access_signature(sas_token_string)
assert mock_pipeline_init.call_count == 1
assert mock_pipeline_init.call_args == mocker.call(mock_auth)
@pytest.mark.it("Uses the IoTHubPipeline to instantiate the client")
def test_client_instantiation(self, mocker, client_class, sas_token_string):
mock_pipeline = mocker.patch("azure.iot.device.iothub.pipeline.IoTHubPipeline").return_value
spy_init = mocker.spy(client_class, "__init__")
client_class.create_from_shared_access_signature(sas_token_string)
assert spy_init.call_count == 1
assert spy_init.call_args == mocker.call(mocker.ANY, mock_pipeline)
@pytest.mark.it("Returns the instantiated client")
def test_returns_client(self, mocker, client_class, sas_token_string):
client = client_class.create_from_shared_access_signature(sas_token_string)
assert isinstance(client, client_class)
# TODO: If auth package was refactored to use SasToken class, tests from that
# class would increase the coverage here.
@pytest.mark.it("Raises ValueError when given an invalid SAS token")
@pytest.mark.parametrize(
"bad_sas",
[
pytest.param(object(), id="Non-string input"),
pytest.param(
"SharedAccessSignature sr=Invalid&sig=Invalid&se=Invalid", id="Malformed SAS token"
),
],
)
def test_raises_value_error_on_bad_sas_token(self, client_class, bad_sas):
with pytest.raises(ValueError):
client_class.create_from_shared_access_signature(bad_sas)
class WaitsForEventCompletion(object):
def add_event_completion_checks(self, mocker, pipeline_function, args=[], kwargs={}):
event_init_mock = mocker.patch.object(threading, "Event")
event_mock = event_init_mock.return_value
def check_callback_completes_event():
# Assert exactly one Event was instantiated so we know the following asserts
# are related to the code under test ONLY
assert event_init_mock.call_count == 1
# Assert waiting for Event to complete
assert event_mock.wait.call_count == 1
assert event_mock.set.call_count == 0
# Manually trigger callback
cb = pipeline_function.call_args[1]["callback"]
cb(*args, **kwargs)
# Assert Event is now completed
assert event_mock.set.call_count == 1
event_mock.wait.side_effect = check_callback_completes_event
class SharedClientConnectTests(WaitsForEventCompletion):
@pytest.mark.it("Begins a 'connect' pipeline operation")
def test_calls_pipeline_connect(self, client, iothub_pipeline):
client.connect()
assert iothub_pipeline.connect.call_count == 1
@pytest.mark.it("Waits for the completion of the 'connect' pipeline operation before returning")
def test_waits_for_pipeline_op_completion(
self, mocker, client_manual_cb, iothub_pipeline_manual_cb
):
self.add_event_completion_checks(
mocker=mocker, pipeline_function=iothub_pipeline_manual_cb.connect
)
client_manual_cb.connect()
class SharedClientDisconnectTests(WaitsForEventCompletion):
@pytest.mark.it("Begins a 'disconnect' pipeline operation")
def test_calls_pipeline_disconnect(self, client, iothub_pipeline):
client.disconnect()
assert iothub_pipeline.disconnect.call_count == 1
@pytest.mark.it(
"Waits for the completion of the 'disconnect' pipeline operation before returning"
)
def test_waits_for_pipeline_op_completion(
self, mocker, client_manual_cb, iothub_pipeline_manual_cb
):
self.add_event_completion_checks(
mocker=mocker, pipeline_function=iothub_pipeline_manual_cb.disconnect
)
client_manual_cb.disconnect()
class SharedClientDisconnectEventTests(object):
@pytest.mark.it("Clears all pending MethodRequests upon disconnect")
def test_state_change_handler_clears_method_request_inboxes_on_disconnect(self, client, mocker):
clear_method_request_spy = mocker.spy(client._inbox_manager, "clear_all_method_requests")
client._on_disconnected()
assert clear_method_request_spy.call_count == 1
class SharedClientSendD2CMessageTests(WaitsForEventCompletion):
@pytest.mark.it("Begins a 'send_message' IoTHubPipeline operation")
def test_calls_pipeline_send_message(self, client, iothub_pipeline, message):
client.send_message(message)
assert iothub_pipeline.send_message.call_count == 1
assert iothub_pipeline.send_message.call_args[0][0] is message
@pytest.mark.it(
"Waits for the completion of the 'send_message' pipeline operation before returning"
)
def test_waits_for_pipeline_op_completion(
self, mocker, client_manual_cb, iothub_pipeline_manual_cb, message
):
self.add_event_completion_checks(
mocker=mocker, pipeline_function=iothub_pipeline_manual_cb.send_message
)
client_manual_cb.send_message(message)
@pytest.mark.it(
"Wraps 'message' input parameter in a Message object if it is not a Message object"
)
@pytest.mark.parametrize(
"message_input",
[
pytest.param("message", id="String input"),
pytest.param(222, id="Integer input"),
pytest.param(object(), id="Object input"),
pytest.param(None, id="None input"),
pytest.param([1, "str"], id="List input"),
pytest.param({"a": 2}, id="Dictionary input"),
],
)
def test_wraps_data_in_message_and_calls_pipeline_send_message(
self, client, iothub_pipeline, message_input
):
client.send_message(message_input)
assert iothub_pipeline.send_message.call_count == 1
sent_message = iothub_pipeline.send_message.call_args[0][0]
assert isinstance(sent_message, Message)
assert sent_message.data == message_input
class SharedClientReceiveMethodRequestTests(object):
@pytest.mark.it("Implicitly enables methods feature if not already enabled")
@pytest.mark.parametrize(
"method_name",
[pytest.param(None, id="Generic Method"), pytest.param("method_x", id="Named Method")],
)
def test_enables_methods_only_if_not_already_enabled(
self, mocker, client, iothub_pipeline, method_name
):
        mocker.patch.object(SyncClientInbox, "get") # patch this so receive_method_request won't block
        # Verify Method Requests enabled if not enabled
iothub_pipeline.feature_enabled.__getitem__.return_value = (
False
) # Method Requests will appear disabled
client.receive_method_request(method_name)
assert iothub_pipeline.enable_feature.call_count == 1
assert iothub_pipeline.enable_feature.call_args[0][0] == constant.METHODS
iothub_pipeline.enable_feature.reset_mock()
        # Verify Method Requests not enabled if already enabled
        iothub_pipeline.feature_enabled.__getitem__.return_value = (
            True
        ) # Method Requests will appear enabled
client.receive_method_request(method_name)
assert iothub_pipeline.enable_feature.call_count == 0
@pytest.mark.it(
"Returns a MethodRequest from the generic method inbox, if available, when called without method name"
)
def test_called_without_method_name_returns_method_request_from_generic_method_inbox(
self, mocker, client
):
request = MethodRequest(request_id="1", name="some_method", payload={"key": "value"})
inbox_mock = mocker.MagicMock(autospec=SyncClientInbox)
inbox_mock.get.return_value = request
manager_get_inbox_mock = mocker.patch.object(
target=client._inbox_manager,
attribute="get_method_request_inbox",
return_value=inbox_mock,
)
received_request = client.receive_method_request()
assert manager_get_inbox_mock.call_count == 1
assert manager_get_inbox_mock.call_args == mocker.call(None)
assert inbox_mock.get.call_count == 1
        assert received_request is request
@pytest.mark.it(
"Returns MethodRequest from the corresponding method inbox, if available, when called with a method name"
)
def test_called_with_method_name_returns_method_request_from_named_method_inbox(
self, mocker, client
):
method_name = "some_method"
request = MethodRequest(request_id="1", name=method_name, payload={"key": "value"})
inbox_mock = mocker.MagicMock(autospec=SyncClientInbox)
inbox_mock.get.return_value = request
manager_get_inbox_mock = mocker.patch.object(
target=client._inbox_manager,
attribute="get_method_request_inbox",
return_value=inbox_mock,
)
received_request = client.receive_method_request(method_name)
assert manager_get_inbox_mock.call_count == 1
assert manager_get_inbox_mock.call_args == mocker.call(method_name)
assert inbox_mock.get.call_count == 1
        assert received_request is request
@pytest.mark.it("Can be called in various modes")
@pytest.mark.parametrize(
"method_name",
[pytest.param(None, id="Generic Method"), pytest.param("method_x", id="Named Method")],
)
@pytest.mark.parametrize(
"block,timeout",
[
pytest.param(True, None, id="Blocking, no timeout"),
pytest.param(True, 10, id="Blocking with timeout"),
pytest.param(False, None, id="Nonblocking"),
],
)
def test_receive_method_request_can_be_called_in_mode(
self, mocker, client, block, timeout, method_name
):
inbox_mock = mocker.MagicMock(autospec=SyncClientInbox)
mocker.patch.object(
target=client._inbox_manager,
attribute="get_method_request_inbox",
return_value=inbox_mock,
)
client.receive_method_request(method_name=method_name, block=block, timeout=timeout)
assert inbox_mock.get.call_count == 1
assert inbox_mock.get.call_args == mocker.call(block=block, timeout=timeout)
@pytest.mark.it("Defaults to blocking mode with no timeout")
@pytest.mark.parametrize(
"method_name",
[pytest.param(None, id="Generic Method"), pytest.param("method_x", id="Named Method")],
)
def test_receive_method_request_default_mode(self, mocker, client, method_name):
inbox_mock = mocker.MagicMock(autospec=SyncClientInbox)
mocker.patch.object(
target=client._inbox_manager,
attribute="get_method_request_inbox",
return_value=inbox_mock,
)
client.receive_method_request(method_name=method_name)
assert inbox_mock.get.call_count == 1
assert inbox_mock.get.call_args == mocker.call(block=True, timeout=None)
@pytest.mark.it("Blocks until a method request is available, in blocking mode")
@pytest.mark.parametrize(
"method_name",
[pytest.param(None, id="Generic Method"), pytest.param("method_x", id="Named Method")],
)
def test_no_method_request_in_inbox_blocking_mode(self, client, method_name):
request = MethodRequest(request_id="1", name=method_name, payload={"key": "value"})
inbox = client._inbox_manager.get_method_request_inbox(method_name)
assert inbox.empty()
def insert_item_after_delay():
time.sleep(0.01)
inbox._put(request)
insertion_thread = threading.Thread(target=insert_item_after_delay)
insertion_thread.start()
received_request = client.receive_method_request(method_name, block=True)
assert received_request is request
# This proves that the blocking happens because 'received_request' can't be
# 'request' until after a 10 millisecond delay on the insert. But because the
# 'received_request' IS 'request', it means that client.receive_method_request
# did not return until after the delay.
@pytest.mark.it(
"Raises InboxEmpty exception after a timeout while blocking, in blocking mode with a specified timeout"
)
@pytest.mark.parametrize(
"method_name",
[pytest.param(None, id="Generic Method"), pytest.param("method_x", id="Named Method")],
)
def test_times_out_waiting_for_message_blocking_mode(self, client, method_name):
with pytest.raises(InboxEmpty):
client.receive_method_request(method_name, block=True, timeout=0.01)
@pytest.mark.it(
"Raises InboxEmpty exception immediately if there are no messages, in nonblocking mode"
)
@pytest.mark.parametrize(
"method_name",
[pytest.param(None, id="Generic Method"), pytest.param("method_x", id="Named Method")],
)
def test_no_message_in_inbox_nonblocking_mode(self, client, method_name):
with pytest.raises(InboxEmpty):
client.receive_method_request(method_name, block=False)
class SharedClientSendMethodResponseTests(WaitsForEventCompletion):
@pytest.mark.it("Begins a 'send_method_response' pipeline operation")
def test_send_method_response_calls_pipeline(self, client, iothub_pipeline, method_response):
client.send_method_response(method_response)
assert iothub_pipeline.send_method_response.call_count == 1
assert iothub_pipeline.send_method_response.call_args[0][0] is method_response
@pytest.mark.it(
"Waits for the completion of the 'send_method_response' pipeline operation before returning"
)
def test_waits_for_pipeline_op_completion(
self, mocker, client_manual_cb, iothub_pipeline_manual_cb, method_response
):
self.add_event_completion_checks(
mocker=mocker, pipeline_function=iothub_pipeline_manual_cb.send_method_response
)
client_manual_cb.send_method_response(method_response)
class SharedClientGetTwinTests(WaitsForEventCompletion):
@pytest.mark.it("Implicitly enables twin messaging feature if not already enabled")
def test_enables_twin_only_if_not_already_enabled(self, mocker, client, iothub_pipeline):
# patch this so get_twin won't block
def immediate_callback(callback):
callback(None)
mocker.patch.object(iothub_pipeline, "get_twin", side_effect=immediate_callback)
# Verify twin enabled if not enabled
iothub_pipeline.feature_enabled.__getitem__.return_value = (
False
) # twin will appear disabled
client.get_twin()
assert iothub_pipeline.enable_feature.call_count == 1
assert iothub_pipeline.enable_feature.call_args[0][0] == constant.TWIN
iothub_pipeline.enable_feature.reset_mock()
# Verify twin not enabled if already enabled
iothub_pipeline.feature_enabled.__getitem__.return_value = True # twin will appear enabled
client.get_twin()
assert iothub_pipeline.enable_feature.call_count == 0
@pytest.mark.it("Begins a 'get_twin' pipeline operation")
def test_get_twin_calls_pipeline(self, client, iothub_pipeline):
client.get_twin()
assert iothub_pipeline.get_twin.call_count == 1
@pytest.mark.it(
"Waits for the completion of the 'get_twin' pipeline operation before returning"
)
def test_waits_for_pipeline_op_completion(
self, mocker, client_manual_cb, iothub_pipeline_manual_cb
):
self.add_event_completion_checks(
mocker=mocker, pipeline_function=iothub_pipeline_manual_cb.get_twin, args=[None]
)
client_manual_cb.get_twin()
@pytest.mark.it("Returns the twin that the pipeline returned")
def test_verifies_twin_returned(self, mocker, client_manual_cb, iothub_pipeline_manual_cb):
twin = {"reported": {"foo": "bar"}}
self.add_event_completion_checks(
mocker=mocker, pipeline_function=iothub_pipeline_manual_cb.get_twin, args=[twin]
)
returned_twin = client_manual_cb.get_twin()
assert returned_twin == twin
class SharedClientPatchTwinReportedPropertiesTests(WaitsForEventCompletion):
@pytest.mark.it("Implicitly enables twin messaging feature if not already enabled")
def test_enables_twin_only_if_not_already_enabled(
self, mocker, client, iothub_pipeline, twin_patch_reported
):
        # patch this so patch_twin_reported_properties won't block
def immediate_callback(patch, callback):
callback()
mocker.patch.object(
iothub_pipeline, "patch_twin_reported_properties", side_effect=immediate_callback
)
# Verify twin enabled if not enabled
iothub_pipeline.feature_enabled.__getitem__.return_value = (
False
) # twin will appear disabled
client.patch_twin_reported_properties(twin_patch_reported)
assert iothub_pipeline.enable_feature.call_count == 1
assert iothub_pipeline.enable_feature.call_args[0][0] == constant.TWIN
iothub_pipeline.enable_feature.reset_mock()
# Verify twin not enabled if already enabled
iothub_pipeline.feature_enabled.__getitem__.return_value = True # twin will appear enabled
client.patch_twin_reported_properties(twin_patch_reported)
assert iothub_pipeline.enable_feature.call_count == 0
@pytest.mark.it("Begins a 'patch_twin_reported_properties' pipeline operation")
def test_patch_twin_reported_properties_calls_pipeline(
self, client, iothub_pipeline, twin_patch_reported
):
client.patch_twin_reported_properties(twin_patch_reported)
assert iothub_pipeline.patch_twin_reported_properties.call_count == 1
assert (
iothub_pipeline.patch_twin_reported_properties.call_args[1]["patch"]
is twin_patch_reported
)
    @pytest.mark.it(
        "Waits for the completion of the 'patch_twin_reported_properties' pipeline operation before returning"
    )
def test_waits_for_pipeline_op_completion(
self, mocker, client_manual_cb, iothub_pipeline_manual_cb, twin_patch_reported
):
self.add_event_completion_checks(
mocker=mocker,
pipeline_function=iothub_pipeline_manual_cb.patch_twin_reported_properties,
)
client_manual_cb.patch_twin_reported_properties(twin_patch_reported)
class SharedClientReceiveTwinDesiredPropertiesPatchTests(object):
@pytest.mark.it(
"Implicitly enables Twin desired properties patch feature if not already enabled"
)
def test_enables_twin_patches_only_if_not_already_enabled(
self, mocker, client, iothub_pipeline
):
mocker.patch.object(
SyncClientInbox, "get"
) # patch this so receive_twin_desired_properties_patch won't block
# Verify twin patches enabled if not enabled
iothub_pipeline.feature_enabled.__getitem__.return_value = (
False
) # twin patches will appear disabled
client.receive_twin_desired_properties_patch()
assert iothub_pipeline.enable_feature.call_count == 1
assert iothub_pipeline.enable_feature.call_args[0][0] == constant.TWIN_PATCHES
iothub_pipeline.enable_feature.reset_mock()
# Verify twin patches not enabled if already enabled
        iothub_pipeline.feature_enabled.__getitem__.return_value = True # twin patches will appear enabled
client.receive_twin_desired_properties_patch()
assert iothub_pipeline.enable_feature.call_count == 0
@pytest.mark.it("Returns a patch from the twin patch inbox, if available")
def test_returns_message_from_twin_patch_inbox(self, mocker, client, twin_patch_desired):
inbox_mock = mocker.MagicMock(autospec=SyncClientInbox)
inbox_mock.get.return_value = twin_patch_desired
manager_get_inbox_mock = mocker.patch.object(
client._inbox_manager, "get_twin_patch_inbox", return_value=inbox_mock
)
received_patch = client.receive_twin_desired_properties_patch()
assert manager_get_inbox_mock.call_count == 1
assert inbox_mock.get.call_count == 1
assert received_patch is twin_patch_desired
@pytest.mark.it("Can be called in various modes")
@pytest.mark.parametrize(
"block,timeout",
[
pytest.param(True, None, id="Blocking, no timeout"),
pytest.param(True, 10, id="Blocking with timeout"),
pytest.param(False, None, id="Nonblocking"),
],
)
def test_can_be_called_in_mode(self, mocker, client, block, timeout):
inbox_mock = mocker.MagicMock(autospec=SyncClientInbox)
mocker.patch.object(client._inbox_manager, "get_twin_patch_inbox", return_value=inbox_mock)
client.receive_twin_desired_properties_patch(block=block, timeout=timeout)
assert inbox_mock.get.call_count == 1
assert inbox_mock.get.call_args == mocker.call(block=block, timeout=timeout)
@pytest.mark.it("Defaults to blocking mode with no timeout")
def test_default_mode(self, mocker, client):
inbox_mock = mocker.MagicMock(autospec=SyncClientInbox)
mocker.patch.object(client._inbox_manager, "get_twin_patch_inbox", return_value=inbox_mock)
client.receive_twin_desired_properties_patch()
assert inbox_mock.get.call_count == 1
assert inbox_mock.get.call_args == mocker.call(block=True, timeout=None)
@pytest.mark.it("Blocks until a patch is available, in blocking mode")
def test_no_message_in_inbox_blocking_mode(self, client, twin_patch_desired):
twin_patch_inbox = client._inbox_manager.get_twin_patch_inbox()
assert twin_patch_inbox.empty()
def insert_item_after_delay():
time.sleep(0.01)
twin_patch_inbox._put(twin_patch_desired)
insertion_thread = threading.Thread(target=insert_item_after_delay)
insertion_thread.start()
received_patch = client.receive_twin_desired_properties_patch(block=True)
assert received_patch is twin_patch_desired
# This proves that the blocking happens because 'received_patch' can't be
# 'twin_patch_desired' until after a 10 millisecond delay on the insert. But because the
# 'received_patch' IS 'twin_patch_desired', it means that client.receive_twin_desired_properties_patch
# did not return until after the delay.
@pytest.mark.it(
"Raises InboxEmpty exception after a timeout while blocking, in blocking mode with a specified timeout"
)
def test_times_out_waiting_for_message_blocking_mode(self, client):
with pytest.raises(InboxEmpty):
client.receive_twin_desired_properties_patch(block=True, timeout=0.01)
@pytest.mark.it(
"Raises InboxEmpty exception immediately if there are no patches, in nonblocking mode"
)
def test_no_message_in_inbox_nonblocking_mode(self, client):
with pytest.raises(InboxEmpty):
client.receive_twin_desired_properties_patch(block=False)
################
# DEVICE TESTS #
################
class IoTHubDeviceClientTestsConfig(object):
@pytest.fixture
def client_class(self):
return IoTHubDeviceClient
@pytest.fixture
def client(self, iothub_pipeline):
"""This client automatically resolves callbacks sent to the pipeline.
It should be used for the majority of tests.
"""
return IoTHubDeviceClient(iothub_pipeline)
@pytest.fixture
def client_manual_cb(self, iothub_pipeline_manual_cb):
"""This client requires manual triggering of the callbacks sent to the pipeline.
        It should only be used for tests where manual control of a callback is required.
"""
return IoTHubDeviceClient(iothub_pipeline_manual_cb)
@pytest.fixture
def connection_string(self, device_connection_string):
"""This fixture is parametrized to provie all valid device connection strings.
See client_fixtures.py
"""
return device_connection_string
@pytest.fixture
def sas_token_string(self, device_sas_token_string):
return device_sas_token_string
@pytest.mark.describe("IoTHubDeviceClient (Synchronous) - Instantiation")
class TestIoTHubDeviceClientInstantiation(
IoTHubDeviceClientTestsConfig, SharedClientInstantiationTests
):
@pytest.mark.it("Sets on_c2d_message_received handler in the IoTHubPipeline")
def test_sets_on_c2d_message_received_handler_in_pipeline(self, client_class, iothub_pipeline):
client = client_class(iothub_pipeline)
assert client._iothub_pipeline.on_c2d_message_received is not None
assert (
client._iothub_pipeline.on_c2d_message_received
== client._inbox_manager.route_c2d_message
)
@pytest.mark.it("Sets the '_edge_pipeline' attribute to None")
def test_edge_pipeline_is_none(self, client_class, iothub_pipeline):
client = client_class(iothub_pipeline)
assert client._edge_pipeline is None
@pytest.mark.describe("IoTHubDeviceClient (Synchronous) - .create_from_connection_string()")
class TestIoTHubDeviceClientCreateFromConnectionString(
IoTHubDeviceClientTestsConfig, SharedClientCreateFromConnectionStringTests
):
pass
@pytest.mark.describe("IoTHubDeviceClient (Synchronous) - .create_from_shared_access_signature()")
class TestIoTHubDeviceClientCreateFromSharedAccessSignature(
IoTHubDeviceClientTestsConfig, SharedClientCreateFromSharedAccessSignature
):
pass
@pytest.mark.describe("IoTHubDeviceClient (Synchronous) - .create_from_x509_certificate()")
class TestIoTHubDeviceClientCreateFromX509Certificate(IoTHubDeviceClientTestsConfig):
hostname = "durmstranginstitute.farend"
device_id = "MySnitch"
@pytest.mark.it("Uses the provided arguments to create a X509AuthenticationProvider")
def test_auth_provider_creation(self, mocker, client_class, x509):
mock_auth_init = mocker.patch("azure.iot.device.iothub.auth.X509AuthenticationProvider")
client_class.create_from_x509_certificate(
x509=x509, hostname=self.hostname, device_id=self.device_id
)
assert mock_auth_init.call_count == 1
assert mock_auth_init.call_args == mocker.call(
x509=x509, hostname=self.hostname, device_id=self.device_id
)
@pytest.mark.it("Uses the X509AuthenticationProvider to create an IoTHubPipeline")
def test_pipeline_creation(self, mocker, client_class, x509):
mock_auth = mocker.patch(
"azure.iot.device.iothub.auth.X509AuthenticationProvider"
).return_value
mock_pipeline_init = mocker.patch("azure.iot.device.iothub.pipeline.IoTHubPipeline")
client_class.create_from_x509_certificate(
x509=x509, hostname=self.hostname, device_id=self.device_id
)
assert mock_pipeline_init.call_count == 1
assert mock_pipeline_init.call_args == mocker.call(mock_auth)
@pytest.mark.it("Uses the IoTHubPipeline to instantiate the client")
def test_client_instantiation(self, mocker, client_class, x509):
mock_pipeline = mocker.patch("azure.iot.device.iothub.pipeline.IoTHubPipeline").return_value
spy_init = mocker.spy(client_class, "__init__")
client_class.create_from_x509_certificate(
x509=x509, hostname=self.hostname, device_id=self.device_id
)
assert spy_init.call_count == 1
assert spy_init.call_args == mocker.call(mocker.ANY, mock_pipeline)
@pytest.mark.it("Returns the instantiated client")
def test_returns_client(self, mocker, client_class, x509):
client = client_class.create_from_x509_certificate(
x509=x509, hostname=self.hostname, device_id=self.device_id
)
assert isinstance(client, client_class)
@pytest.mark.describe("IoTHubDeviceClient (Synchronous) - .connect()")
class TestIoTHubDeviceClientConnect(IoTHubDeviceClientTestsConfig, SharedClientConnectTests):
pass
@pytest.mark.describe("IoTHubDeviceClient (Synchronous) - .disconnect()")
class TestIoTHubDeviceClientDisconnect(IoTHubDeviceClientTestsConfig, SharedClientDisconnectTests):
pass
@pytest.mark.describe("IoTHubDeviceClient (Synchronous) - EVENT: Disconnect")
class TestIoTHubDeviceClientDisconnectEvent(
IoTHubDeviceClientTestsConfig, SharedClientDisconnectEventTests
):
pass
@pytest.mark.describe("IoTHubDeviceClient (Synchronous) - .send_message()")
class TestIoTHubDeviceClientSendD2CMessage(
IoTHubDeviceClientTestsConfig, SharedClientSendD2CMessageTests
):
pass
@pytest.mark.describe("IoTHubDeviceClient (Synchronous) - .receive_message()")
class TestIoTHubDeviceClientReceiveC2DMessage(IoTHubDeviceClientTestsConfig):
@pytest.mark.it("Implicitly enables C2D messaging feature if not already enabled")
def test_enables_c2d_messaging_only_if_not_already_enabled(
self, mocker, client, iothub_pipeline
):
mocker.patch.object(SyncClientInbox, "get") # patch this so receive_message won't block
# Verify C2D Messaging enabled if not enabled
iothub_pipeline.feature_enabled.__getitem__.return_value = False # C2D will appear disabled
client.receive_message()
assert iothub_pipeline.enable_feature.call_count == 1
assert iothub_pipeline.enable_feature.call_args[0][0] == constant.C2D_MSG
iothub_pipeline.enable_feature.reset_mock()
# Verify C2D Messaging not enabled if already enabled
iothub_pipeline.feature_enabled.__getitem__.return_value = True # C2D will appear enabled
client.receive_message()
assert iothub_pipeline.enable_feature.call_count == 0
@pytest.mark.it("Returns a message from the C2D inbox, if available")
def test_returns_message_from_c2d_inbox(self, mocker, client, message):
inbox_mock = mocker.MagicMock(autospec=SyncClientInbox)
inbox_mock.get.return_value = message
manager_get_inbox_mock = mocker.patch.object(
client._inbox_manager, "get_c2d_message_inbox", return_value=inbox_mock
)
received_message = client.receive_message()
assert manager_get_inbox_mock.call_count == 1
assert inbox_mock.get.call_count == 1
assert received_message is message
@pytest.mark.it("Can be called in various modes")
@pytest.mark.parametrize(
"block,timeout",
[
pytest.param(True, None, id="Blocking, no timeout"),
pytest.param(True, 10, id="Blocking with timeout"),
pytest.param(False, None, id="Nonblocking"),
],
)
def test_can_be_called_in_mode(self, mocker, client, block, timeout):
inbox_mock = mocker.MagicMock(autospec=SyncClientInbox)
mocker.patch.object(client._inbox_manager, "get_c2d_message_inbox", return_value=inbox_mock)
client.receive_message(block=block, timeout=timeout)
assert inbox_mock.get.call_count == 1
assert inbox_mock.get.call_args == mocker.call(block=block, timeout=timeout)
@pytest.mark.it("Defaults to blocking mode with no timeout")
def test_default_mode(self, mocker, client):
inbox_mock = mocker.MagicMock(autospec=SyncClientInbox)
mocker.patch.object(client._inbox_manager, "get_c2d_message_inbox", return_value=inbox_mock)
client.receive_message()
assert inbox_mock.get.call_count == 1
assert inbox_mock.get.call_args == mocker.call(block=True, timeout=None)
@pytest.mark.it("Blocks until a message is available, in blocking mode")
def test_no_message_in_inbox_blocking_mode(self, client, message):
c2d_inbox = client._inbox_manager.get_c2d_message_inbox()
assert c2d_inbox.empty()
def insert_item_after_delay():
time.sleep(0.01)
c2d_inbox._put(message)
insertion_thread = threading.Thread(target=insert_item_after_delay)
insertion_thread.start()
received_message = client.receive_message(block=True)
assert received_message is message
# This proves that the blocking happens because 'received_message' can't be
# 'message' until after a 10 millisecond delay on the insert. But because the
# 'received_message' IS 'message', it means that client.receive_message
# did not return until after the delay.
@pytest.mark.it(
"Raises InboxEmpty exception after a timeout while blocking, in blocking mode with a specified timeout"
)
def test_times_out_waiting_for_message_blocking_mode(self, client):
with pytest.raises(InboxEmpty):
client.receive_message(block=True, timeout=0.01)
@pytest.mark.it(
"Raises InboxEmpty exception immediately if there are no messages, in nonblocking mode"
)
def test_no_message_in_inbox_nonblocking_mode(self, client):
with pytest.raises(InboxEmpty):
client.receive_message(block=False)
@pytest.mark.describe("IoTHubDeviceClient (Synchronous) - .receive_method_request()")
class TestIoTHubDeviceClientReceiveMethodRequest(
IoTHubDeviceClientTestsConfig, SharedClientReceiveMethodRequestTests
):
pass
@pytest.mark.describe("IoTHubDeviceClient (Synchronous) - .send_method_response()")
class TestIoTHubDeviceClientSendMethodResponse(
IoTHubDeviceClientTestsConfig, SharedClientSendMethodResponseTests
):
pass
@pytest.mark.describe("IoTHubDeviceClient (Synchronous) - .get_twin()")
class TestIoTHubDeviceClientGetTwin(IoTHubDeviceClientTestsConfig, SharedClientGetTwinTests):
pass
@pytest.mark.describe("IoTHubDeviceClient (Synchronous) - .patch_twin_reported_properties()")
class TestIoTHubDeviceClientPatchTwinReportedProperties(
IoTHubDeviceClientTestsConfig, SharedClientPatchTwinReportedPropertiesTests
):
pass
@pytest.mark.describe("IoTHubDeviceClient (Synchronous) - .receive_twin_desired_properties_patch()")
class TestIoTHubDeviceClientReceiveTwinDesiredPropertiesPatch(
IoTHubDeviceClientTestsConfig, SharedClientReceiveTwinDesiredPropertiesPatchTests
):
pass
################
# MODULE TESTS #
################
class IoTHubModuleClientTestsConfig(object):
@pytest.fixture
def client_class(self):
return IoTHubModuleClient
@pytest.fixture
def client(self, iothub_pipeline):
"""This client automatically resolves callbacks sent to the pipeline.
It should be used for the majority of tests.
"""
return IoTHubModuleClient(iothub_pipeline)
@pytest.fixture
def client_manual_cb(self, iothub_pipeline_manual_cb):
"""This client requires manual triggering of the callbacks sent to the pipeline.
        It should only be used for tests where manual control of a callback is required.
"""
return IoTHubModuleClient(iothub_pipeline_manual_cb)
@pytest.fixture
def connection_string(self, module_connection_string):
"""This fixture is parametrized to provie all valid device connection strings.
See client_fixtures.py
"""
return module_connection_string
@pytest.fixture
def sas_token_string(self, module_sas_token_string):
return module_sas_token_string
@pytest.mark.describe("IoTHubModuleClient (Synchronous) - Instantiation")
class TestIoTHubModuleClientInstantiation(
IoTHubModuleClientTestsConfig, SharedClientInstantiationTests
):
@pytest.mark.it("Sets on_input_message_received handler in the IoTHubPipeline")
def test_sets_on_input_message_received_handler_in_pipeline(
self, client_class, iothub_pipeline
):
client = client_class(iothub_pipeline)
assert client._iothub_pipeline.on_input_message_received is not None
assert (
client._iothub_pipeline.on_input_message_received
== client._inbox_manager.route_input_message
)
@pytest.mark.it(
"Stores the EdgePipeline from the optionally-provided 'edge_pipeline' parameter in the '_edge_pipeline' attribute"
)
def test_sets_edge_pipeline_attribute(self, client_class, iothub_pipeline, edge_pipeline):
client = client_class(iothub_pipeline, edge_pipeline)
assert client._edge_pipeline is edge_pipeline
@pytest.mark.it(
"Sets the '_edge_pipeline' attribute to None, if the 'edge_pipeline' parameter is not provided"
)
def test_edge_pipeline_default_none(self, client_class, iothub_pipeline):
client = client_class(iothub_pipeline)
assert client._edge_pipeline is None
@pytest.mark.describe("IoTHubModuleClient (Synchronous) - .create_from_connection_string()")
class TestIoTHubModuleClientCreateFromConnectionString(
IoTHubModuleClientTestsConfig, SharedClientCreateFromConnectionStringTests
):
pass
@pytest.mark.describe("IoTHubModuleClient (Synchronous) - .create_from_shared_access_signature()")
class TestIoTHubModuleClientCreateFromSharedAccessSignature(
IoTHubModuleClientTestsConfig, SharedClientCreateFromSharedAccessSignature
):
pass
@pytest.mark.describe(
"IoTHubModuleClient (Synchronous) - .create_from_edge_environment() -- Edge Container Environment"
)
class TestIoTHubModuleClientCreateFromEdgeEnvironmentWithContainerEnv(
IoTHubModuleClientTestsConfig
):
@pytest.mark.it(
"Uses Edge container environment variables to create an IoTEdgeAuthenticationProvider"
)
def test_auth_provider_creation(self, mocker, client_class, edge_container_environment):
mocker.patch.dict(os.environ, edge_container_environment)
mock_auth_init = mocker.patch("azure.iot.device.iothub.auth.IoTEdgeAuthenticationProvider")
client_class.create_from_edge_environment()
assert mock_auth_init.call_count == 1
assert mock_auth_init.call_args == mocker.call(
hostname=edge_container_environment["IOTEDGE_IOTHUBHOSTNAME"],
device_id=edge_container_environment["IOTEDGE_DEVICEID"],
module_id=edge_container_environment["IOTEDGE_MODULEID"],
gateway_hostname=edge_container_environment["IOTEDGE_GATEWAYHOSTNAME"],
module_generation_id=edge_container_environment["IOTEDGE_MODULEGENERATIONID"],
workload_uri=edge_container_environment["IOTEDGE_WORKLOADURI"],
api_version=edge_container_environment["IOTEDGE_APIVERSION"],
)
@pytest.mark.it(
"Ignores any Edge local debug environment variables that may be present, in favor of using Edge container variables"
)
def test_auth_provider_creation_hybrid_env(
self, mocker, client_class, edge_container_environment, edge_local_debug_environment
):
# This test verifies that with a hybrid environment, the auth provider will always be
# an IoTEdgeAuthenticationProvider, even if local debug variables are present
hybrid_environment = merge_dicts(edge_container_environment, edge_local_debug_environment)
mocker.patch.dict(os.environ, hybrid_environment)
mock_edge_auth_init = mocker.patch(
"azure.iot.device.iothub.auth.IoTEdgeAuthenticationProvider"
)
mock_sk_auth_parse = mocker.patch(
"azure.iot.device.iothub.auth.SymmetricKeyAuthenticationProvider"
).parse
client_class.create_from_edge_environment()
assert mock_edge_auth_init.call_count == 1
assert mock_sk_auth_parse.call_count == 0 # we did NOT use SK auth
assert mock_edge_auth_init.call_args == mocker.call(
hostname=edge_container_environment["IOTEDGE_IOTHUBHOSTNAME"],
device_id=edge_container_environment["IOTEDGE_DEVICEID"],
module_id=edge_container_environment["IOTEDGE_MODULEID"],
gateway_hostname=edge_container_environment["IOTEDGE_GATEWAYHOSTNAME"],
module_generation_id=edge_container_environment["IOTEDGE_MODULEGENERATIONID"],
workload_uri=edge_container_environment["IOTEDGE_WORKLOADURI"],
api_version=edge_container_environment["IOTEDGE_APIVERSION"],
)
@pytest.mark.it(
"Uses the IoTEdgeAuthenticationProvider to create an IoTHubPipeline and an EdgePipeline"
)
def test_pipeline_creation(self, mocker, client_class, edge_container_environment):
mocker.patch.dict(os.environ, edge_container_environment)
mock_auth = mocker.patch(
"azure.iot.device.iothub.auth.IoTEdgeAuthenticationProvider"
).return_value
mock_iothub_pipeline_init = mocker.patch("azure.iot.device.iothub.pipeline.IoTHubPipeline")
mock_edge_pipeline_init = mocker.patch("azure.iot.device.iothub.pipeline.EdgePipeline")
client_class.create_from_edge_environment()
assert mock_iothub_pipeline_init.call_count == 1
assert mock_iothub_pipeline_init.call_args == mocker.call(mock_auth)
assert mock_edge_pipeline_init.call_count == 1
assert mock_edge_pipeline_init.call_args == mocker.call(mock_auth)
@pytest.mark.it("Uses the IoTHubPipeline and the EdgePipeline to instantiate the client")
def test_client_instantiation(self, mocker, client_class, edge_container_environment):
mocker.patch.dict(os.environ, edge_container_environment)
# Always patch the IoTEdgeAuthenticationProvider to prevent I/O operations
mocker.patch("azure.iot.device.iothub.auth.IoTEdgeAuthenticationProvider")
mock_iothub_pipeline = mocker.patch(
"azure.iot.device.iothub.pipeline.IoTHubPipeline"
).return_value
mock_edge_pipeline = mocker.patch(
"azure.iot.device.iothub.pipeline.EdgePipeline"
).return_value
spy_init = mocker.spy(client_class, "__init__")
client_class.create_from_edge_environment()
assert spy_init.call_count == 1
assert spy_init.call_args == mocker.call(
mocker.ANY, mock_iothub_pipeline, edge_pipeline=mock_edge_pipeline
)
@pytest.mark.it("Returns the instantiated client")
def test_returns_client(self, mocker, client_class, edge_container_environment):
mocker.patch.dict(os.environ, edge_container_environment)
# Always patch the IoTEdgeAuthenticationProvider to prevent I/O operations
mocker.patch("azure.iot.device.iothub.auth.IoTEdgeAuthenticationProvider")
client = client_class.create_from_edge_environment()
assert isinstance(client, client_class)
@pytest.mark.it("Raises IoTEdgeError if the environment is missing required variables")
@pytest.mark.parametrize(
"missing_env_var",
[
"IOTEDGE_MODULEID",
"IOTEDGE_DEVICEID",
"IOTEDGE_IOTHUBHOSTNAME",
"IOTEDGE_GATEWAYHOSTNAME",
"IOTEDGE_APIVERSION",
"IOTEDGE_MODULEGENERATIONID",
"IOTEDGE_WORKLOADURI",
],
)
def test_bad_environment(
self, mocker, client_class, edge_container_environment, missing_env_var
):
# Remove a variable from the fixture
del edge_container_environment[missing_env_var]
mocker.patch.dict(os.environ, edge_container_environment)
with pytest.raises(IoTEdgeError):
client_class.create_from_edge_environment()
@pytest.mark.it("Raises IoTEdgeError if there is an error using the Edge for authentication")
def test_bad_edge_auth(self, mocker, client_class, edge_container_environment):
mocker.patch.dict(os.environ, edge_container_environment)
mock_auth = mocker.patch("azure.iot.device.iothub.auth.IoTEdgeAuthenticationProvider")
mock_auth.side_effect = IoTEdgeError
with pytest.raises(IoTEdgeError):
client_class.create_from_edge_environment()
@pytest.mark.describe(
"IoTHubModuleClient (Synchronous) - .create_from_edge_environment() -- Edge Local Debug Environment"
)
class TestIoTHubModuleClientCreateFromEdgeEnvironmentWithDebugEnv(IoTHubModuleClientTestsConfig):
@pytest.fixture
def mock_open(self, mocker):
return mocker.patch.object(io, "open")
@pytest.mark.it(
"Extracts the CA certificate from the file indicated by the EdgeModuleCACertificateFile environment variable"
)
def test_read_ca_cert(self, mocker, client_class, edge_local_debug_environment, mock_open):
mock_file_handle = mock_open.return_value.__enter__.return_value
mocker.patch.dict(os.environ, edge_local_debug_environment)
client_class.create_from_edge_environment()
assert mock_open.call_count == 1
assert mock_open.call_args == mocker.call(
edge_local_debug_environment["EdgeModuleCACertificateFile"], mode="r"
)
assert mock_file_handle.read.call_count == 1
@pytest.mark.it(
"Uses Edge local debug environment variables to create a SymmetricKeyAuthenticationProvider (with CA cert)"
)
def test_auth_provider_creation(
self, mocker, client_class, edge_local_debug_environment, mock_open
):
expected_cert = mock_open.return_value.__enter__.return_value.read.return_value
mocker.patch.dict(os.environ, edge_local_debug_environment)
mock_auth_parse = mocker.patch(
"azure.iot.device.iothub.auth.SymmetricKeyAuthenticationProvider"
).parse
client_class.create_from_edge_environment()
assert mock_auth_parse.call_count == 1
assert mock_auth_parse.call_args == mocker.call(
edge_local_debug_environment["EdgeHubConnectionString"]
)
assert mock_auth_parse.return_value.ca_cert == expected_cert
@pytest.mark.it(
"Only uses Edge local debug variables if no Edge container variables are present in the environment"
)
def test_auth_provider_and_pipeline_hybrid_env(
self,
mocker,
client_class,
edge_container_environment,
edge_local_debug_environment,
mock_open,
):
# This test verifies that with a hybrid environment, the auth provider will always be
# an IoTEdgeAuthenticationProvider, even if local debug variables are present
hybrid_environment = merge_dicts(edge_container_environment, edge_local_debug_environment)
mocker.patch.dict(os.environ, hybrid_environment)
mock_edge_auth_init = mocker.patch(
"azure.iot.device.iothub.auth.IoTEdgeAuthenticationProvider"
)
mock_sk_auth_parse = mocker.patch(
"azure.iot.device.iothub.auth.SymmetricKeyAuthenticationProvider"
).parse
client_class.create_from_edge_environment()
assert mock_edge_auth_init.call_count == 1
assert mock_sk_auth_parse.call_count == 0 # we did NOT use SK auth
assert mock_edge_auth_init.call_args == mocker.call(
hostname=edge_container_environment["IOTEDGE_IOTHUBHOSTNAME"],
device_id=edge_container_environment["IOTEDGE_DEVICEID"],
module_id=edge_container_environment["IOTEDGE_MODULEID"],
gateway_hostname=edge_container_environment["IOTEDGE_GATEWAYHOSTNAME"],
module_generation_id=edge_container_environment["IOTEDGE_MODULEGENERATIONID"],
workload_uri=edge_container_environment["IOTEDGE_WORKLOADURI"],
api_version=edge_container_environment["IOTEDGE_APIVERSION"],
)
@pytest.mark.it(
"Uses the SymmetricKeyAuthenticationProvider to create an IoTHubPipeline and an EdgePipeline"
)
def test_pipeline_creation(self, mocker, client_class, edge_local_debug_environment, mock_open):
mocker.patch.dict(os.environ, edge_local_debug_environment)
mock_auth = mocker.patch(
"azure.iot.device.iothub.auth.SymmetricKeyAuthenticationProvider"
).parse.return_value
mock_iothub_pipeline_init = mocker.patch("azure.iot.device.iothub.pipeline.IoTHubPipeline")
mock_edge_pipeline_init = mocker.patch("azure.iot.device.iothub.pipeline.EdgePipeline")
client_class.create_from_edge_environment()
assert mock_iothub_pipeline_init.call_count == 1
assert mock_iothub_pipeline_init.call_args == mocker.call(mock_auth)
assert mock_edge_pipeline_init.call_count == 1
        assert mock_edge_pipeline_init.call_args == mocker.call(mock_auth)
@pytest.mark.it("Uses the IoTHubPipeline and the EdgePipeline to instantiate the client")
def test_client_instantiation(
self, mocker, client_class, edge_local_debug_environment, mock_open
):
mocker.patch.dict(os.environ, edge_local_debug_environment)
mock_iothub_pipeline = mocker.patch(
"azure.iot.device.iothub.pipeline.IoTHubPipeline"
).return_value
mock_edge_pipeline = mocker.patch(
"azure.iot.device.iothub.pipeline.EdgePipeline"
).return_value
spy_init = mocker.spy(client_class, "__init__")
client_class.create_from_edge_environment()
assert spy_init.call_count == 1
assert spy_init.call_args == mocker.call(
mocker.ANY, mock_iothub_pipeline, edge_pipeline=mock_edge_pipeline
)
@pytest.mark.it("Returns the instantiated client")
def test_returns_client(self, mocker, client_class, edge_local_debug_environment, mock_open):
mocker.patch.dict(os.environ, edge_local_debug_environment)
client = client_class.create_from_edge_environment()
assert isinstance(client, client_class)
@pytest.mark.it("Raises IoTEdgeError if the environment is missing required variables")
@pytest.mark.parametrize(
"missing_env_var", ["EdgeHubConnectionString", "EdgeModuleCACertificateFile"]
)
def test_bad_environment(
self, mocker, client_class, edge_local_debug_environment, missing_env_var, mock_open
):
# Remove a variable from the fixture
del edge_local_debug_environment[missing_env_var]
mocker.patch.dict(os.environ, edge_local_debug_environment)
with pytest.raises(IoTEdgeError):
client_class.create_from_edge_environment()
# TODO: If auth package was refactored to use ConnectionString class, tests from that
# class would increase the coverage here.
@pytest.mark.it(
"Raises ValueError if the connection string in the EdgeHubConnectionString environment variable is invalid"
)
@pytest.mark.parametrize(
"bad_cs",
[
pytest.param("not-a-connection-string", id="Garbage string"),
pytest.param("", id="Empty string"),
pytest.param(
"HostName=Invalid;DeviceId=Invalid;ModuleId=Invalid;SharedAccessKey=Invalid;GatewayHostName=Invalid",
id="Malformed Connection String",
marks=pytest.mark.xfail(reason="Bug in pipeline + need for auth refactor"), # TODO
),
],
)
def test_bad_connection_string(
self, mocker, client_class, edge_local_debug_environment, bad_cs, mock_open
):
edge_local_debug_environment["EdgeHubConnectionString"] = bad_cs
mocker.patch.dict(os.environ, edge_local_debug_environment)
with pytest.raises(ValueError):
client_class.create_from_edge_environment()
@pytest.mark.it(
"Raises ValueError if the filepath in the EdgeModuleCACertificateFile environment variable is invalid"
)
def test_bad_filepath(self, mocker, client_class, edge_local_debug_environment, mock_open):
        # To make tests compatible with Python 2 & 3, redefine errors
try:
FileNotFoundError # noqa: F823
except NameError:
FileNotFoundError = IOError
mocker.patch.dict(os.environ, edge_local_debug_environment)
mock_open.side_effect = FileNotFoundError
with pytest.raises(ValueError):
client_class.create_from_edge_environment()
@pytest.mark.it(
"Raises ValueError if the file referenced by the filepath in the EdgeModuleCACertificateFile environment variable cannot be opened"
)
def test_bad_file_io(self, mocker, client_class, edge_local_debug_environment, mock_open):
# Raise a different error in Python 2 vs 3
if six.PY2:
error = IOError
else:
error = OSError
mocker.patch.dict(os.environ, edge_local_debug_environment)
mock_open.side_effect = error
with pytest.raises(ValueError):
client_class.create_from_edge_environment()
@pytest.mark.describe("IoTHubModuleClient (Synchronous) - .create_from_x509_certificate()")
class TestIoTHubModuleClientCreateFromX509Certificate(IoTHubModuleClientTestsConfig):
hostname = "durmstranginstitute.farend"
device_id = "MySnitch"
module_id = "Charms"
@pytest.mark.it("Uses the provided arguments to create a X509AuthenticationProvider")
def test_auth_provider_creation(self, mocker, client_class, x509):
mock_auth_init = mocker.patch("azure.iot.device.iothub.auth.X509AuthenticationProvider")
client_class.create_from_x509_certificate(
x509=x509, hostname=self.hostname, device_id=self.device_id, module_id=self.module_id
)
assert mock_auth_init.call_count == 1
assert mock_auth_init.call_args == mocker.call(
x509=x509, hostname=self.hostname, device_id=self.device_id, module_id=self.module_id
)
@pytest.mark.it("Uses the X509AuthenticationProvider to create an IoTHubPipeline")
def test_pipeline_creation(self, mocker, client_class, x509):
mock_auth = mocker.patch(
"azure.iot.device.iothub.auth.X509AuthenticationProvider"
).return_value
mock_pipeline_init = mocker.patch("azure.iot.device.iothub.pipeline.IoTHubPipeline")
client_class.create_from_x509_certificate(
x509=x509, hostname=self.hostname, device_id=self.device_id, module_id=self.module_id
)
assert mock_pipeline_init.call_count == 1
assert mock_pipeline_init.call_args == mocker.call(mock_auth)
@pytest.mark.it("Uses the IoTHubPipeline to instantiate the client")
def test_client_instantiation(self, mocker, client_class, x509):
mock_pipeline = mocker.patch("azure.iot.device.iothub.pipeline.IoTHubPipeline").return_value
spy_init = mocker.spy(client_class, "__init__")
client_class.create_from_x509_certificate(
x509=x509, hostname=self.hostname, device_id=self.device_id, module_id=self.module_id
)
assert spy_init.call_count == 1
assert spy_init.call_args == mocker.call(mocker.ANY, mock_pipeline)
@pytest.mark.it("Returns the instantiated client")
def test_returns_client(self, mocker, client_class, x509):
client = client_class.create_from_x509_certificate(
x509=x509, hostname=self.hostname, device_id=self.device_id, module_id=self.module_id
)
assert isinstance(client, client_class)
@pytest.mark.describe("IoTHubModuleClient (Synchronous) - .connect()")
class TestIoTHubModuleClientConnect(IoTHubModuleClientTestsConfig, SharedClientConnectTests):
pass
@pytest.mark.describe("IoTHubModuleClient (Synchronous) - .disconnect()")
class TestIoTHubModuleClientDisconnect(IoTHubModuleClientTestsConfig, SharedClientDisconnectTests):
pass
@pytest.mark.describe("IoTHubModuleClient (Synchronous) - EVENT: Disconnect")
class TestIoTHubModuleClientDisconnectEvent(
IoTHubModuleClientTestsConfig, SharedClientDisconnectEventTests
):
pass
@pytest.mark.describe("IoTHubModuleClient (Synchronous) - .send_message()")
class TestIoTHubModuleClientSendD2CMessage(
IoTHubModuleClientTestsConfig, SharedClientSendD2CMessageTests
):
pass
@pytest.mark.describe("IoTHubModuleClient (Synchronous) - .send_message_to_output()")
class TestIoTHubModuleClientSendToOutput(IoTHubModuleClientTestsConfig, WaitsForEventCompletion):
@pytest.mark.it("Begins a 'send_output_event' pipeline operation")
def test_calls_pipeline_send_message_to_output(self, client, iothub_pipeline, message):
output_name = "some_output"
client.send_message_to_output(message, output_name)
assert iothub_pipeline.send_output_event.call_count == 1
assert iothub_pipeline.send_output_event.call_args[0][0] is message
assert message.output_name == output_name
@pytest.mark.it(
"Waits for the completion of the 'send_output_event' pipeline operation before returning"
)
def test_waits_for_pipeline_op_completion(
self, mocker, client_manual_cb, iothub_pipeline_manual_cb, message
):
self.add_event_completion_checks(
mocker=mocker, pipeline_function=iothub_pipeline_manual_cb.send_output_event
)
output_name = "some_output"
client_manual_cb.send_message_to_output(message, output_name)
@pytest.mark.it(
"Wraps 'message' input parameter in Message object if it is not a Message object"
)
@pytest.mark.parametrize(
"message_input",
[
pytest.param("message", id="String input"),
pytest.param(222, id="Integer input"),
pytest.param(object(), id="Object input"),
pytest.param(None, id="None input"),
pytest.param([1, "str"], id="List input"),
pytest.param({"a": 2}, id="Dictionary input"),
],
)
def test_send_message_to_output_calls_pipeline_wraps_data_in_message(
self, client, iothub_pipeline, message_input
):
output_name = "some_output"
client.send_message_to_output(message_input, output_name)
assert iothub_pipeline.send_output_event.call_count == 1
sent_message = iothub_pipeline.send_output_event.call_args[0][0]
assert isinstance(sent_message, Message)
assert sent_message.data == message_input
@pytest.mark.describe("IoTHubModuleClient (Synchronous) - .receive_message_on_input()")
class TestIoTHubModuleClientReceiveInputMessage(IoTHubModuleClientTestsConfig):
@pytest.mark.it("Implicitly enables input messaging feature if not already enabled")
def test_enables_input_messaging_only_if_not_already_enabled(
self, mocker, client, iothub_pipeline
):
mocker.patch.object(
SyncClientInbox, "get"
        ) # patch this so receive_message_on_input won't block
input_name = "some_input"
# Verify Input Messaging enabled if not enabled
iothub_pipeline.feature_enabled.__getitem__.return_value = (
False
) # Input Messages will appear disabled
client.receive_message_on_input(input_name)
assert iothub_pipeline.enable_feature.call_count == 1
assert iothub_pipeline.enable_feature.call_args[0][0] == constant.INPUT_MSG
iothub_pipeline.enable_feature.reset_mock()
# Verify Input Messaging not enabled if already enabled
iothub_pipeline.feature_enabled.__getitem__.return_value = (
True
) # Input Messages will appear enabled
client.receive_message_on_input(input_name)
assert iothub_pipeline.enable_feature.call_count == 0
@pytest.mark.it("Returns a message from the input inbox, if available")
def test_returns_message_from_input_inbox(self, mocker, client, message):
inbox_mock = mocker.MagicMock(autospec=SyncClientInbox)
inbox_mock.get.return_value = message
manager_get_inbox_mock = mocker.patch.object(
client._inbox_manager, "get_input_message_inbox", return_value=inbox_mock
)
input_name = "some_input"
received_message = client.receive_message_on_input(input_name)
assert manager_get_inbox_mock.call_count == 1
assert manager_get_inbox_mock.call_args == mocker.call(input_name)
assert inbox_mock.get.call_count == 1
assert received_message is message
@pytest.mark.it("Can be called in various modes")
@pytest.mark.parametrize(
"block,timeout",
[
pytest.param(True, None, id="Blocking, no timeout"),
pytest.param(True, 10, id="Blocking with timeout"),
pytest.param(False, None, id="Nonblocking"),
],
)
def test_can_be_called_in_mode(self, mocker, client, block, timeout):
inbox_mock = mocker.MagicMock(autospec=SyncClientInbox)
mocker.patch.object(
client._inbox_manager, "get_input_message_inbox", return_value=inbox_mock
)
input_name = "some_input"
client.receive_message_on_input(input_name, block=block, timeout=timeout)
assert inbox_mock.get.call_count == 1
assert inbox_mock.get.call_args == mocker.call(block=block, timeout=timeout)
@pytest.mark.it("Defaults to blocking mode with no timeout")
def test_default_mode(self, mocker, client):
inbox_mock = mocker.MagicMock(autospec=SyncClientInbox)
mocker.patch.object(
client._inbox_manager, "get_input_message_inbox", return_value=inbox_mock
)
input_name = "some_input"
client.receive_message_on_input(input_name)
assert inbox_mock.get.call_count == 1
assert inbox_mock.get.call_args == mocker.call(block=True, timeout=None)
@pytest.mark.it("Blocks until a message is available, in blocking mode")
def test_no_message_in_inbox_blocking_mode(self, client, message):
input_name = "some_input"
input_inbox = client._inbox_manager.get_input_message_inbox(input_name)
assert input_inbox.empty()
def insert_item_after_delay():
time.sleep(0.01)
input_inbox._put(message)
insertion_thread = threading.Thread(target=insert_item_after_delay)
insertion_thread.start()
received_message = client.receive_message_on_input(input_name, block=True)
assert received_message is message
# This proves that the blocking happens because 'received_message' can't be
# 'message' until after a 10 millisecond delay on the insert. But because the
# 'received_message' IS 'message', it means that client.receive_message_on_input
# did not return until after the delay.
@pytest.mark.it(
"Raises InboxEmpty exception after a timeout while blocking, in blocking mode with a specified timeout"
)
def test_times_out_waiting_for_message_blocking_mode(self, client):
input_name = "some_input"
with pytest.raises(InboxEmpty):
client.receive_message_on_input(input_name, block=True, timeout=0.01)
@pytest.mark.it(
"Raises InboxEmpty exception immediately if there are no messages, in nonblocking mode"
)
def test_no_message_in_inbox_nonblocking_mode(self, client):
input_name = "some_input"
with pytest.raises(InboxEmpty):
client.receive_message_on_input(input_name, block=False)
@pytest.mark.describe("IoTHubModuleClient (Synchronous) - .receive_method_request()")
class TestIoTHubModuleClientReceiveMethodRequest(
IoTHubModuleClientTestsConfig, SharedClientReceiveMethodRequestTests
):
pass
@pytest.mark.describe("IoTHubModuleClient (Synchronous) - .send_method_response()")
class TestIoTHubModuleClientSendMethodResponse(
IoTHubModuleClientTestsConfig, SharedClientSendMethodResponseTests
):
pass
@pytest.mark.describe("IoTHubModuleClient (Synchronous) - .get_twin()")
class TestIoTHubModuleClientGetTwin(IoTHubModuleClientTestsConfig, SharedClientGetTwinTests):
pass
@pytest.mark.describe("IoTHubModuleClient (Synchronous) - .patch_twin_reported_properties()")
class TestIoTHubModuleClientPatchTwinReportedProperties(
IoTHubModuleClientTestsConfig, SharedClientPatchTwinReportedPropertiesTests
):
pass
@pytest.mark.describe("IoTHubModuleClient (Synchronous) - .receive_twin_desired_properties_patch()")
class TestIoTHubModuleClientReceiveTwinDesiredPropertiesPatch(
IoTHubModuleClientTestsConfig, SharedClientReceiveTwinDesiredPropertiesPatchTests
):
pass
####################
# HELPER FUNCTIONS #
####################
def merge_dicts(d1, d2):
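    # Returns a new dict combining d1 and d2; on key conflicts d2 wins,
    # e.g. merge_dicts({'a': 1}, {'a': 2, 'b': 3}) == {'a': 2, 'b': 3}.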
d3 = d1.copy()
d3.update(d2)
return d3
|
weixin.py
|
#!/usr/bin/env python
# coding: utf-8
import qrcode
from pyqrcode import QRCode
import urllib.request, urllib.parse, urllib.error
import http.cookiejar
import requests
import xml.dom.minidom
import json
import time
import datetime
import ssl
import re
import sys
import os
import subprocess
import random
import multiprocessing
import platform
import logging
import hashlib
import http.client
from collections import defaultdict
from urllib.parse import urlparse
from lxml import html
from socket import timeout as timeout_error
# import pdb
# for media upload
import mimetypes
# from requests_toolbelt.multipart.encoder import MultipartEncoder
from requests_toolbelt import *
from wxbot_demo_py3.save_message import MysqlDao
from PIL import Image
from PIL import ImageDraw
from PIL import ImageFont
def catchKeyboardInterrupt(fn):
def wrapper(*args):
try:
return fn(*args)
except KeyboardInterrupt:
            print('\n[*] Forced exit')
            logging.debug('[*] Forced exit')
return wrapper
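# NOTE: The two _decode_* helpers below appear to be carried over from a Python 2
# version of this script. Under Python 3 they convert str values to UTF-8 bytes,
# so only apply them where bytes output is actually desired.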
def _decode_list(data):
rv = []
for item in data:
if isinstance(item, str):
item = item.encode('utf-8')
elif isinstance(item, list):
item = _decode_list(item)
elif isinstance(item, dict):
item = _decode_dict(item)
rv.append(item)
return rv
def _decode_dict(data):
rv = {}
for key, value in data.items():
if isinstance(key, str):
key = key.encode('utf-8')
if isinstance(value, str):
value = value.encode('utf-8')
elif isinstance(value, list):
value = _decode_list(value)
elif isinstance(value, dict):
value = _decode_dict(value)
rv[key] = value
return rv
class WebWeixin(object):
def __str__(self):
description = \
"=========================\n" + \
"[#] Web Weixin\n" + \
"[#] Debug Mode: " + str(self.DEBUG) + "\n" + \
"[#] Uuid: " + self.uuid + "\n" + \
"[#] Uin: " + str(self.uin) + "\n" + \
"[#] Sid: " + self.sid + "\n" + \
"[#] Skey: " + self.skey + "\n" + \
"[#] DeviceId: " + self.deviceId + "\n" + \
"[#] PassTicket: " + self.pass_ticket + "\n" + \
"========================="
return description
def __init__(self):
self.DEBUG = False
        self.commandLineQRCode = False # toggle: render the login QR code in the console
self.uuid = ''
self.base_uri = ''
self.redirect_uri = ''
self.uin = ''
self.sid = ''
self.skey = ''
self.pass_ticket = ''
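        # Pseudo device id: 'e' followed by 15 random digits, taken from the decimal
        # expansion of a random float.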
self.deviceId = 'e' + repr(random.random())[2:17]
self.BaseRequest = {}
self.synckey = ''
self.SyncKey = []
self.User = []
self.MemberList = []
        self.ContactList = [] # friends
        self.GroupList = [] # groups
        self.GroupMemeberList = [] # group members
        self.PublicUsersList = [] # official/service accounts
        self.SpecialUsersList = [] # special accounts
self.autoReplyMode = False
self.syncHost = ''
self.user_agent = 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_11_3) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/48.0.2564.109 Safari/537.36'
self.interactive = False
self.autoOpen = False
self.saveFolder = os.path.join(os.getcwd(), 'saved')
self.saveSubFolders = {'webwxgeticon': 'icons', 'webwxgetheadimg': 'headimgs', 'webwxgetmsgimg': 'msgimgs',
'webwxgetvideo': 'videos', 'webwxgetvoice': 'voices', '_showQRCodeImg': 'qrcodes'}
self.appid = 'wx782c26e4c19acffb'
self.lang = 'zh_CN'
self.lastCheckTs = time.time()
        self.MemberCount = 0
        self.SpecialUsers = ['newsapp', 'fmessage', 'filehelper', 'weibo', 'qqmail', 'tmessage', 'qmessage', 'qqsync', 'floatbottle', 'lbsapp', 'shakeapp', 'medianote', 'qqfriend', 'readerapp', 'blogapp', 'facebookapp', 'masssendapp', 'meishiapp', 'feedsapp',
                             'voip', 'blogappweixin', 'weixin', 'brandsessionholder', 'weixinreminder', 'wxid_novlwrv3lqwv11', 'gh_22b87fa7cb3c', 'officialaccounts', 'notification_messages', 'wxitil', 'userexperience_alarm']
        self.TimeOut = 20  # minimum sync interval (seconds)
self.media_count = -1
self.cookie = http.cookiejar.CookieJar()
opener = urllib.request.build_opener(urllib.request.HTTPCookieProcessor(self.cookie))
opener.addheaders = [('User-agent', self.user_agent)]
urllib.request.install_opener(opener)
    def loadConfig(self, config):
        if config.get('DEBUG'):
            self.DEBUG = config['DEBUG']
        if config.get('autoReplyMode'):
            self.autoReplyMode = config['autoReplyMode']
        if config.get('user_agent'):
            self.user_agent = config['user_agent']
        if config.get('interactive'):
            self.interactive = config['interactive']
        if config.get('autoOpen'):
            self.autoOpen = config['autoOpen']
def getUUID(self):
url = 'https://login.weixin.qq.com/jslogin'
params = {
'appid': self.appid,
'fun': 'new',
'lang': self.lang,
'_': int(time.time()),
}
# r = requests.get(url=url, params=params)
# r.encoding = 'utf-8'
# data = r.text
data = self._post(url, params, False).decode("utf-8")
if data == '':
return False
regx = r'window.QRLogin.code = (\d+); window.QRLogin.uuid = "(\S+?)"'
pm = re.search(regx, data)
if pm:
code = pm.group(1)
self.uuid = pm.group(2)
return code == '200'
return False
def genQRCode(self):
# return self._showQRCodeImg()
if sys.platform.startswith('win'):
self._showQRCodeImg('win')
elif sys.platform.find('darwin') >= 0:
self._showQRCodeImg('macos')
else:
self._str2qr('https://login.weixin.qq.com/l/' + self.uuid)
    # Show the login QR code (two modes: render in the command line, or save to a file and open it)
    def _showQRCodeImg(self, platform_name):
        if self.commandLineQRCode:
            qrCode = QRCode('https://login.weixin.qq.com/l/' + self.uuid)
            self._showCommandLineQRCode(qrCode.text(1))
        else:
            url = 'https://login.weixin.qq.com/qrcode/' + self.uuid
            params = {
                't': 'webwx',
                '_': int(time.time())
            }
            data = self._post(url, params, False)
            if data == '':
                return
            QRCODE_PATH = self._saveFile('qrcode.jpg', data, '_showQRCodeImg')
            if platform_name == 'win':
                os.startfile(QRCODE_PATH)
            elif platform_name == 'macos':
                subprocess.call(["open", QRCODE_PATH])
            else:
                return
    # render the QR code in the command line
def _showCommandLineQRCode(self, qr_data, enableCmdQR=2):
try:
b = u'\u2588'
sys.stdout.write(b + '\r')
sys.stdout.flush()
except UnicodeEncodeError:
white = 'MM'
else:
white = b
black = ' '
blockCount = int(enableCmdQR)
if abs(blockCount) == 0:
blockCount = 1
white *= abs(blockCount)
if blockCount < 0:
white, black = black, white
sys.stdout.write(' ' * 50 + '\r')
sys.stdout.flush()
qr = qr_data.replace('0', white).replace('1', black)
sys.stdout.write(qr)
sys.stdout.flush()
def waitForLogin(self, tip=1):
time.sleep(tip)
url = 'https://login.weixin.qq.com/cgi-bin/mmwebwx-bin/login?tip=%s&uuid=%s&_=%s' % (
tip, self.uuid, int(time.time()))
data = self._get(url)
if data == '':
return False
        pm = re.search(r"window.code=(\d+);", data)
        if not pm:
            return False
        code = pm.group(1)
if code == '201':
return True
elif code == '200':
pm = re.search(r'window.redirect_uri="(\S+?)";', data)
r_uri = pm.group(1) + '&fun=new'
self.redirect_uri = r_uri
self.base_uri = r_uri[:r_uri.rfind('/')]
return True
        elif code == '408':
            self._echo('[Login timed out] \n')
        else:
            self._echo('[Login error] \n')
        return False
def login(self):
data = self._get(self.redirect_uri)
if data == '':
return False
doc = xml.dom.minidom.parseString(data)
root = doc.documentElement
for node in root.childNodes:
if node.nodeName == 'skey':
self.skey = node.childNodes[0].data
elif node.nodeName == 'wxsid':
self.sid = node.childNodes[0].data
elif node.nodeName == 'wxuin':
self.uin = node.childNodes[0].data
elif node.nodeName == 'pass_ticket':
self.pass_ticket = node.childNodes[0].data
if '' in (self.skey, self.sid, self.uin, self.pass_ticket):
return False
self.BaseRequest = {
'Uin': int(self.uin),
'Sid': self.sid,
'Skey': self.skey,
'DeviceID': self.deviceId,
}
return True
def webwxinit(self):
url = self.base_uri + '/webwxinit?pass_ticket=%s&skey=%s&r=%s' % (
self.pass_ticket, self.skey, int(time.time()))
params = {
'BaseRequest': self.BaseRequest
}
dic = self._post(url, params)
if dic == '':
return False
self.SyncKey = dic['SyncKey']
self.User = dic['User']
# synckey for synccheck
self.synckey = '|'.join(
[str(keyVal['Key']) + '_' + str(keyVal['Val']) for keyVal in self.SyncKey['List']])
return dic['BaseResponse']['Ret'] == 0
def webwxstatusnotify(self):
url = self.base_uri + \
'/webwxstatusnotify?lang=zh_CN&pass_ticket=%s' % (self.pass_ticket)
params = {
'BaseRequest': self.BaseRequest,
"Code": 3,
"FromUserName": self.User['UserName'],
"ToUserName": self.User['UserName'],
"ClientMsgId": int(time.time())
}
dic = self._post(url, params)
if dic == '':
return False
return dic['BaseResponse']['Ret'] == 0
    # fetch the contact list
def webwxgetcontact(self):
SpecialUsers = self.SpecialUsers
url = self.base_uri + '/webwxgetcontact?pass_ticket=%s&skey=%s&r=%s' % (
self.pass_ticket, self.skey, int(time.time()))
dic = self._post(url, {})
if dic == '':
return False
self.MemberCount = dic['MemberCount']
self.MemberList = dic['MemberList']
ContactList = self.MemberList[:]
GroupList = self.GroupList[:]
PublicUsersList = self.PublicUsersList[:]
SpecialUsersList = self.SpecialUsersList[:]
for i in range(len(ContactList) - 1, -1, -1):
Contact = ContactList[i]
            if Contact['VerifyFlag'] & 8 != 0:  # official/service account
                ContactList.remove(Contact)
                self.PublicUsersList.append(Contact)
            elif Contact['UserName'] in SpecialUsers:  # special account
                ContactList.remove(Contact)
                self.SpecialUsersList.append(Contact)
            elif '@@' in Contact['UserName']:  # group chat
                ContactList.remove(Contact)
                self.GroupList.append(Contact)
            elif Contact['UserName'] == self.User['UserName']:  # self
                ContactList.remove(Contact)
self.ContactList = ContactList
return True
def webwxbatchgetcontact(self):
url = self.base_uri + \
'/webwxbatchgetcontact?type=ex&r=%s&pass_ticket=%s' % (
int(time.time()), self.pass_ticket)
params = {
'BaseRequest': self.BaseRequest,
"Count": len(self.GroupList),
"List": [{"UserName": g['UserName'], "EncryChatRoomId":""} for g in self.GroupList]
}
dic = self._post(url, params)
if dic == '':
return False
        # only ContactList and Count are used from the response
ContactList = dic['ContactList']
ContactCount = dic['Count']
self.GroupList = ContactList
for i in range(len(ContactList) - 1, -1, -1):
Contact = ContactList[i]
MemberList = Contact['MemberList']
for member in MemberList:
self.GroupMemeberList.append(member)
return True
def getNameById(self, id):
url = self.base_uri + \
'/webwxbatchgetcontact?type=ex&r=%s&pass_ticket=%s' % (
int(time.time()), self.pass_ticket)
params = {
'BaseRequest': self.BaseRequest,
"Count": 1,
"List": [{"UserName": id, "EncryChatRoomId": ""}]
}
dic = self._post(url, params)
if dic == '':
return None
        # only the ContactList field is used from the response
return dic['ContactList']
def testsynccheck(self):
SyncHost = ['wx2.qq.com',
'webpush.wx2.qq.com',
'wx8.qq.com',
'webpush.wx8.qq.com',
'qq.com',
'webpush.wx.qq.com',
'web2.wechat.com',
'webpush.web2.wechat.com',
'wechat.com',
'webpush.web.wechat.com',
'webpush.weixin.qq.com',
'webpush.wechat.com',
'webpush1.wechat.com',
'webpush2.wechat.com',
'webpush.wx.qq.com',
'webpush2.wx.qq.com']
for host in SyncHost:
self.syncHost = host
[retcode, selector] = self.synccheck()
if retcode == '0':
return True
return False
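    # synccheck long-polls the chosen sync host. Its response carries two
    # fields that listenMsgMode() dispatches on: retcode ('0' OK, '1100'/'1101'
    # logged out on the phone or elsewhere) and selector ('0' nothing new,
    # '2' new message, '7' activity on the phone).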
def synccheck(self):
params = {
'r': int(time.time()),
'sid': self.sid,
'uin': self.uin,
'skey': self.skey,
'deviceid': self.deviceId,
'synckey': self.synckey,
'_': int(time.time()),
}
url = 'https://' + self.syncHost + '/cgi-bin/mmwebwx-bin/synccheck?' + urllib.parse.urlencode(params)
data = self._get(url, timeout=5)
if data == '':
return [-1, -1]
pm = re.search(
r'window.synccheck={retcode:"(\d+)",selector:"(\d+)"}', data)
retcode = pm.group(1)
selector = pm.group(2)
return [retcode, selector]
def webwxsync(self):
url = self.base_uri + \
'/webwxsync?sid=%s&skey=%s&pass_ticket=%s' % (
self.sid, self.skey, self.pass_ticket)
params = {
'BaseRequest': self.BaseRequest,
'SyncKey': self.SyncKey,
'rr': ~int(time.time())
}
dic = self._post(url, params)
if dic == '':
return None
        if self.DEBUG:
            print(json.dumps(dic, indent=4))
            logging.debug(json.dumps(dic, indent=4))
if dic['BaseResponse']['Ret'] == 0:
self.SyncKey = dic['SyncKey']
self.synckey = '|'.join(
[str(keyVal['Key']) + '_' + str(keyVal['Val']) for keyVal in self.SyncKey['List']])
return dic
def webwxsendmsg(self, word, to='filehelper'):
url = self.base_uri + \
'/webwxsendmsg?pass_ticket=%s' % (self.pass_ticket)
clientMsgId = str(int(time.time() * 1000)) + \
str(random.random())[:5].replace('.', '')
params = {
'BaseRequest': self.BaseRequest,
'Msg': {
"Type": 1,
"Content": self._transcoding(word),
"FromUserName": self.User['UserName'],
"ToUserName": to,
"LocalID": clientMsgId,
"ClientMsgId": clientMsgId
}
}
headers = {'content-type': 'application/json; charset=UTF-8'}
data = json.dumps(params, ensure_ascii=False).encode('utf8')
r = requests.post(url, data=data, headers=headers)
dic = r.json()
return dic['BaseResponse']['Ret'] == 0
def webwxuploadmedia(self, image_name, self_id=None, group_id=None):
if not os.path.exists(image_name):
            print("[Error] file does not exist")
return None
url = 'https://file.wx2.qq.com/cgi-bin/mmwebwx-bin/webwxuploadmedia?f=json'
get_result = requests.get(url).json()
        # upload counter
        self.media_count = self.media_count + 1
        # file name
        file_name = image_name
        # MIME type
        # mime_type = application/pdf, image/jpeg, image/png, etc.
        mime_type = mimetypes.guess_type(image_name, strict=False)[0]
        # Media type as WeChat classifies it; the server appears to accept only
        # two kinds: 'pic' (shown inline) and 'doc' (shown as a file attachment).
        media_type = 'pic' if mime_type.split('/')[0] == 'image' else 'doc'
        # last-modified date
        lastModifieDate = 'Thu Mar 17 2016 00:55:10 GMT+0800 (CST)'
        # file size
        file_size = os.path.getsize(file_name)
        # file MD5
        with open(file_name, 'rb') as md5file:
            md5 = hashlib.md5(md5file.read()).hexdigest()
# PassTicket
pass_ticket = self.pass_ticket
# clientMediaId
client_media_id = str(int(time.time() * 1000)) + \
str(random.random())[:5].replace('.', '')
# webwx_data_ticket
webwx_data_ticket = ''
for item in self.cookie:
if item.name == 'webwx_data_ticket':
webwx_data_ticket = item.value
break
        if webwx_data_ticket == '':
            logging.error('webwx_data_ticket cookie not found')
            return None
        uploadmediarequest = json.dumps({
            "UploadType": 2,  # lijiyang add at 18-02-01
            "FromUserName": self_id,  # sender id
            "ToUserName": group_id,  # recipient id
            "FileMd5": md5,  # MD5 of the file
            "BaseRequest": self.BaseRequest,
            "ClientMediaId": client_media_id,
            "TotalLen": file_size,
            "StartPos": 0,
            "DataLen": file_size,
            "MediaType": 4
        }, ensure_ascii=False).encode('utf8')
multipart_encoder = MultipartEncoder(
fields={
'id': 'WU_FILE_' + str(self.media_count),
'name': "a1.jpg",
'type': mime_type,
'lastModifieDate': lastModifieDate,
'size': str(file_size),
'mediatype': media_type,
'uploadmediarequest': uploadmediarequest,
'webwx_data_ticket': webwx_data_ticket,
'pass_ticket': pass_ticket,
'filename': ("a1.jpg", open(file_name, 'rb'), mime_type.split('/')[1])
},
boundary='-----------------------------1575017231431605357584454111'
)
headers = {
'Host': 'file.wx2.qq.com',
'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/63.0.3239.132 Safari/537.36',
'Accept': '*/*',
'Accept-Language': 'en-US,en;q=0.5',
'Accept-Encoding': 'gzip, deflate',
'Referer': 'https://wx2.qq.com/',
'Content-Type': multipart_encoder.content_type,
'Origin': 'https://wx2.qq.com',
'Connection': 'keep-alive',
'Pragma': 'no-cache',
'Cache-Control': 'no-cache'
}
r = requests.post(url, data=multipart_encoder, headers=headers)
response_json = r.json()
if response_json['BaseResponse']['Ret'] == 0:
return response_json
return None
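    # The dict returned above includes a 'MediaId' field, which
    # webwxsendmsgimg() and webwxsendmsgemotion() below pass along when
    # sending the uploaded media.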
def webwxsendmsgimg(self, user_id, media_id):
url = 'https://wx2.qq.com/cgi-bin/mmwebwx-bin/webwxsendmsgimg?fun=async&f=json&pass_ticket=%s' % self.pass_ticket
clientMsgId = str(int(time.time() * 1000)) + \
str(random.random())[:5].replace('.', '')
data_json = {
"BaseRequest": self.BaseRequest,
"Msg": {
"Type": 3,
"MediaId": media_id,
"FromUserName": self.User['UserName'],
"ToUserName": user_id,
"LocalID": clientMsgId,
"ClientMsgId": clientMsgId
}
}
headers = {'content-type': 'application/json; charset=UTF-8'}
data = json.dumps(data_json, ensure_ascii=False).encode('utf8')
r = requests.post(url, data=data, headers=headers)
dic = r.json()
return dic['BaseResponse']['Ret'] == 0
def webwxsendmsgemotion(self, user_id, media_id):
url = 'https://wx2.qq.com/cgi-bin/mmwebwx-bin/webwxsendemoticon?fun=sys&f=json&pass_ticket=%s' % self.pass_ticket
clientMsgId = str(int(time.time() * 1000)) + \
str(random.random())[:5].replace('.', '')
data_json = {
"BaseRequest": self.BaseRequest,
"Msg": {
"Type": 47,
"EmojiFlag": 2,
"MediaId": media_id,
"FromUserName": self.User['UserName'],
"ToUserName": user_id,
"LocalID": clientMsgId,
"ClientMsgId": clientMsgId
}
}
headers = {'content-type': 'application/json; charset=UTF-8'}
data = json.dumps(data_json, ensure_ascii=False).encode('utf8')
r = requests.post(url, data=data, headers=headers)
dic = r.json()
if self.DEBUG:
print(json.dumps(dic, indent=4))
logging.debug(json.dumps(dic, indent=4))
return dic['BaseResponse']['Ret'] == 0
def _saveFile(self, filename, data, api=None):
fn = filename
if self.saveSubFolders[api]:
dirName = os.path.join(self.saveFolder, self.saveSubFolders[api])
if not os.path.exists(dirName):
os.makedirs(dirName)
fn = os.path.join(dirName, filename)
logging.debug('Saved file: %s' % fn)
with open(fn, 'wb') as f:
f.write(data)
f.close()
return fn
def webwxgeticon(self, id):
url = self.base_uri + \
'/webwxgeticon?username=%s&skey=%s' % (id, self.skey)
data = self._get(url)
if data == '':
return ''
fn = 'img_' + id + '.jpg'
return self._saveFile(fn, data, 'webwxgeticon')
def webwxgetheadimg(self, id):
url = self.base_uri + \
'/webwxgetheadimg?username=%s&skey=%s' % (id, self.skey)
data = self._get(url)
if data == '':
return ''
fn = 'img_' + id + '.jpg'
return self._saveFile(fn, data, 'webwxgetheadimg')
def webwxgetmsgimg(self, msgid):
url = self.base_uri + \
'/webwxgetmsgimg?MsgID=%s&skey=%s' % (msgid, self.skey)
data = self._get(url, api='webwxgetmsgimg')
if data == '':
return ''
fn = 'img_' + msgid + '.jpg'
return self._saveFile(fn, data, 'webwxgetmsgimg')
    # Not working for now, as web WeChat no longer supports this API
def webwxgetvideo(self, msgid):
url = self.base_uri + \
'/webwxgetvideo?msgid=%s&skey=%s' % (msgid, self.skey)
data = self._get(url, api='webwxgetvideo')
if data == '':
return ''
fn = 'video_' + msgid + '.mp4'
return self._saveFile(fn, data, 'webwxgetvideo')
def webwxgetvoice(self, msgid):
url = self.base_uri + \
'/webwxgetvoice?msgid=%s&skey=%s' % (msgid, self.skey)
data = self._get(url, api='webwxgetvoice')
if data == '':
return ''
fn = 'voice_' + msgid + '.mp3'
return self._saveFile(fn, data, 'webwxgetvoice')
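    # The webwxget* helpers above share one pattern: fetch the raw media bytes
    # via _get() with a Range header, then let _saveFile() route the file into
    # the matching saveSubFolders entry.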
    def getGroupName(self, id):
        name = 'Unknown group'
        for member in self.GroupList:
            if member['UserName'] == id:
                name = member['NickName']
        if name == 'Unknown group':
            # not found among the known groups
            GroupList = self.getNameById(id)
            for group in GroupList:
                self.GroupList.append(group)
                if group['UserName'] == id:
                    name = group['NickName']
                MemberList = group['MemberList']
                for member in MemberList:
                    self.GroupMemeberList.append(member)
        return name
    def getUserRemarkName(self, id):
        name = 'Unknown group' if id[:2] == '@@' else 'Stranger'
        if id == self.User['UserName']:
            return self.User['NickName']  # self
        if id[:2] == '@@':
            # group chat
            name = self.getGroupName(id)
        else:
            # special accounts
            for member in self.SpecialUsersList:
                if member['UserName'] == id:
                    name = member['RemarkName'] if member[
                        'RemarkName'] else member['NickName']
            # official or service accounts
            for member in self.PublicUsersList:
                if member['UserName'] == id:
                    name = member['RemarkName'] if member[
                        'RemarkName'] else member['NickName']
            # direct contacts
            for member in self.ContactList:
                if member['UserName'] == id:
                    name = member['RemarkName'] if member[
                        'RemarkName'] else member['NickName']
            # group members
            for member in self.GroupMemeberList:
                if member['UserName'] == id:
                    name = member['DisplayName'] if member[
                        'DisplayName'] else member['NickName']
        if name == 'Unknown group' or name == 'Stranger':
            logging.debug(id)
        return name
def getUSerID(self, name):
for member in self.MemberList:
if name == member['RemarkName'] or name == member['NickName']:
return member['UserName']
return None
def _showMsg(self, message):
srcName = None
dstName = None
groupName = None
content = None
msgtype = message['raw_msg']['MsgType']
msg = message
logging.debug(msg)
if msg['raw_msg']:
srcName = self.getUserRemarkName(msg['raw_msg']['FromUserName'])
dstName = self.getUserRemarkName(msg['raw_msg']['ToUserName'])
content = msg['raw_msg']['Content'].replace('<', '<').replace('>', '>')
message_id = msg['raw_msg']['MsgId']
            # location message
            if content.find('http://weixin.qq.com/cgi-bin/redirectforward?args=') != -1:
                data = self._get(content)
                if data == '':
                    return
                pos = self._searchContent('title', data, 'xml')
                tree = html.fromstring(data)
                url = tree.xpath('//html/body/div/img')[0].attrib['src']
                for item in urlparse(url).query.split('&'):
                    if item.split('=')[0] == 'center':
                        loc = item.split('=')[-1]
                content = '%s sent a location message - I am at [%s](%s) @ %s' % (
                    srcName, pos, url, loc)
            # File Transfer Assistant
            if msg['raw_msg']['ToUserName'] == 'filehelper':
                dstName = 'File Transfer Assistant'
            if msg['raw_msg']['FromUserName'][:2] == '@@':
                # message received from a group chat
                if ":<br/>" in content:
                    [people, content] = content.split(':<br/>', 1)
                    groupName = srcName
                    srcName = self.getUserRemarkName(people)
                    dstName = 'GROUP'
                else:
                    groupName = srcName
                    srcName = 'SYSTEM'
            elif msg['raw_msg']['ToUserName'][:2] == '@@':
                # message sent by me to a group chat
                groupName = dstName
                dstName = 'GROUP'
            # received a red packet (the literal below matches WeChat's own system text)
            if content == '收到红包,请在手机上查看':
                msg['message'] = content
            # a pre-formatted message body was supplied
            if 'message' in list(msg.keys()):
                content = msg['message']
if groupName is not None:
print('%s |%s| %s -> %s: %s' % (message_id, groupName.strip(), srcName.strip(), dstName.strip(), content.replace('<br/>', '\n')))
logging.info('%s |%s| %s -> %s: %s' % (message_id, groupName.strip(),
srcName.strip(), dstName.strip(), content.replace('<br/>', '\n')))
else:
print('%s %s -> %s: %s' % (message_id, srcName.strip(), dstName.strip(), content.replace('<br/>', '\n')))
logging.info('%s %s -> %s: %s' % (message_id, srcName.strip(),
dstName.strip(), content.replace('<br/>', '\n')))
        # jiyang.li: persist text messages to MySQL (MysqlDao is already imported at module level)
        dao = MysqlDao()
        dao.openConn()
        cursor = dao.conn.cursor()
        msg = content.replace('<br/>', '\n')  # strip the <br/> markup
sql = 'INSERT INTO wx_message (msgid, sender, groupName, message, msgType,receive_time) VALUES (%s, %s, %s, %s, %s, %s)'
if groupName is None:
cursor.execute(sql, (message_id, srcName.strip(), '', msg, msgtype, datetime.datetime.now()))
else:
cursor.execute(sql, (message_id, srcName.strip(), groupName.strip(), msg, msgtype, datetime.datetime.now()))
dao.conn.commit()
cursor.close()
dao.closeConn()
def handleMsg(self, r):
for msg in r['AddMsgList']:
            print('[*] You have a new message')
            # logging.debug('[*] You have a new message')
            if self.DEBUG:
                fn = 'msg' + str(int(random.random() * 1000)) + '.json'
                with open(fn, 'w') as f:
                    f.write(json.dumps(msg))
                print('[*] Message saved to file: ' + fn)
                logging.debug('[*] Message saved to file: %s' % (fn))
msgType = msg['MsgType']
name = self.getUserRemarkName(msg['FromUserName'])
content = msg['Content'].replace('<', '<').replace('>', '>')
msgid = msg['MsgId']
            if msgType == 1:
                raw_msg = {'raw_msg': msg}
                self._showMsg(raw_msg)
                # custom addition -------------------------------------------#
                # TODO: send a canned reply when auto-reply mode is on
                if self.autoReplyMode:
                    ans = "\n[Auto-reply from Li Jiyang's bot]"
                    if self.webwxsendmsg(ans, msg['FromUserName']):
                        print('Auto-reply: ' + ans)
                        logging.info('Auto-reply: ' + ans)
                    else:
                        print('Auto-reply failed')
                        logging.info('Auto-reply failed')
            # image message
            elif msgType == 3:
                image = self.webwxgetmsgimg(msgid)
                raw_msg = {'raw_msg': msg,
                           'message': '%s sent an image: %s' % (name, image)}
                self._showMsg(raw_msg)
                self._safe_open(image)
            elif msgType == 34:
                voice = self.webwxgetvoice(msgid)
                raw_msg = {'raw_msg': msg,
                           'message': '%s sent a voice message: %s' % (name, voice)}
                self._showMsg(raw_msg)
                self._safe_open(voice)
            elif msgType == 42:
                info = msg['RecommendInfo']
                print('%s sent a business card:' % name)
                print('=========================')
                print('= Nickname: %s' % info['NickName'])
                print('= WeChat ID: %s' % info['Alias'])
                print('= Region: %s %s' % (info['Province'], info['City']))
                print('= Gender: %s' % ['unknown', 'male', 'female'][info['Sex']])
                print('=========================')
                raw_msg = {'raw_msg': msg, 'message': '%s sent a business card: %s' % (
                    name.strip(), json.dumps(info))}
                self._showMsg(raw_msg)
            elif msgType == 47:
                url = self._searchContent('cdnurl', content)
                raw_msg = {'raw_msg': msg,
                           'message': '%s sent an animated sticker, view it at: %s' % (name, url)}
                self._showMsg(raw_msg)
                self._safe_open(url)
            elif msgType == 49:
                appMsgType = defaultdict(lambda: "")
                appMsgType.update({5: 'link', 3: 'music', 7: 'Weibo'})
                print('%s shared a %s:' % (name, appMsgType[msg['AppMsgType']]))
                print('=========================')
                print('= Title: %s' % msg['FileName'])
                print('= Description: %s' % self._searchContent('des', content, 'xml'))
                print('= Link: %s' % msg['Url'])
                print('= From: %s' % self._searchContent('appname', content, 'xml'))
                print('=========================')
                card = {
                    'title': msg['FileName'],
                    'description': self._searchContent('des', content, 'xml'),
                    'url': msg['Url'],
                    'appname': self._searchContent('appname', content, 'xml')
                }
                raw_msg = {'raw_msg': msg, 'message': '%s shared a %s: %s' % (
                    name, appMsgType[msg['AppMsgType']], json.dumps(card))}
                self._showMsg(raw_msg)
            elif msgType == 51:
                raw_msg = {'raw_msg': msg, 'message': '[*] Contact info fetched successfully'}
                self._showMsg(raw_msg)
            elif msgType == 62:
                video = self.webwxgetvideo(msgid)
                raw_msg = {'raw_msg': msg,
                           'message': '%s sent a short video: %s' % (name, video)}
                self._showMsg(raw_msg)
                self._safe_open(video)
            elif msgType == 10002:
                raw_msg = {'raw_msg': msg, 'message': '%s revoked a message' % name}
                self._showMsg(raw_msg)
                # TODO: in group chats, keep a revoked message and repost it to the group
                if msg['FromUserName'][:2] == '@@':
                    if ":<br/>" in content:
                        [people, content] = content.split(':<br/>', 1)
                        srcName = self.getUserRemarkName(people)
                    else:
                        srcName = 'SYSTEM'
                    doc = xml.dom.minidom.parseString(content)
                    root = doc.documentElement
                    revokemsg_id = root.getElementsByTagName("msgid")[0].firstChild.data
                    dao = MysqlDao()
                    dao.openConn()
                    cursor = dao.conn.cursor()
                    sql = 'SELECT message,msgType FROM wx_message WHERE msgid=%s'
                    cursor.execute(sql, (revokemsg_id,))
                    result = cursor.fetchone()
                    dao.conn.commit()
                    cursor.close()
                    dao.closeConn()
                    if result is not None:
                        if result[1] == 1:
                            self.sendMsg2Group(msg['FromUserName'], srcName + " revoked -> [" + result[0] + "]")
                        if result[1] == 3:
                            self.sendMsg2Group(msg['FromUserName'], srcName + " revoked an image")
                            dirName = os.path.join(self.saveFolder, self.saveSubFolders['webwxgetmsgimg'])
                            filename = "img_" + revokemsg_id + ".jpg"
                            fn = os.path.join(dirName, filename)
                            # PIL: stamp a watermark onto the saved copy
                            self.set_watermark(fn, srcName + " revoked image " + datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S"))
                            # resend the image to the group from my own account
                            self.sendImg2Group(self.User['UserName'], msg['FromUserName'], os.path.join(dirName, "revoke", filename))
                    else:
                        print('[*] revoked message not found in the database')
            else:
                logging.debug('[*] Message type %d: possibly a sticker, image, link or red packet: %s' %
                              (msg['MsgType'], json.dumps(msg)))
                raw_msg = {
                    'raw_msg': msg, 'message': '[*] Message type %d: possibly a sticker, image, link or red packet' % msg['MsgType']}
                self._showMsg(raw_msg)
def listenMsgMode(self):
        print('[*] Entering message-listening mode ... success')
        logging.debug('[*] Entering message-listening mode ... success')
        self._run('[*] Testing sync hosts ... ', self.testsynccheck)
playWeChat = 0
redEnvelope = 0
while True:
self.lastCheckTs = time.time()
[retcode, selector] = self.synccheck()
if self.DEBUG:
print('retcode: %s, selector: %s' % (retcode, selector))
logging.debug('retcode: %s, selector: %s' % (retcode, selector))
            if retcode == '1100':
                print('[*] You logged out of WeChat on your phone, bye')
                logging.debug('[*] You logged out of WeChat on your phone, bye')
                break
            if retcode == '1101':
                print('[*] You logged into web WeChat somewhere else, bye')
                logging.debug('[*] You logged into web WeChat somewhere else, bye')
                break
elif retcode == '0':
if selector == '2':
r = self.webwxsync()
if r is not None:
self.handleMsg(r)
                elif selector == '6':
                    # TODO: buggy
                    redEnvelope += 1
                    # print('[*] Received %d suspected red packet messages' % redEnvelope)
                    # logging.debug('[*] Received %d suspected red packet messages' % redEnvelope)
                elif selector == '7':
                    playWeChat += 1
                    print('[*] Caught you using WeChat on your phone %d times' % playWeChat)
                    logging.debug('[*] Caught you using WeChat on your phone %d times' % playWeChat)
r = self.webwxsync()
elif selector == '0':
time.sleep(1)
            if (time.time() - self.lastCheckTs) <= self.TimeOut:
                time.sleep(self.TimeOut - (time.time() - self.lastCheckTs))  # pad to the minimum sync interval
def sendMsg2Group(self, id, word, isfile=False):
if id:
if isfile:
name = ''
with open(word, 'r') as f:
for line in f.readlines():
line = line.replace('\n', '')
                        self._echo('-> ' + name + ': ' + line)
                        if self.webwxsendmsg(line, id):
                            print(' [OK]')
                        else:
                            print(' [failed]')
                        time.sleep(1)
            else:
                if self.webwxsendmsg(word, id):
                    print('[*] Message sent')
                    logging.debug('[*] Message sent')
                else:
                    print('[*] Message failed to send')
                    logging.debug('[*] Message failed to send')
        else:
            print('[*] No such group ID')
            logging.debug('[*] No such group ID')
def sendMsg(self, name, word, isfile=False):
id = self.getUSerID(name)
if id:
if isfile:
with open(word, 'r') as f:
for line in f.readlines():
line = line.replace('\n', '')
                        self._echo('-> ' + name + ': ' + line)
                        if self.webwxsendmsg(line, id):
                            print(' [OK]')
                        else:
                            print(' [failed]')
                        time.sleep(1)
            else:
                if self.webwxsendmsg(word, id):
                    print('[*] Message sent')
                    logging.debug('[*] Message sent')
                else:
                    print('[*] Message failed to send')
                    logging.debug('[*] Message failed to send')
        else:
            print('[*] No such user')
            logging.debug('[*] No such user')
def sendMsgToAll(self, word):
for contact in self.ContactList:
name = contact['RemarkName'] if contact[
'RemarkName'] else contact['NickName']
id = contact['UserName']
self._echo('-> ' + name + ': ' + word)
            if self.webwxsendmsg(word, id):
                print(' [OK]')
            else:
                print(' [failed]')
time.sleep(1)
def sendImg(self, name, file_name):
response = self.webwxuploadmedia(file_name)
media_id = ""
if response is not None:
media_id = response['MediaId']
user_id = self.getUSerID(name)
response = self.webwxsendmsgimg(user_id, media_id)
def sendImg2Group(self, self_id, group_id, file_name):
response = self.webwxuploadmedia(file_name, self_id, group_id)
media_id = ""
if response is not None:
media_id = response['MediaId']
response = self.webwxsendmsgimg(group_id, media_id)
def sendEmotion(self, name, file_name):
response = self.webwxuploadmedia(file_name)
media_id = ""
if response is not None:
media_id = response['MediaId']
user_id = self.getUSerID(name)
response = self.webwxsendmsgemotion(user_id, media_id)
    # stamp a text watermark onto an image
def set_watermark(self, image_file, text):
img = Image.open(image_file)
(img_x, img_y) = img.size
basename = os.path.basename(image_file)
        # Scale the font size with the image resolution; on very small images
        # the watermark may still be clipped.
        fontSize = 1
font = ImageFont.truetype("yahei.ttf", fontSize, encoding='unic')
while font.getsize(text)[0] < 0.3 * img_x:
fontSize = fontSize + 1
font = ImageFont.truetype("yahei.ttf", fontSize, encoding='unic')
draw = ImageDraw.Draw(img)
draw.text((int(img_x / 5), 0), text, (100, 100, 0), font=font)
newdir = os.path.join("saved", "msgimgs", "revoke")
        if not os.path.exists(newdir):
            os.makedirs(newdir)
img.save(newdir + '/' + basename, 'jpeg')
@catchKeyboardInterrupt
def start(self):
self._echo('[*] wechat for web ... starting')
print()
logging.debug('[*] wechat for web ... starting')
while True:
            self._run('[*] get uuid ... ', self.getUUID)
            self._echo('[*] get qrcode ... success')
            print()
            logging.debug('[*] wechat for web ... starting')
            self.genQRCode()
            print('[*] Please scan the QR code with WeChat to log in ... ')
            # poll for the login result
            if not self.waitForLogin():
                continue
            print('[*] Please tap confirm on your phone to log in ... ')
            if not self.waitForLogin(0):
                continue
            break
        self._run('[*] Logging in ... ', self.login)
        self._run('[*] Initializing WeChat ... ', self.webwxinit)
        self._run('[*] Enabling status notifications ... ', self.webwxstatusnotify)
        self._run('[*] Fetching contacts ... ', self.webwxgetcontact)
        self._echo('[*] Expected %s contacts, read %d contacts' %
                   (self.MemberCount, len(self.MemberList)))
        print()
        self._echo('[*] %d groups | %d direct contacts | %d special accounts | %d official or service accounts' % (len(self.GroupList),
                   len(self.ContactList), len(self.SpecialUsersList), len(self.PublicUsersList)))
        print()
        self._run('[*] Fetching groups ... ', self.webwxbatchgetcontact)
        logging.debug('[*] web WeChat ... up and running')
if self.DEBUG:
print(self)
logging.debug(self)
        if self.interactive and input('[*] Enable auto-reply mode? (y/n): ') == 'y':
            self.autoReplyMode = True
            print('[*] Auto-reply mode ... on')
            logging.debug('[*] Auto-reply mode ... on')
        else:
            print('[*] Auto-reply mode ... off')
            logging.debug('[*] Auto-reply mode ... off')
if sys.platform.startswith('win'):
import _thread
            _thread.start_new_thread(self.listenMsgMode, ())
else:
listenProcess = multiprocessing.Process(target=self.listenMsgMode)
listenProcess.start()
while True:
text = input('')
                if text == 'quit':
                    listenProcess.terminate()
                    print('[*] Exiting WeChat')
                    logging.debug('[*] Exiting WeChat')
exit()
elif text[:2] == '->':
[name, word] = text[2:].split(':')
if name == 'all':
self.sendMsgToAll(word)
else:
self.sendMsg(name, word)
elif text[:3] == 'm->':
[name, file] = text[3:].split(':')
self.sendMsg(name, file, True)
                elif text[:3] == 'f->':
                    print('send file')
                    logging.debug('send file')
                elif text[:3] == 'i->':
                    print('send image')
                    [name, file_name] = text[3:].split(':')
                    self.sendImg(name, file_name)
                    logging.debug('send image')
                elif text[:3] == 'e->':
                    print('send sticker')
                    [name, file_name] = text[3:].split(':')
                    self.sendEmotion(name, file_name)
                    logging.debug('send sticker')
def _safe_open(self, path):
if self.autoOpen:
if platform.system() == "Linux":
os.system("xdg-open %s &" % path)
else:
os.system('open %s &' % path)
    def _run(self, prompt, func, *args):
        self._echo(prompt)
        if func(*args):
            print('success')
            logging.debug('%s... success' % (prompt))
        else:
            print('failed\n[*] Exiting program')
            logging.debug('%s... failed' % (prompt))
            logging.debug('[*] Exiting program')
            exit()
    def _echo(self, message):
        sys.stdout.write(message)
        sys.stdout.flush()
def _printQR(self, mat):
for i in mat:
BLACK = '\033[40m \033[0m'
WHITE = '\033[47m \033[0m'
print(''.join([BLACK if j else WHITE for j in i]))
    def _str2qr(self, data):
        print(data)
        qr = qrcode.QRCode()
        qr.border = 1
        qr.add_data(data)
        qr.make()
        # img = qr.make_image()
        # img.save("qrcode.png")
        # mat = qr.get_matrix()
        # self._printQR(mat)  # qr.print_tty() or qr.print_ascii()
        qr.print_ascii(invert=True)
    def _transcoding(self, data):
        if not data:
            return data
        result = None
        if isinstance(data, str):
            result = data
        elif isinstance(data, bytes):
            result = data.decode('utf-8')
        return result
def _get(self, url: object, api: object = None, timeout: object = None) -> object:
request = urllib.request.Request(url=url)
request.add_header('Referer', 'https://wx.qq.com/')
        if api in ('webwxgetvoice', 'webwxgetvideo', 'webwxgetmsgimg'):
            request.add_header('Range', 'bytes=0-')
try:
response = urllib.request.urlopen(request, timeout=timeout) if timeout else urllib.request.urlopen(request)
if api == 'webwxgetvoice' or api == 'webwxgetvideo' or api == 'webwxgetmsgimg':
data = response.read()
else:
data = response.read().decode('utf-8')
logging.debug(url)
return data
except urllib.error.HTTPError as e:
logging.error('HTTPError = ' + str(e.code))
except urllib.error.URLError as e:
logging.error('URLError = ' + str(e.reason))
except http.client.HTTPException as e:
logging.error('HTTPException')
except timeout_error as e:
pass
except ssl.CertificateError as e:
pass
except Exception:
import traceback
logging.error('generic exception: ' + traceback.format_exc())
return ''
def _post(self, url: object, params: object, jsonfmt: object = True) -> object:
if jsonfmt:
data = (json.dumps(params)).encode()
request = urllib.request.Request(url=url, data=data)
            request.add_header(
                'Content-Type', 'application/json; charset=UTF-8')
else:
request = urllib.request.Request(url=url, data=urllib.parse.urlencode(params).encode(encoding='utf-8'))
try:
response = urllib.request.urlopen(request)
data = response.read()
if jsonfmt:
                return json.loads(data.decode('utf-8'))  # object_hook=_decode_dict disabled
return data
except urllib.error.HTTPError as e:
logging.error('HTTPError = ' + str(e.code))
except urllib.error.URLError as e:
logging.error('URLError = ' + str(e.reason))
except http.client.HTTPException as e:
logging.error('HTTPException')
except Exception:
import traceback
logging.error('generic exception: ' + traceback.format_exc())
return ''
def _xiaodoubi(self, word):
url = 'http://www.xiaodoubi.com/bot/chat.php'
try:
r = requests.post(url, data={'chat': word})
return r.content
        except Exception:
            return "Leave me alone for a bit T_T..."
def _simsimi(self, word):
key = ''
url = 'http://sandbox.api.simsimi.com/request.p?key=%s&lc=ch&ft=0.0&text=%s' % (
key, word)
r = requests.get(url)
ans = r.json()
if ans['result'] == '100':
return ans['response']
else:
            return "What did you say? The wind is too loud, I can't hear you"
    def _searchContent(self, key, content, fmat='attr'):
        if fmat == 'attr':
            pm = re.search(key + r'\s?=\s?"([^"<]+)"', content)
            if pm:
                return pm.group(1)
        elif fmat == 'xml':
            pm = re.search(r'<{0}>([^<]+)</{0}>'.format(key), content)
            if not pm:
                pm = re.search(
                    r'<{0}><!\[CDATA\[(.*?)\]\]></{0}>'.format(key), content)
            if pm:
                return pm.group(1)
        return 'unknown'
class UnicodeStreamFilter:
def __init__(self, target):
self.target = target
self.encoding = 'utf-8'
self.errors = 'replace'
self.encode_to = self.target.encoding
    def write(self, s):
        if isinstance(s, str):
            s = s.encode(self.encode_to, self.errors).decode(self.encode_to)
        self.target.write(s)
def flush(self):
self.target.flush()
if sys.stdout.encoding == 'cp936':
sys.stdout = UnicodeStreamFilter(sys.stdout)
if __name__ == '__main__':
logger = logging.getLogger(__name__)
if not sys.platform.startswith('win'):
import coloredlogs
coloredlogs.install(level='DEBUG')
webwx = WebWeixin()
webwx.start()
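# Note: loadConfig() is never called in this entry point. A hypothetical
# config dict (keys taken from the checks inside loadConfig above) would
# look like:
#
#     webwx.loadConfig({'DEBUG': False, 'autoReplyMode': False,
#                       'user_agent': webwx.user_agent,
#                       'interactive': True, 'autoOpen': False})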
|
test_filesink_compression.py
|
import datetime
import os
import sys
import threading
import time
import pytest
import loguru
from loguru import logger
@pytest.mark.parametrize(
"compression", ["gz", "bz2", "zip", "xz", "lzma", "tar", "tar.gz", "tar.bz2", "tar.xz"]
)
def test_compression_ext(tmpdir, compression):
i = logger.add(str(tmpdir.join("file.log")), compression=compression)
logger.remove(i)
assert len(tmpdir.listdir()) == 1
assert tmpdir.join("file.log.%s" % compression).check(exists=1)
def test_compression_function(tmpdir):
def compress(file):
os.replace(file, file + ".rar")
i = logger.add(str(tmpdir.join("file.log")), compression=compress)
logger.remove(i)
assert len(tmpdir.listdir()) == 1
assert tmpdir.join("file.log.rar").check(exists=1)
@pytest.mark.parametrize("mode", ["a", "a+", "w", "x"])
def test_compression_at_rotation(tmpdir, mode):
logger.add(
str(tmpdir.join("file.log")), format="{message}", rotation=0, compression="gz", mode=mode
)
logger.debug("After compression")
files = sorted(tmpdir.listdir())
assert len(files) == 2
assert files[0].fnmatch(
"file.{0}-{1}-{1}_{1}-{1}-{1}_{2}.log.gz".format("[0-9]" * 4, "[0-9]" * 2, "[0-9]" * 6)
)
assert files[1].basename == "file.log"
assert files[1].read() == "After compression\n"
@pytest.mark.parametrize("mode", ["a", "a+", "w", "x"])
def test_compression_at_remove_without_rotation(tmpdir, mode):
i = logger.add(str(tmpdir.join("file.log")), compression="gz", mode=mode)
logger.debug("test")
logger.remove(i)
assert len(tmpdir.listdir()) == 1
assert tmpdir.join("file.log.gz").check(exists=1)
@pytest.mark.parametrize("mode", ["a", "a+", "w", "x"])
def test_no_compression_at_remove_with_rotation(tmpdir, mode):
i = logger.add(str(tmpdir.join("test.log")), compression="gz", rotation="100 MB", mode=mode)
logger.debug("test")
logger.remove(i)
assert len(tmpdir.listdir()) == 1
assert tmpdir.join("test.log").check(exists=1)
def test_rename_existing_with_creation_time(monkeypatch, tmpdir):
def creation_time(filepath):
assert os.path.isfile(filepath)
assert os.path.basename(filepath) == "test.log.tar.gz"
return datetime.datetime(2018, 1, 1, 0, 0, 0, 0).timestamp()
i = logger.add(str(tmpdir.join("test.log")), compression="tar.gz")
logger.debug("test")
logger.remove(i)
j = logger.add(str(tmpdir.join("test.log")), compression="tar.gz")
logger.debug("test")
monkeypatch.setattr(loguru._file_sink, "get_ctime", creation_time)
logger.remove(j)
assert len(tmpdir.listdir()) == 2
assert tmpdir.join("test.log.tar.gz").check(exists=1)
assert tmpdir.join("test.2018-01-01_00-00-00_000000.log.tar.gz").check(exists=1)
def test_renaming_compression_dest_exists(monkeypatch, monkeypatch_date, tmpdir):
date = (2019, 1, 2, 3, 4, 5, 6)
timestamp = datetime.datetime(*date).timestamp()
monkeypatch_date(*date)
monkeypatch.setattr(loguru._file_sink, "get_ctime", lambda _: timestamp)
for i in range(4):
logger.add(str(tmpdir.join("rotate.log")), compression=".tar.gz", format="{message}")
logger.info(str(i))
logger.remove()
assert len(tmpdir.listdir()) == 4
assert tmpdir.join("rotate.log.tar.gz").check(exists=1)
assert tmpdir.join("rotate.2019-01-02_03-04-05_000006.log.tar.gz").check(exists=1)
assert tmpdir.join("rotate.2019-01-02_03-04-05_000006.2.log.tar.gz").check(exists=1)
assert tmpdir.join("rotate.2019-01-02_03-04-05_000006.3.log.tar.gz").check(exists=1)
def test_renaming_compression_dest_exists_with_time(monkeypatch, monkeypatch_date, tmpdir):
date = (2019, 1, 2, 3, 4, 5, 6)
timestamp = datetime.datetime(*date).timestamp()
monkeypatch_date(*date)
monkeypatch.setattr(loguru._file_sink, "get_ctime", lambda _: timestamp)
for i in range(4):
logger.add(str(tmpdir.join("rotate.{time}.log")), compression=".tar.gz", format="{message}")
logger.info(str(i))
logger.remove()
assert len(tmpdir.listdir()) == 4
assert tmpdir.join("rotate.2019-01-02_03-04-05_000006.log.tar.gz").check(exists=1)
assert tmpdir.join(
"rotate.2019-01-02_03-04-05_000006.2019-01-02_03-04-05_000006.log.tar.gz"
).check(exists=1)
assert tmpdir.join(
"rotate.2019-01-02_03-04-05_000006.2019-01-02_03-04-05_000006.2.log.tar.gz"
).check(exists=1)
assert tmpdir.join(
"rotate.2019-01-02_03-04-05_000006.2019-01-02_03-04-05_000006.3.log.tar.gz"
).check(exists=1)
def test_compression_use_renamed_file_after_rotation(tmpdir):
compressed_file = None
def compression(filepath):
nonlocal compressed_file
compressed_file = filepath
def rotation(message, _):
return message.record["extra"].get("rotate", False)
filepath = tmpdir.join("test.log")
logger.add(str(filepath), format="{message}", compression=compression, rotation=rotation)
logger.info("Before")
logger.bind(rotate=True).info("Rotation")
logger.info("After")
assert compressed_file != str(filepath)
assert open(compressed_file, "r").read() == "Before\n"
assert filepath.read() == "Rotation\nAfter\n"
def test_threaded_compression_after_rotation(tmpdir):
thread = None
def rename(filepath):
time.sleep(1)
os.rename(filepath, str(tmpdir.join("test.log.mv")))
def compression(filepath):
nonlocal thread
thread = threading.Thread(target=rename, args=(filepath,))
thread.start()
def rotation(message, _):
return message.record["extra"].get("rotate", False)
logger.add(
str(tmpdir.join("test.log")), format="{message}", compression=compression, rotation=rotation
)
logger.info("Before")
logger.bind(rotate=True).info("Rotation")
logger.info("After")
thread.join()
assert tmpdir.join("test.log").read() == "Rotation\nAfter\n"
assert tmpdir.join("test.log.mv").read() == "Before\n"
@pytest.mark.parametrize("delay", [True, False])
def test_exception_during_compression_at_rotation(tmpdir, capsys, delay):
raising = True
def failing_compression(file):
nonlocal raising
if raising:
raising = False
raise Exception("Compression error")
logger.add(
str(tmpdir.join("test.log")),
format="{message}",
compression=failing_compression,
rotation=0,
catch=True,
delay=delay,
)
logger.debug("AAA")
logger.debug("BBB")
files = sorted(tmpdir.listdir())
out, err = capsys.readouterr()
assert len(files) == 3
assert [file.read() for file in files] == ["", "", "BBB\n"]
assert out == ""
assert err.count("Logging error in Loguru Handler") == 1
assert err.count("Exception: Compression error") == 1
@pytest.mark.parametrize("delay", [True, False])
def test_exception_during_compression_at_rotation_not_caught(tmpdir, capsys, delay):
raising = True
def failing_compression(file):
nonlocal raising
if raising:
raising = False
raise Exception("Compression error")
logger.add(
str(tmpdir.join("test.log")),
format="{message}",
compression=failing_compression,
rotation=0,
catch=False,
delay=delay,
)
with pytest.raises(Exception, match="Compression error"):
logger.debug("AAA")
logger.debug("BBB")
files = sorted(tmpdir.listdir())
out, err = capsys.readouterr()
assert len(files) == 3
assert [file.read() for file in files] == ["", "", "BBB\n"]
assert out == err == ""
@pytest.mark.parametrize("delay", [True, False])
def test_exception_during_compression_at_remove(tmpdir, capsys, delay):
raising = True
def failing_compression(file):
nonlocal raising
if raising:
raising = False
raise Exception("Compression error")
i = logger.add(
str(tmpdir.join("test.log")),
format="{message}",
compression=failing_compression,
catch=True,
delay=delay,
)
logger.debug("AAA")
with pytest.raises(Exception, match=r"Compression error"):
logger.remove(i)
logger.debug("Nope")
files = sorted(tmpdir.listdir())
out, err = capsys.readouterr()
assert len(files) == 1
assert tmpdir.join("test.log").read() == "AAA\n"
assert out == err == ""
@pytest.mark.parametrize("compression", [0, True, os, object(), {"zip"}])
def test_invalid_compression(compression):
with pytest.raises(TypeError):
logger.add("test.log", compression=compression)
@pytest.mark.parametrize("compression", ["rar", ".7z", "tar.zip", "__dict__"])
def test_unknown_compression(compression):
with pytest.raises(ValueError):
logger.add("test.log", compression=compression)
@pytest.mark.parametrize("ext", ["gz", "tar.gz"])
def test_gzip_module_unavailable(ext, monkeypatch):
monkeypatch.setitem(sys.modules, "gzip", None)
with pytest.raises(ImportError):
logger.add("test.log", compression=ext)
@pytest.mark.parametrize("ext", ["bz2", "tar.bz2"])
def test_bz2_module_unavailable(ext, monkeypatch):
monkeypatch.setitem(sys.modules, "bz2", None)
with pytest.raises(ImportError):
logger.add("test.log", compression=ext)
@pytest.mark.parametrize("ext", ["xz", "lzma", "tar.xz"])
def test_lzma_module_unavailable(ext, monkeypatch):
monkeypatch.setitem(sys.modules, "lzma", None)
with pytest.raises(ImportError):
logger.add("test.log", compression=ext)
@pytest.mark.parametrize("ext", ["tar", "tar.gz", "tar.bz2", "tar.xz"])
def test_tarfile_module_unavailable(ext, monkeypatch):
monkeypatch.setitem(sys.modules, "tarfile", None)
with pytest.raises(ImportError):
logger.add("test.log", compression=ext)
@pytest.mark.parametrize("ext", ["zip"])
def test_zipfile_module_unavailable(ext, monkeypatch):
monkeypatch.setitem(sys.modules, "zipfile", None)
with pytest.raises(ImportError):
logger.add("test.log", compression=ext)
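# The loguru API these tests exercise, in one minimal sketch (the path
# "app.log" is illustrative): logger.add() takes a rotation condition plus a
# compression format or callable, and compresses the closed file accordingly.
#
#     from loguru import logger
#     logger.add("app.log", rotation="100 MB", compression="gz")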
|
click_count.py
|
from pynput import mouse
import wx
import time
import threading
class Singleton:
_unique_instance = None
elapsed_time = 0
click_count = -1
active_state = False
@classmethod
def get_instance(cls):
if not cls._unique_instance:
cls._unique_instance = cls()
return cls._unique_instance
def set_time(self,time):
self.elapsed_time = time
#print("set_time = {}".format(self.elapsed_time))
def count_increment(self):
self.click_count += 1
print(self.click_count)
class AppFrame(wx.Frame):
start_time = time.time()
current_count = 0
thread = None
def __init__(self):
        wx.Frame.__init__(self, None, title="click count", size=(250, 100))
        self.SetTransparent(255)
        self.start_time = time.time()
        main_panel = wx.Panel(self)
self.label_1 = wx.StaticText(main_panel, label="elapsed", pos=(20, 10))
self.label_2 = wx.StaticText(main_panel, label="time:{0}".format(0) + "[sec]", pos=(20,10))
self.label_3 = wx.StaticText(main_panel, label="click_count", pos=(20, 10))
self.label_4 = wx.StaticText(main_panel, label="count:{0}".format(0), pos=(20,10))
button_1 = wx.Button(main_panel, wx.ID_ANY, 'Start')
button_2 = wx.Button(main_panel, wx.ID_ANY, 'Stop')
button_1.Bind(wx.EVT_BUTTON, self.start)
button_2.Bind(wx.EVT_BUTTON, self.stop)
layout = wx.GridSizer(rows=3, cols=2, gap=(2, 2))
layout.Add(self.label_1, 0, wx.GROW)
layout.Add(self.label_2, 0, wx.GROW)
layout.Add(self.label_3, 0, wx.GROW)
layout.Add(self.label_4, 0, wx.GROW)
layout.Add(button_1, 0, wx.GROW)
layout.Add(button_2, 0, wx.GROW)
main_panel.SetSizer(layout)
def initialize(self):
self.start_time = time.time()
object = Singleton.get_instance()
object.click_count = -1
object.elapsed_time = 0
object.active_state = False
self.label_2.SetLabel("time:{0}".format(0) + "[sec]")
def start(self,event):
self.initialize()
self.timer = wx.Timer(self)
self.Bind(wx.EVT_TIMER, self.update)
self.timer.Start(1000)
object = Singleton.get_instance()
object.active_state = True
        if self.thread is None:
            self.thread = threading.Thread(target=click_listen)
            self.thread.start()
def stop(self,event):
self.timer.Stop()
def update(self, event):
new_time = time.time()
delta = new_time - self.start_time
delta = int(delta)
object = Singleton.get_instance()
object.set_time(delta)
self.label_2.SetLabel("time:{0}".format(delta) + "[sec]")
new_count = object.click_count
if self.current_count != new_count:
self.current_count = new_count
self.label_4.SetLabel("count:{0}".format(new_count))
def on_click(x, y, button, pressed):
print('{0} at {1}'.format('Pressed' if pressed else 'Released',(int(x), int(y))))
if not pressed:
## Stop listener
object = Singleton.get_instance()
object.count_increment()
# return False
def click_listen():
    object = Singleton.get_instance()
    while object.active_state:
        with mouse.Listener(on_click=on_click) as listener:
            listener.join()
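# Design note: the wx.Timer drives UI updates on the main thread once per
# second, while the pynput mouse listener runs in a worker thread started by
# AppFrame.start(); the two sides share state through the Singleton instance.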
def make_view():
app = wx.App(False)
frame = AppFrame().Show(True)
app.MainLoop()
make_view()
|
p_bfgs.py
|
# -*- coding: utf-8 -*-
# This code is part of Qiskit.
#
# (C) Copyright IBM 2018, 2020.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
"""Parallelized Limited-memory BFGS optimizer"""
from typing import Optional
import multiprocessing
import platform
import logging
import numpy as np
from scipy import optimize as sciopt
from qiskit.aqua import aqua_globals
from qiskit.aqua.utils.validation import validate_min
from .optimizer import Optimizer
logger = logging.getLogger(__name__)
# pylint: disable=invalid-name
class P_BFGS(Optimizer):
"""
Parallelized Limited-memory BFGS optimizer.
P-BFGS is a parallelized version of :class:`L_BFGS_B` with which it shares the same parameters.
P-BFGS can be useful when the target hardware is a quantum simulator running on a classical
machine. This allows the multiple processes to use simulation to potentially reach a minimum
faster. The parallelization may also help the optimizer avoid getting stuck at local optima.
Uses scipy.optimize.fmin_l_bfgs_b.
For further detail, please refer to
https://docs.scipy.org/doc/scipy/reference/generated/scipy.optimize.fmin_l_bfgs_b.html
"""
_OPTIONS = ['maxfun', 'factr', 'iprint']
# pylint: disable=unused-argument
def __init__(self,
maxfun: int = 1000,
factr: float = 10,
iprint: int = -1,
max_processes: Optional[int] = None) -> None:
r"""
Args:
maxfun: Maximum number of function evaluations.
factr : The iteration stops when (f\^k - f\^{k+1})/max{\|f\^k\|,
\|f\^{k+1}|,1} <= factr * eps, where eps is the machine precision,
which is automatically generated by the code. Typical values for
factr are: 1e12 for low accuracy; 1e7 for moderate accuracy;
10.0 for extremely high accuracy. See Notes for relationship to ftol,
which is exposed (instead of factr) by the scipy.optimize.minimize
interface to L-BFGS-B.
iprint: Controls the frequency of output. iprint < 0 means no output;
iprint = 0 print only one line at the last iteration; 0 < iprint < 99
print also f and \|proj g\| every iprint iterations; iprint = 99 print
details of every iteration except n-vectors; iprint = 100 print also the
changes of active set and final x; iprint > 100 print details of
every iteration including x and g.
max_processes: maximum number of processes allowed, has a min. value of 1 if not None.
"""
if max_processes:
validate_min('max_processes', max_processes, 1)
super().__init__()
for k, v in locals().items():
if k in self._OPTIONS:
self._options[k] = v
self._max_processes = max_processes
def get_support_level(self):
""" return support level dictionary """
return {
'gradient': Optimizer.SupportLevel.supported,
'bounds': Optimizer.SupportLevel.supported,
'initial_point': Optimizer.SupportLevel.required
}
def optimize(self, num_vars, objective_function, gradient_function=None,
variable_bounds=None, initial_point=None):
num_procs = multiprocessing.cpu_count() - 1
num_procs = \
num_procs if self._max_processes is None else min(num_procs, self._max_processes)
num_procs = num_procs if num_procs >= 0 else 0
if platform.system() == "Windows":
num_procs = 0
logger.warning("Using only current process. Multiple core use not supported in Windows")
queue = multiprocessing.Queue()
# bounds for additional initial points in case bounds has any None values
threshold = 2*np.pi
if variable_bounds is None:
variable_bounds = [(-threshold, threshold)] * num_vars
low = [(l if l is not None else -threshold) for (l, u) in variable_bounds]
high = [(u if u is not None else threshold) for (l, u) in variable_bounds]
def optimize_runner(_queue, _i_pt): # Multi-process sampling
_sol, _opt, _nfev = self._optimize(num_vars, objective_function,
gradient_function, variable_bounds, _i_pt)
_queue.put((_sol, _opt, _nfev))
# Start off as many other processes running the optimize (can be 0)
processes = []
for _ in range(num_procs):
i_pt = aqua_globals.random.uniform(low, high) # Another random point in bounds
p = multiprocessing.Process(target=optimize_runner, args=(queue, i_pt))
processes.append(p)
p.start()
        # While the one _optimize call below runs in this process, the other
        # processes will be running too. This one runs with the supplied
        # initial point; the spawned processes each use their own random point.
sol, opt, nfev = self._optimize(num_vars, objective_function,
gradient_function, variable_bounds, initial_point)
for p in processes:
# For each other process we wait now for it to finish and see if it has
# a better result than above
p.join()
p_sol, p_opt, p_nfev = queue.get()
if p_opt < opt:
sol, opt = p_sol, p_opt
nfev += p_nfev
return sol, opt, nfev
def _optimize(self, num_vars, objective_function, gradient_function=None,
variable_bounds=None, initial_point=None):
super().optimize(num_vars, objective_function, gradient_function,
variable_bounds, initial_point)
approx_grad = bool(gradient_function is None)
sol, opt, info = sciopt.fmin_l_bfgs_b(objective_function, initial_point,
bounds=variable_bounds,
fprime=gradient_function,
approx_grad=approx_grad, **self._options)
return sol, opt, info['funcalls']
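# A minimal usage sketch (not part of the original module). The import path
# assumes the qiskit-aqua package layout this file belongs to; the quadratic
# objective and bounds below are illustrative only.
#
#     import numpy as np
#     from qiskit.aqua.components.optimizers import P_BFGS
#
#     optimizer = P_BFGS(maxfun=1000, max_processes=2)
#     sol, opt, nfev = optimizer.optimize(
#         num_vars=2,
#         objective_function=lambda x: (x[0] - 1) ** 2 + (x[1] + 2) ** 2,
#         variable_bounds=[(-5, 5), (-5, 5)],
#         initial_point=np.zeros(2))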
|
main.py
|
# python main.py --train_data=input.txt --eval_data=hrv_questions.txt --save_path=hrv_tmp/
import os
import sys
import threading
import time
import numpy as np
import tensorflow as tf
from tensorflow.models.embedding import gen_word2vec as word2vec
flags = tf.app.flags
flags.DEFINE_string("save_path", None, "Directory to write the model.")
flags.DEFINE_string(
"train_data", None,
"Training data. E.g., unzipped file http://mattmahoney.net/dc/text8.zip.")
flags.DEFINE_string(
"eval_data", None, "Analogy questions. "
"See README.md for how to get 'questions-words.txt'.")
flags.DEFINE_integer("embedding_size", 200, "The embedding dimension size.")
flags.DEFINE_integer(
"epochs_to_train", 10,
"Number of epochs to train. Each epoch processes the training data once "
"completely.")
flags.DEFINE_float("learning_rate", 0.025, "Initial learning rate.")
flags.DEFINE_integer("num_neg_samples", 15,
"Negative samples per training example.")
flags.DEFINE_integer("batch_size", 500,
"Numbers of training examples each step processes "
"(no minibatching).")
flags.DEFINE_integer("concurrent_steps", 12,
"The number of concurrent training steps.")
flags.DEFINE_integer("window_size", 10,
"The number of words to predict to the left and right "
"of the target word.")
flags.DEFINE_integer("min_count", 50,
"The minimum number of word occurrences for it to be "
"included in the vocabulary.")
flags.DEFINE_float("subsample", 1e-3,
"Subsample threshold for word occurrence. Words that appear "
"with higher frequency will be randomly down-sampled. Set "
"to 0 to disable.")
flags.DEFINE_boolean(
"interactive", False,
"If true, enters an IPython interactive session to play with the trained "
"model. E.g., try model.analogy(b'france', b'paris', b'russia') and "
"model.nearby([b'proton', b'elephant', b'maxwell'])")
FLAGS = flags.FLAGS
class Options(object):
"""Options used by our word2vec model."""
def __init__(self):
# Model options.
# Embedding dimension.
self.emb_dim = FLAGS.embedding_size
# Training options.
# The training text file.
self.train_data = FLAGS.train_data
# Number of negative samples per example.
self.num_samples = FLAGS.num_neg_samples
# The initial learning rate.
self.learning_rate = FLAGS.learning_rate
# Number of epochs to train. After these many epochs, the learning
# rate decays linearly to zero and the training stops.
self.epochs_to_train = FLAGS.epochs_to_train
# Concurrent training steps.
self.concurrent_steps = FLAGS.concurrent_steps
# Number of examples for one training step.
self.batch_size = FLAGS.batch_size
# The number of words to predict to the left and right of the target word.
self.window_size = FLAGS.window_size
# The minimum number of word occurrences for it to be included in the
# vocabulary.
self.min_count = FLAGS.min_count
# Subsampling threshold for word occurrence.
self.subsample = FLAGS.subsample
# Where to write out summaries.
self.save_path = FLAGS.save_path
if not os.path.exists(self.save_path):
os.makedirs(self.save_path)
# Eval options.
# The text file for eval.
self.eval_data = FLAGS.eval_data
class Word2Vec(object):
"""Word2Vec model (Skipgram)."""
def __init__(self, options, session):
self._options = options
self._session = session
self._word2id = {}
self._id2word = []
self.build_graph()
self.build_eval_graph()
self.save_vocab()
def read_analogies(self):
"""Reads through the analogy question file.
Returns:
questions: a [n, 4] numpy array containing the analogy question's
word ids.
questions_skipped: questions skipped due to unknown words.
"""
questions = []
questions_skipped = 0
with open(self._options.eval_data, "rb") as analogy_f:
for line in analogy_f:
if line.startswith(b":"): # Skip comments.
continue
words = line.strip().lower().split(b" ")
ids = [self._word2id.get(w.strip()) for w in words]
if None in ids or len(ids) != 4:
questions_skipped += 1
else:
questions.append(np.array(ids))
print("Eval analogy file: ", self._options.eval_data)
print("Questions: ", len(questions))
print("Skipped: ", questions_skipped)
self._analogy_questions = np.array(questions, dtype=np.int32)
def build_graph(self):
"""Build the model graph."""
opts = self._options
# The training data. A text file.
(words, counts, words_per_epoch, current_epoch, total_words_processed,
examples, labels) = word2vec.skipgram(filename=opts.train_data,
batch_size=opts.batch_size,
window_size=opts.window_size,
min_count=opts.min_count,
subsample=opts.subsample)
(opts.vocab_words, opts.vocab_counts,
opts.words_per_epoch) = self._session.run([words, counts, words_per_epoch])
opts.vocab_size = len(opts.vocab_words)
print("Data file: ", opts.train_data)
print("Vocab size: ", opts.vocab_size - 1, " + UNK")
print("Words per epoch: ", opts.words_per_epoch)
self._id2word = opts.vocab_words
for i, w in enumerate(self._id2word):
self._word2id[w] = i
# Declare all variables we need.
# Input words embedding: [vocab_size, emb_dim]
w_in = tf.Variable(
tf.random_uniform(
[opts.vocab_size,
opts.emb_dim], -0.5 / opts.emb_dim, 0.5 / opts.emb_dim),
name="w_in")
    # Output word embeddings (softmax weights): [vocab_size, emb_dim]
    w_out = tf.Variable(tf.zeros([opts.vocab_size, opts.emb_dim]), name="w_out")
    # Global step: scalar, i.e., shape [].
global_step = tf.Variable(0, name="global_step")
# Linear learning rate decay.
words_to_train = float(opts.words_per_epoch * opts.epochs_to_train)
lr = opts.learning_rate * tf.maximum(
0.0001,
1.0 - tf.cast(total_words_processed, tf.float32) / words_to_train)
# Training nodes.
inc = global_step.assign_add(1)
with tf.control_dependencies([inc]):
train = word2vec.neg_train(w_in,
w_out,
examples,
labels,
lr,
vocab_count=opts.vocab_counts.tolist(),
num_negative_samples=opts.num_samples)
self._w_in = w_in
self._examples = examples
self._labels = labels
self._lr = lr
self._train = train
self.global_step = global_step
self._epoch = current_epoch
self._words = total_words_processed
def save_vocab(self):
"""Save the vocabulary to a file so the model can be reloaded."""
opts = self._options
with open(os.path.join(opts.save_path, "vocab.txt"), "w", encoding="utf-8") as f:
for i in range(opts.vocab_size):
vocab_word = tf.compat.as_text(opts.vocab_words[i])
f.write("%s %d\n" % (vocab_word,
opts.vocab_counts[i]))
def build_eval_graph(self):
"""Build the evaluation graph."""
# Eval graph
opts = self._options
# Each analogy task is to predict the 4th word (d) given three
# words: a, b, c. E.g., a=italy, b=rome, c=france, we should
# predict d=paris.
# The eval feeds three vectors of word ids for a, b, c, each of
# which is of size N, where N is the number of analogies we want to
# evaluate in one batch.
analogy_a = tf.placeholder(dtype=tf.int32) # [N]
analogy_b = tf.placeholder(dtype=tf.int32) # [N]
analogy_c = tf.placeholder(dtype=tf.int32) # [N]
# Normalized word embeddings of shape [vocab_size, emb_dim].
nemb = tf.nn.l2_normalize(self._w_in, 1)
# Each row of a_emb, b_emb, c_emb is a word's embedding vector.
# They all have the shape [N, emb_dim]
a_emb = tf.gather(nemb, analogy_a) # a's embs
b_emb = tf.gather(nemb, analogy_b) # b's embs
c_emb = tf.gather(nemb, analogy_c) # c's embs
    # We expect that d's embedding vector on the unit hyper-sphere is
    # near: c_emb + (b_emb - a_emb), which has the shape [N, emb_dim].
target = c_emb + (b_emb - a_emb)
# Compute cosine distance between each pair of target and vocab.
# dist has shape [N, vocab_size].
dist = tf.matmul(target, nemb, transpose_b=True)
# For each question (row in dist), find the top 4 words.
_, pred_idx = tf.nn.top_k(dist, 4)
# Nodes for computing neighbors for a given word according to
# their cosine distance.
nearby_word = tf.placeholder(dtype=tf.int32) # word id
nearby_emb = tf.gather(nemb, nearby_word)
nearby_dist = tf.matmul(nearby_emb, nemb, transpose_b=True)
nearby_val, nearby_idx = tf.nn.top_k(nearby_dist,
min(1000, opts.vocab_size))
# Nodes in the construct graph which are used by training and
# evaluation to run/feed/fetch.
self._analogy_a = analogy_a
self._analogy_b = analogy_b
self._analogy_c = analogy_c
self._analogy_pred_idx = pred_idx
self._nearby_word = nearby_word
self._nearby_val = nearby_val
self._nearby_idx = nearby_idx
# Properly initialize all variables.
tf.global_variables_initializer().run()
self.saver = tf.train.Saver()
def _train_thread_body(self):
initial_epoch, = self._session.run([self._epoch])
while True:
_, epoch = self._session.run([self._train, self._epoch])
if epoch != initial_epoch:
break
def train(self):
"""Train the model."""
opts = self._options
initial_epoch, initial_words = self._session.run([self._epoch, self._words])
workers = []
for _ in range(opts.concurrent_steps):
t = threading.Thread(target=self._train_thread_body)
t.start()
workers.append(t)
last_words, last_time = initial_words, time.time()
while True:
      time.sleep(5)  # Reports our progress once in a while.
(epoch, step, words, lr) = self._session.run(
[self._epoch, self.global_step, self._words, self._lr])
now = time.time()
last_words, last_time, rate = words, now, (words - last_words) / (
now - last_time)
print("Epoch %4d Step %8d: lr = %5.3f words/sec = %8.0f\r" % (epoch, step,
lr, rate),
end="")
sys.stdout.flush()
if epoch != initial_epoch:
break
for t in workers:
t.join()
def _predict(self, analogy):
"""Predict the top 4 answers for analogy questions."""
idx, = self._session.run([self._analogy_pred_idx], {
self._analogy_a: analogy[:, 0],
self._analogy_b: analogy[:, 1],
self._analogy_c: analogy[:, 2]
})
return idx
def eval(self):
"""Evaluate analogy questions and reports accuracy."""
# How many questions we get right at precision@1.
correct = 0
try:
total = self._analogy_questions.shape[0]
    except AttributeError:
      raise AttributeError(
          "Need to read analogy questions; call read_analogies() first.")
start = 0
while start < total:
limit = start + 2500
sub = self._analogy_questions[start:limit, :]
idx = self._predict(sub)
start = limit
for question in range(sub.shape[0]):
for j in range(4):
if idx[question, j] == sub[question, 3]:
# Bingo! We predicted correctly. E.g., [italy, rome, france, paris].
correct += 1
break
elif idx[question, j] in sub[question, :3]:
# We need to skip words already in the question.
continue
else:
# The correct label is not the precision@1
break
print()
print("Eval %4d/%d accuracy = %4.1f%%" % (correct, total,
correct * 100.0 / total))
  def analogy(self, w0, w1, w2):
    """Predict word w3 as in w0:w1 vs w2:w3."""
    wid = np.array([[self._word2id.get(w, 0) for w in [w0, w1, w2]]])
    idx = self._predict(wid)
    for c in [self._id2word[i] for i in idx[0, :]]:
      if c not in [w0, w1, w2]:
        print(c)
        return
    # Reached only when every predicted word was part of the query.
    print("unknown")
def nearby(self, words, num=20):
"""Prints out nearby words given a list of words."""
ids = np.array([self._word2id.get(x, 0) for x in words])
vals, idx = self._session.run(
[self._nearby_val, self._nearby_idx], {self._nearby_word: ids})
for i in range(len(words)):
print("\n%s\n=====================================" % (words[i]))
for (neighbor, distance) in zip(idx[i, :num], vals[i, :num]):
print("%-20s %6.4f" % (self._id2word[neighbor], distance))
def save_input(self):
    out_path = os.path.join(self._options.save_path,
                            os.path.basename(self._options.train_data))
    with open(self._options.train_data, "rb") as train_f, \
        open(out_path, "wb") as train_f2:
for line in train_f:
line2 = b""
for word in line.strip().split(b" "):
if self._word2id.get(word.strip()) is None:
line2 += b"<unk> "
else:
line2 += word + b" "
train_f2.write(line2 + b"\n")
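# A minimal sketch (not called anywhere in this file) of the analogy arithmetic
# that build_eval_graph wires into the TF graph, reproduced in plain numpy.
# `emb` is assumed to be a row-normalized [vocab_size, emb_dim] matrix; the
# function name and arguments are illustrative, not part of the original model.
def _analogy_sketch(emb, a_id, b_id, c_id, k=4):
  """Return ids of the k words nearest to c + (b - a) by cosine similarity."""
  target = emb[c_id] + (emb[b_id] - emb[a_id])
  # With unit-norm rows, a dot product is exactly the cosine similarity.
  scores = emb.dot(target)
  return np.argsort(scores)[-k:][::-1]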
def _start_shell(local_ns=None):
# An interactive shell is useful for debugging/development.
import IPython
user_ns = {}
if local_ns:
user_ns.update(local_ns)
user_ns.update(globals())
IPython.start_ipython(argv=[], user_ns=user_ns)
def main(_):
"""Train a word2vec model."""
  if not FLAGS.train_data or not FLAGS.eval_data or not FLAGS.save_path:
    print("--train_data, --eval_data and --save_path must be specified.")
sys.exit(1)
opts = Options()
with tf.device("/gpu:0"):
with tf.Graph().as_default(), tf.Session(config=tf.ConfigProto(allow_soft_placement=True, log_device_placement=True)) as session:
model = Word2Vec(opts, session)
model.read_analogies() # Read analogy questions
for _ in range(opts.epochs_to_train):
model.train() # Process one epoch
model.eval() # Eval analogies.
# Perform a final save.
model.saver.save(session, os.path.join(opts.save_path, "model.ckpt"),
global_step=model.global_step)
model.save_input()
if FLAGS.interactive:
# E.g.,
# [0]: model.analogy(b'france', b'paris', b'russia')
# [1]: model.nearby([b'proton', b'elephant', b'maxwell'])
_start_shell(locals())
if __name__ == "__main__":
tf.app.run()
|
mixed_compare.py
|
import logging
import multiprocessing
from multiprocessing.pool import ThreadPool
import threading
import time
logging.basicConfig()
logger = logging.getLogger("mixed_task_compare")
logger.setLevel(logging.DEBUG)
LOOP = 5000
NUM = 50000
elapsed_time = {}
def count(n):
    # Mixed CPU-bound (the decrement loop) and I/O-bound (the sleep) work.
    while n > 0:
        n = n - 1
        time.sleep(0.1)
# Sequential execution
# logger.info('Sequential execution')
# start = time.time()
# for i in range(LOOP):
#     count(NUM)
# stop = time.time()
# elapsed_time['Sequential'] = stop - start
# Threaded execution
logger.info('Threaded execution')
start = time.time()
threads = []
for i in range(LOOP):
thread = threading.Thread(target=count, args=(NUM,))
thread.start()
threads.append(thread)
for t in threads:
t.join()
stop = time.time()
elapsed_time['Threaded'] = stop - start
# Thread pool execution
logger.info('Thread pool execution')
start = time.time()
pool = ThreadPool(processes=200)
for i in range(LOOP):
pool.apply_async(count, (NUM,))
pool.close()
pool.join()
stop = time.time()
elapsed_time['ThreadPool(200)'] = stop - start
# Process execution
logger.info('Process execution')
start = time.time()
processes = []
for i in range(LOOP):
process = multiprocessing.Process(target=count, args=(NUM,))
process.start()
processes.append(process)
for p in processes:
p.join()
stop = time.time()
elapsed_time['Processed'] = stop - start
# Multiprocessing pool execution
logger.info('Multiprocessing pool(200) execution')
start = time.time()
pool = multiprocessing.Pool(processes=200)
for i in range(LOOP):
pool.apply_async(count, (NUM,))
pool.close()
pool.join()
stop = time.time()
elapsed_time['ProcessPool(200)'] = stop - start
# Multiprocessing pool plus thread pool execution
cpus = multiprocessing.cpu_count()
logger.info('Multiprocessing pool(%d) plus thread pool(100) execution' % cpus)
def threadpool_executor(processes=None, func=None, iterable=None):
    threadpool = ThreadPool(processes=processes)
    threadpool.map(func, iterable)
start = time.time()
pool = multiprocessing.Pool(processes=cpus)
for i in range(cpus):
    # Integer division keeps the per-process chunk size an int on Python 3.
    pool.apply_async(threadpool_executor, (100, count, [NUM] * (LOOP // cpus)))
pool.close()
pool.join()
stop = time.time()
elapsed_time['MultiProcessingPool(%d)_ThreadPool(100)' % cpus] = stop - start
# Compare performance
for key, value in sorted(elapsed_time.items()):
    logger.info("\t" + key + "\t" + str(value))
|
main.py
|
import threading
import sys
from flask import Flask, abort, request
from flask_limiter import Limiter
from yowsup.config.manager import ConfigManager
from yowsup.profile.profile import YowProfile
import sendclient
# Start yowsup thread
config = ConfigManager().load(sys.argv[1])
profile = YowProfile(config.phone, config)
stack = sendclient.YowsupSendStack(profile)
worker = threading.Thread(target=stack.start)
worker.daemon = True
worker.start()
# Set up Flask app
app = Flask(__name__)
# Set up rate limits as a safety guard in case the PLC goes crazy
limiter = Limiter(
app, application_limits=["5 per minute"])
@app.route('/notifyGroup', methods=['POST'])
def notifyGroup():
if not request.json or not request.json.get('message'):
abort(400)
stack.send_message(request.json['message'])
return '{"status": "success"}'
if __name__ == '__main__':
app.run(host='127.0.0.1', port=8080, debug=True)
|
hfut_img.py
|
# -*- coding:utf-8 -*-
"""
抓取全校学生的照片
"""
from __future__ import unicode_literals
import logging
import os
import sys
import threading
import requests
import six
from hfut import Guest, XC, HF
from hfut.util import cal_term_code
# Directory where the files are saved
DIR_NAME = 'img'
# First enrollment year
START_YEAR = 2012
# Last enrollment year
END_YEAR = 2015
# Campus
campus = XC
# A course everyone takes (military training)
if campus == HF:
    COURSE_CODE = '52000020'
else:
    COURSE_CODE = '5200023B'
# Configure logging
logger = logging.Logger('hfut_img', level=logging.WARNING)
sh = logging.StreamHandler()
fh = logging.FileHandler('hfut_img.log', encoding='utf-8')
fmt = logging.Formatter('%(threadName)s %(levelname)s line %(lineno)s: - %(asctime)s\n\t %(message)s', '%d %H:%M')
sh.setFormatter(fmt)
fh.setFormatter(fmt)
logger.addHandler(sh)
logger.addHandler(fh)
logger.setLevel(logging.INFO)
# Initialize the session
shortcuts = Guest(campus)
# Create the output directories
def setup_dir():
    if not os.path.isdir(DIR_NAME):
        os.mkdir(DIR_NAME)
        logger.info('Created directory {}'.format(DIR_NAME))
    for i in range(START_YEAR, END_YEAR + 1):
        path = os.path.join(DIR_NAME, six.text_type(i))
        if not os.path.isdir(path):
            os.mkdir(path)
            logger.info('Created directory {}'.format(path))
# Download the photos
def fetch_img(term_code):
file_suffix = '.jpg'
stu_sum = 0
success_sum = 0
fail_sum = 0
error_sum = 0
exist_sum = 0
    # Fetch every teaching class for the term
    klass = shortcuts.search_course(term_code, COURSE_CODE)
    if klass:
        logger.info('Term {} has {} teaching classes'.format(term_code, len(klass)))
        for k in klass:
            # Fetch the students of the teaching class
            class_stus = shortcuts.get_class_students(term_code, COURSE_CODE, k['教学班号'])
            if class_stus is None:
                logger.critical('Failed to fetch the teaching classes of term {}'.format(term_code))
                sys.exit(0)
            stu_num = len(class_stus['学生'])
            logger.info('Class {} has {} students'.format(class_stus['班级名称'], stu_num))
stu_sum += stu_num
for stu in class_stus['学生']:
year = str(stu['学号'] // 1000000)
code = str(stu['学号'])
img_url = six.moves.urllib.parse.urljoin(shortcuts.session.host, ''.join(
['student/photo/', year, '/', code, file_suffix]))
                # Names ending with '*' denote female students ('女'); default is male ('男').
                sex = '男'
                stu_name = stu['姓名']
                if stu['姓名'].endswith('*'):
                    sex = '女'
                    stu_name = stu_name[:-1]
full_name = ''.join([code, '-', sex, '-', stu_name])
filename = os.path.join(DIR_NAME, year, ''.join([full_name, file_suffix]))
                if os.path.isfile(filename):
                    logger.warning('Photo of {} was already downloaded'.format(full_name))
                    exist_sum += 1
                    continue
                try:
                    res = requests.get(img_url)
                    if res.status_code == 200:
                        with open(filename, 'wb') as fp:
                            fp.write(res.content)
                        logger.info('Downloaded photo of {}'.format(full_name))
                        success_sum += 1
                    elif res.status_code == 404:
                        logger.warning('Failed to download photo of {}'.format(full_name))
                        fail_sum += 1
                except Exception as e:
                    logger.error('Error while downloading photo of {}\n\t{}'.format(full_name, e))
                    error_sum += 1
        logger.info('Term {} has {} students in total'.format(term_code, stu_sum))
        logger.info('Term {} finished: {} succeeded, {} failed, {} errors, {} already existed'.format(
            term_code, success_sum, fail_sum, error_sum, exist_sum))
    else:
        logger.critical('No teaching classes found for term {}'.format(term_code))
sys.exit(0)
if __name__ == '__main__':
setup_dir()
for year in range(START_YEAR, END_YEAR + 1):
term_code = cal_term_code(year)
t = threading.Thread(target=fetch_img, name=year, args=(term_code,))
t.start()
|
HiwinRA605_socket_ros_test_20190625190318.py
|
#!/usr/bin/env python3
# license removed for brevity
# Receive commands from the strategy side and send them over a socket to the control-side PC
import socket
## Multithreading
import threading
import time
##
import sys
import os
import numpy as np
import rospy
import matplotlib as plot
from std_msgs.msg import String
from ROS_Socket.srv import *
from ROS_Socket.msg import *
import HiwinRA605_socket_TCPcmd as TCP
import HiwinRA605_socket_Taskcmd as Taskcmd
import enum
data = '0' # initial value of the data to transmit
Arm_feedback = 1 # assume the arm is busy
state_feedback = 0
NAME = 'socket_server'
client_response = 0 # initial response counter
point_data_flag = False
arm_mode_flag = False
speed_mode_flag = False
##------------class pos-------
class pos():
def __init__(self, x, y, z, pitch, roll, yaw):
self.x = x
self.y = y
self.z = z
self.pitch = pitch
self.roll = roll
self.yaw = yaw
##------------class socket_cmd---------
class socket_cmd():
    def __init__(self, grip, setvel, ra, delay, setboth, action, Speedmode):
self.grip = grip
self.setvel = setvel
self.ra = ra
self.delay = delay
self.setboth = setboth
self.action = action
self.Speedmode = Speedmode
##-----------switch define------------##
class switch(object):
def __init__(self, value):
self.value = value
self.fall = False
    def __iter__(self):
        """Return the match method once, then stop"""
        yield self.match
        # Falling off the end stops the generator; an explicit `raise
        # StopIteration` would be a RuntimeError under PEP 479 (Python 3.7+).
def match(self, *args):
"""Indicate whether or not to enter a case suite"""
if self.fall or not args:
return True
elif self.value in args: # changed for v1.5, see below
self.fall = True
return True
else:
return False
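# A minimal usage sketch of the switch/case pattern above (illustrative only;
# nothing in this module calls it). case(x, ...) returns True on a match, and
# case() with no arguments acts as the default branch.
def _switch_demo(value):
    for case in switch(value):
        if case(Taskcmd.Action_Type.PtoP):
            return 'point-to-point'
        if case(Taskcmd.Action_Type.Line):
            return 'line'
        if case(): # default
            return 'other'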
##-----------client feedback arm state----------
def socket_client_arm_state(Arm_state):
global state_feedback
rospy.wait_for_service('arm_state')
try:
Arm_state_client = rospy.ServiceProxy('arm_state', arm_state)
state_feedback = Arm_state_client(Arm_state)
#pos_feedback_times = pos_feedback.response
return state_feedback
except rospy.ServiceException as e:
print ("Service call failed: %s"%e)
##-----------client feedback arm state end----------
##------------ server side -------
def point_data(req): ## Receive pose data sent from the strategy side
global client_response,point_data_flag
pos.x = '%s'%req.x
pos.y = '%s'%req.y
pos.z = '%s'%req.z
pos.pitch = '%s'%req.pitch
pos.roll = '%s'%req.roll
pos.yaw = '%s'%req.yaw
client_response = client_response + 1
point_data_flag = True
return(client_response)
##----------Arm Mode-------------###
def Arm_Mode(req): ## Receive arm-mode data sent from the strategy side
global arm_mode_flag
socket_cmd.action = int('%s'%req.action)
socket_cmd.grip = int('%s'%req.grip)
socket_cmd.ra = int('%s'%req.ra)
socket_cmd.setvel = int('%s'%req.vel)
socket_cmd.setboth = int('%s'%req.both)
arm_mode_flag = True
return(1)
##-------Arm Speed Mode------------###
def Speed_Mode(req): ## Receive speed-mode data sent from the strategy side
global speed_mode_flag
socket_cmd.Speedmode = int('%s'%req.Speedmode)
speed_mode_flag = True
return(1)
# def Grip_Mode(req): ## Receive gripper action data sent from the strategy side
# socket_cmd.grip = int('%s'%req.grip)
# return(1)
def socket_server(): ## Create the server node
rospy.init_node(NAME)
a = rospy.Service('arm_mode',arm_mode, Arm_Mode) ##server arm mode data
s = rospy.Service('arm_pos',arm_data, point_data) ##server arm point data
b = rospy.Service('speed_mode',speed_mode, Speed_Mode) ##server speed mode data
#c = rospy.Service('grip_mode',grip_mode, Grip_Mode) ##server grip mode data
print ("Ready to connect")
rospy.spin() ## spin one
##------------ server side end -------
##---------- socket packet transfer ----------##
##-----------socket client--------
def socket_client():
global Arm_feedback,data
try:
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.connect(('192.168.0.1', 8080))#iclab 5 & iclab hiwin
#s.connect(('192.168.1.102', 8080))#iclab computerx
except socket.error as msg:
print(msg)
sys.exit(1)
print('Connection has been successful')
print(s.recv(1024))
    data = socket_command() # build the command string from the current flags
    socket_cmd.action = 5 ## reset to the initial mode state
    s.send(data.encode('utf-8')) # str must be encoded before sending over the socket
feedback_str = s.recv(1024)
    # The arm side reports the arm state
    if str(feedback_str[2]) == '70': # 'F': the arm is ready for the next motion command
        Arm_feedback = 0
        socket_client_arm_state(Arm_feedback)
        #print("isbusy false")
    if str(feedback_str[2]) == '84': # 'T': the arm is busy and cannot run the next motion command
        Arm_feedback = 1
        socket_client_arm_state(Arm_feedback)
        #print("isbusy true")
    if str(feedback_str[2]) == '54': # '6': the strategy has finished
        Arm_feedback = 6
        socket_client_arm_state(Arm_feedback)
        print("shutdown")
    ##--------------- socket arm-command transfer end ---------------
    if Arm_feedback == Taskcmd.Arm_feedback_Type.shutdown:
        rospy.on_shutdown(myhook)
    s.close()
def socket_command():
while(point_data_flag == True or arm_mode_flag == True or speed_mode_flag == True):
        ##--------------- socket arm-command transfer ---------------
        #------- select mode --------
for case in switch(socket_cmd.action):
#-------PtP Mode--------
if case(Taskcmd.Action_Type.PtoP):
for case in switch(socket_cmd.setboth):
if case(Taskcmd.Ctrl_Mode.CTRL_POS):
data = TCP.SetPtoP(socket_cmd.grip,Taskcmd.RA.ABS,Taskcmd.Ctrl_Mode.CTRL_POS,pos.x,pos.y,pos.z,pos.pitch,pos.roll,pos.yaw,socket_cmd.setvel)
break
if case(Taskcmd.Ctrl_Mode.CTRL_EULER):
data = TCP.SetPtoP(socket_cmd.grip,Taskcmd.RA.ABS,Taskcmd.Ctrl_Mode.CTRL_EULER,pos.x,pos.y,pos.z,pos.pitch,pos.roll,pos.yaw,socket_cmd.setvel)
break
if case(Taskcmd.Ctrl_Mode.CTRL_BOTH):
data = TCP.SetPtoP(socket_cmd.grip,Taskcmd.RA.ABS,Taskcmd.Ctrl_Mode.CTRL_BOTH,pos.x,pos.y,pos.z,pos.pitch,pos.roll,pos.yaw,socket_cmd.setvel)
break
break
#-------Line Mode--------
if case(Taskcmd.Action_Type.Line):
for case in switch(socket_cmd.setboth):
if case(Taskcmd.Ctrl_Mode.CTRL_POS):
data = TCP.SetLine(socket_cmd.grip,Taskcmd.RA.ABS,Taskcmd.Ctrl_Mode.CTRL_POS,pos.x,pos.y,pos.z,pos.pitch,pos.roll,pos.yaw,socket_cmd.setvel)
break
if case(Taskcmd.Ctrl_Mode.CTRL_EULER):
data = TCP.SetLine(socket_cmd.grip,Taskcmd.RA.ABS,Taskcmd.Ctrl_Mode.CTRL_EULER,pos.x,pos.y,pos.z,pos.pitch,pos.roll,pos.yaw,socket_cmd.setvel )
break
if case(Taskcmd.Ctrl_Mode.CTRL_BOTH):
data = TCP.SetLine(socket_cmd.grip,Taskcmd.RA.ABS,Taskcmd.Ctrl_Mode.CTRL_BOTH,pos.x,pos.y,pos.z,pos.pitch,pos.roll,pos.yaw,socket_cmd.setvel )
break
break
            #------- set the arm speed --------
if case(Taskcmd.Action_Type.SetVel):
data = TCP.SetVel(socket_cmd.grip, socket_cmd.setvel)
break
            #------- set the arm delay time --------
if case(Taskcmd.Action_Type.Delay):
data = TCP.SetDelay(socket_cmd.grip,0)
break
            #------- set the arm fast & safe mode --------
if case(Taskcmd.Action_Type.Mode):
data = TCP.Set_SpeedMode(socket_cmd.grip,socket_cmd.Speedmode)
break
return(data)
##-----------socket client end--------
##------------- socket packet transfer end --------------##
## Multithreading
def thread_test():
    socket_client()
## multithreading end
def myhook():
print ("shutdown time!")
if __name__ == '__main__':
    socket_cmd.action = 5 ## initial mode state
t = threading.Thread(target=thread_test)
    t.start() # start the worker thread
socket_server()
t.join()
|
osc_server.py
|
"""OSC Servers that receive UDP packets and invoke handlers accordingly.
Use like this:
dispatcher = dispatcher.Dispatcher()
# This will print all parameters to stdout.
dispatcher.map("/bpm", print)
server = ForkingOSCUDPServer((ip, port), dispatcher)
server.serve_forever()
or run the server on its own thread:
server = ForkingOSCUDPServer((ip, port), dispatcher)
server_thread = threading.Thread(target=server.serve_forever)
server_thread.start()
...
server.shutdown()
Those servers are using the standard socketserver from the standard library:
http://docs.python.org/library/socketserver.html
Alternatively, the AsyncIOOSCUDPServer server can be integrated with an
asyncio event loop:
loop = asyncio.get_event_loop()
server = AsyncIOOSCUDPServer(server_address, dispatcher, loop)
server.serve()
loop.run_forever()
"""
import asyncio
import os
import socketserver
import time
from pythonosc import osc_bundle
from pythonosc import osc_message
from pythonosc import osc_packet
def _call_handlers_for_packet(data, dispatcher):
"""
    This function calls the handlers registered to the dispatcher for
    every message found in the packet.
    The process/thread granularity is thus the OSC packet, not the handler.

    If parameters were registered with the dispatcher, then the handlers are
    called this way:
      handler('/address that triggered the message',
              registered_param_list, osc_msg_arg1, osc_msg_arg2, ...)
    if no parameters were registered, then it is just called like this:
      handler('/address that triggered the message',
              osc_msg_arg1, osc_msg_arg2, osc_msg_arg3, ...)
"""
# Get OSC messages from all bundles or standalone message.
try:
packet = osc_packet.OscPacket(data)
for timed_msg in packet.messages:
now = time.time()
handlers = dispatcher.handlers_for_address(
timed_msg.message.address)
if not handlers:
continue
# If the message is to be handled later, then so be it.
if timed_msg.time > now:
time.sleep(timed_msg.time - now)
for handler in handlers:
if handler.args:
handler.callback(
timed_msg.message.address, handler.args, *timed_msg.message)
else:
handler.callback(timed_msg.message.address, *timed_msg.message)
except osc_packet.ParseError:
pass
class _UDPHandler(socketserver.BaseRequestHandler):
"""Handles correct UDP messages for all types of server.
Whether this will be run on its own thread, the server's or a whole new
process depends on the server you instanciated, look at their documentation.
This method is called after a basic sanity check was done on the datagram,
basically whether this datagram looks like an osc message or bundle,
if not the server won't even bother to call it and so no new
threads/processes will be spawned.
"""
def handle(self):
_call_handlers_for_packet(self.request[0], self.server.dispatcher)
def _is_valid_request(request):
"""Returns true if the request's data looks like an osc bundle or message."""
data = request[0]
return (
osc_bundle.OscBundle.dgram_is_bundle(data)
or osc_message.OscMessage.dgram_is_message(data))
class OSCUDPServer(socketserver.UDPServer):
"""Superclass for different flavors of OSCUDPServer"""
def __init__(self, server_address, dispatcher):
super().__init__(server_address, _UDPHandler)
self._dispatcher = dispatcher
def verify_request(self, request, client_address):
"""Returns true if the data looks like a valid OSC UDP datagram."""
return _is_valid_request(request)
@property
def dispatcher(self):
"""Dispatcher accessor for handlers to dispatch osc messages."""
return self._dispatcher
class BlockingOSCUDPServer(OSCUDPServer):
"""Blocking version of the UDP server.
Each message will be handled sequentially on the same thread.
Use this is you don't care about latency in your message handling or don't
have a multiprocess/multithread environment (really?).
"""
class ThreadingOSCUDPServer(socketserver.ThreadingMixIn, OSCUDPServer):
"""Threading version of the OSC UDP server.
Each message will be handled in its own new thread.
Use this when lightweight operations are done by each message handlers.
"""
if hasattr(os, "fork"):
class ForkingOSCUDPServer(socketserver.ForkingMixIn, OSCUDPServer):
"""Forking version of the OSC UDP server.
Each message will be handled in its own new process.
Use this when heavyweight operations are done by each message handlers
and forking a whole new process for each of them is worth it.
"""
class AsyncIOOSCUDPServer():
"""Asyncio version of the OSC UDP Server.
Each UDP message is handled by _call_handlers_for_packet, the same method as in the
OSCUDPServer family of blocking, threading, and forking servers
"""
def __init__(self, server_address, dispatcher, loop):
"""
:param server_address: tuple of (IP address to bind to, port)
:param dispatcher: a pythonosc.dispatcher.Dispatcher
:param loop: an asyncio event loop
"""
self._server_address = server_address
self._dispatcher = dispatcher
self._loop = loop
class _OSCProtocolFactory(asyncio.DatagramProtocol):
"""OSC protocol factory which passes datagrams to _call_handlers_for_packet"""
def __init__(self, dispatcher):
self.dispatcher = dispatcher
def datagram_received(self, data, unused_addr):
_call_handlers_for_packet(data, self.dispatcher)
def serve(self):
"""creates a datagram endpoint and registers it with our event loop"""
listen = self._loop.create_datagram_endpoint(
lambda: self._OSCProtocolFactory(self.dispatcher),
local_addr=self._server_address)
self._loop.run_until_complete(listen)
@property
def dispatcher(self):
return self._dispatcher
|
article_extractor2.py
|
import subprocess
import time
from multiprocessing import Queue, Process, Value
import json
import psycopg2
from io import StringIO
from html.parser import HTMLParser
class MLStripper(HTMLParser):
def __init__(self):
super().__init__()
self.reset()
self.strict = False
self.convert_charrefs= True
self.text = StringIO()
def handle_data(self, d):
self.text.write(d)
def get_data(self):
return self.text.getvalue()
def strip_tags(html):
s = MLStripper()
s.feed(html)
return s.get_data()
def extract_process(process_number, jobs_queue):
output_filename = 'working_dir/article_extractor_output/records_' + str(process_number) + '.txt'
output_file = open(output_filename, 'w')
connection = psycopg2.connect(user = 'postgres',
password = 'postgres',
host = '127.0.0.1',
port = 5432,
database = 'postgres')
connection.autocommit = True
query = '''
WITH RECURSIVE graph(id, name, main_category, depth, path, cycle) AS (
SELECT id, '', main_category, 0 depth, ARRAY[id], false
FROM fs_category
WHERE name = %s
UNION
SELECT c.id, c.name, c.main_category, g.depth + 1, path || c.id, c.id = ANY(path)
FROM graph g
INNER JOIN fs_category_relationship cr on g.id = cr.fs_category_child_id
INNER JOIN fs_category c on cr.fs_category_parent_id = c.id
WHERE NOT cycle and depth < 5
)
SELECT DISTINCT name FROM graph
WHERE main_category = true and depth = (SELECT min(depth) FROM graph WHERE main_category = TRUE)
limit 5
'''
while True:
line = jobs_queue.get()
if line:
page_json = json.loads(line)
cursor = connection.cursor()
categories = [cat.lower() for cat in page_json['categories']]
if len(categories) == 0:
continue
# Determine main categories
mainCategories = []
for category in categories:
cursor.execute(query, (category,))
if cursor.rowcount == 0:
continue
records = cursor.fetchall()
for record in records:
mainCategories.append(record[0])
if len(mainCategories) > 4:
break
if len(mainCategories) > 4:
break
cursor.close()
if len(mainCategories) == 0:
continue
# Write out the training records
trainingRecord = ''
for mainCategory in mainCategories:
trainingRecord += '__label__'
trainingRecord += mainCategory.replace(' ', '-')
trainingRecord += ' '
trainingRecord += strip_tags(page_json['text']).replace('\n', ' ')
trainingRecord += '\n'
output_file.write(trainingRecord)
else:
break
    if connection:
connection.close()
output_file.close()
if __name__ == '__main__':
print('Starting Article Extractor 2...')
categories_file = 'working_dir/wikiextractor_output_articles.bz2'
process_count = 12
maxsize = 10 * process_count
    max_spool_length = 10000
    # spool_length is never incremented, so the throttling check below never fires.
    spool_length = Value('i', 0, lock=False)
# initialize jobs queue
jobs_queue = Queue(maxsize=maxsize)
# start worker processes
workers = []
for i in range(process_count):
extractor = Process(target=extract_process, args=(i, jobs_queue,))
extractor.daemon = True
extractor.start()
workers.append(extractor)
for i, line in enumerate(subprocess.Popen(['bzcat'], stdin = open(categories_file), stdout = subprocess.PIPE).stdout):
if spool_length.value > max_spool_length:
# reduce to 10%
while spool_length.value > max_spool_length/10:
time.sleep(10)
jobs_queue.put(line) # goes to any available extract_process
for _ in workers:
jobs_queue.put(None)
for w in workers:
w.join()
print('Article Extractor 2 done.')
|
server.py
|
from http.server import BaseHTTPRequestHandler, HTTPServer
import re
import socket
import numpy as np
from urllib.parse import urlparse
from urllib.parse import parse_qs
from multiprocessing import Process
import multiprocessing
# from fuzzywuzzy import fuzz
# from fuzzywuzzy import process
from rapidfuzz import process
from rapidfuzz import fuzz
import json
from collections import Counter
import sys
# import json
# records = json.loads(open("InventoryScrap.txt","r",encoding="utf-8").read())
#
# len(records)
#
# # things = list(map(lambda x: x["tags"], records))
# #%%
# # records = list(filter(lambda x: x["recordType"]=="object",records))
# # things = list(map(lambda x: x["tags"], records))
# records = list(filter(lambda x: x["RecordType"]=="object",records))
# things = list(map(lambda x: x["Tags"], records))
# # things2 = list(map(lambda x: x["Name"], records))
# things2 = list(map(lambda x: sum(map(lambda y: list(map(lambda z: z.lower().strip(), re.split(' |-|_',y))),x["Path"].split("\\")),[]), records))
use_bert = False
def query(vec,embs,n=3):
# index = np.argmax(np.dot(embs,vec/np.linalg.norm(vec)))
if use_bert:
scores = np.dot(embs,vec/np.linalg.norm(vec))
else:
scores = np.dot(embs,vec[0]/np.linalg.norm(vec[0]))
# nonlocal scores
# scores = -np.linalg.norm(embs-vec,axis=1)
indices = np.argsort(scores)
# for i in indices[-n:][::-1]:
# scores1.append(scores[i])
# print(scores[i])
return scores,indices[-n:]
def queryParal(procid,vec,embs,n,return_dict):
scores,indices = query(vec,embs,n=n)
return_dict[procid] = scores,indices
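# A minimal sketch of how query() above is used (illustrative only; nothing in
# this file calls it). It builds a row-normalized embedding matrix, fakes a
# [1, dim] query vector like the CLIP branch produces, and keeps the top-3
# rows by cosine score. All names here are hypothetical.
def _query_demo():
    embs = np.random.randn(100, 512)
    embs /= np.linalg.norm(embs, axis=1, keepdims=True)
    vec = np.random.randn(1, 512)
    scores, top = query(vec, embs, n=3)  # `top` holds indices, ascending by score
    return [(int(i), float(scores[i])) for i in top[::-1]]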
if __name__ == "__main__":
# use_bert = True
if use_bert:
from sentence_transformers import SentenceTransformer
model = SentenceTransformer('distilbert-base-nli-mean-tokens')
else:
from clip_embeddings import embed_text, embed_image
records = json.loads(open("new_inventory_index.txt","r",encoding="utf-8").read())
# records[0]
# len(records)
records = list(filter(lambda x: ("name" in x and x["name"] is not None) and ("tags" in x and x["tags"] is not None) and ("thumbnailUri" in x and x["thumbnailUri"] is not None), records))
bad_thumbnails = ["R-18fb0f87-a8dc-426e-85b6-835a96d74ec3","R-47e0eba3-b408-45a6-a25d-771533803680","R-72815276-5acf-4b2f-a6f4-a4ecfa7e284d","R-84e14452-9d93-449a-8c77-910c62694a03","R-8e023894-dc52-43c4-a575-e09db0e3751c","R-a8c347ef-76fc-4759-b9c8-09a6c4c02c3d","R-aa261a5b-747e-49e6-a8a2-a3dc926dc3e7","R-afa0122b-faab-4bf3-a537-938d0a053e55","R-f6fe4528-f67c-46a5-8fb2-d18fd2f471de"]
records = list(filter(lambda x: x["id"] not in bad_thumbnails, records))
tags = list(map(lambda x: x["tags"], records))
names = list(map(lambda x: x["name"], records))
paths = list(map(lambda x: sum(map(lambda y: list(map(lambda z: z.lower().strip(), re.split(' |-|_',y))),x["path"].split("\\")),[]), records))
image_thumbnails = list(map(lambda x: "thumbnails/"+x["id"]+".webp.jpg", records))
len(records)
# image_embeddings = embed_image(image_thumbnails)
# sentence_embeddings = embed_text(names)
# sentence_embeddings = model.encode(names)
# np.save("sentence_embeddings",sentence_embeddings)
# np.save("sentence_embeddings_clip",sentence_embeddings)
# np.save("image_embeddings_clip",image_embeddings)
## use pre-computed embeddings for next time putting in Neos
if use_bert:
sentence_embeddings = np.load("sentence_embeddings.npy")
normalized_sentence_embeddings = sentence_embeddings / np.linalg.norm(sentence_embeddings,axis=1, keepdims=True)
else:
sentence_embeddings = np.load("sentence_embeddings_clip.npy")
normalized_sentence_embeddings = sentence_embeddings / np.linalg.norm(sentence_embeddings,axis=1, keepdims=True)
image_embeddings = np.load("image_embeddings_clip.npy")
normalized_image_embeddings = image_embeddings / np.linalg.norm(image_embeddings,axis=1, keepdims=True)
# names = [t.encode('ascii', 'ignore') for t in names]
# names = [(n if n != "" else " ") for n in names]
# sentence_weight = 0.5
default_text_weight = 0.4
default_image_weight = 0.6
# default_fuzzy_weight = 0.5
default_fuzzy_weight = 0.2
manager = multiprocessing.Manager()
# def search(query_str,n=3,fuzzy_weight=0.5):
def search(query_str,n=3,fuzzy_weight=default_fuzzy_weight,text_weight=default_text_weight,image_weight=default_image_weight):
print(query_str)
if use_bert:
query_embedding = model.encode(query_str)
else:
# import time
# start_time = time.time()
query_embedding = embed_text(query_str)
# print("--- %s seconds ---" % (time.time() - start_time))
if use_bert:
embeddings = normalized_sentence_embeddings
scores,indices = query(query_embedding,embeddings,n)
results1 = Counter({i:text_weight*scores[i] for i in indices})
# print(results1)
if fuzzy_weight > 0:
# results2 = process.extract(query_str, {i:x for i,x in enumerate(names)}, limit=n)
results2 = process.extract(query_str, names, scorer=fuzz.WRatio, limit=n)
results2 = Counter({x[2]:(fuzzy_weight*x[1]/100) for x in results2})
# print(results2)
for key,value in list(results1.most_common()):
results2[key] = fuzzy_weight*fuzz.WRatio(query_str,names[key])/100
for key,value in list(results2.most_common()):
results1[key] = text_weight*scores[key]
results = results1 + results2
return [key for key,value in results.most_common(n)]
else:
return [key for key,value in results1.most_common(n)]
else:
# embeddings = sentence_weight * sentence_embeddings + (1-sentence_weight) * image_embeddings
# embeddings = sentence_weight * normalized_sentence_embeddings + (1-sentence_weight) * normalized_image_embeddings
# import time
# start_time = time.time()
scores_text,indices_text = query(query_embedding,normalized_sentence_embeddings,n)
# print("--- %s seconds ---" % (time.time() - start_time))
# start_time = time.time()
scores_images,indices_images = query(query_embedding,normalized_image_embeddings,n)
# print("--- %s seconds ---" % (time.time() - start_time))
# return_dict = manager.dict()
# p = Process(target=queryParal, args=("text",query_embedding,normalized_sentence_embeddings,n, return_dict))
# p.start()
# p2 = Process(target=queryParal, args=("images",query_embedding,normalized_image_embeddings,n, return_dict))
# p2.start()
# p.join()
# p2.join()
# scores_text, indices_text = return_dict["text"]
# scores_images, indices_images = return_dict["images"]
results_text = Counter({i:text_weight*scores_text[i] for i in indices_text})
results_images = Counter({i:image_weight*scores_images[i] for i in indices_images})
# print(results1)
if fuzzy_weight > 0:
import time
start_time = time.time()
# results2 = process.extract(query_str, {i:x for i,x in enumerate(names)}, limit=n)
# print(query_str)
# print(type(names))
print(names[0])
results2 = process.extract(query_str, names, scorer=fuzz.WRatio, limit=n)
# results2 = process.extract("hahatest", ["test","tost"], scorer=fuzz.WRatio, limit=1)
print("--- %s seconds ---" % (time.time() - start_time))
results2 = Counter({x[2]:(fuzzy_weight*x[1]/100) for x in results2})
# print(results2)
for key,value in list(results_text.most_common()):
results2[key] = fuzzy_weight*fuzz.WRatio(query_str,names[key])/100
for key,value in list(results_images.most_common()):
results2[key] = fuzzy_weight*fuzz.WRatio(query_str,names[key])/100
for key,value in list(results2.most_common()):
results_text[key] = text_weight*scores_text[key]
results_images[key] = image_weight*scores_images[key]
results = results_text + results_images + results2
return [key for key,value in results.most_common(n)]
            else:
                # No fuzzy component: just merge the text and image scores.
                results = results_text + results_images
                return [key for key, value in results.most_common(n)]
# things[0]
#%%
class S(BaseHTTPRequestHandler):
def _set_headers(self):
self.send_response(200)
self.send_header('Content-type', 'text/html')
self.end_headers()
def do_GET(self):
print(self.path)
if self.path == "/favicon.ico": return
self._set_headers()
# query=self.path[1:]
query_params = parse_qs(urlparse(self.path).query)
query = query_params["q"][0] if "q" in query_params else self.path[1:]
fuzzy_weight = float(query_params["f"][0]) if "f" in query_params else default_fuzzy_weight
text_weight = float(query_params["t"][0]) if "t" in query_params else default_text_weight
image_weight = float(query_params["i"][0]) if "i" in query_params else default_image_weight
print(query)
results_ids=[]
results_str=""
# for i,thing in enumerate(things):
# if query.lower() in thing or query.lower() in things2[i]:
indices = search(query.lower(),100,fuzzy_weight=fuzzy_weight,text_weight=text_weight,image_weight=image_weight)
for i in indices:
results_ids.append(i)
r=records[i]
thumbnailUri = r["thumbnailUri"].split(".")[0] if "thumbnailUri" in r else ""
assetUri = r["assetUri"].split(".")[0]+".7zbson" if "assetUri" in r else ""
name = r["name"].split(".")[0].replace(",",".") if "name" in r else ""
ownerName = r["ownerName"].split(".")[0] if "ownerName" in r else ""
path = r["path"].split(".")[0] if "path" in r else ""
results_str += thumbnailUri+"|"+assetUri+"|"+name+"|"+ownerName+"|"+path+"|,"
# results_str += name + " " + r["id"] + "\n"
# i = np.random.choice(results)
sys.stdout.flush()
self.wfile.write(bytes(str(results_str), "utf-8"))
def do_HEAD(self):
self._set_headers()
def do_POST(self):
# Doesn't do anything with posted data
self._set_headers()
self.wfile.write(bytes("<html><body><h1>POST!</h1></body></html>", "utf-8"))
def do_OPTIONS(self):
self.send_response(200, "ok")
self.send_header('Access-Control-Allow-Origin', '*')
self.send_header('Access-Control-Allow-Methods', 'GET, OPTIONS')
self.send_header("Access-Control-Allow-Headers", "X-Requested-With")
self.send_header("Access-Control-Allow-Headers", "Content-Type")
self.end_headers()
def do_CONNECT(self):
self.send_response(200, "ok")
self.send_header('Access-Control-Allow-Origin', '*')
self.send_header('Access-Control-Allow-Methods', 'GET, OPTIONS')
self.send_header("Access-Control-Allow-Headers", "X-Requested-With")
self.send_header("Access-Control-Allow-Headers", "Content-Type")
self.end_headers()
def finish(self,*args,**kw):
try:
if not self.wfile.closed:
self.wfile.flush()
self.wfile.close()
except socket.error:
pass
self.rfile.close()
def run(server_class=HTTPServer, handler_class=S, port=80):
server_address = ('', port)
httpd = server_class(server_address, handler_class)
print('Starting httpd...')
httpd.serve_forever()
run(port=6969)
|
handler.py
|
from abc import ABC
from datetime import datetime
import json
import os
import shutil
import log
import threading
import urllib
import re
import glob
import settings
from scripts import util
from webdriver.webdriver_connection import WebDriverConnection
from database.database_connection import DatabaseConnection
from scripts import parser
from database import insertions
class Cache(ABC):
@staticmethod
def clear(path):
if os.path.exists(path):
try:
shutil.rmtree(path)
log.info("HANDLER", f"{path} removed")
except Exception as e:
log.error("HANDLER", f"{path} not removed ", str(e))
return
else:
log.info("HANDLER", f"{path} is already removed")
@staticmethod
def init(path):
if not os.path.exists(path):
try:
os.makedirs(path)
log.info("HANDLER", f"{path} is initialized")
except Exception as e:
log.error(
"HANDLER", f"{path} is not initialized ==> " + str(e))
return
else:
log.info("HANDLER", f"{path} is already initialized")
@staticmethod
def clearAll(paths: list[str]):
for path in paths:
Cache.clear(path)
@staticmethod
def initAll(paths: list[str]):
for path in paths:
Cache.init(path)
@staticmethod
def resetAll():
caches = [
settings.FETCHED_CACHE,
settings.PARSED_CACHE,
settings.SERVER_CACHE
]
Cache.clearAll(caches)
Cache.initAll(caches)
@staticmethod
def writeTo(content: str, *, file: str, to: str, format: str = None, encoding: str = "utf8"):
hashval = util.get_hash(file)
finalpath = to + hashval
if format:
finalpath += f'.{format}'
try:
with open(finalpath, 'w+', encoding=encoding) as buffer:
if to == settings.FETCHED_CACHE:
buffer.write(util.now() + '\n' + content)
elif to == settings.PARSED_CACHE:
json.dump(content, buffer, indent=4, ensure_ascii=False)
else:
buffer.write(content)
except Exception as e:
log.error("HANDLER", str(e))
return False, str(e)
return True, None
@staticmethod
def isCached(file: str, from_: str):
hashval = util.get_hash(file)
finalpath = from_ + hashval
if not os.path.exists(finalpath):
return False, None
try:
with open(finalpath, 'r') as buffer:
if from_ == settings.FETCHED_CACHE:
date = str(buffer.readline()).strip()
if Cache.isExpired(date):
return False, None
except Exception as e:
log.error("HANDLER", str(e))
return None, str(e)
return True, None
@staticmethod
def readFrom(file: str, from_: str):
hashval = util.get_hash(file)
finalpath = from_ + hashval
try:
with open(finalpath, 'r') as buffer:
content = buffer.read()
except Exception as e:
log.error("HANDLER", str(e))
return False, f"{file} doesn't exist"
return True, content
@staticmethod
def isExpired(date: str):
return abs(util.now(no_str=True) - datetime.strptime(date, "%Y-%m-%d %H:%M:%S")).days != 0
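# A usage sketch for the Cache helpers above (illustrative only; nothing in
# this module calls it): cache a fetched page under the hash of its URL and
# read it back.
def _cache_demo(url: str, html: str):
    Cache.init(settings.FETCHED_CACHE)
    Cache.writeTo(html, file=url, to=settings.FETCHED_CACHE)
    ok, content = Cache.readFrom(url, settings.FETCHED_CACHE)
    return content if ok else None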
class Scheduler(ABC):
@staticmethod
def run(func, sec: int = 86400):
def target():
while not stopped.wait(sec):
func()
stopped = threading.Event()
threading.Thread(target=target).start()
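# A usage sketch for Scheduler.run (illustrative only; not executed here):
# refresh the whole database once a day on a background thread.
def _scheduler_demo():
    Scheduler.run(Handler.refresh, sec=86400)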
class Handler(ABC):
@staticmethod
def fetch(urls: list[str]):
for url in urls:
isCached, err = Cache.isCached(url, settings.FETCHED_CACHE)
if err:
return (500, err)
if not isCached:
try:
log.info("HANDLER", f"{url} is fetching")
WebDriverConnection.driver.get(url)
content = WebDriverConnection.driver.page_source
wrto, err = Cache.writeTo(content, file=url,
to=settings.FETCHED_CACHE)
if not wrto:
return (400, err)
except Exception as e:
log.error("HANDLER", str(e))
return (500, str(e))
else:
log.info("HANDLER", f"{url} is already in cache")
return (200, {'already-cached': True})
return (200, True)
@staticmethod
def parse(urls: list[str]):
workers = []
for url in urls:
status, content = Cache.readFrom(url, settings.FETCHED_CACHE)
if not status:
return 500, content
try:
tid = threading.Thread(
target=parser.run, args=(url, content, Cache.writeTo, url, settings.PARSED_CACHE))
workers.append(tid)
tid.start()
except Exception as e:
log.error("HANDLER", str(e))
return 500, str(e)
for tid in workers:
tid.join()
return 200, True
@staticmethod
def insert(urls: list[str]):
hashed_urls = [util.get_hash(url) for url in urls]
products = glob.glob(settings.PARSED_CACHE + "*.json")
for product in products:
content_file = product.split(settings.PARSED_CACHE)[
1].split('.json')[0]
if content_file not in hashed_urls:
continue
try:
with open(product, "r") as other:
insertions.insert(json.load(other))
except Exception as e:
log.error(content_file, str(e))
return 500, str(e)
log.info(content_file, "Added to database",
fore=log.Fore.LIGHTGREEN_EX)
return 200, True
@staticmethod
def pattern_match(url: str):
pattern = r"^(http[s]?:\/\/)?(www\.)?(hepsiburada.com\/)(\S*)$"
return re.match(pattern, url)
@staticmethod
def refresh():
try:
DatabaseConnection.cursor.execute("""
select url from product
""")
urls = DatabaseConnection.cursor.fetchall()
urls = [url[0] for url in urls]
if urls:
log.info("HANDLER", "Database is refreshing")
Cache.resetAll()
return Handler.ordinal(urls)
else:
log.info("HANDLER", "Database is empty")
return 200, "database is empty"
except Exception as e:
log.error("HANDLER", str(e))
return 500, str(e)
@staticmethod
def track(params):
urls = list(
filter(lambda url: Handler.pattern_match(url), params['urls']))
return Handler.ordinal(urls)
@staticmethod
def ordinal(urls: list[str]):
errorWhen = [400, 500]
code, msg = Handler.fetch(urls)
if code in errorWhen:
return code, msg
code, msg = Handler.parse(urls)
if code in errorWhen:
return code, msg
code, msg = Handler.insert(urls)
if code in errorWhen:
return code, msg
return 200, True
|
Broadlink-RM3-MQTTBridge.py
|
#!/usr/bin/python
#
# I've taken Perrin7's NinjaCape MQTT Bridge code and modified it for the
# Broadlink RM3 and the Blackbean python code.
#
# This retains Perrin7's MIT License
# 1) Read the ini file using configparser
# a) get the Blackbean information
# i) host
# ii) port
# iii) mac
# iv) timeout
# b) get the MQTT information
# i) topic
# ii) host
# iii) port
# iv) user
# v) password
# used to interface the NinjaCape to openHAB via MQTT
# - reads data from serial port and publishes on MQTT client
# - writes data to serial port from MQTT subscriptions
#
# - uses the Python MQTT client from the Mosquitto project http://mosquitto.org (now in Paho)
#
# https://github.com/perrin7/ninjacape-mqtt-bridge
# perrin7
#
import paho.mqtt.client as mqtt
import os
import json
import threading
import time
import broadlink, configparser
import sys, getopt
import binascii
import netaddr
# This is a local file (has the BlackBeanControlSetting, config file name)
# Not sure how to pass the config file name to it but we can pass that to
# the configparser.configparser.read('filename.ini')
#import Settings
from os import path
from Crypto.Cipher import AES
myType = 0x2712 # RM2, close enough for the RM3
# from Settings.py
#ApplicationDir = path.dirname(path.abspath(__file__))
#BlackBeanControlSettings = path.join(ApplicationDir, 'BlackBeanControl.ini')
# Hard coded for now, I'll change that later
myConfigFile = os.environ['HOME'] + '/.mqtt-bbeancr.ini'
if not os.path.isfile(myConfigFile):
    print("Config file not found: %s" % myConfigFile, file=sys.stderr)
    exit(2)
#
Settings = configparser.ConfigParser()
Settings.read(myConfigFile)
IPAddress = Settings.get('General', 'IPAddress')
#
Port = Settings.get('General', 'Port')
#
MACAddress = Settings.get('General', 'MACAddress')
#
Timeout = Settings.get('General', 'Timeout')
#
MQTT_Topic = Settings.get('MQTT', 'Topic')
MQTT_Host = Settings.get('MQTT', 'Host')
#
MQTT_Port = int(Settings.get('MQTT', 'Port'))
#
MQTT_Timeout = int(Settings.get('MQTT', 'Timeout'))
#SettingsFile = configparser.ConfigParser()
#SettingsFile.optionxform = str
#SettingsFile.read(Settings.BlackBeanControlSettings)
print >> sys.stderr, "IPAddress = %s" % (IPAddress)
print >> sys.stderr, "Port = %s" % (Port)
print >> sys.stderr, "MACAddress = %s" % (MACAddress)
print >> sys.stderr, "Timeout = %s" % (Timeout)
print >> sys.stderr, "Type = %s" % (myType)
print >> sys.stderr, "MQTT_Topic = %s" % (MQTT_Topic)
print >> sys.stderr, "MQTT_Host = %s" % (MQTT_Host)
print >> sys.stderr, "MQTT_Port = %s" % (MQTT_Port)
print >> sys.stderr, "MQTT_Timeout = %s" % (MQTT_Timeout)
MACAddress = bytearray.fromhex(MACAddress)
SentCommand = ''
ReKeyCommand = False
DeviceName =''
DeviceIPAddress = ''
DevicePort = ''
DeviceMACAddres = ''
DeviceTimeout = ''
AlternativeIPAddress = ''
AlternativePort = ''
AlternativeMACAddress = ''
AlternativeTimeout = ''
try:
Options, args = getopt.getopt(sys.argv[1:], 'c:d:r:i:p:m:t:h', ['command=','device=','rekey=','ipaddress=','port=','macaddress=','timeout=','help'])
except getopt.GetoptError:
print('BlackBeanControl.py -c <Command name> [-d <Device name>] [-i <IP Address>] [-p <Port>] [-m <MAC Address>] [-t <Timeout>] [-r <Re-Key Command>]')
sys.exit(2)
#
for Option, Argument in Options:
if Option in ('-h', '--help'):
print('BlackBeanControl.py -c <Command name> [-d <Device name>] [-i <IP Address>] [-p <Port>] [-m <MAC Address>] [-t <Timeout> [-r <Re-Key Command>]')
sys.exit()
elif Option in ('-c', '--command'):
SentCommand = Argument
elif Option in ('-d', '--device'):
DeviceName = Argument
elif Option in ('-r', '--rekey'):
ReKeyCommand = True
SentCommand = Argument
elif Option in ('-i', '--ipaddress'):
AlternativeIPAddress = Argument
elif Option in ('-p', '--port'):
AlternativePort = Argument
elif Option in ('-m', '--macaddress'):
AlternativeMACAddress = Argument
elif Option in ('-t', '--timeout'):
AlternativeTimeout = Argument
#
#
### Settings
broker = "127.0.0.1" # mqtt broker
port = 1883 # mqtt broker port
debug = False ## set this to True for lots of prints
# buffer of data to output to the serial port
outputData = []
#
def lprint(s):
    print(time.strftime('%X ') + s)
#
#### MQTT callbacks
def on_connect(client, userdata, flags, rc):
if rc == 0:
#rc 0 successful connect
print "Connected"
else:
raise Exception
# subscribe to the output MQTT messages
output_mid = client.subscribe("ninjaCape/output/#")
#
def on_publish(client, userdata, mid):
if(debug):
print "Published. mid:", mid
#
def on_subscribe(client, userdata, mid, granted_qos):
if(debug):
print "Subscribed. mid:", mid
#
def on_message_output(client, userdata, msg):
if(debug):
print "Output Data: ", msg.topic, "data:", msg.payload
#add to outputData list
outputData.append(msg)
#
def on_message(client, userdata, message):
if(debug):
print "Unhandled Message Received: ", message.topic, message.paylod
#
#called on exit
#close serial, disconnect MQTT
def cleanup():
print "Ending and cleaning up"
#ser.close()
mqttc.disconnect()
#
def mqtt_to_JSON_output(mqtt_message):
    topics = mqtt_message.topic.split('/')
    ## JSON message in ninjaCape form
    json_data = '{"DEVICE": [{"G":"0","V":0,"D":' + topics[2] + ',"DA":"' + mqtt_message.payload + '"}]}'
    return json_data
#
# Thread that polls the RM3 for learned IR codes and publishes them to MQTT
def read_and_publish(dev, mqttc, topic):
    lprint("Learning...")
    # The device has to be put back into learning mode to get new data;
    # otherwise we just keep reading the last code.
while True:
timeout = 30
dev.enter_learning()
data = None
while (data is None) and (timeout > 0):
time.sleep(.25)
timeout -= 2
data = dev.check_data()
if data:
learned = ''.join(format(x, '02x') for x in bytearray(data))
lprint(learned)
try:
# Format data as needed
#mqttc.publish("home/network/device/bbeancr/learned", learned)
mqttc.publish(topic, learned)
except(KeyError):
# TODO should probably do something here if the data is malformed
lprint("Exception ...")
pass
#
else:
#lprint("No data received...")
pass
#
#
#
#
#thread for reading serial data and publishing to MQTT client
def serial_read_and_publish(ser, mqttc):
ser.flushInput()
while True:
line = ser.readline() # this is blocking
if(debug):
print "line to decode:",line
#
# split the JSON packet up here and publish on MQTT
json_data = json.loads(line)
if(debug):
print "json decoded:",json_data
#
try:
device = str( json_data['DEVICE'][0]['D'] )
data = str( json_data['DEVICE'][0]['DA'] )
mqttc.publish("ninjaCape/input/"+device, data)
except(KeyError):
# TODO should probably do something here if the data is malformed
pass
#
#
#
# ------------------------------------------------------------------------------
############ MAIN PROGRAM START
if(True):
    print("Connecting ... ", IPAddress)
    dev = broadlink.gendevice(myType, (IPAddress, 80), MACAddress)
    dev.auth()
else:
    try:
        print("Connecting... ", IPAddress)
        #connect to serial port
        #ser = serial.Serial(serialdev, 9600, timeout=None) #timeout 0 for non-blocking. Set to None for blocking.
        dev = broadlink.gendevice(myType, (IPAddress, 80), MACAddress)
        dev.auth()
    except:
        print("Failed to connect Blackbean")
        #unable to continue with no serial input
        raise SystemExit
#
#
try:
#create an mqtt client
mqttc = mqtt.Client("broadlink")
#connect to broker
mqttc.connect(MQTT_Host, MQTT_Port, MQTT_Timeout)
#mqttc.connect("mozart.uucp", 1883, 60)
#attach MQTT callbacks
mqttc.on_connect = on_connect
mqttc.on_publish = on_publish
mqttc.on_subscribe = on_subscribe
mqttc.on_message = on_message
mqttc.message_callback_add("home/network/device/bbeancr/send", on_message_output)
# start the mqttc client thread
mqttc.loop_start()
dev_thread = threading.Thread(target=read_and_publish, args=(dev, mqttc, MQTT_Topic))
dev_thread.daemon = True
dev_thread.start()
while True: # main thread
# writing to serial port if there is data available
#if( len(outputData) > 0 ):
# #print "***data to OUTPUT:",mqtt_to_JSON_output(outputData[0])
# ser.write(mqtt_to_JSON_output(outputData.pop()))
#
time.sleep(3600)
#
time.sleep(0.5)
#
# handle app closure
except (KeyboardInterrupt):
print "Interrupt received"
cleanup()
except (RuntimeError):
print "uh-oh! time to die"
cleanup()
# =[ Fini ]=====================================================================
"""
https://github.com/mjg59/python-broadlink/issues/57
0 1 2 3
0000 0001 0010 0011
4 5 6 7
0100 0101 0110 0111
8 9 A B
1000 1001 1010 1011
C D E F
1100 1101 1110 1111
Key 2 -
1 2 3 5 50 52 59
+ + + + | + + +
260030001d1a3936391a1d1a1d361d1b381b1d1a1d1a1d3639000acd1d1a3936391a1d1a1d361d1b381b1d1a1d1a1d3639000d050000000000000000
26 00 3000 1d1a 3936 391a 1d1a 1d36 1d1b 381b 1d1a 1d1a 1d36 3900 0acd 1d1a 3936 391a 1d1a 1d36 1d1b 381b 1d1a 1d1a 1d36 3900 0d05 0000000000000000
1d 00011101
1a 00011100
39 00111001
36 00111010
0a 00001010
cd 11001101
1 2 3 5 28
| | | | + | | |
26001a001d1b1d1a1d1a391a1d1b1d361d1a391a1d1b1c1b1d3638000d050000000000000000000000000000
26 00 1a00 1d1b1d1a1d1a391a1d1b1d361d1a391a1d1b1c1b1d 36 3800 0d05 0000000000000000000000000000
Bytes Meaning
26 IR
00 No repeat
1a00 26 bytes
1d ... data
3800 ???
0d05 End of signal
00 ... Nul padding (???)
260030001f1b3836391a1d1b1d361d1a391a1d1b1c1b1d3639000acc1d1b3836391b1c1b1d361d1a391a1d1b1d1a1d363900 0d05 0000000000000000
260030001d1a3936391a1d1a1d361d1b381b1d1a1d1b1c3639000acd1d1a3936391a1d1a1d361d1b381b1d1a1d1b1c363900 0d05 0000000000000000
26001a001d1b1d1a1d1b381b1c1b1d33201a391a1d1b1d1a1d363900 0d05 0000000000000000000000000000
260034001d1b1d1a1d1a391a1d1b1d361d1a391a1d1b1c1b1d3639000acc1d1b1d1a1d1a391b1c1b1d361d1a391a1d1b1c1b1d363900 0d05 00000000
260030001e1b3836391a1d1b1d361d1a391a1d1a1d1b1d3638000acd1d1b3836391a1d1b1d361d1a391a1d1a1d1b1d363800 0d05 0000000000000000
260030001d1a3936391a1d1a1d361d1b381b1d1a1d1a1d3639000acd1d1a3936391a1d1a1d361d1b381b1d1a1d1a1d363900 0d05 0000000000000000
26001a001d1b1d1a1d1a391b1c1b1d361d1a391a1d1b1c1b1d363900 0d05 0000000000000000000000000000
260034001d1a1d1a1d1b381b1d1a1d361d1b381b1c1b1d1a1d343b000acd1d1a1d1b1c1b381b1d1a1d361d1b381b1d1a1d1a1d363900 0d05 00000000
260038001d1b1d1a1d1b381b1d1a1d361d1a391a1d1b1d1a1d36390006770700044f1d1a1d1a1d1b381b1d1a1d361d1a391a1d1b1d1a1d363900 0d05
260030001d1a3936381b1d1a1d361d1b381b1c1b1d1a1d3639000acd1d1a3936381b1d1a1d361d1b381b1d1a1d1a1d363900 0d05 0000000000000000
260030001d1a3936381b1d1a1d361d1b381b1c1b1d1a1d3639000acd1d1a3936381b1d1a1d361d1b381b1d1a1d1a1d363900 0d05 0000000000000000
1 2 3 5 80
| | | | |
26004e001d1b1c1b1d1a391a1d1b1d361d1a391a1d1a1d1b1d3638000acd1d1b1c1b1d1a391a1d1b1d361d1a391a1d1a1d1b1d3638000acd1d1b1d1a1d1a391a1d1b1d361d1a391a1d1a1d1b1d3638000d05 00000000000000000000
26 00 4e00 1d1b1c1b1d1a391a1d1b1d361d1a391a1d1a1d1b1d 36 3800 0acd1d1b1c1b1d1a391a1d1b1d361d1a391a1d1a1d1b1d 36 3800 0acd1d1b1d1a1d1a391a1d1b1d361d1a391a1d1a1d1b1d 36 3800 0d05 00000000000000000000
0x4e = 78
0d05 @ byte 80 - 2
260048001e1b3837381b1d1a1d361d1a391a1d1b1d1a1d3639000acc1d1b3936381b1d1a1d361d1a391a1d1b1d1a1d3639000acd1d1a3936381b1d1a1d361d1a391a1d1b1d1a1d363900 0d05
26001a001d1a1d1a1d1b381b1d1a1d361d1a391a1d1b1d1a1d363900 0d05 0000000000000000000000000000
26001a001d1b1d1a1d1a391b1c1b1d361d1a391a1d1b1c1b1d363900 0d05 0000000000000000000000000000
26001a001e1a1d1a1d1b381b1d1a1d361d1a391a1d1b1d1a1d363900 0d05 0000000000000000000000000000
26001a001d1b1d1a1d1b381b1d1a1d361d1a391a1d1b1d1a1d363900 0d05 0000000000000000000000000000
26001a001d1b1d1a1d1a391a1d1b1d361d1a391a1d1a1d1b1d363800 0d05 0000000000000000000000000000
26001a001c1b1d1a1d1b381b1d1a1d361d1a391a1d1b1d1a1d363900 0d05 0000000000000000000000000000
260018001d1b3836391b1c1b1d361d1a391a1d1b1d1a1d363900 0d05
260018001d1b3837381b1d1a1d361d1a391a1d1b1d1a1d363900 0d05
Key 3 (with some long hold)
26 00 38001d1a1d1b1d1a 391a1d1a1d 361d1b 381b1d1a1d1b1c 371d1a1d000ab11d1a1d1b1d1a 391a1d1b1c 371c1b 381b1d1a1d1b1d 361d1a1d00 0d05
26 00 34001d1a 39 36 391a1d1a1d 361d1b 381b1d1a1d1b1c 33201b1d000ab11d1a 39 36 391a1d1a1d 361d1b 381b1d1a1d1b1c 371c1b1d00 0d05 00000000
26003c000e0001431d1b1c1b1d1a391a1d1b1d361d1a391a1d1a1d1b1d361d1a1d000ab11d1b1c1b1d1a391a1d1b1d361d1a391a1d1a1d1b1d361d1a1d00 0d05 000000000000000000000000
260034001d1a3936391a1d1b1c361d1b381b1d1a1d1b1c371c1b1d000ab11d1a3936391a1d1b1c361d1b381b1d1a1d1b1d361d1a1d00 0d05 00000000
260038001d1b1d1a1d1a391a1d1b1d361d1a391a1d1b1c1b1d361d1a1d000ab11d1b1d1a1d1a391b1c1b1d361d1a391a1d1b1c1b1d361d1a1d00 0d05
26001a001d1b3836391a1d1b1d361d1a391a1d1a1d1b1d361d1a1d00 0d05 0000000000000000000000000000
260038001d1a1d1b1d1a391a1d1b1c361d1b381b1d1a1d1b1d361d1a1d000ab11d1a1d1b1d1a391a1d1b1c371c1b391a1d1a1d1b1d361d1a1d00 0d05
260038001d1a1d1b1d1a391a1d1a1d361d1b381b1d1a1d1b1c361d1b1d000ab11d1a1d1b1d1a391a1d1a1d361d1b381b1d1a1d1b1c361d1b1d00 0d05
26003c011d1a3936391a1d1a1d361d1b381b1d1a1d1a1d361d1b1d000ab11d1a3936391a1d1a1d361d1b381b1d1a1d1b1c361d1b1d000ab11d1a3936391a1d1b1c361d1b381b1d1a1d1b1c371d1a1d000ab11d1a3936391a1d1b1c361d1b381b1d1a1d1b1d361d1a1d000ab11d1b3836391a1d1b1d361d1a391a1d1a1d1b1d361d1a1d000ab11d1b3836391a1d1b1d361d1a391a1d1a1d1b1d361d1a1d000ab11d1b3836391a1d1b1d361d1a391a1d1a1d1b1d361d1a1d000ab11d1b3836391a1d1b1d361d1a391a1d1b1c1b1d361d1a1d000ab11d1b3836391b1c1b1d361d1a391a1d1b1d1a1d361d1a1d0006af100003f21d1b3836391b1c1b1d361d1a391a1d1b1d1a1d361d1a1d000ab11d1b3837381b1d1a1d361d1a391a1d1b1d1a1d361d1a1d000ab21d1a3936381b1d1a1d361d1a391a1d1b1d1a1d361d1b1c00 0d05 000000000000000000000000
2600a8001d1a1d1a1d1b381b1d1a1d361d1a391a1d1b1d1a1d361d1b1c000ab21d1a1d1a1d1b381b1d1a1d361d1a391b1c1b1d1a1d361d1b1c000ab21d1a1d1a1d1b381b1d1a1d361d1b381b1c1b1d1a1d361d1b1d000ab11d1a1d1a1d1b381b1d1a1d361d1b381b1d1a1d1a1d361d1b1d000ab11d1a1d1b1c1b381b1d1a1d361d1b381b1d1a1d1a1d361d1b1d000ab11d1a1d1b1c1b391a1d1a1d361d1b381b1d1a1d1a1d361d1b1d00 0d05
"""
|
_gnupg.py
|
# Copyright (c) 2008-2014 by Vinay Sajip.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# * The name(s) of the copyright holder(s) may not be used to endorse or
# promote products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDER(S) "AS IS" AND ANY EXPRESS OR
# IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
# EVENT SHALL THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
# OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
# ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
""" A wrapper for the 'gpg' command::
Portions of this module are derived from A.M. Kuchling's well-designed
GPG.py, using Richard Jones' updated version 1.3, which can be found
in the pycrypto CVS repository on Sourceforge:
http://pycrypto.cvs.sourceforge.net/viewvc/pycrypto/gpg/GPG.py
This module is *not* forward-compatible with amk's; some of the
old interface has changed. For instance, since I've added decrypt
functionality, I elected to initialize with a 'gnupghome' argument
instead of 'keyring', so that gpg can find both the public and secret
keyrings. I've also altered some of the returned objects in order for
the caller to not have to know as much about the internals of the
result classes.
While the rest of ISconf is released under the GPL, I am releasing
this single file under the same terms that A.M. Kuchling used for
pycrypto.
Steve Traugott, stevegt@terraluna.org
Thu Jun 23 21:27:20 PDT 2005
This version of the module has been modified from Steve Traugott's version
(see http://trac.t7a.org/isconf/browser/trunk/lib/python/isconf/GPG.py) by
Vinay Sajip to make use of the subprocess module (Steve's version uses os.fork()
and so does not work on Windows). Renamed to gnupg.py to avoid confusion with
the previous versions.
Modifications Copyright (C) 2008-2017 Vinay Sajip. All rights reserved.
A unittest harness (test_gnupg.py) has also been added.
"""
__version__ = "0.4.1"
__author__ = "Vinay Sajip"
__date__ = "$07-Jul-2017 15:09:20$"
try:
from io import StringIO
except ImportError: # pragma: no cover
from cStringIO import StringIO
import codecs
import locale
import logging
import os
import re
import socket
from subprocess import Popen
from subprocess import PIPE
import sys
import threading
STARTUPINFO = None
if os.name == 'nt': # pragma: no cover
try:
from subprocess import STARTUPINFO, STARTF_USESHOWWINDOW, SW_HIDE
except ImportError:
STARTUPINFO = None
try:
    from logging import NullHandler
except ImportError:
class NullHandler(logging.Handler):
def handle(self, record):
pass
try:
unicode
_py3k = False
string_types = basestring
text_type = unicode
except NameError:
_py3k = True
string_types = str
text_type = str
logger = logging.getLogger(__name__)
if not logger.handlers:
logger.addHandler(NullHandler())
# We use the test below because it works for Jython as well as CPython
if os.path.__name__ == 'ntpath': # pragma: no cover
# On Windows, we don't need shell quoting, other than worrying about
# paths with spaces in them.
def shell_quote(s):
return '"%s"' % s
else:
# Section copied from sarge
# This regex determines which shell input needs quoting
# because it may be unsafe
UNSAFE = re.compile(r'[^\w%+,./:=@-]')
def shell_quote(s):
"""
Quote text so that it is safe for Posix command shells.
For example, "*.py" would be converted to "'*.py'". If the text is
considered safe it is returned unquoted.
:param s: The value to quote
:type s: str (or unicode on 2.x)
:return: A safe version of the input, from the point of view of Posix
command shells
:rtype: The passed-in type
"""
if not isinstance(s, string_types): # pragma: no cover
raise TypeError('Expected string type, got %s' % type(s))
if not s:
result = "''"
elif not UNSAFE.search(s):
result = s
else:
result = "'%s'" % s.replace("'", r"'\''")
return result
# end of sarge code
# Now that we use shell=False, we shouldn't need to quote arguments.
# Use no_quote instead of shell_quote to remind us of where quoting
# was needed. However, note that we still need, on 2.x, to encode any
# Unicode argument with the file system encoding - see Issue #41 and
# Python issue #1759845 ("subprocess.call fails with unicode strings in
# command line").
# Allows the encoding used to be overridden in special cases by setting
# this module attribute appropriately.
fsencoding = sys.getfilesystemencoding()
def no_quote(s):
if not _py3k and isinstance(s, text_type):
s = s.encode(fsencoding)
return s
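# A minimal illustration (assuming a UTF-8 filesystem encoding): on 3.x,
# no_quote('--armor') is a pass-through; on 2.x, a unicode argument such as
# u'schl\xfcssel' would be encoded to the byte string 'schl\xc3\xbcssel'.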
def _copy_data(instream, outstream):
# Copy one stream to another
sent = 0
if hasattr(sys.stdin, 'encoding'):
enc = sys.stdin.encoding
else: # pragma: no cover
enc = 'ascii'
while True:
# See issue #39: read can fail when e.g. a text stream is provided
# for what is actually a binary file
try:
data = instream.read(1024)
except UnicodeError:
logger.warning('Exception occurred while reading', exc_info=1)
break
if not data:
break
sent += len(data)
# logger.debug("sending chunk (%d): %r", sent, data[:256])
try:
outstream.write(data)
except UnicodeError: # pragma: no cover
outstream.write(data.encode(enc))
except:
# Can sometimes get 'broken pipe' errors even when the data has all
# been sent
logger.exception('Error sending data')
break
try:
outstream.close()
except IOError: # pragma: no cover
logger.warning('Exception occurred while closing: ignored', exc_info=1)
logger.debug("closed output, %d bytes sent", sent)
def _threaded_copy_data(instream, outstream):
wr = threading.Thread(target=_copy_data, args=(instream, outstream))
wr.setDaemon(True)
logger.debug('data copier: %r, %r, %r', wr, instream, outstream)
wr.start()
return wr
def _write_passphrase(stream, passphrase, encoding):
passphrase = '%s\n' % passphrase
passphrase = passphrase.encode(encoding)
stream.write(passphrase)
logger.debug('Wrote passphrase')
def _is_sequence(instance):
return isinstance(instance, (list, tuple, set, frozenset))
def _make_memory_stream(s):
try:
from io import BytesIO
rv = BytesIO(s)
except ImportError: # pragma: no cover
rv = StringIO(s)
return rv
def _make_binary_stream(s, encoding):
if _py3k:
if isinstance(s, str):
s = s.encode(encoding)
else:
if type(s) is not str:
s = s.encode(encoding)
return _make_memory_stream(s)
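# For example, _make_binary_stream('hello', 'latin-1') returns an in-memory
# binary stream (a BytesIO on 3.x) positioned at offset 0 containing b'hello'.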
class Verify(object):
"Handle status messages for --verify"
TRUST_UNDEFINED = 0
TRUST_NEVER = 1
TRUST_MARGINAL = 2
TRUST_FULLY = 3
TRUST_ULTIMATE = 4
TRUST_LEVELS = {
"TRUST_UNDEFINED" : TRUST_UNDEFINED,
"TRUST_NEVER" : TRUST_NEVER,
"TRUST_MARGINAL" : TRUST_MARGINAL,
"TRUST_FULLY" : TRUST_FULLY,
"TRUST_ULTIMATE" : TRUST_ULTIMATE,
}
def __init__(self, gpg):
self.gpg = gpg
self.valid = False
self.fingerprint = self.creation_date = self.timestamp = None
self.signature_id = self.key_id = None
self.username = None
self.key_status = None
self.status = None
self.pubkey_fingerprint = None
self.expire_timestamp = None
self.sig_timestamp = None
self.trust_text = None
self.trust_level = None
def __nonzero__(self):
return self.valid
__bool__ = __nonzero__
def handle_status(self, key, value):
if key in self.TRUST_LEVELS:
self.trust_text = key
self.trust_level = self.TRUST_LEVELS[key]
elif key in ("WARNING", "ERROR"):
logger.warning('potential problem: %s: %s', key, value)
elif key == "BADSIG": # pragma: no cover
self.valid = False
self.status = 'signature bad'
self.key_id, self.username = value.split(None, 1)
elif key == "ERRSIG": # pragma: no cover
self.valid = False
(self.key_id,
algo, hash_algo,
cls,
self.timestamp) = value.split()[:5]
self.status = 'signature error'
elif key == "EXPSIG": # pragma: no cover
self.valid = False
self.status = 'signature expired'
self.key_id, self.username = value.split(None, 1)
elif key == "GOODSIG":
self.valid = True
self.status = 'signature good'
self.key_id, self.username = value.split(None, 1)
elif key == "VALIDSIG":
(self.fingerprint,
self.creation_date,
self.sig_timestamp,
self.expire_timestamp) = value.split()[:4]
# may be different if signature is made with a subkey
self.pubkey_fingerprint = value.split()[-1]
self.status = 'signature valid'
elif key == "SIG_ID":
(self.signature_id,
self.creation_date, self.timestamp) = value.split()
elif key == "DECRYPTION_FAILED": # pragma: no cover
self.valid = False
self.key_id = value
self.status = 'decryption failed'
elif key == "NO_PUBKEY": # pragma: no cover
self.valid = False
self.key_id = value
self.status = 'no public key'
elif key in ("EXPKEYSIG", "REVKEYSIG"): # pragma: no cover
# signed with expired or revoked key
self.valid = False
self.key_id = value.split()[0]
if key == "EXPKEYSIG":
self.key_status = 'signing key has expired'
else:
self.key_status = 'signing key was revoked'
self.status = self.key_status
elif key in ("UNEXPECTED", "FAILURE"): # pragma: no cover
self.valid = False
self.key_id = value
if key == "UNEXPECTED":
self.status = 'unexpected data'
else:
# N.B. there might be other reasons
if not self.status:
self.status = 'incorrect passphrase'
elif key in ("DECRYPTION_INFO", "PLAINTEXT", "PLAINTEXT_LENGTH",
"NO_SECKEY", "BEGIN_SIGNING"):
pass
else: # pragma: no cover
logger.debug('message ignored: %s, %s', key, value)
class ImportResult(object):
"Handle status messages for --import"
counts = '''count no_user_id imported imported_rsa unchanged
n_uids n_subk n_sigs n_revoc sec_read sec_imported
sec_dups not_imported'''.split()
def __init__(self, gpg):
self.gpg = gpg
self.imported = []
self.results = []
self.fingerprints = []
for result in self.counts:
setattr(self, result, None)
def __nonzero__(self):
if self.not_imported: return False
if not self.fingerprints: return False
return True
__bool__ = __nonzero__
ok_reason = {
'0': 'Not actually changed',
'1': 'Entirely new key',
'2': 'New user IDs',
'4': 'New signatures',
'8': 'New subkeys',
'16': 'Contains private key',
}
problem_reason = {
'0': 'No specific reason given',
'1': 'Invalid Certificate',
'2': 'Issuer Certificate missing',
'3': 'Certificate Chain too long',
'4': 'Error storing certificate',
}
def handle_status(self, key, value):
if key in ("WARNING", "ERROR"):
logger.warning('potential problem: %s: %s', key, value)
elif key in ("IMPORTED", "KEY_CONSIDERED"):
# this duplicates info we already see in import_ok & import_problem
pass
elif key == "NODATA": # pragma: no cover
self.results.append({'fingerprint': None,
'problem': '0', 'text': 'No valid data found'})
elif key == "IMPORT_OK":
reason, fingerprint = value.split()
reasons = []
for code, text in list(self.ok_reason.items()):
if int(reason) | int(code) == int(reason):
reasons.append(text)
reasontext = '\n'.join(reasons) + "\n"
self.results.append({'fingerprint': fingerprint,
'ok': reason, 'text': reasontext})
self.fingerprints.append(fingerprint)
elif key == "IMPORT_PROBLEM": # pragma: no cover
try:
reason, fingerprint = value.split()
except:
reason = value
fingerprint = '<unknown>'
self.results.append({'fingerprint': fingerprint,
'problem': reason, 'text': self.problem_reason[reason]})
elif key == "IMPORT_RES":
import_res = value.split()
for i, count in enumerate(self.counts):
setattr(self, count, int(import_res[i]))
elif key == "KEYEXPIRED": # pragma: no cover
self.results.append({'fingerprint': None,
'problem': '0', 'text': 'Key expired'})
elif key == "SIGEXPIRED": # pragma: no cover
self.results.append({'fingerprint': None,
'problem': '0', 'text': 'Signature expired'})
elif key == "FAILURE": # pragma: no cover
self.results.append({'fingerprint': None,
'problem': '0', 'text': 'Other failure'})
else: # pragma: no cover
logger.debug('message ignored: %s, %s', key, value)
def summary(self):
l = []
l.append('%d imported' % self.imported)
if self.not_imported: # pragma: no cover
l.append('%d not imported' % self.not_imported)
return ', '.join(l)
ESCAPE_PATTERN = re.compile(r'\\x([0-9a-f][0-9a-f])', re.I)
BASIC_ESCAPES = {
r'\n': '\n',
r'\r': '\r',
r'\f': '\f',
r'\v': '\v',
r'\b': '\b',
r'\0': '\0',
}
class SendResult(object):
def __init__(self, gpg):
self.gpg = gpg
def handle_status(self, key, value):
logger.debug('SendResult: %s: %s', key, value)
class SearchKeys(list):
''' Handle status messages for --search-keys.
Handle pub and uid (relating the latter to the former).
Don't care about the rest
'''
UID_INDEX = 1
FIELDS = 'type keyid algo length date expires'.split()
def __init__(self, gpg):
self.gpg = gpg
self.curkey = None
self.fingerprints = []
self.uids = []
def get_fields(self, args):
result = {}
for i, var in enumerate(self.FIELDS):
if i < len(args):
result[var] = args[i]
else:
result[var] = 'unavailable'
result['uids'] = []
result['sigs'] = []
return result
def pub(self, args):
self.curkey = curkey = self.get_fields(args)
self.append(curkey)
def uid(self, args):
uid = args[self.UID_INDEX]
uid = ESCAPE_PATTERN.sub(lambda m: chr(int(m.group(1), 16)), uid)
for k, v in BASIC_ESCAPES.items():
uid = uid.replace(k, v)
self.curkey['uids'].append(uid)
self.uids.append(uid)
def handle_status(self, key, value): # pragma: no cover
pass
class ListKeys(SearchKeys):
''' Handle status messages for --list-keys, --list-sigs.
Handle pub and uid (relating the latter to the former).
Don't care about (info from src/DETAILS):
crt = X.509 certificate
crs = X.509 certificate and private key available
uat = user attribute (same as user id except for field 10).
sig = signature
rev = revocation signature
pkd = public key data (special field format, see below)
grp = reserved for gpgsm
rvk = revocation key
'''
UID_INDEX = 9
FIELDS = 'type trust length algo keyid date expires dummy ownertrust uid sig'.split()
def __init__(self, gpg):
super(ListKeys, self).__init__(gpg)
self.in_subkey = False
self.key_map = {}
def key(self, args):
self.curkey = curkey = self.get_fields(args)
if curkey['uid']:
curkey['uids'].append(curkey['uid'])
del curkey['uid']
curkey['subkeys'] = []
self.append(curkey)
self.in_subkey = False
pub = sec = key
def fpr(self, args):
fp = args[9]
if fp in self.key_map: # pragma: no cover
raise ValueError('Unexpected fingerprint collision: %s' % fp)
if not self.in_subkey:
self.curkey['fingerprint'] = fp
self.fingerprints.append(fp)
self.key_map[fp] = self.curkey
else:
self.curkey['subkeys'][-1].append(fp)
self.key_map[fp] = self.curkey
def sub(self, args):
subkey = [args[4], args[11]] # keyid, type
self.curkey['subkeys'].append(subkey)
self.in_subkey = True
def ssb(self, args):
subkey = [args[4], None] # keyid, type
self.curkey['subkeys'].append(subkey)
self.in_subkey = True
def sig(self, args):
# keyid, uid, sigclass
self.curkey['sigs'].append((args[4], args[9], args[10]))
class ScanKeys(ListKeys):
''' Handle status messages for --with-fingerprint.'''
def sub(self, args):
# --with-fingerprint --with-colons somehow outputs fewer colons,
# use the last value args[-1] instead of args[11]
subkey = [args[4], args[-1]]
self.curkey['subkeys'].append(subkey)
self.in_subkey = True
class TextHandler(object):
def _as_text(self):
return self.data.decode(self.gpg.encoding, self.gpg.decode_errors)
if _py3k:
__str__ = _as_text
else:
__unicode__ = _as_text
def __str__(self):
return self.data
class Crypt(Verify, TextHandler):
"Handle status messages for --encrypt and --decrypt"
def __init__(self, gpg):
Verify.__init__(self, gpg)
self.data = ''
self.ok = False
self.status = ''
def __nonzero__(self):
if self.ok: return True
return False
__bool__ = __nonzero__
def handle_status(self, key, value):
if key in ("WARNING", "ERROR"):
logger.warning('potential problem: %s: %s', key, value)
elif key == "NODATA":
self.status = "no data was provided"
elif key in ("NEED_PASSPHRASE", "BAD_PASSPHRASE", "GOOD_PASSPHRASE",
"MISSING_PASSPHRASE", "DECRYPTION_FAILED",
"KEY_NOT_CREATED", "NEED_PASSPHRASE_PIN"):
self.status = key.replace("_", " ").lower()
elif key == "NEED_PASSPHRASE_SYM":
self.status = 'need symmetric passphrase'
elif key == "BEGIN_DECRYPTION":
self.status = 'decryption incomplete'
elif key == "BEGIN_ENCRYPTION":
self.status = 'encryption incomplete'
elif key == "DECRYPTION_OKAY":
self.status = 'decryption ok'
self.ok = True
elif key == "END_ENCRYPTION":
self.status = 'encryption ok'
self.ok = True
elif key == "INV_RECP": # pragma: no cover
self.status = 'invalid recipient'
elif key == "KEYEXPIRED": # pragma: no cover
self.status = 'key expired'
elif key == "SIG_CREATED": # pragma: no cover
self.status = 'sig created'
elif key == "SIGEXPIRED": # pragma: no cover
self.status = 'sig expired'
elif key in ("ENC_TO", "USERID_HINT", "GOODMDC",
"END_DECRYPTION", "CARDCTRL", "BADMDC",
"SC_OP_FAILURE", "SC_OP_SUCCESS",
"PINENTRY_LAUNCHED", "KEY_CONSIDERED"):
pass
else:
Verify.handle_status(self, key, value)
class GenKey(object):
"Handle status messages for --gen-key"
def __init__(self, gpg):
self.gpg = gpg
self.type = None
self.fingerprint = None
def __nonzero__(self):
if self.fingerprint: return True
return False
__bool__ = __nonzero__
def __str__(self):
return self.fingerprint or ''
def handle_status(self, key, value):
if key in ("WARNING", "ERROR"): # pragma: no cover
logger.warning('potential problem: %s: %s', key, value)
elif key == "KEY_CREATED":
(self.type,self.fingerprint) = value.split()
elif key in ("PROGRESS", "GOOD_PASSPHRASE", "KEY_NOT_CREATED"):
pass
else: # pragma: no cover
logger.debug('message ignored: %s, %s', key, value)
class ExportResult(GenKey):
"""Handle status messages for --export[-secret-key].
For now, just use an existing class to base it on - if needed, we
can override handle_status for more specific message handling.
"""
def handle_status(self, key, value):
if key in ("EXPORTED", "EXPORT_RES"):
pass
else:
super(ExportResult, self).handle_status(key, value)
class DeleteResult(object):
"Handle status messages for --delete-key and --delete-secret-key"
def __init__(self, gpg):
self.gpg = gpg
self.status = 'ok'
def __str__(self):
return self.status
problem_reason = {
'1': 'No such key',
'2': 'Must delete secret key first',
'3': 'Ambiguous specification',
}
def handle_status(self, key, value):
if key == "DELETE_PROBLEM": # pragma: no cover
self.status = self.problem_reason.get(value,
"Unknown error: %r" % value)
else: # pragma: no cover
logger.debug('message ignored: %s, %s', key, value)
def __nonzero__(self):
return self.status == 'ok'
__bool__ = __nonzero__
class Sign(TextHandler):
"Handle status messages for --sign"
def __init__(self, gpg):
self.gpg = gpg
self.type = None
self.hash_algo = None
self.fingerprint = None
self.status = None
def __nonzero__(self):
return self.fingerprint is not None
__bool__ = __nonzero__
def handle_status(self, key, value):
if key in ("WARNING", "ERROR", "FAILURE"): # pragma: no cover
logger.warning('potential problem: %s: %s', key, value)
elif key in ("KEYEXPIRED", "SIGEXPIRED"): # pragma: no cover
self.status = 'key expired'
elif key == "KEYREVOKED": # pragma: no cover
self.status = 'key revoked'
elif key == "SIG_CREATED":
(self.type,
algo, self.hash_algo, cls,
self.timestamp, self.fingerprint
) = value.split()
self.status = 'signature created'
elif key in ("USERID_HINT", "NEED_PASSPHRASE", "GOOD_PASSPHRASE",
"BAD_PASSPHRASE", "BEGIN_SIGNING"):
pass
else: # pragma: no cover
logger.debug('message ignored: %s, %s', key, value)
VERSION_RE = re.compile(r'gpg \(GnuPG\) (\d+(\.\d+)*)'.encode('ascii'), re.I)
HEX_DIGITS_RE = re.compile(r'[0-9a-f]+$', re.I)
class GPG(object):
    "Encapsulate access to the gpg executable"
decode_errors = 'strict'
result_map = {
'crypt': Crypt,
'delete': DeleteResult,
'generate': GenKey,
'import': ImportResult,
'send': SendResult,
'list': ListKeys,
'scan': ScanKeys,
'search': SearchKeys,
'sign': Sign,
'verify': Verify,
'export': ExportResult,
}
"Encapsulate access to the gpg executable"
def __init__(self, gpgbinary='gpg', gnupghome=None, verbose=False,
use_agent=False, keyring=None, options=None,
secret_keyring=None):
"""Initialize a GPG process wrapper. Options are:
gpgbinary -- full pathname for GPG binary.
gnupghome -- full pathname to where we can find the public and
private keyrings. Default is whatever gpg defaults to.
keyring -- name of alternative keyring file to use, or list of such
keyrings. If specified, the default keyring is not used.
        options -- a list of additional options to pass to the GPG binary.
secret_keyring -- name of alternative secret keyring file to use, or
list of such keyrings.
"""
self.gpgbinary = gpgbinary
self.gnupghome = gnupghome
if keyring:
# Allow passing a string or another iterable. Make it uniformly
# a list of keyring filenames
if isinstance(keyring, string_types):
keyring = [keyring]
self.keyring = keyring
if secret_keyring:
# Allow passing a string or another iterable. Make it uniformly
# a list of keyring filenames
if isinstance(secret_keyring, string_types):
secret_keyring = [secret_keyring]
self.secret_keyring = secret_keyring
self.verbose = verbose
self.use_agent = use_agent
if isinstance(options, str): # pragma: no cover
options = [options]
self.options = options
self.on_data = None # or a callable - will be called with data chunks
# Changed in 0.3.7 to use Latin-1 encoding rather than
# locale.getpreferredencoding falling back to sys.stdin.encoding
# falling back to utf-8, because gpg itself uses latin-1 as the default
# encoding.
self.encoding = 'latin-1'
if gnupghome and not os.path.isdir(self.gnupghome):
os.makedirs(self.gnupghome,0x1C0)
try:
p = self._open_subprocess(["--version"])
except OSError:
msg = 'Unable to run gpg - it may not be available.'
logger.exception(msg)
raise OSError(msg)
result = self.result_map['verify'](self) # any result will do for this
self._collect_output(p, result, stdin=p.stdin)
if p.returncode != 0: # pragma: no cover
raise ValueError("Error invoking gpg: %s: %s" % (p.returncode,
result.stderr))
m = VERSION_RE.match(result.data)
if not m: # pragma: no cover
self.version = None
else:
dot = '.'.encode('ascii')
self.version = tuple([int(s) for s in m.groups()[0].split(dot)])
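        # For example, a banner line of 'gpg (GnuPG) 2.1.15' yields
        # self.version == (2, 1, 15); an unrecognized banner leaves it None.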
def make_args(self, args, passphrase):
"""
Make a list of command line elements for GPG. The value of ``args``
will be appended. The ``passphrase`` argument needs to be True if
a passphrase will be sent to GPG, else False.
"""
cmd = [self.gpgbinary, '--status-fd', '2', '--no-tty']
cmd.extend(['--debug', 'ipc'])
if passphrase and hasattr(self, 'version'):
if self.version >= (2, 1):
cmd[1:1] = ['--pinentry-mode', 'loopback']
cmd.extend(['--fixed-list-mode', '--batch', '--with-colons'])
if self.gnupghome:
cmd.extend(['--homedir', no_quote(self.gnupghome)])
if self.keyring:
cmd.append('--no-default-keyring')
for fn in self.keyring:
cmd.extend(['--keyring', no_quote(fn)])
if self.secret_keyring:
for fn in self.secret_keyring:
cmd.extend(['--secret-keyring', no_quote(fn)])
if passphrase:
cmd.extend(['--passphrase-fd', '0'])
if self.use_agent: # pragma: no cover
cmd.append('--use-agent')
if self.options:
cmd.extend(self.options)
cmd.extend(args)
return cmd
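        # A sketch of a typical result (exact flags vary with configuration
        # and gpg version), e.g. for list_keys() with gnupghome='keys':
        #   ['gpg', '--status-fd', '2', '--no-tty', '--debug', 'ipc',
        #    '--fixed-list-mode', '--batch', '--with-colons',
        #    '--homedir', 'keys', '--list-keys', '--fingerprint', '--fingerprint']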
def _open_subprocess(self, args, passphrase=False):
# Internal method: open a pipe to a GPG subprocess and return
# the file objects for communicating with it.
# def debug_print(cmd):
# result = []
# for c in cmd:
# if ' ' not in c:
# result.append(c)
# else:
# if '"' not in c:
# result.append('"%s"' % c)
# elif "'" not in c:
# result.append("'%s'" % c)
# else:
# result.append(c) # give up
# return ' '.join(cmd)
from subprocess import list2cmdline as debug_print
cmd = self.make_args(args, passphrase)
if self.verbose: # pragma: no cover
print(debug_print(cmd))
if not STARTUPINFO:
si = None
else: # pragma: no cover
si = STARTUPINFO()
si.dwFlags = STARTF_USESHOWWINDOW
si.wShowWindow = SW_HIDE
result = Popen(cmd, shell=False, stdin=PIPE, stdout=PIPE, stderr=PIPE,
startupinfo=si)
logger.debug("%s: %s", result.pid, debug_print(cmd))
return result
def _read_response(self, stream, result):
# Internal method: reads all the stderr output from GPG, taking notice
# only of lines that begin with the magic [GNUPG:] prefix.
#
# Calls methods on the response object for each valid token found,
# with the arg being the remainder of the status line.
lines = []
while True:
line = stream.readline()
if len(line) == 0:
break
lines.append(line)
line = line.rstrip()
if self.verbose: # pragma: no cover
print(line)
logger.debug("%s", line)
if line[0:9] == '[GNUPG:] ':
# Chop off the prefix
line = line[9:]
L = line.split(None, 1)
keyword = L[0]
if len(L) > 1:
value = L[1]
else:
value = ""
result.handle_status(keyword, value)
result.stderr = ''.join(lines)
def _read_data(self, stream, result, on_data=None):
# Read the contents of the file from GPG's stdout
chunks = []
while True:
data = stream.read(1024)
if len(data) == 0:
break
logger.debug("chunk: %r" % data[:256])
chunks.append(data)
if on_data:
on_data(data)
if _py3k:
# Join using b'' or '', as appropriate
result.data = type(data)().join(chunks)
else:
result.data = ''.join(chunks)
def _collect_output(self, process, result, writer=None, stdin=None):
"""
        Drain the subprocess's output streams, writing the collected output
to the result. If a writer thread (writing to the subprocess) is given,
make sure it's joined before returning. If a stdin stream is given,
close it before returning.
"""
stderr = codecs.getreader(self.encoding)(process.stderr)
rr = threading.Thread(target=self._read_response, args=(stderr, result))
rr.setDaemon(True)
logger.debug('stderr reader: %r', rr)
rr.start()
stdout = process.stdout
dr = threading.Thread(target=self._read_data, args=(stdout, result, self.on_data))
dr.setDaemon(True)
logger.debug('stdout reader: %r', dr)
dr.start()
dr.join()
rr.join()
if writer is not None:
writer.join()
process.wait()
if stdin is not None:
try:
stdin.close()
except IOError: # pragma: no cover
pass
stderr.close()
stdout.close()
def _handle_io(self, args, fileobj, result, passphrase=None, binary=False):
"Handle a call to GPG - pass input data, collect output data"
# Handle a basic data call - pass data to GPG, handle the output
# including status information. Garbage In, Garbage Out :)
p = self._open_subprocess(args, passphrase is not None)
if not binary: # pragma: no cover
stdin = codecs.getwriter(self.encoding)(p.stdin)
else:
stdin = p.stdin
if passphrase:
_write_passphrase(stdin, passphrase, self.encoding)
writer = _threaded_copy_data(fileobj, stdin)
self._collect_output(p, result, writer, stdin)
return result
#
# SIGNATURE METHODS
#
def sign(self, message, **kwargs):
"""sign message"""
f = _make_binary_stream(message, self.encoding)
result = self.sign_file(f, **kwargs)
f.close()
return result
def set_output_without_confirmation(self, args, output):
"If writing to a file which exists, avoid a confirmation message."
if os.path.exists(output):
# We need to avoid an overwrite confirmation message
args.extend(['--yes'])
args.extend(['--output', no_quote(output)])
def sign_file(self, file, keyid=None, passphrase=None, clearsign=True,
detach=False, binary=False, output=None, extra_args=None):
"""sign file"""
logger.debug("sign_file: %s", file)
if binary: # pragma: no cover
args = ['-s']
else:
args = ['-sa']
# You can't specify detach-sign and clearsign together: gpg ignores
# the detach-sign in that case.
if detach:
args.append("--detach-sign")
elif clearsign:
args.append("--clearsign")
if keyid:
args.extend(['--default-key', no_quote(keyid)])
if output: # write the output to a file with the specified name
self.set_output_without_confirmation(args, output)
if extra_args:
args.extend(extra_args)
result = self.result_map['sign'](self)
#We could use _handle_io here except for the fact that if the
#passphrase is bad, gpg bails and you can't write the message.
p = self._open_subprocess(args, passphrase is not None)
try:
stdin = p.stdin
if passphrase:
_write_passphrase(stdin, passphrase, self.encoding)
writer = _threaded_copy_data(file, stdin)
except IOError: # pragma: no cover
logging.exception("error writing message")
writer = None
self._collect_output(p, result, writer, stdin)
return result
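        # Example usage (a sketch; assumes `fp` is the fingerprint of an
        # existing key whose passphrase is 'pp'):
        #   with open('report.txt', 'rb') as f:
        #       sig = gpg.sign_file(f, keyid=fp, passphrase='pp', detach=True)
        #   assert sig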
def verify(self, data, **kwargs):
"""Verify the signature on the contents of the string 'data'
>>> GPGBINARY = os.environ.get('GPGBINARY', 'gpg')
>>> gpg = GPG(gpgbinary=GPGBINARY, gnupghome="keys")
>>> input = gpg.gen_key_input(passphrase='foo')
>>> key = gpg.gen_key(input)
>>> assert key
>>> sig = gpg.sign('hello',keyid=key.fingerprint,passphrase='bar')
>>> assert not sig
>>> sig = gpg.sign('hello',keyid=key.fingerprint,passphrase='foo')
>>> assert sig
>>> verify = gpg.verify(sig.data)
>>> assert verify
"""
f = _make_binary_stream(data, self.encoding)
result = self.verify_file(f, **kwargs)
f.close()
return result
def verify_file(self, file, data_filename=None, close_file=True, extra_args=None):
"Verify the signature on the contents of the file-like object 'file'"
logger.debug('verify_file: %r, %r', file, data_filename)
result = self.result_map['verify'](self)
args = ['--verify']
if extra_args:
args.extend(extra_args)
if data_filename is None:
self._handle_io(args, file, result, binary=True)
else:
logger.debug('Handling detached verification')
import tempfile
fd, fn = tempfile.mkstemp(prefix='pygpg')
s = file.read()
if close_file:
file.close()
logger.debug('Wrote to temp file: %r', s)
os.write(fd, s)
os.close(fd)
args.append(no_quote(fn))
args.append(no_quote(data_filename))
try:
p = self._open_subprocess(args)
self._collect_output(p, result, stdin=p.stdin)
finally:
os.unlink(fn)
return result
def verify_data(self, sig_filename, data, extra_args=None):
"Verify the signature in sig_filename against data in memory"
logger.debug('verify_data: %r, %r ...', sig_filename, data[:16])
result = self.result_map['verify'](self)
args = ['--verify']
if extra_args:
args.extend(extra_args)
args.extend([no_quote(sig_filename), '-'])
stream = _make_memory_stream(data)
self._handle_io(args, stream, result, binary=True)
return result
#
# KEY MANAGEMENT
#
def import_keys(self, key_data):
"""
Import the key_data into our keyring.
"""
result = self.result_map['import'](self)
logger.debug('import_keys: %r', key_data[:256])
data = _make_binary_stream(key_data, self.encoding)
self._handle_io(['--import'], data, result, binary=True)
logger.debug('import_keys result: %r', result.__dict__)
data.close()
return result
def recv_keys(self, keyserver, *keyids):
"""Import a key from a keyserver
>>> import shutil
>>> shutil.rmtree("keys", ignore_errors=True)
>>> GPGBINARY = os.environ.get('GPGBINARY', 'gpg')
>>> gpg = GPG(gpgbinary=GPGBINARY, gnupghome="keys")
>>> os.chmod('keys', 0x1C0)
>>> result = gpg.recv_keys('pgp.mit.edu', '92905378')
>>> assert result
"""
result = self.result_map['import'](self)
logger.debug('recv_keys: %r', keyids)
data = _make_binary_stream("", self.encoding)
#data = ""
args = ['--keyserver', no_quote(keyserver), '--recv-keys']
args.extend([no_quote(k) for k in keyids])
self._handle_io(args, data, result, binary=True)
logger.debug('recv_keys result: %r', result.__dict__)
data.close()
return result
def send_keys(self, keyserver, *keyids):
"""Send a key to a keyserver.
Note: it's not practical to test this function without sending
arbitrary data to live keyservers.
"""
result = self.result_map['send'](self)
logger.debug('send_keys: %r', keyids)
data = _make_binary_stream('', self.encoding)
#data = ""
args = ['--keyserver', no_quote(keyserver), '--send-keys']
args.extend([no_quote(k) for k in keyids])
self._handle_io(args, data, result, binary=True)
logger.debug('send_keys result: %r', result.__dict__)
data.close()
return result
def delete_keys(self, fingerprints, secret=False, passphrase=None):
which='key'
if secret: # pragma: no cover
if self.version >= (2, 1) and passphrase is None:
raise ValueError('For GnuPG >= 2.1, deleting secret keys '
'needs a passphrase to be provided')
which='secret-key'
if _is_sequence(fingerprints): # pragma: no cover
fingerprints = [no_quote(s) for s in fingerprints]
else:
fingerprints = [no_quote(fingerprints)]
args = ['--delete-%s' % which]
args.extend(fingerprints)
result = self.result_map['delete'](self)
if not secret or self.version < (2, 1):
p = self._open_subprocess(args)
self._collect_output(p, result, stdin=p.stdin)
else:
# Need to send in a passphrase.
f = _make_binary_stream('', self.encoding)
try:
self._handle_io(args, f, result, passphrase=passphrase,
binary=True)
finally:
f.close()
return result
def export_keys(self, keyids, secret=False, armor=True, minimal=False,
passphrase=None):
"""
Export the indicated keys. A 'keyid' is anything gpg accepts.
Since GnuPG 2.1, you can't export secret keys without providing a
passphrase.
"""
which=''
if secret:
which='-secret-key'
if self.version >= (2, 1) and passphrase is None:
raise ValueError('For GnuPG >= 2.1, exporting secret keys '
'needs a passphrase to be provided')
if _is_sequence(keyids):
keyids = [no_quote(k) for k in keyids]
else:
keyids = [no_quote(keyids)]
args = ['--export%s' % which]
if armor:
args.append('--armor')
if minimal: # pragma: no cover
args.extend(['--export-options','export-minimal'])
args.extend(keyids)
# gpg --export produces no status-fd output; stdout will be
# empty in case of failure
#stdout, stderr = p.communicate()
result = self.result_map['export'](self)
if not secret or self.version < (2, 1):
p = self._open_subprocess(args)
self._collect_output(p, result, stdin=p.stdin)
else:
# Need to send in a passphrase.
f = _make_binary_stream('', self.encoding)
try:
self._handle_io(args, f, result, passphrase=passphrase,
binary=True)
finally:
f.close()
logger.debug('export_keys result: %r', result.data)
# Issue #49: Return bytes if armor not specified, else text
result = result.data
if armor:
result = result.decode(self.encoding, self.decode_errors)
return result
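        # For example, with armor=True the returned text begins
        # '-----BEGIN PGP PUBLIC KEY BLOCK-----'; with armor=False the raw
        # binary key material is returned as bytes.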
def _get_list_output(self, p, kind):
# Get the response information
result = self.result_map[kind](self)
self._collect_output(p, result, stdin=p.stdin)
lines = result.data.decode(self.encoding,
self.decode_errors).splitlines()
valid_keywords = 'pub uid sec fpr sub ssb sig'.split()
for line in lines:
if self.verbose: # pragma: no cover
print(line)
logger.debug("line: %r", line.rstrip())
if not line: # pragma: no cover
break
L = line.strip().split(':')
if not L: # pragma: no cover
continue
keyword = L[0]
if keyword in valid_keywords:
getattr(result, keyword)(L)
return result
def list_keys(self, secret=False, keys=None, sigs=False):
""" list the keys currently in the keyring
>>> import shutil
>>> shutil.rmtree("keys", ignore_errors=True)
>>> GPGBINARY = os.environ.get('GPGBINARY', 'gpg')
>>> gpg = GPG(gpgbinary=GPGBINARY, gnupghome="keys")
>>> input = gpg.gen_key_input(passphrase='foo')
>>> result = gpg.gen_key(input)
>>> fp1 = result.fingerprint
>>> result = gpg.gen_key(input)
>>> fp2 = result.fingerprint
>>> pubkeys = gpg.list_keys()
>>> assert fp1 in pubkeys.fingerprints
>>> assert fp2 in pubkeys.fingerprints
"""
        if sigs:
            which = 'sigs'
        else:
            which = 'keys'
        if secret:
            which = 'secret-keys'
args = ['--list-%s' % which,
'--fingerprint', '--fingerprint'] # get subkey FPs, too
if keys:
if isinstance(keys, string_types):
keys = [keys]
args.extend(keys)
p = self._open_subprocess(args)
return self._get_list_output(p, 'list')
def scan_keys(self, filename):
"""
List details of an ascii armored or binary key file
without first importing it to the local keyring.
The function achieves this on modern GnuPG by running:
$ gpg --dry-run --import-options import-show --import
On older versions, it does the *much* riskier:
$ gpg --with-fingerprint --with-colons filename
"""
if self.version >= (2, 1):
args = ['--dry-run', '--import-options', 'import-show', '--import']
else:
logger.warning('Trying to list packets, but if the file is not a '
'keyring, might accidentally decrypt')
args = ['--with-fingerprint', '--with-colons', '--fixed-list-mode']
args.append(no_quote(filename))
p = self._open_subprocess(args)
return self._get_list_output(p, 'scan')
def search_keys(self, query, keyserver='pgp.mit.edu'):
""" search keyserver by query (using --search-keys option)
>>> import shutil
>>> shutil.rmtree('keys', ignore_errors=True)
>>> GPGBINARY = os.environ.get('GPGBINARY', 'gpg')
>>> gpg = GPG(gpgbinary=GPGBINARY, gnupghome='keys')
>>> os.chmod('keys', 0x1C0)
>>> result = gpg.search_keys('<vinay_sajip@hotmail.com>')
>>> assert result, 'Failed using default keyserver'
>>> #keyserver = 'keyserver.ubuntu.com'
>>> #result = gpg.search_keys('<vinay_sajip@hotmail.com>', keyserver)
>>> #assert result, 'Failed using keyserver.ubuntu.com'
"""
query = query.strip()
if HEX_DIGITS_RE.match(query):
query = '0x' + query
args = ['--fingerprint',
'--keyserver', no_quote(keyserver), '--search-keys',
no_quote(query)]
p = self._open_subprocess(args)
# Get the response information
result = self.result_map['search'](self)
self._collect_output(p, result, stdin=p.stdin)
lines = result.data.decode(self.encoding,
self.decode_errors).splitlines()
valid_keywords = ['pub', 'uid']
for line in lines:
if self.verbose: # pragma: no cover
print(line)
logger.debug('line: %r', line.rstrip())
if not line: # sometimes get blank lines on Windows
continue
L = line.strip().split(':')
if not L: # pragma: no cover
continue
keyword = L[0]
if keyword in valid_keywords:
getattr(result, keyword)(L)
return result
def gen_key(self, input):
"""Generate a key; you might use gen_key_input() to create the
control input.
>>> GPGBINARY = os.environ.get('GPGBINARY', 'gpg')
>>> gpg = GPG(gpgbinary=GPGBINARY, gnupghome="keys")
>>> input = gpg.gen_key_input(passphrase='foo')
>>> result = gpg.gen_key(input)
>>> assert result
>>> result = gpg.gen_key('foo')
>>> assert not result
"""
args = ["--gen-key"]
result = self.result_map['generate'](self)
f = _make_binary_stream(input, self.encoding)
self._handle_io(args, f, result, binary=True)
f.close()
return result
def gen_key_input(self, **kwargs):
"""
Generate --gen-key input per gpg doc/DETAILS
"""
parms = {}
for key, val in list(kwargs.items()):
key = key.replace('_','-').title()
if str(val).strip(): # skip empty strings
parms[key] = val
parms.setdefault('Key-Type','RSA')
parms.setdefault('Key-Length',2048)
parms.setdefault('Name-Real', "Autogenerated Key")
logname = (os.environ.get('LOGNAME') or os.environ.get('USERNAME') or
'unspecified')
hostname = socket.gethostname()
parms.setdefault('Name-Email', "%s@%s" % (logname.replace(' ', '_'),
hostname))
out = "Key-Type: %s\n" % parms.pop('Key-Type')
for key, val in list(parms.items()):
out += "%s: %s\n" % (key, val)
out += "%commit\n"
return out
# Key-Type: RSA
# Key-Length: 1024
# Name-Real: ISdlink Server on %s
# Name-Comment: Created by %s
# Name-Email: isdlink@%s
# Expire-Date: 0
# %commit
#
#
# Key-Type: DSA
# Key-Length: 1024
# Subkey-Type: ELG-E
# Subkey-Length: 1024
# Name-Real: Joe Tester
# Name-Comment: with stupid passphrase
# Name-Email: joe@foo.bar
# Expire-Date: 0
# Passphrase: abc
# %pubring foo.pub
# %secring foo.sec
# %commit
#
# ENCRYPTION
#
def encrypt_file(self, file, recipients, sign=None,
always_trust=False, passphrase=None,
armor=True, output=None, symmetric=False, extra_args=None):
"Encrypt the message read from the file-like object 'file'"
args = ['--encrypt']
if symmetric:
# can't be False or None - could be True or a cipher algo value
# such as AES256
args = ['--symmetric']
if symmetric is not True:
args.extend(['--cipher-algo', no_quote(symmetric)])
# else use the default, currently CAST5
else:
if not recipients:
raise ValueError('No recipients specified with asymmetric '
'encryption')
if not _is_sequence(recipients):
recipients = (recipients,)
for recipient in recipients:
args.extend(['--recipient', no_quote(recipient)])
if armor: # create ascii-armored output - False for binary output
args.append('--armor')
if output: # write the output to a file with the specified name
self.set_output_without_confirmation(args, output)
if sign is True: # pragma: no cover
args.append('--sign')
elif sign: # pragma: no cover
args.extend(['--sign', '--default-key', no_quote(sign)])
if always_trust: # pragma: no cover
args.append('--always-trust')
if extra_args:
args.extend(extra_args)
result = self.result_map['crypt'](self)
self._handle_io(args, file, result, passphrase=passphrase, binary=True)
logger.debug('encrypt result: %r', result.data)
return result
def encrypt(self, data, recipients, **kwargs):
"""Encrypt the message contained in the string 'data'
>>> import shutil
>>> if os.path.exists("keys"):
... shutil.rmtree("keys", ignore_errors=True)
>>> GPGBINARY = os.environ.get('GPGBINARY', 'gpg')
>>> gpg = GPG(gpgbinary=GPGBINARY, gnupghome="keys")
>>> input = gpg.gen_key_input(name_email='user1@test', passphrase='pp1')
>>> result = gpg.gen_key(input)
>>> fp1 = result.fingerprint
>>> input = gpg.gen_key_input(name_email='user2@test', passphrase='pp2')
>>> result = gpg.gen_key(input)
>>> fp2 = result.fingerprint
>>> result = gpg.encrypt("hello",fp2)
>>> message = str(result)
>>> assert message != 'hello'
>>> result = gpg.decrypt(message, passphrase='pp2')
>>> assert result
>>> str(result)
'hello'
>>> result = gpg.encrypt("hello again", fp1)
>>> message = str(result)
>>> result = gpg.decrypt(message, passphrase='bar')
>>> result.status in ('decryption failed', 'bad passphrase')
True
>>> assert not result
>>> result = gpg.decrypt(message, passphrase='pp1')
>>> result.status == 'decryption ok'
True
>>> str(result)
'hello again'
>>> result = gpg.encrypt("signed hello", fp2, sign=fp1, passphrase='pp1')
>>> result.status == 'encryption ok'
True
>>> message = str(result)
>>> result = gpg.decrypt(message, passphrase='pp2')
>>> result.status == 'decryption ok'
True
>>> assert result.fingerprint == fp1
"""
data = _make_binary_stream(data, self.encoding)
result = self.encrypt_file(data, recipients, **kwargs)
data.close()
return result
def decrypt(self, message, **kwargs):
data = _make_binary_stream(message, self.encoding)
result = self.decrypt_file(data, **kwargs)
data.close()
return result
def decrypt_file(self, file, always_trust=False, passphrase=None,
output=None, extra_args=None):
args = ["--decrypt"]
if output: # write the output to a file with the specified name
self.set_output_without_confirmation(args, output)
if always_trust: # pragma: no cover
args.append("--always-trust")
if extra_args:
args.extend(extra_args)
result = self.result_map['crypt'](self)
self._handle_io(args, file, result, passphrase, binary=True)
logger.debug('decrypt result: %r', result.data)
return result
|
process-manager.py
|
###
# Multiprocessing manager test.
#
# License - MIT.
###
import os
from multiprocessing import Process, Manager, Lock
# process_function - Process test function.
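# The decrement below is a read-modify-write on a dict proxy shared across
# processes; the Lock keeps two processes from interleaving the read and the
# write and losing an update.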
def process_function(pLock, pDict):
# {
with pLock:
pDict['num'] -= 1
# }
# Main function.
def main():
# {
lock = Lock()
with Manager() as manager:
proc_dict = manager.dict({"num" : 10})
proc_list = []
for i in range(10):
process = Process(target = process_function, args = (lock, proc_dict))
process.start()
proc_list.append(process)
for p in proc_list:
p.join()
print(proc_dict)
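        # With 10 processes each decrementing once, this prints the proxied
        # dict as {'num': 0}.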
# }
# Program entry.
if '__main__' == __name__:
main()
|
wtf.py
|
#***Essential Data and Imports (Do not modify, except USE_SPARK)
#DEFINE USE_SPARK
USE_SPARK = True
try:
from pyspark import SparkConf, SparkContext
except ImportError as e:
if USE_SPARK:
raise e
from datetime import datetime
import sys
class MasterURL():
test = "spark://143.106.73.43:7077"
prod = "spark://143.106.73.44:7077"
calzolari = "spark://143.106.73.61:7077"
local = "local[*]"
CREATION_TIME = datetime.utcnow()
#****Tweak Defines (Modify freely, just don't erase)
#DEFINE APP_NAME
APP_NAME = "WikitoFile Local"
#DEFINE CHOSEN_MASTER
CHOSEN_MASTER = MasterURL.test
#DEFINE OVERWRITE_STDIO
OVERWRITE_STDIO = True
#DEFINE FILE_STDOUT
FILE_STDOUT = "py_out.txt"
#DEFINE FILE_STDERR
FILE_STDERR = "py_err.txt"
#****Put Extra Imports Here
import wikipedia
import requests
import thread
import time
import threading
import sets
import json
#****Put Globals Here
PAGE_LIMIT = 4096
MULTITHREAD_LIMIT = 4
FLUSH_IO_BATCH = 64
RANDOM_TIMES = 5
FULL_WIKI_FILE = False
PRETTY_LINK_FILE = True
PRETTY_WIKI_FILE = True
LANGUAGES = ["de","en","fr","pt"]
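# Crawl outline: for each language, seed a set with random article titles,
# then MULTITHREAD_LIMIT threads pop titles, fetch each page, queue its
# outgoing links (capped at PAGE_LIMIT titles overall), and flush the
# accumulated page/link dictionaries to JSON every FLUSH_IO_BATCH pages.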
#****Write Main Code:
def main(sc=None):
wikipedia.set_rate_limiting(True)
multiLimitRange = range(MULTITHREAD_LIMIT)
LANGUAGES.sort()
for language in LANGUAGES:
try:
wikipedia.set_lang(language)
allSet = sets.Set()
for i in xrange(RANDOM_TIMES):
try:
allSet.update(wikipedia.random(pages=10))
except wikipedia.exceptions.DisambiguationError as e:
allSet.update(e.options)
except Exception as e:
print >> sys.stderr, e
readySet = sets.Set()
readySet.update(allSet)
getPages_threads={i:threading.Thread(target=getPages,args=(language,allSet,readySet)) for i in multiLimitRange}
for i in multiLimitRange:
try:
getPages_threads[i].start()
except Exception as e:
print >> sys.stderr, e
for i in multiLimitRange:
try:
if getPages_threads[i].isAlive():
getPages_threads[i].join()
except Exception as e:
print >> sys.stderr, e
print "== %s: %d Done ==" % (language,len(allSet))
except wikipedia.exceptions.PageError as e:
print >> sys.stderr, e
except requests.exceptions.ConnectionError as e:
print >> sys.stderr, e
except wikipedia.exceptions.WikipediaException as e:
print >> sys.stderr, e
except Exception as e:
print >> sys.stderr, e
pass
def getPages(language,allSet,readySet):
#Creates a local dictionary of the pages acquired and per requests
wikidict = {}
linkdict = {}
#Try to acquire a page
i=0
try:
while addPage(wikidict,linkdict,allSet,readySet):
i +=1
if i%FLUSH_IO_BATCH == 0:
try:
threading.Thread(target=dictsToJson,args=(wikidict,linkdict,language)).start()
wikidict = {}
linkdict = {}
except Exception as e:
print >> sys.stderr, e
pass
except Exception as e:
print >> sys.stderr, e
dictsToJson(wikidict,linkdict,language)
IO_LOCK = threading.RLock()
DTJ_COUNTER = {}
def dictsToJson(wikidict,linkdict,language):
with IO_LOCK:
if DTJ_COUNTER.has_key(language):
DTJ_COUNTER[language] +=1
else:
DTJ_COUNTER[language] = 0
currCounter = DTJ_COUNTER[language]
dictToJson(wikidict,"wiki",language,currCounter,PRETTY_WIKI_FILE)
dictToJson(linkdict,"link",language,currCounter,PRETTY_LINK_FILE)
def dictToJson(data,fileID,language="",uniqueID=0,prettyFile=True):
    try:
        # e.g. data/wiki/en_20161021_120000_00000042.json
        fjson = open("data/%s/%s_%s_%08d.json" % (fileID, language, CREATION_TIME.strftime("%Y%m%d_%H%M%S"), uniqueID), "w")
        if prettyFile:
            json.dump(data, fjson, sort_keys=True, indent=4)
        else:
            json.dump(data, fjson)
        fjson.close()
        return True
    except Exception as e:
        print >> sys.stderr, e
        return False
SET_LOCK = threading.RLock()
def queuePages(pageNames,allSet,readySet):
for pageName in pageNames:
if (len(allSet)<=PAGE_LIMIT):
with SET_LOCK:
if not pageName in allSet:
allSet.add(pageName)
readySet.add(pageName)
pass
def addPage(wikidict,linkdict,allSet,readySet):
pageName = ""
    try:
        pageName = readySet.pop().encode('utf-8')
    except KeyError:
        # The ready set is empty - nothing left to fetch.
        return False
    except Exception as e:
        print >> sys.stderr, e
        return False
try:
page = wikipedia.page(pageName)
links = page.links
if FULL_WIKI_FILE:
pagedict = {"pageName":pageName,"content":page.content,"links":links,"images":page.images,"categories":page.categories}
else:
pagedict = {"pageName":pageName,"content":page.content,"images":page.images,"categories":page.categories}
wikidict[pageName]=pagedict
linkdict[pageName]=links
queuePages(links,allSet,readySet)
except wikipedia.exceptions.DisambiguationError as e:
#print >> sys.stderr, e
queuePages(e.options,allSet,readySet)
except wikipedia.exceptions.PageError as e:
print >> sys.stderr, e
time.sleep(1)
except requests.exceptions.ConnectionError as e:
print >> sys.stderr, e
time.sleep(1)
except wikipedia.exceptions.WikipediaException as e:
print >> sys.stderr, e
time.sleep(1)
except Exception as e:
print >> sys.stderr, e
time.sleep(1)
    return True
#***Configure Out and Error, and print ExecutionID
def configPrint(sc=None):
    if OVERWRITE_STDIO:
        sys.stderr = open(FILE_STDERR,"a")
        sys.stdout = open(FILE_STDOUT,"a")
    # sc is only set when running under Spark; fall back to a placeholder.
    app_id = sc.applicationId if sc is not None else "N/A"
    EXECUTION_ID = "\n\nAPP_NAME: %s\nAPP_ID: %s\nMASTER_URL: %s\nUTC: %s\n\n" % (APP_NAME, app_id, CHOSEN_MASTER, CREATION_TIME)
    sys.stderr.write (EXECUTION_ID)
    sys.stdout.write (EXECUTION_ID)
#***Executor of Code
if USE_SPARK:
#With Spark
if __name__ == "__main__":
# Configure Spark
conf = SparkConf().setMaster(CHOSEN_MASTER).setAppName(APP_NAME)
sc = SparkContext(conf=conf)
# Configure stdout and stderr
    configPrint(sc)
# Execute Main functionality
main(sc)
else:
#Without Spark
configPrint()
main()
|
wsgi_server.py
|
#!/usr/bin/env python
#
# Copyright 2007 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""A WSGI server implementation using a shared thread pool."""
import collections
import errno
import httplib
import logging
import os
import select
import socket
import sys
import threading
import time
import google
from cherrypy import wsgiserver
from google.appengine.tools.devappserver2 import errors
from google.appengine.tools.devappserver2 import http_runtime_constants
from google.appengine.tools.devappserver2 import shutdown
from google.appengine.tools.devappserver2 import thread_executor
_HAS_POLL = hasattr(select, 'poll')
# TODO: the only reason we need to time out is to pick up added or removed
# descriptors. But AFAICT, we only add descriptors at startup and remove them at
# shutdown, so for the bulk of the run the timeout is useless and simply
# wastes CPU. For startup, if we wait to start the thread until after all
# WSGI servers are created, we are good (although we may need to be careful in
# the runtime instances depending on when servers are created relative to the
# sandbox being enabled). For shutdown, more research is needed (one idea is
# to simply not remove descriptors as the process is about to exit).
_READINESS_TIMEOUT_SECONDS = 1
_SECONDS_TO_MILLISECONDS = 1000
# Due to reports of failure to find a consistent port, trying a higher value
# to see if that reduces the problem sufficiently. If it doesn't we can try
# increasing it (on my circa 2010 desktop, it takes about 1/2 second per 1024
# tries) but it would probably be better to either figure out a better
# algorithm or make it possible for code to work with inconsistent ports.
_PORT_0_RETRIES = 2048
class BindError(errors.Error):
"""The server failed to bind its address."""
_THREAD_POOL = thread_executor.ThreadExecutor()
class _SharedCherryPyThreadPool(object):
"""A mimic of wsgiserver.ThreadPool that delegates to a shared thread pool."""
def __init__(self):
self._condition = threading.Condition()
self._connections = set() # Protected by self._condition.
def stop(self, timeout=5):
timeout_time = time.time() + timeout
with self._condition:
while self._connections and time.time() < timeout_time:
self._condition.wait(timeout_time - time.time())
for connection in self._connections:
self._shutdown_connection(connection)
@staticmethod
def _shutdown_connection(connection):
if not connection.rfile.closed:
connection.socket.shutdown(socket.SHUT_RD)
def put(self, obj):
with self._condition:
self._connections.add(obj)
_THREAD_POOL.submit(self._handle, obj)
def _handle(self, obj):
try:
obj.communicate()
finally:
obj.close()
with self._condition:
self._connections.remove(obj)
self._condition.notify()
class SelectThread(object):
"""A thread that selects on sockets and calls corresponding callbacks."""
def __init__(self):
self._lock = threading.Lock()
# self._file_descriptors is a frozenset and
# self._file_descriptor_to_callback is never mutated so they can be
# snapshotted by the select thread without needing to copy.
self._file_descriptors = frozenset()
self._file_descriptor_to_callback = {}
self._select_thread = threading.Thread(
target=self._loop_forever, name='WSGI select')
self._select_thread.daemon = True
def start(self):
self._select_thread.start()
def add_socket(self, s, callback):
"""Add a new socket to watch.
Args:
s: A socket to select on.
callback: A callable with no args to be called when s is ready for a read.
"""
with self._lock:
self._file_descriptors = self._file_descriptors.union([s.fileno()])
new_file_descriptor_to_callback = self._file_descriptor_to_callback.copy()
new_file_descriptor_to_callback[s.fileno()] = callback
self._file_descriptor_to_callback = new_file_descriptor_to_callback
def remove_socket(self, s):
"""Remove a watched socket."""
with self._lock:
self._file_descriptors = self._file_descriptors.difference([s.fileno()])
new_file_descriptor_to_callback = self._file_descriptor_to_callback.copy()
del new_file_descriptor_to_callback[s.fileno()]
self._file_descriptor_to_callback = new_file_descriptor_to_callback
def _loop_forever(self):
while shutdown and not shutdown.shutting_down():
# Check shutdown as it may be gc-ed during shutdown. See
# http://stackoverflow.com/questions/17084260/imported-modules-become-none-when-running-a-function
self._select()
def _select(self):
with self._lock:
fds = self._file_descriptors
fd_to_callback = self._file_descriptor_to_callback
if fds:
if _HAS_POLL:
        # With 100 file descriptors, it is approximately 5x slower to
        # recreate and reinitialize the Poll object on every call to
        # _select rather than reuse one. But the absolute cost of
        # construction, initialization and calling poll(0) is ~25us, so
        # code simplicity wins.
poll = select.poll()
for fd in fds:
poll.register(fd, select.POLLIN)
ready_file_descriptors = [fd for fd, _ in poll.poll(
_READINESS_TIMEOUT_SECONDS * _SECONDS_TO_MILLISECONDS)]
else:
ready_file_descriptors, _, _ = select.select(fds, [], [],
_READINESS_TIMEOUT_SECONDS)
for fd in ready_file_descriptors:
fd_to_callback[fd]()
else:
# select([], [], [], 1) is not supported on Windows.
time.sleep(_READINESS_TIMEOUT_SECONDS)
_SELECT_THREAD = SelectThread()
_SELECT_THREAD.start()
class _SingleAddressWsgiServer(wsgiserver.CherryPyWSGIServer):
"""A WSGI server that uses a shared SelectThread and thread pool."""
def __init__(self, host, app):
"""Constructs a _SingleAddressWsgiServer.
Args:
host: A (hostname, port) tuple containing the hostname and port to bind.
The port can be 0 to allow any port.
app: A WSGI app to handle requests.
"""
super(_SingleAddressWsgiServer, self).__init__(host, self)
self._lock = threading.Lock()
self._app = app # Protected by _lock.
self._error = None # Protected by _lock.
self.requests = _SharedCherryPyThreadPool()
self.software = http_runtime_constants.SERVER_SOFTWARE
# Some servers, especially the API server, may receive many simultaneous
# requests so set the listen() backlog to something high to reduce the
# likelihood of refused connections.
self.request_queue_size = 100
def start(self):
"""Starts the _SingleAddressWsgiServer.
This is a modified version of the base class implementation. Changes:
- Removed unused functionality (Unix domain socket and SSL support).
- Raises BindError instead of socket.error.
- Uses _SharedCherryPyThreadPool instead of wsgiserver.ThreadPool.
- Calls _SELECT_THREAD.add_socket instead of looping forever.
Raises:
BindError: The address could not be bound.
"""
# AF_INET or AF_INET6 socket
# Get the correct address family for our host (allows IPv6 addresses)
host, port = self.bind_addr
try:
info = socket.getaddrinfo(host, port, socket.AF_UNSPEC,
socket.SOCK_STREAM, 0, socket.AI_PASSIVE)
except socket.gaierror:
if ':' in host:
info = [(socket.AF_INET6, socket.SOCK_STREAM, 0, '',
self.bind_addr + (0, 0))]
else:
info = [(socket.AF_INET, socket.SOCK_STREAM, 0, '', self.bind_addr)]
self.socket = None
for res in info:
af, socktype, proto, _, _ = res
try:
self.bind(af, socktype, proto)
except socket.error as socket_error:
if self.socket:
self.socket.close()
self.socket = None
continue
break
if not self.socket:
raise BindError('Unable to bind %s:%s' % self.bind_addr, socket_error)
# Timeout so KeyboardInterrupt can be caught on Win32
self.socket.settimeout(1)
self.socket.listen(self.request_queue_size)
self.ready = True
self._start_time = time.time()
_SELECT_THREAD.add_socket(self.socket, self.tick)
def quit(self):
"""Quits the _SingleAddressWsgiServer."""
_SELECT_THREAD.remove_socket(self.socket)
self.requests.stop(timeout=1)
@property
def port(self):
"""Returns the port that the server is bound to."""
return self.socket.getsockname()[1]
def set_app(self, app):
"""Sets the PEP-333 app to use to serve requests."""
with self._lock:
self._app = app
def set_error(self, error):
"""Sets the HTTP status code to serve for all requests."""
with self._lock:
self._error = error
self._app = None
def __call__(self, environ, start_response):
with self._lock:
app = self._app
error = self._error
if app:
return app(environ, start_response)
else:
start_response('%d %s' % (error, httplib.responses[error]), [])
return []
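# Minimal usage sketch (names hypothetical): the server object doubles as its
# own WSGI app, dispatching to the configured app or to a canned error status:
#   server = _SingleAddressWsgiServer(('localhost', 0), my_app)
#   server.start()
#   server.set_error(503)  # every request now gets "503 Service Unavailable"
#   server.set_app(my_app)  # restores normal dispatch through __call__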
class WsgiServer(object):
def __init__(self, host, app):
"""Constructs a WsgiServer.
Args:
host: A (hostname, port) tuple containing the hostname and port to bind.
The port can be 0 to allow any port.
app: A WSGI app to handle requests.
"""
self.bind_addr = host
self._app = app
self._servers = []
def start(self):
"""Starts the WsgiServer.
This starts multiple _SingleAddressWsgiServers to bind the address in all
address families.
Raises:
BindError: The address could not be bound.
"""
host, port = self.bind_addr
try:
addrinfo = socket.getaddrinfo(host, port, socket.AF_UNSPEC,
socket.SOCK_STREAM, 0, socket.AI_PASSIVE)
sockaddrs = [addr[-1] for addr in addrinfo]
host_ports = [sockaddr[:2] for sockaddr in sockaddrs]
# Remove duplicate addresses caused by bad hosts file. Retain the
# order to minimize behavior change (and so we don't have to tweak
# unit tests to deal with different order).
host_ports = list(collections.OrderedDict.fromkeys(host_ports))
except socket.gaierror:
host_ports = [self.bind_addr]
if port != 0:
self._start_all_fixed_port(host_ports)
else:
for _ in range(_PORT_0_RETRIES):
if self._start_all_dynamic_port(host_ports):
break
else:
raise BindError('Unable to find a consistent port for %s' % host)
def _start_all_fixed_port(self, host_ports):
"""Starts a server for each specified address with a fixed port.
Does the work of actually trying to create a _SingleAddressWsgiServer for
each specified address.
Args:
host_ports: An iterable of host, port tuples.
Raises:
BindError: The address could not be bound.
"""
for host, port in host_ports:
assert port != 0
server = _SingleAddressWsgiServer((host, port), self._app)
try:
server.start()
except BindError as bind_error:
# TODO: I'm not sure about the behavior of quietly ignoring an
# EADDRINUSE as long as the bind succeeds on at least one interface. I
# think we should either:
# - Fail (just like we do now when bind fails on every interface).
# - Retry on next highest port.
logging.debug('Failed to bind "%s:%s": %s', host, port, bind_error)
continue
else:
self._servers.append(server)
if not self._servers:
raise BindError('Unable to bind %s:%s' % self.bind_addr)
def _start_all_dynamic_port(self, host_ports):
"""Starts a server for each specified address with a dynamic port.
Does the work of actually trying to create a _SingleAddressWsgiServer for
each specified address.
Args:
host_ports: An iterable of host, port tuples.
Returns:
      The list of all servers (also saved as self._servers). A non-empty list
      indicates success while an empty list indicates failure.
"""
port = 0
for host, _ in host_ports:
server = _SingleAddressWsgiServer((host, port), self._app)
try:
server.start()
if port == 0:
port = server.port
except BindError as bind_error:
if bind_error[1][0] == errno.EADDRINUSE:
# The port picked at random for first interface was not available
# on one of the other interfaces. Forget them and try again.
for server in self._servers:
server.quit()
self._servers = []
break
else:
# Ignore the interface if we get an error other than EADDRINUSE.
logging.debug('Failed to bind "%s:%s": %s', host, port, bind_error)
continue
else:
self._servers.append(server)
return self._servers
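  # Port-0 strategy: the first interface picks a free port at random and every
  # later interface must bind that same port; on EADDRINUSE all servers are
  # torn down and start() retries, up to _PORT_0_RETRIES attempts.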
def quit(self):
"""Quits the WsgiServer."""
for server in self._servers:
server.quit()
@property
def host(self):
"""Returns the host that the server is bound to."""
return self._servers[0].socket.getsockname()[0]
@property
def port(self):
"""Returns the port that the server is bound to."""
return self._servers[0].socket.getsockname()[1]
def set_app(self, app):
"""Sets the PEP-333 app to use to serve requests."""
self._app = app
for server in self._servers:
server.set_app(app)
def set_error(self, error):
"""Sets the HTTP status code to serve for all requests."""
self._error = error
self._app = None
for server in self._servers:
server.set_error(error)
@property
def ready(self):
return all(server.ready for server in self._servers)
|
fmos_mastercode_bonsai.py
|
'''
FMOS MASTERCODE - Freely Moving Olfactory Search Mastercode
Written: Teresa Findley, tmfindley15@gmail.com
Last Updated: 10.27.2020 (Dorian Yeh)
--Records tracking data via OSC communication with custom code in Bonsai (open source computer vision software -- https://bonsai-rx.org/)
--Records signal data through NI USB-6009 data acquisition board
--Controls solenoid and beambreak hardware through Arduino Mega2560 & Teensyduino 2.0
'''
# [SET UP] #
##IMPORTS
##libraries
from numpy.random import choice
import numpy as np, cv2, os, sys
from timeit import default_timer as timer
import time, math, datetime, random
import OSC, threading, Queue
import nidaqmx, ctypes
import matplotlib.pyplot as plt
from nidaqmx.constants import AcquisitionType, Edge
from nidaqmx.stream_readers import AnalogMultiChannelReader
##local modules
from fmos_preferences_bonsai import *
import fmos_datamgt, fmos_tracking, fmos_serial
##INITIATE VARIABLES -- these are all state machine variables to be used throughout the session
session_num = 1; trial_num = 1; state = 1; prep_odor = True; iti_delay = iti_correct; #trial information
correct0=0; correct1=0; correct2=0; correct3=0; correct4=0; total0=0; total1 = 0; total2=0; total3=0; total4=0; #real time performance statistics
correct0L=0; correct1L=0; correct2L=0; correct3L=0; correct4L=0; total0L=0; total1L=0; total2L=0; total3L=0; total4L=0;
correct0R=0; correct1R=0; correct2R=0; correct3R=0; correct4R=0; total0R=0; total1R=0; total2R=0; total3R=0; total4R=0;
total_trials = 0; total_left = 0; total_right = 0; left_correct = 0; right_correct = 0 #real time performance statistics
total_correct = 0; fraction_correct = 0; fraction_left = 0; fraction_right = 0;
last_occupancy = 0; section_occupancy = 0; counter = 0; msg = 0 #real time Bonsai tracking & nose poke monitor
odor_calibration = np.genfromtxt('D:/FMON_Project/data/olfactometercalibration.txt', delimiter = ',') #odor calibration array
datapath,session_num = fmos_datamgt.CHK_directory(mouse_id,group_name,session_num) #update/create datapath & initiate data files
trialsummary_file = datapath + 'trialsummary.txt'; video_file = datapath + 'videolocation.txt'; timestamp_file = datapath + 'timestamp.txt'
notes_file = datapath + 'notes.txt'
ch0_file = datapath + ch0 + '.dat'; ch1_file = datapath + ch1 + '.dat' #NI signal files
#ch2_file = datapath + ch2 + '.dat'; ch3_file = datapath + ch3 + '.dat'
nx_file = datapath + 'nosex.dat'; ny_file = datapath + 'nosey.dat' #bonsai tracking files
hx_file = datapath + 'headx.dat'; hy_file = datapath + 'heady.dat'
cx_file = datapath + 'comx.dat'; cy_file = datapath + 'comy.dat'
ts_file = datapath + 'timestamp.dat' #timestamp file
receive_address = ('localhost', 6666); trackingcoords = OSC.OSCServer(receive_address); #bonsai tracking variables
qnosex = Queue.LifoQueue(0); qnosey = Queue.LifoQueue(0); #real time position input
nosex = np.zeros((1,1)); nosey = np.zeros((1,1));
headx = np.zeros((1,1)); heady = np.zeros((1,1))
comx = np.zeros((1,1)); comy = np.zeros((1,1))
ts = np.zeros((1,1));
signaldata = np.zeros((channel_num,buffersize),dtype=np.float64) #NI data collection reading variables
reader = AnalogMultiChannelReader(ni_data.in_stream)
##START UP PROCEDURES
section,section_center=fmos_tracking.calc_partitions() #real time tracking: gridline delineation (depends on rig size)
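# calc_partitions() splits the arena into grid sections once per session;
# detect_mouse_partitions() later maps each nose coordinate to a section
# index, which drives the decision logic in State 2 below.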
triallist,odorconditionlist = fmos_datamgt.randomize_trials(random_groupsize,total_groupsize) #randomize trials
fmos_serial.close_all_valves() #turn off all hardware
#Create/Open Data Files
ch0_handle = open(ch0_file,'ab'); ch1_handle = open(ch1_file,'ab');
#ch2_handle = open(ch2_file,'ab'); ch3_handle = open(ch3_file,'ab');
nx_handle = open(nx_file,'ab'); ny_handle = open(ny_file,'ab'); hx_handle = open(hx_file,'ab')
hy_handle = open(hy_file,'ab'); cx_handle = open(cx_file,'ab'); cy_handle = open(cy_file,'ab')
ts_handle = open(ts_file,'ab')
#Bonsai Start Up
trackingcoords.addDefaultHandlers() #add default handlers to the server
def msg_handler(addr, tags, coords, source):
qnosex.put(coords[0]); qnosey.put(coords[1]); #real time storage of nose position
nosex[0,0] = coords[0]; nosey[0,0] = coords[1]
headx[0,0] = coords[2]; heady[0,0] = coords[3]
comx[0,0] = coords[4]; comy[0,0] = coords[5]
ts[0,0] = timer()-session_start;
nosex.tofile(nx_handle); nosey.tofile(ny_handle) #save nose, head, and body coordinates in real time
headx.tofile(hx_handle); heady.tofile(hy_handle)
comx.tofile(cx_handle); comy.tofile(cy_handle)
ts.tofile(ts_handle)
trackingcoords.addMsgHandler("/2python",msg_handler) #add msg handler function to server for between program communication
bonsaitracking = threading.Thread( target = trackingcoords.serve_forever ) #put tracking in continuous background thread
bonsaitracking.daemon = True
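# The LIFO queues mean qnosex.get()/qnosey.get() always return the freshest
# coordinate rather than a backlog; the daemon flag lets the process exit
# without joining the OSC server thread.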
#NI Set Up
ni_data.ai_channels.add_ai_voltage_chan(channels) #add channels to server
ni_data.timing.cfg_samp_clk_timing(samplingrate, '', Edge.RISING, AcquisitionType.CONTINUOUS, buffersize) #instruct how to sample (nidaqmx expects a plain int here; uInt64 was undefined)
def ni_handler(): #define background function to handle incoming NI data
while True:
reader.read_many_sample(signaldata,number_of_samples_per_channel= buffersize, timeout=10.0)
signaldata[0,:].tofile(ch0_handle); signaldata[1,:].tofile(ch1_handle);
#signaldata[2,:].tofile(ch2_handle); signaldata[3,:].tofile(ch3_handle);
nisignal = threading.Thread(target = ni_handler) #set handler function in background
nisignal.daemon = True
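# read_many_sample() blocks until `buffersize` samples per channel arrive,
# so this thread streams the analog channels to their .dat files at a steady
# cadence without stalling the state machine below.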
##INITIATE SESSION
print ("Subject " + str(mouse_id) + ", Session " + str(session_num)) #report session initiation
print ("System Ready. Initiating Data Collection...")
print ("Did you remember to turn on the NITROGEN??") #reminder for users
bonsaitracking.start(); #initiate waiting for Bonsai input
nose = [qnosex.get(),qnosey.get()]; #ask for input from Bonsai
#**********PROGRAM WILL NOT CONTINUE UNTIL IT RECEIVES INPUT...START BONSAI PROGRAM HERE**********#
session_start = timer() #session timer
ni_data.start(); nisignal.start(); #start NIDAQ sniff data collection
localtime = datetime.datetime.now(); #timestamp for locating videos saved locally through Bonsai
print ("Session Started.")
# [MAIN CODE] #
while True:
# [State *](occurs across all states in state machine)
#Nosepoke & Timer
while ard.inWaiting() > 0: #check nosepoke status
msg = fmos_serial.nose_poke_status(msg)
if timer() - session_start >= session_length: #end session at predetermined length
fmos_serial.close_all_valves()
reasonforend = "Auto Session End"
break
#Realtime Tracking
nose = [qnosex.get(),qnosey.get()]; #check nose position
section_occupancy = fmos_tracking.detect_mouse_partitions(nose,section_center, section_occupancy) #section occupancy
    if show_active_stats: #real time trial statistics
frame = cv2.imread('D:/FMON_Project/data/statsbackground.jpeg')
height, width, depth = frame.shape #white background
fraction_correct = "T: "+str(correct0)+"/"+str(total0)+". "+str(correct4)+"/"+str(total4)+". "+str(correct1)+"/"+str(total1)+". "+str(correct2)+"/"+str(total2)+". "+str(correct3)+"/"+str(total3)+". " #session stats
fraction_left = "L: "+str(correct0L)+"/"+str(total0L)+". "+str(correct4L)+"/"+str(total4L)+". "+str(correct1L)+"/"+str(total1L)+". "+str(correct2L)+"/"+str(total2L)+". "+str(correct3L)+"/"+str(total3L)+"."
fraction_right = "R: "+str(correct0R)+"/"+str(total0R)+". "+str(correct4R)+"/"+str(total4R)+". "+str(correct1R)+"/"+str(total1R)+". "+str(correct2R)+"/"+str(total2R)+". "+str(correct3R)+"/"+str(total3R)+"."
#Stats Display
if group_name == 'abs-conc':
cv2.putText(frame,'xxxx 80-20(1%) 80-20(0.1%) CONTROL', (130,(height/2)-40), cv2.FONT_HERSHEY_PLAIN, 1, (0,0,0))
if group_name == 'thresholding':
cv2.putText(frame,'xxxx 90-30 30-10 CONTROL', (130,(height/2)-40), cv2.FONT_HERSHEY_PLAIN, 1, (0,0,0))
if group_name == 'non-spatial':
cv2.putText(frame,'xxxx Exp. xxxx CONTROL', (130,(height/2)-40), cv2.FONT_HERSHEY_PLAIN, 1, (0,0,0))
fraction_left = "O(L): "+str(correct0L)+"/"+str(total0L)+". "+str(correct1L)+"/"+str(total1L)+". "+str(correct2L)+"/"+str(total2L)+". "+str(correct3L)+"/"+str(total3L)+". "
fraction_right = "MS(R): "+str(correct0R)+"/"+str(total0R)+". "+str(correct1R)+"/"+str(total1R)+". "+str(correct2R)+"/"+str(total2R)+". "+str(correct3R)+"/"+str(total3R)+". "
if group_name != 'abs-conc' and group_name != 'non-spatial' and group_name != 'thresholding':
cv2.putText(frame,'100-0 | 90-10 | 80-20 | 60-40 | CONTROL', (130,(height/2)-40), cv2.FONT_HERSHEY_PLAIN, 1, (0,0,0))
if group_name == 'mineral-oil':
cv2.putText(frame,'M6.80-20 M6.50-50 M7.80-20 M7.50-50', (130,(height/2)-40), cv2.FONT_HERSHEY_PLAIN, 1, (0,0,0))
cv2.putText(frame,fraction_correct, (80,(height/2)-20), cv2.FONT_HERSHEY_PLAIN, 1, (0,0,0))
cv2.putText(frame,fraction_left,(80,(height/2)),cv2.FONT_HERSHEY_PLAIN,1,(0,0,0))
cv2.putText(frame,fraction_right,(80,(height/2)+20),cv2.FONT_HERSHEY_PLAIN,1,(0,0,0))
cv2.imshow('Session Statistics',frame)
##Manual Session Termination -- press 'q' to end session manually
if cv2.waitKey(1) & 0xFF == ord('q'):
fmos_serial.close_all_valves()
reasonforend = "Manual Exit"
break
# [State 1] TRIAL INITIATION
if state == 1:
#Odor Preparation
        if prep_odor:
active_valve = triallist[trial_num-1] #side of odor delivery
concentration_setting = odorconditionlist[trial_num-1] #concentration difference of odor delivery
#Update Trial Values & MFC settings
low_valve, correctpoke,nameoftrialtype,correctindex,incorrectindex = fmos_datamgt.trial_values(active_valve)
HairR,LairR,HairL,LairL,Hn2R,Ln2R,Hn2L,Ln2L,activevial,lowvial = fmos_serial.MFC_settings(concentration_setting,odor_calibration,active_valve)
print ("Upcoming Trial: " + nameoftrialtype + ", " + str(concentration_setting)) #report upcoming trial
#turn on MFCs and Vials
if group_name != 'non-spatial':
if active_valve == 1:
tnsy.write("MFC " + str(active_valve) + " " + str(MFC_air) + " " + str(HairR) + "\r")
tnsy.write("MFC " + str(active_valve) + " " + str(MFC_n2) + " " + str(Hn2R) + "\r")
tnsy.write("MFC " + str(low_valve) + " " + str(MFC_air) + " " + str(LairL) + "\r")
tnsy.write("MFC " + str(low_valve) + " " + str(MFC_n2) + " " + str(Ln2L) + "\r")
if active_valve == 2:
tnsy.write("MFC " + str(active_valve) + " " + str(MFC_air) + " " + str(HairL) + "\r")
tnsy.write("MFC " + str(active_valve) + " " + str(MFC_n2) + " " + str(Hn2L) + "\r")
tnsy.write("MFC " + str(low_valve) + " " + str(MFC_air) + " " + str(LairR) + "\r")
tnsy.write("MFC " + str(low_valve) + " " + str(MFC_n2) + " " + str(Ln2R) + "\r")
tnsy.write("vialOn " + str(active_valve) + " " + str(activevial) + "\r")
tnsy.write("vialOn " + str(low_valve) + " " + str(lowvial) + "\r")
if group_name == 'non-spatial':
if non_spatial_condition == 'odor_concentration':
if active_valve == 1:
tnsy.write("MFC " + str(active_valve) + " " + str(MFC_air) + " " + str(HairR) + "\r")
tnsy.write("MFC " + str(active_valve) + " " + str(MFC_n2) + " " + str(Hn2R) + "\r")
tnsy.write("MFC " + str(low_valve) + " " + str(MFC_air) + " " + str(HairL) + "\r")
tnsy.write("MFC " + str(low_valve) + " " + str(MFC_n2) + " " + str(Hn2L) + "\r")
if active_valve == 2:
tnsy.write("MFC " + str(active_valve) + " " + str(MFC_air) + " " + str(LairL) + "\r")
tnsy.write("MFC " + str(active_valve) + " " + str(MFC_n2) + " " + str(Ln2L) + "\r")
tnsy.write("MFC " + str(low_valve) + " " + str(MFC_air) + " " + str(LairR) + "\r")
tnsy.write("MFC " + str(low_valve) + " " + str(MFC_n2) + " " + str(Ln2R) + "\r")
tnsy.write("vialOn " + str(active_valve) + " " + str(odor_vial) + "\r")
tnsy.write("vialOn " + str(low_valve) + " " + str(odor_vial) + "\r")
if non_spatial_condition == 'odor_identity':
tnsy.write("MFC " + str(active_valve) + " " + str(MFC_air) + " " + str(HairR) + "\r")
tnsy.write("MFC " + str(active_valve) + " " + str(MFC_n2) + " " + str(Hn2R) + "\r")
tnsy.write("MFC " + str(low_valve) + " " + str(MFC_air) + " " + str(HairL) + "\r")
tnsy.write("MFC " + str(low_valve) + " " + str(MFC_n2) + " " + str(Hn2L) + "\r")
if active_valve == 1:
tnsy.write("vialOn " + str(active_valve) + " " + str(odor_vial) + "\r")
tnsy.write("vialOn " + str(low_valve) + " " + str(odor_vial) + "\r")
if active_valve == 2:
tnsy.write("vialOn " + str(active_valve) + " " + str(odor_vial2) + "\r")
tnsy.write("vialOn " + str(low_valve) + " " + str(odor_vial2) + "\r")
iti_timeout_start = math.floor(timer()) #start vial timer
prep_odor = False #odor has been decided
#Trial Initiation
if (math.floor(timer()) >= math.floor(iti_timeout_start + iti_delay)): #vial mixing timer
if msg == 3:
                tstart = timer() - session_start; #timestamp trial start (in seconds)
tnsy.write("valve " + str(low_valve) + " 1 on\r") #turn on FVs
tnsy.write("valve " + str(active_valve) + " 1 on\r")
state = 2 #update trial variables
print (("Trial " + str(trial_num) + " Activated: " + nameoftrialtype)) #report trial start
# [State 2] TRIAL DECISION
if state == 2:
#Frame Count of Section Occupancy
if (section_occupancy == last_occupancy):
if (section_occupancy < 2):
counter = counter + 1
else: counter = 0; last_occupancy = section_occupancy
else: counter = 0; last_occupancy = section_occupancy
#Decision Status
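        # The mouse must hold the same scoring section for count_requirement
        # consecutive frames; this debounces tracker jitter before a choice
        # is registered.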
if (counter == count_requirement):
if (section_occupancy == correctindex):
response = 1; answer = "Correct"
elif (section_occupancy == incorrectindex):
response = 0; answer = "Incorrect"
print("Response registered: " + answer) #report response
tnsy.write("valve " + str(active_valve) + " 1 off\r") #turn off final valves
# tnsy.write("valve " + str(low_valve) + " 1 off\r")
state = 3; counter = 0; #update trial statistics
# [State 3] REWARD DELIVERY
if state == 3:
#Correct Responses
if response == 1:
if msg == correctpoke:
if active_valve == 1: #Increment Active Statistics
if concentration_setting == 0:
total0 = total0 + 1; total0R = total0R + 1; correct0 = correct0 + 1; correct0R = correct0R + 1
if concentration_setting == 1:
total1 = total1 + 1; total1R = total1R + 1; correct1 = correct1 + 1; correct1R = correct1R + 1
if concentration_setting == 2:
total2 = total2 + 1; total2R = total2R + 1; correct2 = correct2 + 1; correct2R = correct2R + 1
if concentration_setting == 3:
total3 = total3 + 1; total3R = total3R + 1; correct3 = correct3 + 1; correct3R = correct3R + 1
#90-10 R correct-------------
                    if concentration_setting in (4, 5):
                        total4 = total4 + 1; total4R = total4R + 1; correct4 = correct4 + 1; correct4R = correct4R + 1
#----------------------------
if active_valve == 2:
if concentration_setting == 0:
total0 = total0 + 1; total0L = total0L + 1; correct0 = correct0 + 1; correct0L = correct0L + 1
if concentration_setting == 1:
total1 = total1 + 1; total1L = total1L + 1; correct1 = correct1 + 1; correct1L = correct1L + 1
if concentration_setting == 2:
total2 = total2 + 1; total2L = total2L + 1; correct2 = correct2 + 1; correct2L = correct2L + 1
if concentration_setting == 3:
total3 = total3 + 1; total3L = total3L + 1; correct3 = correct3 + 1; correct3L = correct3L + 1
#90-10 L correct--------------------------
                    if concentration_setting in (4, 5):
                        total4 = total4 + 1; total4L = total4L + 1; correct4 = correct4 + 1; correct4L = correct4L + 1
#-------------------------------
fmos_serial.deliver_reward(msg) #deliver reward
print("Reward Delivered.") #report reward delivery
tend = timer() - session_start #timestamp trial end & record trial summary info
fmos_datamgt.write_trialsummary(trialsummary_file,trial_num,concentration_setting, active_valve,response,tstart,tend)
state = 1; prep_odor = True; iti_delay = iti_correct;trial_num = trial_num + 1; #update trial variables
#Incorrect Responses
else:
if msg > 0:
if active_valve == 1: #Increment Active Statistics
if concentration_setting == 0:
total0 = total0 + 1; total0R = total0R + 1;
if concentration_setting == 1:
total1 = total1 + 1; total1R = total1R + 1;
if concentration_setting == 2:
total2 = total2 + 1; total2R = total2R + 1;
if concentration_setting == 3:
total3 = total3 + 1; total3R = total3R + 1;
#90-10 R wrong-----------------
                    if concentration_setting in (4, 5):
                        total4 = total4 + 1; total4R = total4R + 1;
#----------------------------
if active_valve == 2:
if concentration_setting == 0:
total0 = total0 + 1; total0L = total0L + 1;
if concentration_setting == 1:
total1 = total1 + 1; total1L = total1L + 1;
if concentration_setting == 2:
total2 = total2 + 1; total2L = total2L + 1;
if concentration_setting == 3:
total3 = total3 + 1; total3L = total3L + 1;
#90-10 L wrong-------------
                    if concentration_setting in (4, 5):
                        total4 = total4 + 1; total4L = total4L + 1;
#---------------------------
print("No Reward Delivered.") #report no reward
tend = timer() - session_start #timestamp trial end & record trial summary info
fmos_datamgt.write_trialsummary(trialsummary_file,trial_num,concentration_setting,active_valve,response,tstart,tend)
state = 1; prep_odor = True; trial_num = trial_num + 1; #update trial variables
if concentration_setting == 3:
iti_delay = iti_correct
else: iti_delay = iti_incorrect
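# The state machine above cycles 1 (prep odor, wait for a nose poke) ->
# 2 (await sustained section occupancy) -> 3 (score response, deliver reward)
# and exits only via the session timer or a manual 'q'.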
# [SHUT DOWN] #
tnsy.write("vialOff " + str(right_valve) + " " + str(lowvial) + "\r")
tnsy.write("vialOff " + str(left_valve) + " " + str(lowvial) + "\r")
notepad = raw_input("Please record notes here. Be precise and thorough. ") + '\n' #raw_input stores the notes verbatim (no eval), so quotation marks are not needed
#Close All Data Files
ch0_handle.close();ch1_handle.close();
#ch2_handle.close();ch3_handle.close();
nx_handle.close();ny_handle.close();hx_handle.close();hy_handle.close();cx_handle.close();cy_handle.close(); ts_handle.close()
print ("Session Ended.") #report end of session
print ("Data Collection Ended") #report end of data collection
##EXIT PROGRAM
fmos_serial.close_all_valves(); cv2.destroyAllWindows(); ard.close(); tnsy.close()
fraction_correct = "T: "+str(correct0)+"/"+str(total0)+". "+str(correct4)+"/"+str(total4)+". "+str(correct1)+"/"+str(total1)+". "+str(correct2)+"/"+str(total2)+". "+str(correct3)+"/"+str(total3)+"." #session stats
fraction_left = "L: "+str(correct0L)+"/"+str(total0L)+". "+str(correct4L)+"/"+str(total4L)+". "+str(correct1L)+"/"+str(total1L)+". "+str(correct2L)+"/"+str(total2L)+". "+str(correct3L)+"/"+str(total3L)+"."
fraction_right = "R: "+str(correct0R)+"/"+str(total0R)+". "+str(correct4R)+"/"+str(total4R)+". "+str(correct1R)+"/"+str(total1R)+". "+str(correct2R)+"/"+str(total2R)+". "+str(correct3R)+"/"+str(total3R)+"."
if group_name == 'abs-conc' or group_name == 'non-spatial':
if group_name == 'abs-conc':
print (' xxxx 80-20(1%) 80-20(0.1%) CONTROL')
elif group_name == 'non-spatial':
print (' xxxx M.S. Octanol CONTROL')
else: print (' 100-0 90-10 80-20 60-40 CONTROL')
print (fraction_correct)
print (fraction_left)
print (fraction_right)
#Write Video Locator & Timestamp
fmos_datamgt.write_vidlocator(video_file,localtime)
performance_report = fraction_correct + '\n' + fraction_left + '\n' + fraction_right
fmos_datamgt.record_notes(notes_file,session_num,localtime,notepad, performance_report)
|
mainwindow.py
|
# -*- coding: utf-8 -*-
#
# Copyright © Spyder Project Contributors
# Licensed under the terms of the MIT License
# (see spyder/__init__.py for details)
"""
Spyder, the Scientific Python Development Environment
=====================================================
Developed and maintained by the Spyder Project
Contributors
Copyright © Spyder Project Contributors
Licensed under the terms of the MIT License
(see spyder/__init__.py for details)
"""
# =============================================================================
# Stdlib imports
# =============================================================================
from __future__ import print_function
import atexit
import errno
import gc
import os
import os.path as osp
import re
import shutil
import signal
import socket
import subprocess
import sys
import threading
import traceback
import importlib
#==============================================================================
# Keeping a reference to the original sys.exit before patching it
#==============================================================================
ORIGINAL_SYS_EXIT = sys.exit
#==============================================================================
# Check requirements
#==============================================================================
from spyder import requirements
requirements.check_path()
requirements.check_qt()
requirements.check_spyder_kernels()
#==============================================================================
# Windows only: support for hiding console window when started with python.exe
#==============================================================================
set_attached_console_visible = None
is_attached_console_visible = None
set_windows_appusermodelid = None
if os.name == 'nt':
from spyder.utils.windows import (set_attached_console_visible,
is_attached_console_visible,
set_windows_appusermodelid)
#==============================================================================
# Workaround: importing rope.base.project here, otherwise this module can't
# be imported if Spyder was executed from another folder than spyder
#==============================================================================
try:
import rope.base.project # analysis:ignore
except ImportError:
pass
#==============================================================================
# Qt imports
#==============================================================================
from qtpy import API, PYQT5
from qtpy.compat import from_qvariant
from qtpy.QtCore import (QByteArray, QCoreApplication, QPoint, QSize, Qt,
QThread, QTimer, QUrl, Signal, Slot)
from qtpy.QtGui import QColor, QDesktopServices, QIcon, QKeySequence, QPixmap
from qtpy.QtWidgets import (QAction, QApplication, QDockWidget, QMainWindow,
QMenu, QMessageBox, QShortcut, QSplashScreen,
QStyleFactory)
# Avoid a "Cannot mix incompatible Qt library" error on Windows platforms
from qtpy import QtSvg # analysis:ignore
# Avoid a bug in Qt: https://bugreports.qt.io/browse/QTBUG-46720
from qtpy import QtWebEngineWidgets # analysis:ignore
# To catch font errors in QtAwesome
from qtawesome.iconic_font import FontError
#==============================================================================
# Proper high DPI scaling is available in Qt >= 5.6.0. This attribute must
# be set before creating the application.
#==============================================================================
from spyder.config.main import CONF
if hasattr(Qt, 'AA_EnableHighDpiScaling'):
QCoreApplication.setAttribute(Qt.AA_EnableHighDpiScaling,
CONF.get('main', 'high_dpi_scaling'))
#==============================================================================
# Create our QApplication instance here because it's needed to render the
# splash screen created below
#==============================================================================
from spyder.utils.qthelpers import qapplication, MENU_SEPARATOR
from spyder.config.base import get_image_path
MAIN_APP = qapplication()
if PYQT5:
APP_ICON = QIcon(get_image_path("spyder.svg"))
else:
APP_ICON = QIcon(get_image_path("spyder.png"))
MAIN_APP.setWindowIcon(APP_ICON)
#==============================================================================
# Create splash screen out of MainWindow to reduce perceived startup time.
#==============================================================================
from spyder.config.base import _, get_image_path, DEV, running_under_pytest
if not running_under_pytest():
SPLASH = QSplashScreen(QPixmap(get_image_path('splash.svg')))
SPLASH_FONT = SPLASH.font()
SPLASH_FONT.setPixelSize(10)
SPLASH.setFont(SPLASH_FONT)
SPLASH.show()
SPLASH.showMessage(_("Initializing..."), Qt.AlignBottom | Qt.AlignCenter |
Qt.AlignAbsolute, QColor(Qt.white))
QApplication.processEvents()
else:
SPLASH = None
#==============================================================================
# Local utility imports
#==============================================================================
from spyder import (__version__, __project_url__, __forum_url__,
__trouble_url__, __trouble_url_short__, get_versions)
from spyder.config.base import (get_conf_path, get_module_source_path, STDERR,
DEBUG, debug_print, MAC_APP_NAME, get_home_dir,
running_in_mac_app, get_module_path,
reset_config_files)
from spyder.config.main import OPEN_FILES_PORT
from spyder.config.utils import IMPORT_EXT, is_gtk_desktop
from spyder.app.cli_options import get_options
from spyder import dependencies
from spyder.py3compat import (is_text_string, to_text_string,
PY3, qbytearray_to_str, configparser as cp)
from spyder.utils import encoding, programs
from spyder.utils import icon_manager as ima
from spyder.utils.introspection import module_completion
from spyder.utils.programs import is_module_installed
from spyder.utils.misc import select_port, getcwd_or_home, get_python_executable
from spyder.widgets.fileswitcher import FileSwitcher
from spyder.plugins.lspmanager import LSPManager
#==============================================================================
# Local gui imports
#==============================================================================
# NOTE: Move (if possible) imports of widgets and plugins exactly where they
# are needed in MainWindow to speed up perceived startup time (i.e. the time
# from clicking the Spyder icon to showing the splash screen).
try:
from spyder.utils.environ import WinUserEnvDialog
except ImportError:
WinUserEnvDialog = None # analysis:ignore
from spyder.utils.qthelpers import (create_action, add_actions, get_icon,
add_shortcut_to_tooltip,
create_module_bookmark_actions,
create_program_action, DialogManager,
create_python_script_action, file_uri)
from spyder.config.gui import get_shortcut
from spyder.otherplugins import get_spyderplugins_mods
from spyder.app import tour
#==============================================================================
# Get the cwd before initializing WorkingDirectory, which sets it to the one
# used in the last session
#==============================================================================
CWD = getcwd_or_home()
#==============================================================================
# Spyder's main window widgets utilities
#==============================================================================
def get_python_doc_path():
"""
Return Python documentation path
(Windows: return the PythonXX.chm path if available)
"""
if os.name == 'nt':
doc_path = osp.join(sys.prefix, "Doc")
if not osp.isdir(doc_path):
return
python_chm = [path for path in os.listdir(doc_path)
if re.match(r"(?i)Python[0-9]{3,6}.chm", path)]
if python_chm:
return file_uri(osp.join(doc_path, python_chm[0]))
else:
vinf = sys.version_info
doc_path = '/usr/share/doc/python%d.%d/html' % (vinf[0], vinf[1])
python_doc = osp.join(doc_path, "index.html")
if osp.isfile(python_doc):
return file_uri(python_doc)
#==============================================================================
# Main Window
#==============================================================================
class MainWindow(QMainWindow):
"""Spyder main window"""
DOCKOPTIONS = QMainWindow.AllowTabbedDocks|QMainWindow.AllowNestedDocks
CURSORBLINK_OSDEFAULT = QApplication.cursorFlashTime()
SPYDER_PATH = get_conf_path('path')
SPYDER_NOT_ACTIVE_PATH = get_conf_path('not_active_path')
BOOKMARKS = (
('Python2', "https://docs.python.org/2/index.html",
_("Python2 documentation")),
('Python3', "https://docs.python.org/3/index.html",
_("Python3 documentation")),
('numpy', "https://docs.scipy.org/doc/",
_("Numpy and Scipy documentation")),
('matplotlib', "https://matplotlib.org/contents.html",
_("Matplotlib documentation")),
('PyQt5',
"http://pyqt.sourceforge.net/Docs/PyQt5/",
_("PyQt5 Reference Guide")),
('PyQt5',
"http://pyqt.sourceforge.net/Docs/PyQt5/class_reference.html",
_("PyQt5 API Reference")),
('winpython', "https://winpython.github.io/",
_("WinPython"))
)
DEFAULT_LAYOUTS = 4
# Signals
restore_scrollbar_position = Signal()
all_actions_defined = Signal()
sig_pythonpath_changed = Signal()
sig_open_external_file = Signal(str)
sig_resized = Signal("QResizeEvent") # related to interactive tour
sig_moved = Signal("QMoveEvent") # related to interactive tour
def __init__(self, options=None):
QMainWindow.__init__(self)
qapp = QApplication.instance()
if PYQT5:
# Enabling scaling for high dpi
qapp.setAttribute(Qt.AA_UseHighDpiPixmaps)
self.default_style = str(qapp.style().objectName())
self.dialog_manager = DialogManager()
self.init_workdir = options.working_directory
self.profile = options.profile
self.multithreaded = options.multithreaded
self.new_instance = options.new_instance
self.open_project = options.project
self.window_title = options.window_title
self.debug_print("Start of MainWindow constructor")
def signal_handler(signum, frame=None):
"""Handler for signals."""
sys.stdout.write('Handling signal: %s\n' % signum)
sys.stdout.flush()
QApplication.quit()
if os.name == "nt":
try:
import win32api
win32api.SetConsoleCtrlHandler(signal_handler, True)
except ImportError:
pass
else:
signal.signal(signal.SIGTERM, signal_handler)
if not DEV:
                # Make Spyder quit when pressing Ctrl+C in the console.
                # In DEV mode Ctrl+C doesn't quit, because it helps to
                # capture the traceback when Spyder freezes
signal.signal(signal.SIGINT, signal_handler)
# Use a custom Qt stylesheet
if sys.platform == 'darwin':
spy_path = get_module_source_path('spyder')
img_path = osp.join(spy_path, 'images')
            with open(osp.join(spy_path, 'app', 'mac_stylesheet.qss')) as mac_qss:
                mac_style = mac_qss.read()
mac_style = mac_style.replace('$IMAGE_PATH', img_path)
self.setStyleSheet(mac_style)
# Create our TEMPDIR
if not osp.isdir(programs.TEMPDIR):
os.mkdir(programs.TEMPDIR)
# Shortcut management data
self.shortcut_data = []
# Loading Spyder path
self.path = []
self.not_active_path = []
self.project_path = []
if osp.isfile(self.SPYDER_PATH):
self.path, _x = encoding.readlines(self.SPYDER_PATH)
self.path = [name for name in self.path if osp.isdir(name)]
if osp.isfile(self.SPYDER_NOT_ACTIVE_PATH):
self.not_active_path, _x = \
encoding.readlines(self.SPYDER_NOT_ACTIVE_PATH)
self.not_active_path = \
[name for name in self.not_active_path if osp.isdir(name)]
self.remove_path_from_sys_path()
self.add_path_to_sys_path()
# Plugins
self.console = None
self.workingdirectory = None
self.editor = None
self.explorer = None
self.help = None
self.onlinehelp = None
self.projects = None
self.outlineexplorer = None
self.historylog = None
self.ipyconsole = None
self.variableexplorer = None
self.plots = None
self.findinfiles = None
self.thirdparty_plugins = []
# Tour # TODO: Should I consider it a plugin?? or?
self.tour = None
self.tours_available = None
# File switcher
self.fileswitcher = None
        # Check for updates Thread and Worker, references needed to prevent
        # segfaulting
self.check_updates_action = None
self.thread_updates = None
self.worker_updates = None
self.give_updates_feedback = True
# Preferences
from spyder.preferences.configdialog import (MainConfigPage,
ColorSchemeConfigPage)
from spyder.preferences.shortcuts import ShortcutsConfigPage
from spyder.preferences.runconfig import RunConfigPage
from spyder.preferences.maininterpreter import MainInterpreterConfigPage
self.general_prefs = [MainConfigPage, ShortcutsConfigPage,
ColorSchemeConfigPage, MainInterpreterConfigPage,
RunConfigPage]
self.prefs_index = None
self.prefs_dialog_size = None
# Quick Layouts and Dialogs
from spyder.preferences.layoutdialog import (LayoutSaveDialog,
LayoutSettingsDialog)
self.dialog_layout_save = LayoutSaveDialog
self.dialog_layout_settings = LayoutSettingsDialog
# Actions
self.lock_dockwidgets_action = None
self.show_toolbars_action = None
self.close_dockwidget_action = None
self.undo_action = None
self.redo_action = None
self.copy_action = None
self.cut_action = None
self.paste_action = None
self.selectall_action = None
self.maximize_action = None
self.fullscreen_action = None
# Menu bars
self.file_menu = None
self.file_menu_actions = []
self.edit_menu = None
self.edit_menu_actions = []
self.search_menu = None
self.search_menu_actions = []
self.source_menu = None
self.source_menu_actions = []
self.run_menu = None
self.run_menu_actions = []
self.debug_menu = None
self.debug_menu_actions = []
self.consoles_menu = None
self.consoles_menu_actions = []
self.projects_menu = None
self.projects_menu_actions = []
self.tools_menu = None
self.tools_menu_actions = []
        self.external_tools_menu = None  # We must keep a reference to this,
        # otherwise the external tools menu is lost after leaving the setup method
self.external_tools_menu_actions = []
self.view_menu = None
self.plugins_menu = None
self.plugins_menu_actions = []
self.toolbars_menu = None
self.help_menu = None
self.help_menu_actions = []
# Status bar widgets
self.mem_status = None
self.cpu_status = None
# Toolbars
self.visible_toolbars = []
self.toolbarslist = []
self.main_toolbar = None
self.main_toolbar_actions = []
self.file_toolbar = None
self.file_toolbar_actions = []
self.edit_toolbar = None
self.edit_toolbar_actions = []
self.search_toolbar = None
self.search_toolbar_actions = []
self.source_toolbar = None
self.source_toolbar_actions = []
self.run_toolbar = None
self.run_toolbar_actions = []
self.debug_toolbar = None
self.debug_toolbar_actions = []
self.layout_toolbar = None
self.layout_toolbar_actions = []
if running_under_pytest():
# Show errors in internal console when testing.
CONF.set('main', 'show_internal_errors', False)
# Set window title
self.set_window_title()
        if set_windows_appusermodelid is not None:
res = set_windows_appusermodelid()
debug_print("appusermodelid: " + str(res))
# Setting QTimer if running in travis
test_travis = os.environ.get('TEST_CI_APP', None)
if test_travis is not None:
global MAIN_APP
timer_shutdown_time = 30000
self.timer_shutdown = QTimer(self)
self.timer_shutdown.timeout.connect(MAIN_APP.quit)
self.timer_shutdown.start(timer_shutdown_time)
# Showing splash screen
self.splash = SPLASH
if CONF.get('main', 'current_version', '') != __version__:
CONF.set('main', 'current_version', __version__)
# Execute here the actions to be performed only once after
# each update (there is nothing there for now, but it could
# be useful some day...)
# List of satellite widgets (registered in add_dockwidget):
self.widgetlist = []
# Flags used if closing() is called by the exit() shell command
self.already_closed = False
self.is_starting_up = True
self.is_setting_up = True
self.dockwidgets_locked = CONF.get('main', 'panes_locked')
self.floating_dockwidgets = []
self.window_size = None
self.window_position = None
self.state_before_maximizing = None
self.current_quick_layout = None
self.previous_layout_settings = None # TODO: related to quick layouts
self.last_plugin = None
self.fullscreen_flag = None # isFullscreen does not work as expected
        # The following flag remembers the maximized state even when
# the window is in fullscreen mode:
self.maximized_flag = None
# The following flag is used to restore window's geometry when
# toggling out of fullscreen mode in Windows.
self.saved_normal_geometry = None
# To keep track of the last focused widget
self.last_focused_widget = None
self.previous_focused_widget = None
# Server to open external files on a single instance
# This is needed in order to handle socket creation problems.
# See issue 4132
if os.name == 'nt':
try:
self.open_files_server = socket.socket(socket.AF_INET,
socket.SOCK_STREAM,
socket.IPPROTO_TCP)
except OSError as e:
self.open_files_server = None
QMessageBox.warning(None, "Spyder",
_("An error occurred while creating a socket needed "
"by Spyder. Please, try to run as an Administrator "
"from cmd.exe the following command and then "
"restart your computer: <br><br><span "
"style=\'color: #555555\'><b>netsh winsock reset"
"</b></span><br>"))
else:
self.open_files_server = socket.socket(socket.AF_INET,
socket.SOCK_STREAM,
socket.IPPROTO_TCP)
self.apply_settings()
self.debug_print("End of MainWindow constructor")
def debug_print(self, message):
"""Debug prints"""
debug_print(message)
#---- Window setup
def create_toolbar(self, title, object_name, iconsize=24):
"""Create and return toolbar with *title* and *object_name*"""
toolbar = self.addToolBar(title)
toolbar.setObjectName(object_name)
toolbar.setIconSize(QSize(iconsize, iconsize))
self.toolbarslist.append(toolbar)
return toolbar
def setup(self):
"""Setup main window"""
self.debug_print("*** Start of MainWindow setup ***")
self.debug_print(" ..core actions")
self.close_dockwidget_action = create_action(self,
icon=ima.icon('DialogCloseButton'),
text=_("Close current pane"),
triggered=self.close_current_dockwidget,
context=Qt.ApplicationShortcut)
self.register_shortcut(self.close_dockwidget_action, "_",
"Close pane")
self.lock_dockwidgets_action = create_action(self, _("Lock panes"),
toggled=self.toggle_lock_dockwidgets,
context=Qt.ApplicationShortcut)
self.register_shortcut(self.lock_dockwidgets_action, "_",
"Lock unlock panes")
# custom layouts shortcuts
self.toggle_next_layout_action = create_action(self,
_("Use next layout"),
triggered=self.toggle_next_layout,
context=Qt.ApplicationShortcut)
self.toggle_previous_layout_action = create_action(self,
_("Use previous layout"),
triggered=self.toggle_previous_layout,
context=Qt.ApplicationShortcut)
self.register_shortcut(self.toggle_next_layout_action, "_",
"Use next layout")
self.register_shortcut(self.toggle_previous_layout_action, "_",
"Use previous layout")
# File switcher shortcuts
self.file_switcher_action = create_action(
self,
_('File switcher...'),
icon=ima.icon('filelist'),
tip=_('Fast switch between files'),
triggered=self.open_fileswitcher,
context=Qt.ApplicationShortcut)
self.register_shortcut(self.file_switcher_action, context="_",
name="File switcher")
self.symbol_finder_action = create_action(
self, _('Symbol finder...'),
icon=ima.icon('symbol_find'),
tip=_('Fast symbol search in file'),
triggered=self.open_symbolfinder,
context=Qt.ApplicationShortcut)
self.register_shortcut(self.symbol_finder_action, context="_",
name="symbol finder", add_sc_to_tip=True)
self.file_toolbar_actions = [self.file_switcher_action,
self.symbol_finder_action]
def create_edit_action(text, tr_text, icon):
textseq = text.split(' ')
method_name = textseq[0].lower()+"".join(textseq[1:])
action = create_action(self, tr_text,
icon=icon,
triggered=self.global_callback,
data=method_name,
context=Qt.WidgetShortcut)
self.register_shortcut(action, "Editor", text)
return action
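        # create_edit_action stores the target method name in the action's
        # data; global_callback later reads it back and dispatches the call
        # to the focused widget, so one action serves every editor-like pane.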
self.undo_action = create_edit_action('Undo', _('Undo'),
ima.icon('undo'))
self.redo_action = create_edit_action('Redo', _('Redo'),
ima.icon('redo'))
self.copy_action = create_edit_action('Copy', _('Copy'),
ima.icon('editcopy'))
self.cut_action = create_edit_action('Cut', _('Cut'),
ima.icon('editcut'))
self.paste_action = create_edit_action('Paste', _('Paste'),
ima.icon('editpaste'))
self.selectall_action = create_edit_action("Select All",
_("Select All"),
ima.icon('selectall'))
self.edit_menu_actions = [self.undo_action, self.redo_action,
None, self.cut_action, self.copy_action,
self.paste_action, self.selectall_action]
namespace = None
self.debug_print(" ..toolbars")
# File menu/toolbar
self.file_menu = self.menuBar().addMenu(_("&File"))
self.file_toolbar = self.create_toolbar(_("File toolbar"),
"file_toolbar")
# Edit menu/toolbar
self.edit_menu = self.menuBar().addMenu(_("&Edit"))
self.edit_toolbar = self.create_toolbar(_("Edit toolbar"),
"edit_toolbar")
# Search menu/toolbar
self.search_menu = self.menuBar().addMenu(_("&Search"))
self.search_toolbar = self.create_toolbar(_("Search toolbar"),
"search_toolbar")
# Source menu/toolbar
self.source_menu = self.menuBar().addMenu(_("Sour&ce"))
self.source_toolbar = self.create_toolbar(_("Source toolbar"),
"source_toolbar")
# Run menu/toolbar
self.run_menu = self.menuBar().addMenu(_("&Run"))
self.run_toolbar = self.create_toolbar(_("Run toolbar"),
"run_toolbar")
# Debug menu/toolbar
self.debug_menu = self.menuBar().addMenu(_("&Debug"))
self.debug_toolbar = self.create_toolbar(_("Debug toolbar"),
"debug_toolbar")
# Consoles menu/toolbar
self.consoles_menu = self.menuBar().addMenu(_("C&onsoles"))
# Projects menu
self.projects_menu = self.menuBar().addMenu(_("&Projects"))
self.projects_menu.aboutToShow.connect(self.valid_project)
# Tools menu
self.tools_menu = self.menuBar().addMenu(_("&Tools"))
# View menu
self.view_menu = self.menuBar().addMenu(_("&View"))
# Help menu
self.help_menu = self.menuBar().addMenu(_("&Help"))
# Status bar
status = self.statusBar()
status.setObjectName("StatusBar")
status.showMessage(_("Welcome to Spyder!"), 5000)
self.debug_print(" ..tools")
# Tools + External Tools
prefs_action = create_action(self, _("Pre&ferences"),
icon=ima.icon('configure'),
triggered=self.edit_preferences,
context=Qt.ApplicationShortcut)
self.register_shortcut(prefs_action, "_", "Preferences",
add_sc_to_tip=True)
spyder_path_action = create_action(self,
_("PYTHONPATH manager"),
None, icon=ima.icon('pythonpath'),
triggered=self.path_manager_callback,
tip=_("Python Path Manager"),
menurole=QAction.ApplicationSpecificRole)
update_modules_action = create_action(self,
_("Update module names list"),
triggered=lambda:
module_completion.reset(),
tip=_("Refresh list of module names "
"available in PYTHONPATH"))
reset_spyder_action = create_action(
self, _("Reset Spyder to factory defaults"),
triggered=self.reset_spyder)
self.tools_menu_actions = [prefs_action, spyder_path_action]
if WinUserEnvDialog is not None:
winenv_action = create_action(self,
_("Current user environment variables..."),
icon='win_env.png',
tip=_("Show and edit current user environment "
"variables in Windows registry "
"(i.e. for all sessions)"),
triggered=self.win_env)
self.tools_menu_actions.append(winenv_action)
self.tools_menu_actions += [reset_spyder_action, MENU_SEPARATOR,
update_modules_action]
# External Tools submenu
self.external_tools_menu = QMenu(_("External Tools"))
self.external_tools_menu_actions = []
# WinPython control panel
self.wp_action = create_action(self, _("WinPython control panel"),
icon=get_icon('winpython.svg'),
triggered=lambda:
programs.run_python_script('winpython', 'controlpanel'))
if os.name == 'nt' and is_module_installed('winpython'):
self.external_tools_menu_actions.append(self.wp_action)
# Qt-related tools
additact = []
for name in ("designer-qt4", "designer"):
qtdact = create_program_action(self, _("Qt Designer"),
name, 'qtdesigner.png')
if qtdact:
break
for name in ("linguist-qt4", "linguist"):
qtlact = create_program_action(self, _("Qt Linguist"),
"linguist", 'qtlinguist.png')
if qtlact:
break
args = ['-no-opengl'] if os.name == 'nt' else []
for act in (qtdact, qtlact):
if act:
additact.append(act)
if additact and is_module_installed('winpython'):
self.external_tools_menu_actions += [None] + additact
# Guidata and Sift
self.debug_print(" ..sift?")
gdgq_act = []
# Guidata and Guiqwt don't support PyQt5 yet and they fail
# with an AssertionError when imported using those bindings
# (see issue 2274)
try:
from guidata import configtools
from guidata import config # analysis:ignore
guidata_icon = configtools.get_icon('guidata.svg')
guidata_act = create_python_script_action(self,
_("guidata examples"), guidata_icon,
"guidata",
osp.join("tests", "__init__"))
gdgq_act += [guidata_act]
        except Exception:
            pass
try:
from guidata import configtools
from guiqwt import config # analysis:ignore
guiqwt_icon = configtools.get_icon('guiqwt.svg')
guiqwt_act = create_python_script_action(self,
_("guiqwt examples"), guiqwt_icon, "guiqwt",
osp.join("tests", "__init__"))
if guiqwt_act:
gdgq_act += [guiqwt_act]
sift_icon = configtools.get_icon('sift.svg')
sift_act = create_python_script_action(self, _("Sift"),
sift_icon, "guiqwt", osp.join("tests", "sift"))
if sift_act:
gdgq_act += [sift_act]
        except Exception:
            pass
if gdgq_act:
self.external_tools_menu_actions += [None] + gdgq_act
# ViTables
vitables_act = create_program_action(self, _("ViTables"),
"vitables", 'vitables.png')
if vitables_act:
self.external_tools_menu_actions += [None, vitables_act]
# Maximize current plugin
self.maximize_action = create_action(self, '',
triggered=self.maximize_dockwidget,
context=Qt.ApplicationShortcut)
self.register_shortcut(self.maximize_action, "_", "Maximize pane")
self.__update_maximize_action()
# Fullscreen mode
self.fullscreen_action = create_action(self,
_("Fullscreen mode"),
triggered=self.toggle_fullscreen,
context=Qt.ApplicationShortcut)
self.register_shortcut(self.fullscreen_action, "_",
"Fullscreen mode", add_sc_to_tip=True)
# Main toolbar
self.main_toolbar_actions = [self.maximize_action,
self.fullscreen_action,
None,
prefs_action, spyder_path_action]
self.main_toolbar = self.create_toolbar(_("Main toolbar"),
"main_toolbar")
# Internal console plugin
self.debug_print(" ..plugin: internal console")
from spyder.plugins.console.plugin import Console
self.console = Console(self, namespace, exitfunc=self.closing,
profile=self.profile,
multithreaded=self.multithreaded,
message=_("Spyder Internal Console\n\n"
"This console is used to report application\n"
"internal errors and to inspect Spyder\n"
"internals with the following commands:\n"
" spy.app, spy.window, dir(spy)\n\n"
"Please don't use it to run your code\n\n"))
self.console.register_plugin()
# Language Server Protocol Client initialization
self.set_splash(_("Creating LSP Manager..."))
self.lspmanager = LSPManager(self)
# Working directory plugin
self.debug_print(" ..plugin: working directory")
from spyder.plugins.workingdirectory.plugin import WorkingDirectory
self.workingdirectory = WorkingDirectory(self, self.init_workdir, main=self)
self.workingdirectory.register_plugin()
self.toolbarslist.append(self.workingdirectory.toolbar)
# Help plugin
if CONF.get('help', 'enable'):
self.set_splash(_("Loading help..."))
from spyder.plugins.help.plugin import Help
self.help = Help(self)
self.help.register_plugin()
# Outline explorer widget
if CONF.get('outline_explorer', 'enable'):
self.set_splash(_("Loading outline explorer..."))
from spyder.plugins.outlineexplorer.plugin import OutlineExplorer
self.outlineexplorer = OutlineExplorer(self)
self.outlineexplorer.register_plugin()
# Editor plugin
self.set_splash(_("Loading editor..."))
from spyder.plugins.editor.plugin import Editor
self.editor = Editor(self)
self.editor.register_plugin()
# Start LSP client
self.set_splash(_("Launching LSP Client..."))
self.lspmanager.start_lsp_client('python')
# Populating file menu entries
quit_action = create_action(self, _("&Quit"),
icon=ima.icon('exit'),
tip=_("Quit"),
triggered=self.console.quit,
context=Qt.ApplicationShortcut)
self.register_shortcut(quit_action, "_", "Quit")
restart_action = create_action(self, _("&Restart"),
icon=ima.icon('restart'),
tip=_("Restart"),
triggered=self.restart,
context=Qt.ApplicationShortcut)
self.register_shortcut(restart_action, "_", "Restart")
self.file_menu_actions += [self.file_switcher_action,
self.symbol_finder_action, None,
restart_action, quit_action]
self.set_splash("")
self.debug_print(" ..widgets")
# Namespace browser
self.set_splash(_("Loading namespace browser..."))
from spyder.plugins.variableexplorer.plugin import VariableExplorer
self.variableexplorer = VariableExplorer(self)
self.variableexplorer.register_plugin()
# Figure browser
self.set_splash(_("Loading figure browser..."))
from spyder.plugins.plots.plugin import Plots
self.plots = Plots(self)
self.plots.register_plugin()
# History log widget
if CONF.get('historylog', 'enable'):
self.set_splash(_("Loading history plugin..."))
from spyder.plugins.history.plugin import HistoryLog
self.historylog = HistoryLog(self)
self.historylog.register_plugin()
# IPython console
self.set_splash(_("Loading IPython console..."))
from spyder.plugins.ipythonconsole.plugin import IPythonConsole
self.ipyconsole = IPythonConsole(self)
self.ipyconsole.register_plugin()
# Explorer
if CONF.get('explorer', 'enable'):
self.set_splash(_("Loading file explorer..."))
from spyder.plugins.explorer.plugin import Explorer
self.explorer = Explorer(self)
self.explorer.register_plugin()
# Online help widget
try: # Qt >= v4.4
from spyder.plugins.onlinehelp.plugin import OnlineHelp
except ImportError: # Qt < v4.4
OnlineHelp = None # analysis:ignore
if CONF.get('onlinehelp', 'enable') and OnlineHelp is not None:
self.set_splash(_("Loading online help..."))
self.onlinehelp = OnlineHelp(self)
self.onlinehelp.register_plugin()
# Project explorer widget
self.set_splash(_("Loading project explorer..."))
from spyder.plugins.projects.plugin import Projects
self.projects = Projects(self)
self.projects.register_plugin()
self.project_path = self.projects.get_pythonpath(at_start=True)
# Find in files
if CONF.get('find_in_files', 'enable'):
from spyder.plugins.findinfiles.plugin import FindInFiles
self.findinfiles = FindInFiles(self)
self.findinfiles.register_plugin()
# Load other plugins (former external plugins)
        # TODO: Use this loop to load all internal plugins and remove
        # duplicated code
other_plugins = ['breakpoints', 'profiler', 'pylint']
for plugin_name in other_plugins:
if CONF.get(plugin_name, 'enable'):
module = importlib.import_module(
'spyder.plugins.{}'.format(plugin_name))
plugin = module.PLUGIN_CLASS(self)
if plugin.check_compatibility()[0]:
self.thirdparty_plugins.append(plugin)
plugin.register_plugin()
# Third-party plugins
self.set_splash(_("Loading third-party plugins..."))
for mod in get_spyderplugins_mods():
try:
plugin = mod.PLUGIN_CLASS(self)
if plugin.check_compatibility()[0]:
self.thirdparty_plugins.append(plugin)
plugin.register_plugin()
except Exception as error:
print("%s: %s" % (mod, str(error)), file=STDERR)
traceback.print_exc(file=STDERR)
self.set_splash(_("Setting up main window..."))
# Help menu
trouble_action = create_action(self,
_("Troubleshooting..."),
triggered=self.trouble_guide)
dep_action = create_action(self, _("Dependencies..."),
triggered=self.show_dependencies,
icon=ima.icon('advanced'))
report_action = create_action(self,
_("Report issue..."),
icon=ima.icon('bug'),
triggered=self.report_issue)
support_action = create_action(self,
_("Spyder support..."),
triggered=self.google_group)
self.check_updates_action = create_action(self,
_("Check for updates..."),
triggered=self.check_updates)
# Spyder documentation
spyder_doc = 'https://docs.spyder-ide.org/'
doc_action = create_action(self, _("Spyder documentation"),
icon=ima.icon('DialogHelpButton'),
triggered=lambda:
programs.start_file(spyder_doc))
self.register_shortcut(doc_action, "_",
"spyder documentation")
if self.help is not None:
tut_action = create_action(self, _("Spyder tutorial"),
triggered=self.help.show_tutorial)
else:
tut_action = None
shortcuts_action = create_action(self, _("Shortcuts Summary"),
shortcut="Meta+F1",
triggered=self.show_shortcuts_dialog)
#----- Tours
self.tour = tour.AnimatedTour(self)
self.tours_menu = QMenu(_("Interactive tours"))
self.tour_menu_actions = []
# TODO: Only show intro tour for now. When we are close to finish
# 3.0, we will finish and show the other tour
self.tours_available = tour.get_tours(0)
for i, tour_available in enumerate(self.tours_available):
self.tours_available[i]['last'] = 0
tour_name = tour_available['name']
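# Bind the loop variable through default arguments: Python closures
# are late-binding, so a bare lambda here would always trigger the
# last tour in the list.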
def trigger(i=i, self=self): # closure needed!
return lambda: self.show_tour(i)
temp_action = create_action(self, tour_name, tip="",
triggered=trigger())
self.tour_menu_actions += [temp_action]
self.tours_menu.addActions(self.tour_menu_actions)
self.help_menu_actions = [doc_action, tut_action, shortcuts_action,
self.tours_menu,
MENU_SEPARATOR, trouble_action,
report_action, dep_action,
self.check_updates_action, support_action,
MENU_SEPARATOR]
# Python documentation
if get_python_doc_path() is not None:
pydoc_act = create_action(self, _("Python documentation"),
triggered=lambda:
programs.start_file(get_python_doc_path()))
self.help_menu_actions.append(pydoc_act)
# IPython documentation
if self.help is not None:
ipython_menu = QMenu(_("IPython documentation"), self)
intro_action = create_action(self, _("Intro to IPython"),
triggered=self.ipyconsole.show_intro)
quickref_action = create_action(self, _("Quick reference"),
triggered=self.ipyconsole.show_quickref)
guiref_action = create_action(self, _("Console help"),
triggered=self.ipyconsole.show_guiref)
add_actions(ipython_menu, (intro_action, guiref_action,
quickref_action))
self.help_menu_actions.append(ipython_menu)
# Windows-only: documentation located in sys.prefix/Doc
ipm_actions = []
def add_ipm_action(text, path):
"""Add installed Python module doc action to help submenu"""
# QAction.triggered works differently for PySide and PyQt
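# (PyQt passes the action's checked state as the first argument to
# the slot, while PySide does not, hence the extra `_checked`
# parameter in the first branch below.)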
path = file_uri(path)
if API != 'pyside':
slot = lambda _checked, path=path: programs.start_file(path)
else:
slot = lambda path=path: programs.start_file(path)
action = create_action(self, text,
icon='%s.png' % osp.splitext(path)[1][1:],
triggered=slot)
ipm_actions.append(action)
sysdocpth = osp.join(sys.prefix, 'Doc')
if osp.isdir(sysdocpth): # exists on Windows, except frozen dist.
for docfn in os.listdir(sysdocpth):
pt = r'([a-zA-Z\_]*)(doc)?(-dev)?(-ref)?(-user)?\.(chm|pdf)'
match = re.match(pt, docfn)
if match is not None:
pname = match.groups()[0]
if pname not in ('Python', ):
add_ipm_action(pname, osp.join(sysdocpth, docfn))
# Installed Python modules submenu (Windows only)
if ipm_actions:
pymods_menu = QMenu(_("Installed Python modules"), self)
add_actions(pymods_menu, ipm_actions)
self.help_menu_actions.append(pymods_menu)
# Online documentation
web_resources = QMenu(_("Online documentation"))
webres_actions = create_module_bookmark_actions(self,
self.BOOKMARKS)
webres_actions.insert(2, None)
webres_actions.insert(5, None)
webres_actions.insert(8, None)
add_actions(web_resources, webres_actions)
self.help_menu_actions.append(web_resources)
# Qt assistant link
if sys.platform.startswith('linux') and not PYQT5:
qta_exe = "assistant-qt4"
else:
qta_exe = "assistant"
qta_act = create_program_action(self, _("Qt documentation"),
qta_exe)
if qta_act:
self.help_menu_actions += [qta_act, None]
# About Spyder
about_action = create_action(self,
_("About %s...") % "Spyder",
icon=ima.icon('MessageBoxInformation'),
triggered=self.about)
self.help_menu_actions += [MENU_SEPARATOR, about_action]
# Status bar widgets
from spyder.widgets.status import MemoryStatus, CPUStatus
self.mem_status = MemoryStatus(self, status)
self.cpu_status = CPUStatus(self, status)
self.apply_statusbar_settings()
# ----- View
# View menu
self.plugins_menu = QMenu(_("Panes"), self)
self.toolbars_menu = QMenu(_("Toolbars"), self)
self.quick_layout_menu = QMenu(_("Window layouts"), self)
self.quick_layout_set_menu()
self.view_menu.addMenu(self.plugins_menu) # Panes
add_actions(self.view_menu, (self.lock_dockwidgets_action,
self.close_dockwidget_action,
self.maximize_action,
MENU_SEPARATOR))
self.show_toolbars_action = create_action(self,
_("Show toolbars"),
triggered=self.show_toolbars,
context=Qt.ApplicationShortcut)
self.register_shortcut(self.show_toolbars_action, "_",
"Show toolbars")
self.view_menu.addMenu(self.toolbars_menu)
self.view_menu.addAction(self.show_toolbars_action)
add_actions(self.view_menu, (MENU_SEPARATOR,
self.quick_layout_menu,
self.toggle_previous_layout_action,
self.toggle_next_layout_action,
MENU_SEPARATOR,
self.fullscreen_action))
if set_attached_console_visible is not None:
cmd_act = create_action(self,
_("Attached console window (debugging)"),
toggled=set_attached_console_visible)
cmd_act.setChecked(is_attached_console_visible())
add_actions(self.view_menu, (MENU_SEPARATOR, cmd_act))
# Adding external tools action to "Tools" menu
if self.external_tools_menu_actions:
external_tools_act = create_action(self, _("External Tools"))
external_tools_act.setMenu(self.external_tools_menu)
self.tools_menu_actions += [None, external_tools_act]
# Filling out menu/toolbar entries:
add_actions(self.file_menu, self.file_menu_actions)
add_actions(self.edit_menu, self.edit_menu_actions)
add_actions(self.search_menu, self.search_menu_actions)
add_actions(self.source_menu, self.source_menu_actions)
add_actions(self.run_menu, self.run_menu_actions)
add_actions(self.debug_menu, self.debug_menu_actions)
add_actions(self.consoles_menu, self.consoles_menu_actions)
add_actions(self.projects_menu, self.projects_menu_actions)
add_actions(self.tools_menu, self.tools_menu_actions)
add_actions(self.external_tools_menu,
self.external_tools_menu_actions)
add_actions(self.help_menu, self.help_menu_actions)
add_actions(self.main_toolbar, self.main_toolbar_actions)
add_actions(self.file_toolbar, self.file_toolbar_actions)
add_actions(self.edit_toolbar, self.edit_toolbar_actions)
add_actions(self.search_toolbar, self.search_toolbar_actions)
add_actions(self.source_toolbar, self.source_toolbar_actions)
add_actions(self.debug_toolbar, self.debug_toolbar_actions)
add_actions(self.run_toolbar, self.run_toolbar_actions)
# Apply all defined shortcuts (plugins + 3rd-party plugins)
self.apply_shortcuts()
# Emitting the signal notifying plugins that main window menu and
# toolbar actions are all defined:
self.all_actions_defined.emit()
# Window set-up
self.debug_print("Setting up window...")
self.setup_layout(default=False)
# Show and hide shortcuts in menus for Mac.
# This is a workaround because we can't disable shortcuts
# by setting context=Qt.WidgetShortcut there
if sys.platform == 'darwin':
for name in ['file', 'edit', 'search', 'source', 'run', 'debug',
'projects', 'tools', 'plugins']:
menu_object = getattr(self, name + '_menu')
menu_object.aboutToShow.connect(
lambda name=name: self.show_shortcuts(name))
menu_object.aboutToHide.connect(
lambda name=name: self.hide_shortcuts(name))
if self.splash is not None:
self.splash.hide()
# Enabling tear off for all menus except help menu
if CONF.get('main', 'tear_off_menus'):
for child in self.menuBar().children():
if isinstance(child, QMenu) and child != self.help_menu:
child.setTearOffEnabled(True)
# Menu about to show
for child in self.menuBar().children():
if isinstance(child, QMenu):
try:
child.aboutToShow.connect(self.update_edit_menu)
except TypeError:
pass
self.debug_print("*** End of MainWindow setup ***")
self.is_starting_up = False
def post_visible_setup(self):
"""Actions to be performed only after the main window's `show` method
was triggered"""
self.restore_scrollbar_position.emit()
# Remove our temporary dir
atexit.register(self.remove_tmpdir)
# [Workaround for Issue 880]
# QDockWidget objects are not painted if restored as floating
# windows, so we must dock them before showing the mainwindow,
# then set them again as floating windows here.
for widget in self.floating_dockwidgets:
widget.setFloating(True)
# In Mac OS X 10.7 our app is not displayed after it's initialized (I
# don't know why, as this doesn't happen when started from the terminal),
# so we need to resort to this hack to make it appear.
if running_in_mac_app():
idx = __file__.index(MAC_APP_NAME)
app_path = __file__[:idx]
subprocess.call(['open', app_path + MAC_APP_NAME])
# Server to maintain just one Spyder instance and open files in it if
# the user tries to start other instances with
# $ spyder foo.py
if (CONF.get('main', 'single_instance') and not self.new_instance
and self.open_files_server):
t = threading.Thread(target=self.start_open_files_server)
t.setDaemon(True)
t.start()
# Connect the window to the signal emitted by the previous server
# when it gets a client connected to it
self.sig_open_external_file.connect(self.open_external_file)
# Create Plugins and toolbars submenus
self.create_plugins_menu()
self.create_toolbars_menu()
# Update toolbar visibility status
self.toolbars_visible = CONF.get('main', 'toolbars_visible')
self.load_last_visible_toolbars()
# Update lock status of dockwidgets (panes)
self.lock_dockwidgets_action.setChecked(self.dockwidgets_locked)
self.apply_panes_settings()
# Hide Internal Console so that people don't use it instead of
# the External or IPython ones
if self.console.dockwidget.isVisible() and DEV is None:
self.console.toggle_view_action.setChecked(False)
self.console.dockwidget.hide()
# Show Help and Consoles by default
plugins_to_show = [self.ipyconsole]
if self.help is not None:
plugins_to_show.append(self.help)
for plugin in plugins_to_show:
if plugin.dockwidget.isVisible():
plugin.dockwidget.raise_()
# Show history file if no console is visible
if not self.ipyconsole.isvisible:
self.historylog.add_history(get_conf_path('history.py'))
if self.open_project:
self.projects.open_project(self.open_project)
else:
# Load last project if a project was active when Spyder
# was closed
self.projects.reopen_last_project()
# If no project is active, load last session
if self.projects.get_active_project() is None:
self.editor.setup_open_files()
# Check for spyder updates
if DEV is None and CONF.get('main', 'check_updates_on_startup'):
self.give_updates_feedback = False
self.check_updates(startup=True)
# Show dialog with missing dependencies
self.report_missing_dependencies()
# Raise the menuBar to the top of the main window widget's stack
# (Fixes issue 3887)
self.menuBar().raise_()
self.is_setting_up = False
def set_window_title(self):
"""Set window title."""
if DEV is not None:
title = u"Spyder %s (Python %s.%s)" % (__version__,
sys.version_info[0],
sys.version_info[1])
else:
title = u"Spyder (Python %s.%s)" % (sys.version_info[0],
sys.version_info[1])
if DEBUG:
title += u" [DEBUG MODE %d]" % DEBUG
if self.window_title is not None:
title += u' -- ' + to_text_string(self.window_title)
if self.projects is not None:
path = self.projects.get_active_project_path()
if path:
path = path.replace(get_home_dir(), u'~')
title = u'{0} - {1}'.format(path, title)
self.base_title = title
self.setWindowTitle(self.base_title)
def report_missing_dependencies(self):
"""Show a QMessageBox with a list of missing hard dependencies"""
missing_deps = dependencies.missing_dependencies()
if missing_deps:
QMessageBox.critical(self, _('Error'),
_("<b>You have missing dependencies!</b>"
"<br><br><tt>%s</tt><br><br>"
"<b>Please install them to avoid this message.</b>"
"<br><br>"
"<i>Note</i>: Spyder could work without some of these "
"dependencies, however to have a smooth experience when "
"using Spyder we <i>strongly</i> recommend you to install "
"all the listed missing dependencies.<br><br>"
"Failing to install these dependencies might result in bugs. "
"Please be sure that any found bugs are not the direct "
"result of missing dependencies, prior to reporting a new "
"issue."
) % missing_deps, QMessageBox.Ok)
def load_window_settings(self, prefix, default=False, section='main'):
"""Load window layout settings from userconfig-based configuration
with *prefix*, under *section*
default: if True, do not restore inner layout"""
get_func = CONF.get_default if default else CONF.get
window_size = get_func(section, prefix+'size')
prefs_dialog_size = get_func(section, prefix+'prefs_dialog_size')
if default:
hexstate = None
else:
hexstate = get_func(section, prefix+'state', None)
pos = get_func(section, prefix+'position')
# It's necessary to verify that the saved window position is valid
# for the current screen. See issue 3748
width = pos[0]
height = pos[1]
screen_shape = QApplication.desktop().geometry()
current_width = screen_shape.width()
current_height = screen_shape.height()
if current_width < width or current_height < height:
pos = CONF.get_default(section, prefix+'position')
is_maximized = get_func(section, prefix+'is_maximized')
is_fullscreen = get_func(section, prefix+'is_fullscreen')
return hexstate, window_size, prefs_dialog_size, pos, is_maximized, \
is_fullscreen
def get_window_settings(self):
"""Return current window settings
Symmetric to the 'set_window_settings' setter"""
window_size = (self.window_size.width(), self.window_size.height())
is_fullscreen = self.isFullScreen()
if is_fullscreen:
is_maximized = self.maximized_flag
else:
is_maximized = self.isMaximized()
pos = (self.window_position.x(), self.window_position.y())
prefs_dialog_size = (self.prefs_dialog_size.width(),
self.prefs_dialog_size.height())
hexstate = qbytearray_to_str(self.saveState())
return (hexstate, window_size, prefs_dialog_size, pos, is_maximized,
is_fullscreen)
def set_window_settings(self, hexstate, window_size, prefs_dialog_size,
pos, is_maximized, is_fullscreen):
"""Set window settings
Symmetric to the 'get_window_settings' accessor"""
self.setUpdatesEnabled(False)
self.window_size = QSize(window_size[0], window_size[1]) # width,height
self.prefs_dialog_size = QSize(prefs_dialog_size[0],
prefs_dialog_size[1]) # width,height
self.window_position = QPoint(pos[0], pos[1]) # x,y
self.setWindowState(Qt.WindowNoState)
self.resize(self.window_size)
self.move(self.window_position)
# Window layout
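# `hexstate` is the hex-encoded QMainWindow.saveState() blob stored
# in the config (see save_current_window_settings); restoring it
# re-creates the whole dock/toolbar arrangement.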
if hexstate:
self.restoreState(QByteArray().fromHex(
str(hexstate).encode('utf-8')))
# [Workaround for Issue 880]
# QDockWidget objects are not painted if restored as floating
# windows, so we must dock them before showing the mainwindow.
for widget in self.children():
if isinstance(widget, QDockWidget) and widget.isFloating():
self.floating_dockwidgets.append(widget)
widget.setFloating(False)
# Is fullscreen?
if is_fullscreen:
self.setWindowState(Qt.WindowFullScreen)
self.__update_fullscreen_action()
# Is maximized?
if is_fullscreen:
self.maximized_flag = is_maximized
elif is_maximized:
self.setWindowState(Qt.WindowMaximized)
self.setUpdatesEnabled(True)
def save_current_window_settings(self, prefix, section='main',
none_state=False):
"""Save current window settings with *prefix* in
the userconfig-based configuration, under *section*"""
win_size = self.window_size
prefs_size = self.prefs_dialog_size
CONF.set(section, prefix+'size', (win_size.width(), win_size.height()))
CONF.set(section, prefix+'prefs_dialog_size',
(prefs_size.width(), prefs_size.height()))
CONF.set(section, prefix+'is_maximized', self.isMaximized())
CONF.set(section, prefix+'is_fullscreen', self.isFullScreen())
pos = self.window_position
CONF.set(section, prefix+'position', (pos.x(), pos.y()))
self.maximize_dockwidget(restore=True)  # Restore non-maximized layout
if none_state:
CONF.set(section, prefix + 'state', None)
else:
qba = self.saveState()
CONF.set(section, prefix + 'state', qbytearray_to_str(qba))
CONF.set(section, prefix+'statusbar',
not self.statusBar().isHidden())
def tabify_plugins(self, first, second):
"""Tabify plugin dockwigdets"""
self.tabifyDockWidget(first.dockwidget, second.dockwidget)
# --- Layouts
def setup_layout(self, default=False):
"""Setup window layout"""
prefix = 'window' + '/'
settings = self.load_window_settings(prefix, default)
hexstate = settings[0]
self.first_spyder_run = False
if hexstate is None:
# First Spyder execution:
self.setWindowState(Qt.WindowMaximized)
self.first_spyder_run = True
self.setup_default_layouts('default', settings)
# Now that the initial setup is done, copy the window settings,
# except for the hexstate in the quick layouts sections for the
# default layouts.
# Order and name of the default layouts is found in config.py
section = 'quick_layouts'
get_func = CONF.get_default if default else CONF.get
order = get_func(section, 'order')
# restore the original defaults if reset layouts is called
if default:
CONF.set(section, 'active', order)
CONF.set(section, 'order', order)
CONF.set(section, 'names', order)
for index, name in enumerate(order):
prefix = 'layout_{0}/'.format(index)
self.save_current_window_settings(prefix, section,
none_state=True)
# store the initial layout as the default in spyder
prefix = 'layout_default/'
section = 'quick_layouts'
self.save_current_window_settings(prefix, section, none_state=True)
self.current_quick_layout = 'default'
# Regenerate menu
self.quick_layout_set_menu()
self.set_window_settings(*settings)
for plugin in self.widgetlist:
try:
plugin.initialize_plugin_in_mainwindow_layout()
except Exception as error:
print("%s: %s" % (plugin, str(error)), file=STDERR)
traceback.print_exc(file=STDERR)
def setup_default_layouts(self, index, settings):
"""Setup default layouts when run for the first time"""
self.set_window_settings(*settings)
self.setUpdatesEnabled(False)
# IMPORTANT: order has to be the same as defined in the config file
MATLAB, RSTUDIO, VERTICAL, HORIZONTAL = range(self.DEFAULT_LAYOUTS)
# define widgets locally
editor = self.editor
console_ipy = self.ipyconsole
console_int = self.console
outline = self.outlineexplorer
explorer_project = self.projects
explorer_file = self.explorer
explorer_variable = self.variableexplorer
plots = self.plots
history = self.historylog
finder = self.findinfiles
help_plugin = self.help
helper = self.onlinehelp
plugins = self.thirdparty_plugins
global_hidden_widgets = [finder, console_int, explorer_project,
helper] + plugins
global_hidden_toolbars = [self.source_toolbar, self.edit_toolbar,
self.search_toolbar]
# Layout definition
# Layouts are organized by columns; each column is organized by rows.
# Column widths have to add up to 1.0; row heights per column have to
# add up to 1.0.
# Spyder Default Initial Layout
s_layout = {'widgets': [
# column 0
[[explorer_project]],
# column 1
[[editor]],
# column 2
[[outline]],
# column 3
[[help_plugin, explorer_variable, plots, helper,
explorer_file, finder] + plugins,
[console_int, console_ipy, history]]
],
'width fraction': [0.0, # column 0 width
0.55, # column 1 width
0.0, # column 2 width
0.45], # column 3 width
'height fraction': [[1.0], # column 0, row heights
[1.0], # column 1, row heights
[1.0], # column 2, row heights
[0.46, 0.54]], # column 3, row heights
'hidden widgets': [outline],
'hidden toolbars': [],
}
r_layout = {'widgets': [
# column 0
[[editor],
[console_ipy, console_int]],
# column 1
[[explorer_variable, plots, history, outline,
finder] + plugins,
[explorer_file, explorer_project, help_plugin, helper]]
],
'width fraction': [0.55, # column 0 width
0.45], # column 1 width
'height fraction': [[0.55, 0.45], # column 0, row heights
[0.55, 0.45]], # column 1, row heights
'hidden widgets': [outline],
'hidden toolbars': [],
}
# Matlab
m_layout = {'widgets': [
# column 0
[[explorer_file, explorer_project],
[outline]],
# column 1
[[editor],
[console_ipy, console_int]],
# column 2
[[explorer_variable, plots, finder] + plugins,
[history, help_plugin, helper]]
],
'width fraction': [0.20, # column 0 width
0.40, # column 1 width
0.40], # column 2 width
'height fraction': [[0.55, 0.45], # column 0, row heights
[0.55, 0.45], # column 1, row heights
[0.55, 0.45]], # column 2, row heights
'hidden widgets': [],
'hidden toolbars': [],
}
# Vertically split
v_layout = {'widgets': [
# column 0
[[editor],
[console_ipy, console_int, explorer_file,
explorer_project, help_plugin, explorer_variable, plots,
history, outline, finder, helper] + plugins]
],
'width fraction': [1.0], # column 0 width
'height fraction': [[0.55, 0.45]], # column 0, row heights
'hidden widgets': [outline],
'hidden toolbars': [],
}
# Horizontally split
h_layout = {'widgets': [
# column 0
[[editor]],
# column 1
[[console_ipy, console_int, explorer_file,
explorer_project, help_plugin, explorer_variable, plots,
history, outline, finder, helper] + plugins]
],
'width fraction': [0.55, # column 0 width
0.45], # column 1 width
'height fraction': [[1.0], # column 0, row heights
[1.0]], # column 1, row heights
'hidden widgets': [outline],
'hidden toolbars': []
}
# Layout selection
layouts = {'default': s_layout,
RSTUDIO: r_layout,
MATLAB: m_layout,
VERTICAL: v_layout,
HORIZONTAL: h_layout}
layout = layouts[index]
widgets_layout = layout['widgets']
widgets = []
for column in widgets_layout:
for row in column:
for widget in row:
if widget is not None:
widgets.append(widget)
# Make every widget visible
for widget in widgets:
widget.toggle_view(True)
action = widget.toggle_view_action
action.setChecked(widget.dockwidget.isVisible())
# Set the widgets horizontally
for i in range(len(widgets) - 1):
first, second = widgets[i], widgets[i+1]
if first is not None and second is not None:
self.splitDockWidget(first.dockwidget, second.dockwidget,
Qt.Horizontal)
# Arrange rows vertically
for column in widgets_layout:
for i in range(len(column) - 1):
first_row, second_row = column[i], column[i+1]
if first_row is not None and second_row is not None:
self.splitDockWidget(first_row[0].dockwidget,
second_row[0].dockwidget,
Qt.Vertical)
# Tabify
for column in widgets_layout:
for row in column:
for i in range(len(row) - 1):
first, second = row[i], row[i+1]
if first is not None and second is not None:
self.tabify_plugins(first, second)
# Raise front widget per row
row[0].dockwidget.show()
row[0].dockwidget.raise_()
# Hide toolbars
hidden_toolbars = global_hidden_toolbars + layout['hidden toolbars']
for toolbar in hidden_toolbars:
if toolbar is not None:
toolbar.close()
# Hide widgets
hidden_widgets = global_hidden_widgets + layout['hidden widgets']
for widget in hidden_widgets:
if widget is not None:
widget.dockwidget.close()
# set the width and height
self._layout_widget_info = []
width, height = self.window_size.width(), self.window_size.height()
# fix column width
# for c in range(len(widgets_layout)):
# widget = widgets_layout[c][0][0].dockwidget
# min_width, max_width = widget.minimumWidth(), widget.maximumWidth()
# info = {'widget': widget,
# 'min width': min_width,
# 'max width': max_width}
# self._layout_widget_info.append(info)
# new_width = int(layout['width fraction'][c] * width * 0.95)
# widget.setMinimumWidth(new_width)
# widget.setMaximumWidth(new_width)
# widget.updateGeometry()
# fix column height
for c, column in enumerate(widgets_layout):
for r in range(len(column) - 1):
widget = column[r][0]
dockwidget = widget.dockwidget
dock_min_h = dockwidget.minimumHeight()
dock_max_h = dockwidget.maximumHeight()
info = {'widget': widget,
'dock min height': dock_min_h,
'dock max height': dock_max_h}
self._layout_widget_info.append(info)
# The 0.95 factor adjusts the height based on the useful
# estimated area in the window
new_height = int(layout['height fraction'][c][r]*height*0.95)
dockwidget.setMinimumHeight(new_height)
dockwidget.setMaximumHeight(new_height)
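# The fixed min/max heights above force Qt to honor the height
# fractions; the single-shot timer below then calls layout_fix_timer
# to restore the original size constraints once the layout settles.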
self._custom_layout_timer = QTimer(self)
self._custom_layout_timer.timeout.connect(self.layout_fix_timer)
self._custom_layout_timer.setSingleShot(True)
self._custom_layout_timer.start(5000)
def layout_fix_timer(self):
"""Fixes the height of docks after a new layout is set."""
info = self._layout_widget_info
for i in info:
dockwidget = i['widget'].dockwidget
if 'dock min width' in i:
dockwidget.setMinimumWidth(i['dock min width'])
dockwidget.setMaximumWidth(i['dock max width'])
if 'dock min height' in i:
dockwidget.setMinimumHeight(i['dock min height'])
dockwidget.setMaximumHeight(i['dock max height'])
dockwidget.updateGeometry()
self.setUpdatesEnabled(True)
@Slot()
def toggle_previous_layout(self):
""" """
self.toggle_layout('previous')
@Slot()
def toggle_next_layout(self):
""" """
self.toggle_layout('next')
def toggle_layout(self, direction='next'):
""" """
get = CONF.get
names = get('quick_layouts', 'names')
order = get('quick_layouts', 'order')
active = get('quick_layouts', 'active')
if len(active) == 0:
return
layout_index = ['default']
for name in order:
if name in active:
layout_index.append(names.index(name))
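# `layout_index` now maps menu positions to layout ids: 'default'
# first, then the indices of the active layouts; switching wraps
# around below via modular arithmetic.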
current_layout = self.current_quick_layout
dic = {'next': 1, 'previous': -1}
if current_layout is None:
# Start from default
current_layout = 'default'
if current_layout in layout_index:
current_index = layout_index.index(current_layout)
else:
current_index = 0
new_index = (current_index + dic[direction]) % len(layout_index)
self.quick_layout_switch(layout_index[new_index])
def quick_layout_set_menu(self):
""" """
get = CONF.get
names = get('quick_layouts', 'names')
order = get('quick_layouts', 'order')
active = get('quick_layouts', 'active')
ql_actions = [create_action(self, _('Spyder Default Layout'),
triggered=lambda:
self.quick_layout_switch('default'))]
for name in order:
if name in active:
index = names.index(name)
# closure required so lambda works with the default parameter
def trigger(i=index, self=self):
return lambda: self.quick_layout_switch(i)
qli_act = create_action(self, name, triggered=trigger())
# closure above replaces the following which stopped working
# qli_act = create_action(self, name, triggered=lambda i=index:
# self.quick_layout_switch(i)
ql_actions += [qli_act]
self.ql_save = create_action(self, _("Save current layout"),
triggered=lambda:
self.quick_layout_save(),
context=Qt.ApplicationShortcut)
self.ql_preferences = create_action(self, _("Layout preferences"),
triggered=lambda:
self.quick_layout_settings(),
context=Qt.ApplicationShortcut)
self.ql_reset = create_action(self, _('Reset to spyder default'),
triggered=self.reset_window_layout)
self.register_shortcut(self.ql_save, "_", "Save current layout")
self.register_shortcut(self.ql_preferences, "_", "Layout preferences")
ql_actions += [None]
ql_actions += [self.ql_save, self.ql_preferences, self.ql_reset]
self.quick_layout_menu.clear()
add_actions(self.quick_layout_menu, ql_actions)
if len(order) == 0:
self.ql_preferences.setEnabled(False)
else:
self.ql_preferences.setEnabled(True)
@Slot()
def reset_window_layout(self):
"""Reset window layout to default"""
answer = QMessageBox.warning(self, _("Warning"),
_("Window layout will be reset to default settings: "
"this affects window position, size and dockwidgets.\n"
"Do you want to continue?"),
QMessageBox.Yes | QMessageBox.No)
if answer == QMessageBox.Yes:
self.setup_layout(default=True)
def quick_layout_save(self):
"""Save layout dialog"""
get = CONF.get
set_ = CONF.set
names = get('quick_layouts', 'names')
order = get('quick_layouts', 'order')
active = get('quick_layouts', 'active')
dlg = self.dialog_layout_save(self, names)
if dlg.exec_():
name = dlg.combo_box.currentText()
if name in names:
answer = QMessageBox.warning(self, _("Warning"),
_("Layout <b>%s</b> will be \
overwritten. Do you want to \
continue?") % name,
QMessageBox.Yes | QMessageBox.No)
index = order.index(name)
else:
answer = True
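# Deleted layouts leave a None placeholder in `names`; reuse the
# first free slot if there is one, otherwise append a new entry.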
if None in names:
index = names.index(None)
names[index] = name
else:
index = len(names)
names.append(name)
order.append(name)
# Always make a new layout active, even if it overwrites an
# inactive layout
if name not in active:
active.append(name)
if answer:
self.save_current_window_settings('layout_{}/'.format(index),
section='quick_layouts')
set_('quick_layouts', 'names', names)
set_('quick_layouts', 'order', order)
set_('quick_layouts', 'active', active)
self.quick_layout_set_menu()
def quick_layout_settings(self):
"""Layout settings dialog"""
get = CONF.get
set_ = CONF.set
section = 'quick_layouts'
names = get(section, 'names')
order = get(section, 'order')
active = get(section, 'active')
dlg = self.dialog_layout_settings(self, names, order, active)
if dlg.exec_():
set_(section, 'names', dlg.names)
set_(section, 'order', dlg.order)
set_(section, 'active', dlg.active)
self.quick_layout_set_menu()
def quick_layout_switch(self, index):
"""Switch to quick layout number *index*"""
section = 'quick_layouts'
try:
settings = self.load_window_settings('layout_{}/'.format(index),
section=section)
(hexstate, window_size, prefs_dialog_size, pos, is_maximized,
is_fullscreen) = settings
# The default layouts will always be regenerated unless there was
# an overwrite, either by rewriting with the same name, or by
# deleting and then creating a new one
if hexstate is None:
# The value for hexstate shouldn't be None for a custom saved
# layout (i.e., where the index is greater than the number of
# defaults). See issue 6202.
if index != 'default' and index >= self.DEFAULT_LAYOUTS:
QMessageBox.critical(
self, _("Warning"),
_("Error opening the custom layout. Please close"
" Spyder and try again. If the issue persists,"
" then you must use 'Reset to Spyder default' "
"from the layout menu."))
return
self.setup_default_layouts(index, settings)
except cp.NoOptionError:
QMessageBox.critical(self, _("Warning"),
_("Quick switch layout #%s has not yet "
"been defined.") % str(index))
return
# TODO: is there any real use in calling the previous layout
# setting?
# self.previous_layout_settings = self.get_window_settings()
self.set_window_settings(*settings)
self.current_quick_layout = index
# make sure the flags are correctly set for visible panes
for plugin in self.widgetlist:
action = plugin.toggle_view_action
action.setChecked(plugin.dockwidget.isVisible())
# --- Show/Hide toolbars
def _update_show_toolbars_action(self):
"""Update the text displayed in the menu entry."""
if self.toolbars_visible:
text = _("Hide toolbars")
tip = _("Hide toolbars")
else:
text = _("Show toolbars")
tip = _("Show toolbars")
self.show_toolbars_action.setText(text)
self.show_toolbars_action.setToolTip(tip)
def save_visible_toolbars(self):
"""Saves the name of the visible toolbars in the .ini file."""
toolbars = []
for toolbar in self.visible_toolbars:
toolbars.append(toolbar.objectName())
CONF.set('main', 'last_visible_toolbars', toolbars)
def get_visible_toolbars(self):
"""Collects the visible toolbars."""
toolbars = []
for toolbar in self.toolbarslist:
if toolbar.toggleViewAction().isChecked():
toolbars.append(toolbar)
self.visible_toolbars = toolbars
def load_last_visible_toolbars(self):
"""Loads the last visible toolbars from the .ini file."""
toolbars_names = CONF.get('main', 'last_visible_toolbars', default=[])
if toolbars_names:
dic = {}
for toolbar in self.toolbarslist:
dic[toolbar.objectName()] = toolbar
toolbars = []
for name in toolbars_names:
if name in dic:
toolbars.append(dic[name])
self.visible_toolbars = toolbars
else:
self.get_visible_toolbars()
self._update_show_toolbars_action()
@Slot()
def show_toolbars(self):
"""Show/Hides toolbars."""
value = not self.toolbars_visible
CONF.set('main', 'toolbars_visible', value)
if value:
self.save_visible_toolbars()
else:
self.get_visible_toolbars()
for toolbar in self.visible_toolbars:
toolbar.toggleViewAction().setChecked(value)
toolbar.setVisible(value)
self.toolbars_visible = value
self._update_show_toolbars_action()
# --- Other
def valid_project(self):
"""Handle an invalid active project."""
try:
path = self.projects.get_active_project_path()
except AttributeError:
return
if bool(path):
if not self.projects.is_valid_project(path):
QMessageBox.critical(
self,
_('Error'),
_("<b>{}</b> is no longer a valid Spyder project! "
"Since it is the current active project, it will "
"be closed automatically.").format(path))
self.projects.close_project()
def free_memory(self):
"""Free memory after event."""
gc.collect()
def plugin_focus_changed(self):
"""Focus has changed from one plugin to another"""
self.update_edit_menu()
self.update_search_menu()
def show_shortcuts(self, menu):
"""Show action shortcuts in menu"""
for element in getattr(self, menu + '_menu_actions'):
if element and isinstance(element, QAction):
if element._shown_shortcut is not None:
element.setShortcut(element._shown_shortcut)
def hide_shortcuts(self, menu):
"""Hide action shortcuts in menu"""
for element in getattr(self, menu + '_menu_actions'):
if element and isinstance(element, QAction):
if element._shown_shortcut is not None:
element.setShortcut(QKeySequence())
def get_focus_widget_properties(self):
"""Get properties of focus widget
Returns tuple (widget, properties) where properties is a tuple of
booleans: (is_console, not_readonly, readwrite_editor)"""
widget = QApplication.focusWidget()
from spyder.plugins.console.widgets.shell import ShellBaseWidget
from spyder.plugins.editor.widgets.editor import TextEditBaseWidget
from spyder.plugins.ipythonconsole.widgets import ControlWidget
# if focused widget isn't valid try the last focused
if not isinstance(widget, (ShellBaseWidget, TextEditBaseWidget,
ControlWidget)):
widget = self.previous_focused_widget
textedit_properties = None
if isinstance(widget, (ShellBaseWidget, TextEditBaseWidget,
ControlWidget)):
console = isinstance(widget, (ShellBaseWidget, ControlWidget))
not_readonly = not widget.isReadOnly()
readwrite_editor = not_readonly and not console
textedit_properties = (console, not_readonly, readwrite_editor)
return widget, textedit_properties
def update_edit_menu(self):
"""Update edit menu"""
widget, textedit_properties = self.get_focus_widget_properties()
if textedit_properties is None: # widget is not an editor/console
return
#!!! Below this line, widget is expected to be a QPlainTextEdit instance
console, not_readonly, readwrite_editor = textedit_properties
# Editor has focus and there is no file opened in it
if not console and not_readonly and not self.editor.is_file_opened():
return
# Disabling all actions to begin with
for child in self.edit_menu.actions():
child.setEnabled(False)
self.selectall_action.setEnabled(True)
# Undo, redo
self.undo_action.setEnabled(readwrite_editor and
widget.document().isUndoAvailable())
self.redo_action.setEnabled(readwrite_editor and
widget.document().isRedoAvailable())
# Copy, cut, paste, delete
has_selection = widget.has_selected_text()
self.copy_action.setEnabled(has_selection)
self.cut_action.setEnabled(has_selection and not_readonly)
self.paste_action.setEnabled(not_readonly)
# Comment, uncomment, indent, unindent...
if not console and not_readonly:
# This is the editor and current file is writable
for action in self.editor.edit_menu_actions:
action.setEnabled(True)
def update_search_menu(self):
"""Update search menu"""
if self.menuBar().hasFocus():
return
widget, textedit_properties = self.get_focus_widget_properties()
for action in self.editor.search_menu_actions:
try:
action.setEnabled(self.editor.isAncestorOf(widget))
except RuntimeError:
pass
if textedit_properties is None: # widget is not an editor/console
return
#!!! Below this line, widget is expected to be a QPlainTextEdit instance
_x, _y, readwrite_editor = textedit_properties
# Disable the replace action for read-only files
self.search_menu_actions[3].setEnabled(readwrite_editor)
def create_plugins_menu(self):
order = ['editor', 'console', 'ipython_console', 'variable_explorer',
'help', None, 'explorer', 'outline_explorer',
'project_explorer', 'find_in_files', None, 'historylog',
'profiler', 'breakpoints', 'pylint', None,
'onlinehelp', 'internal_console']
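# Replace each name in `order` by its plugin's toggle action, keeping
# the predefined position; plugins that are not loaded stay as
# strings and are filtered out below.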
for plugin in self.widgetlist:
action = plugin.toggle_view_action
action.setChecked(plugin.dockwidget.isVisible())
try:
name = plugin.CONF_SECTION
pos = order.index(name)
except ValueError:
pos = None
if pos is not None:
order[pos] = action
else:
order.append(action)
actions = order[:]
for action in order:
if type(action) is str:
actions.remove(action)
self.plugins_menu_actions = actions
add_actions(self.plugins_menu, actions)
def create_toolbars_menu(self):
order = ['file_toolbar', 'run_toolbar', 'debug_toolbar',
'main_toolbar', 'Global working directory', None,
'search_toolbar', 'edit_toolbar', 'source_toolbar']
for toolbar in self.toolbarslist:
action = toolbar.toggleViewAction()
name = toolbar.objectName()
try:
pos = order.index(name)
except ValueError:
pos = None
if pos is not None:
order[pos] = action
else:
order.append(action)
add_actions(self.toolbars_menu, order)
def createPopupMenu(self):
menu = QMenu('', self)
actions = self.help_menu_actions[:3] + \
[None, self.help_menu_actions[-1]]
add_actions(menu, actions)
return menu
def set_splash(self, message):
"""Set splash message"""
if self.splash is None:
return
if message:
self.debug_print(message)
self.splash.show()
self.splash.showMessage(message, Qt.AlignBottom | Qt.AlignCenter |
Qt.AlignAbsolute, QColor(Qt.white))
QApplication.processEvents()
def remove_tmpdir(self):
"""Remove Spyder temporary directory"""
if CONF.get('main', 'single_instance') and not self.new_instance:
shutil.rmtree(programs.TEMPDIR, ignore_errors=True)
def closeEvent(self, event):
"""closeEvent reimplementation"""
if self.closing(True):
event.accept()
else:
event.ignore()
def resizeEvent(self, event):
"""Reimplement Qt method"""
if not self.isMaximized() and not self.fullscreen_flag:
self.window_size = self.size()
QMainWindow.resizeEvent(self, event)
# To be used by the tour to be able to resize
self.sig_resized.emit(event)
def moveEvent(self, event):
"""Reimplement Qt method"""
if not self.isMaximized() and not self.fullscreen_flag:
self.window_position = self.pos()
QMainWindow.moveEvent(self, event)
# To be used by the tour to be able to move
self.sig_moved.emit(event)
def hideEvent(self, event):
"""Reimplement Qt method"""
try:
for plugin in self.widgetlist:
if plugin.isAncestorOf(self.last_focused_widget):
plugin.visibility_changed(True)
QMainWindow.hideEvent(self, event)
except RuntimeError:
QMainWindow.hideEvent(self, event)
def change_last_focused_widget(self, old, now):
"""To keep track of to the last focused widget"""
if (now is None and QApplication.activeWindow() is not None):
QApplication.activeWindow().setFocus()
self.last_focused_widget = QApplication.focusWidget()
elif now is not None:
self.last_focused_widget = now
self.previous_focused_widget = old
def closing(self, cancelable=False):
"""Exit tasks"""
if self.already_closed or self.is_starting_up:
return True
if cancelable and CONF.get('main', 'prompt_on_exit'):
reply = QMessageBox.critical(self, 'Spyder',
'Do you really want to exit?',
QMessageBox.Yes, QMessageBox.No)
if reply == QMessageBox.No:
return False
prefix = 'window' + '/'
self.save_current_window_settings(prefix)
if CONF.get('main', 'single_instance') and self.open_files_server:
self.open_files_server.close()
for plugin in self.thirdparty_plugins:
if not plugin.closing_plugin(cancelable):
return False
for widget in self.widgetlist:
if not widget.closing_plugin(cancelable):
return False
self.dialog_manager.close_all()
if self.toolbars_visible:
self.save_visible_toolbars()
self.lspmanager.closing_plugin(cancelable)
self.already_closed = True
return True
def add_dockwidget(self, child):
"""Add QDockWidget and toggleViewAction"""
dockwidget, location = child.create_dockwidget()
if CONF.get('main', 'vertical_dockwidget_titlebars'):
dockwidget.setFeatures(dockwidget.features()|
QDockWidget.DockWidgetVerticalTitleBar)
self.addDockWidget(location, dockwidget)
self.widgetlist.append(child)
@Slot()
def close_current_dockwidget(self):
widget = QApplication.focusWidget()
for plugin in self.widgetlist:
if plugin.isAncestorOf(widget):
plugin.dockwidget.hide()
break
def toggle_lock_dockwidgets(self, value):
"""Lock/Unlock dockwidgets"""
self.dockwidgets_locked = value
self.apply_panes_settings()
CONF.set('main', 'panes_locked', value)
def __update_maximize_action(self):
if self.state_before_maximizing is None:
text = _("Maximize current pane")
tip = _("Maximize current pane")
icon = ima.icon('maximize')
else:
text = _("Restore current pane")
tip = _("Restore pane to its original size")
icon = ima.icon('unmaximize')
self.maximize_action.setText(text)
self.maximize_action.setIcon(icon)
self.maximize_action.setToolTip(tip)
@Slot()
@Slot(bool)
def maximize_dockwidget(self, restore=False):
"""Shortcut: Ctrl+Alt+Shift+M
First call: maximize current dockwidget
Second call (or restore=True): restore original window layout"""
if self.state_before_maximizing is None:
if restore:
return
# Select plugin to maximize
self.state_before_maximizing = self.saveState()
focus_widget = QApplication.focusWidget()
for plugin in self.widgetlist:
plugin.dockwidget.hide()
if plugin.isAncestorOf(focus_widget):
self.last_plugin = plugin
# Only plugins that have a dockwidget are part of widgetlist,
# so last_plugin can be None after the above for loop.
# For example, this happens if, after Spyder has started, focus
# is set to the Working directory toolbar (which doesn't have
# a dockwidget) and then you press the Maximize button
if self.last_plugin is None:
# Using the Editor as default plugin to maximize
self.last_plugin = self.editor
# Maximize last_plugin
self.last_plugin.dockwidget.toggleViewAction().setDisabled(True)
self.setCentralWidget(self.last_plugin)
self.last_plugin.ismaximized = True
# Workaround to solve an issue with editor's outline explorer:
# (otherwise the whole plugin is hidden and so is the outline explorer
# and the latter won't be refreshed if not visible)
self.last_plugin.show()
self.last_plugin.visibility_changed(True)
if self.last_plugin is self.editor:
# Automatically show the outline if the editor was maximized:
self.addDockWidget(Qt.RightDockWidgetArea,
self.outlineexplorer.dockwidget)
self.outlineexplorer.dockwidget.show()
else:
# Restore original layout (before maximizing current dockwidget)
self.last_plugin.dockwidget.setWidget(self.last_plugin)
self.last_plugin.dockwidget.toggleViewAction().setEnabled(True)
self.setCentralWidget(None)
self.last_plugin.ismaximized = False
self.restoreState(self.state_before_maximizing)
self.state_before_maximizing = None
self.last_plugin.get_focus_widget().setFocus()
self.__update_maximize_action()
def __update_fullscreen_action(self):
if self.fullscreen_flag:
icon = ima.icon('window_nofullscreen')
else:
icon = ima.icon('window_fullscreen')
if is_text_string(icon):
icon = get_icon(icon)
self.fullscreen_action.setIcon(icon)
@Slot()
def toggle_fullscreen(self):
if self.fullscreen_flag:
self.fullscreen_flag = False
if os.name == 'nt':
self.setWindowFlags(
self.windowFlags()
^ (Qt.FramelessWindowHint | Qt.WindowStaysOnTopHint))
self.setGeometry(self.saved_normal_geometry)
self.showNormal()
if self.maximized_flag:
self.showMaximized()
else:
self.maximized_flag = self.isMaximized()
self.fullscreen_flag = True
self.saved_normal_geometry = self.normalGeometry()
if os.name == 'nt':
# Due to limitations of the Windows DWM, compositing is not
# handled correctly for OpenGL based windows when going into
# full screen mode, so we need to use this workaround.
# See Issue #4291.
self.setWindowFlags(self.windowFlags()
| Qt.FramelessWindowHint
| Qt.WindowStaysOnTopHint)
r = QApplication.desktop().screenGeometry()
self.setGeometry(
r.left() - 1, r.top() - 1, r.width() + 2, r.height() + 2)
self.showNormal()
else:
self.showFullScreen()
self.__update_fullscreen_action()
def add_to_toolbar(self, toolbar, widget):
"""Add widget actions to toolbar"""
actions = widget.toolbar_actions
if actions is not None:
add_actions(toolbar, actions)
@Slot()
def about(self):
"""About Spyder"""
versions = get_versions()
# Show git revision for development version
revlink = ''
if versions['revision']:
rev = versions['revision']
revlink = " (<a href='https://github.com/spyder-ide/spyder/"\
"commit/%s'>Commit: %s</a>)" % (rev, rev)
msgBox = QMessageBox(self)
msgBox.setText(
"""<b>Spyder %s</b> %s
<br>The Scientific Python Development Environment
<br>Copyright © The Spyder Project Contributors
<br>Licensed under the terms of the MIT License
<p>Created by Pierre Raybaut.
<br>Developed and maintained by the
<a href="%s/blob/master/AUTHORS">Spyder Project Contributors</a>.
<br>Many thanks to all the Spyder beta testers and regular users.
<p>For help with Spyder errors and crashes, please read our
<a href="%s">Troubleshooting page</a>, and for bug reports and
feature requests, visit our <a href="%s">GitHub website</a>.
For project discussion, see our <a href="%s">Google Group</a>.
<p>This project is part of a larger effort to promote and
facilitate the use of Python for scientific and engineering
software development. The popular Python distributions
<a href="https://www.anaconda.com/download/">Anaconda</a> and
<a href="https://winpython.github.io/">WinPython</a>
also contribute to this plan.
<p>Python %s %dbits, Qt %s, %s %s on %s
<p><small>Most of the icons for the Spyder 2 theme come from the
Crystal Project (© 2006-2007 Everaldo Coelho).
Other icons for that theme come from
<a href="http://p.yusukekamiyamane.com/">
Yusuke Kamiyamane</a> (all rights reserved) and from
<a href="http://www.oxygen-icons.org/">
The Oxygen icon theme</a></small>.
"""
% (versions['spyder'], revlink, __project_url__, __trouble_url__,
__project_url__, __forum_url__, versions['python'],
versions['bitness'], versions['qt'], versions['qt_api'],
versions['qt_api_ver'], versions['system'])
)
msgBox.setWindowTitle(_("About %s") % "Spyder")
msgBox.setStandardButtons(QMessageBox.Ok)
msgBox.setIconPixmap(APP_ICON.pixmap(QSize(64, 64)))
msgBox.setTextInteractionFlags(Qt.TextSelectableByMouse)
msgBox.exec_()
@Slot()
def show_dependencies(self):
"""Show Spyder's Dependencies dialog box"""
from spyder.widgets.dependencies import DependenciesDialog
dlg = DependenciesDialog(None)
dlg.set_data(dependencies.DEPENDENCIES)
dlg.exec_()
def render_issue(self, description='', traceback=''):
"""Render issue before sending it to Github"""
# Get component versions
versions = get_versions()
# Get git revision for development version
revision = ''
if versions['revision']:
revision = versions['revision']
# Make a description header in case no description is supplied
if not description:
description = "### What steps reproduce the problem?"
# Make error section from traceback and add appropriate reminder header
if traceback:
error_section = ("### Traceback\n"
"```python-traceback\n"
"{}\n"
"```".format(traceback))
else:
error_section = ''
issue_template = """\
## Description
{description}
{error_section}
## Versions
* Spyder version: {spyder_version} {commit}
* Python version: {python_version}
* Qt version: {qt_version}
* {qt_api_name} version: {qt_api_version}
* Operating System: {os_name} {os_version}
### Dependencies
```
{dependencies}
```
""".format(description=description,
error_section=error_section,
spyder_version=versions['spyder'],
commit=revision,
python_version=versions['python'],
qt_version=versions['qt'],
qt_api_name=versions['qt_api'],
qt_api_version=versions['qt_api_ver'],
os_name=versions['system'],
os_version=versions['release'],
dependencies=dependencies.status())
return issue_template
@Slot()
def report_issue(self, body=None, title=None, open_webpage=False):
"""Report a Spyder issue to github, generating body text if needed."""
if body is None:
from spyder.widgets.reporterror import SpyderErrorDialog
report_dlg = SpyderErrorDialog(self, is_report=True)
report_dlg.show()
else:
if open_webpage:
if PY3:
from urllib.parse import quote
else:
from urllib import quote # analysis:ignore
from qtpy.QtCore import QUrlQuery
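# Pre-fill GitHub's "new issue" form by passing the rendered body
# (and optional title) as URL query parameters.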
url = QUrl(__project_url__ + '/issues/new')
query = QUrlQuery()
query.addQueryItem("body", quote(body))
if title:
query.addQueryItem("title", quote(title))
url.setQuery(query)
QDesktopServices.openUrl(url)
@Slot()
def trouble_guide(self):
"""Open Spyder troubleshooting guide in a web browser."""
url = QUrl(__trouble_url__)
QDesktopServices.openUrl(url)
@Slot()
def google_group(self):
"""Open Spyder Google Group in a web browser."""
url = QUrl(__forum_url__)
QDesktopServices.openUrl(url)
@Slot()
def global_callback(self):
"""Global callback"""
widget = QApplication.focusWidget()
action = self.sender()
callback = from_qvariant(action.data(), to_text_string)
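# The triggering action stores the name of the text-widget method to
# run (e.g. 'copy' or 'cut'); look it up on the focused (or last
# focused) editor/console widget and call it.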
from spyder.plugins.editor.widgets.editor import TextEditBaseWidget
# If focused widget isn't valid try the last focused
if not isinstance(widget, TextEditBaseWidget):
widget = self.previous_focused_widget
if isinstance(widget, TextEditBaseWidget):
getattr(widget, callback)()
def redirect_internalshell_stdio(self, state):
if state:
self.console.shell.interpreter.redirect_stds()
else:
self.console.shell.interpreter.restore_stds()
def open_external_console(self, fname, wdir, args, interact, debug, python,
python_args, systerm, post_mortem=False):
"""Open external console"""
if systerm:
# Running script in an external system terminal
try:
if CONF.get('main_interpreter', 'default'):
executable = get_python_executable()
else:
executable = CONF.get('main_interpreter', 'executable')
programs.run_python_script_in_terminal(
fname, wdir, args, interact, debug, python_args,
executable)
except NotImplementedError:
QMessageBox.critical(self, _("Run"),
_("Running an external system terminal "
"is not supported on platform %s."
) % os.name)
def execute_in_external_console(self, lines, focus_to_editor):
"""
Execute lines in the IPython console and optionally set focus
to the Editor.
"""
console = self.ipyconsole
console.visibility_changed(True)
console.raise_()
console.execute_code(lines)
if focus_to_editor:
self.editor.visibility_changed(True)
def open_file(self, fname, external=False):
"""
Open filename with the appropriate application
Redirect to the right widget (txt -> editor, spydata -> workspace, ...)
or open file outside Spyder (if extension is not supported)
"""
fname = to_text_string(fname)
ext = osp.splitext(fname)[1]
if encoding.is_text_file(fname):
self.editor.load(fname)
elif self.variableexplorer is not None and ext in IMPORT_EXT:
self.variableexplorer.import_data(fname)
elif not external:
fname = file_uri(fname)
programs.start_file(fname)
def open_external_file(self, fname):
"""
Open external files that can be handled either by the Editor or the
variable explorer inside Spyder.
"""
fname = encoding.to_unicode_from_fs(fname)
if osp.isfile(fname):
self.open_file(fname, external=True)
elif osp.isfile(osp.join(CWD, fname)):
self.open_file(osp.join(CWD, fname), external=True)
# ---- PYTHONPATH management, etc.
def get_spyder_pythonpath(self):
"""Return Spyder PYTHONPATH"""
active_path = [p for p in self.path if p not in self.not_active_path]
return active_path + self.project_path
def add_path_to_sys_path(self):
"""Add Spyder path to sys.path"""
for path in reversed(self.get_spyder_pythonpath()):
sys.path.insert(1, path)
def remove_path_from_sys_path(self):
"""Remove Spyder path from sys.path"""
for path in self.path + self.project_path:
while path in sys.path:
sys.path.remove(path)
@Slot()
def path_manager_callback(self):
"""Spyder path manager"""
from spyder.widgets.pathmanager import PathManager
self.remove_path_from_sys_path()
project_path = self.projects.get_pythonpath()
dialog = PathManager(self, self.path, project_path,
self.not_active_path, sync=True)
dialog.redirect_stdio.connect(self.redirect_internalshell_stdio)
dialog.exec_()
self.add_path_to_sys_path()
try:
encoding.writelines(self.path, self.SPYDER_PATH) # Saving path
encoding.writelines(self.not_active_path,
self.SPYDER_NOT_ACTIVE_PATH)
except EnvironmentError:
pass
self.sig_pythonpath_changed.emit()
def pythonpath_changed(self):
"""Projects PYTHONPATH contribution has changed"""
self.remove_path_from_sys_path()
self.project_path = self.projects.get_pythonpath()
self.add_path_to_sys_path()
self.sig_pythonpath_changed.emit()
@Slot()
def win_env(self):
"""Show Windows current user environment variables"""
self.dialog_manager.show(WinUserEnvDialog(self))
#---- Preferences
def apply_settings(self):
"""Apply settings changed in 'Preferences' dialog box"""
qapp = QApplication.instance()
# Set 'gtk+' as the default theme in Gtk-based desktops
# Fixes Issue 2036
if is_gtk_desktop() and ('GTK+' in QStyleFactory.keys()):
try:
qapp.setStyle('gtk+')
except:
pass
else:
style_name = CONF.get('main', 'windows_style',
self.default_style)
style = QStyleFactory.create(style_name)
if style is not None:
style.setProperty('name', style_name)
qapp.setStyle(style)
default = self.DOCKOPTIONS
if CONF.get('main', 'vertical_tabs'):
default = default|QMainWindow.VerticalTabs
if CONF.get('main', 'animated_docks'):
default = default|QMainWindow.AnimatedDocks
self.setDockOptions(default)
self.apply_panes_settings()
self.apply_statusbar_settings()
if CONF.get('main', 'use_custom_cursor_blinking'):
qapp.setCursorFlashTime(CONF.get('main', 'custom_cursor_blinking'))
else:
qapp.setCursorFlashTime(self.CURSORBLINK_OSDEFAULT)
def apply_panes_settings(self):
"""Update dockwidgets features settings"""
# Update toggle action on menu
for child in self.widgetlist:
features = child.FEATURES
if CONF.get('main', 'vertical_dockwidget_titlebars'):
features = features | QDockWidget.DockWidgetVerticalTitleBar
if not self.dockwidgets_locked:
features = features | QDockWidget.DockWidgetMovable
child.dockwidget.setFeatures(features)
child.update_margins()
def apply_statusbar_settings(self):
"""Update status bar widgets settings"""
show_status_bar = CONF.get('main', 'show_status_bar')
self.statusBar().setVisible(show_status_bar)
if show_status_bar:
for widget, name in ((self.mem_status, 'memory_usage'),
(self.cpu_status, 'cpu_usage')):
if widget is not None:
widget.setVisible(CONF.get('main', '%s/enable' % name))
widget.set_interval(CONF.get('main', '%s/timeout' % name))
else:
return
@Slot()
def edit_preferences(self):
"""Edit Spyder preferences"""
from spyder.preferences.configdialog import ConfigDialog
dlg = ConfigDialog(self)
dlg.size_change.connect(self.set_prefs_size)
if self.prefs_dialog_size is not None:
dlg.resize(self.prefs_dialog_size)
for PrefPageClass in self.general_prefs:
widget = PrefPageClass(dlg, main=self)
widget.initialize()
dlg.add_page(widget)
for plugin in [self.workingdirectory, self.lspmanager, self.editor,
self.projects, self.ipyconsole,
self.historylog, self.help, self.variableexplorer,
self.onlinehelp, self.explorer, self.findinfiles
]+self.thirdparty_plugins:
if plugin is not None:
try:
widget = plugin.create_configwidget(dlg)
if widget is not None:
dlg.add_page(widget)
except Exception:
traceback.print_exc(file=sys.stderr)
if self.prefs_index is not None:
dlg.set_current_index(self.prefs_index)
dlg.show()
dlg.check_all_settings()
dlg.pages_widget.currentChanged.connect(self.__preference_page_changed)
dlg.exec_()
def __preference_page_changed(self, index):
"""Preference page index has changed"""
self.prefs_index = index
def set_prefs_size(self, size):
"""Save preferences dialog size"""
self.prefs_dialog_size = size
#---- Shortcuts
def register_shortcut(self, qaction_or_qshortcut, context, name,
add_sc_to_tip=False):
"""
Register QAction or QShortcut to Spyder main application,
with shortcut (context, name, default)
"""
self.shortcut_data.append((qaction_or_qshortcut, context,
name, add_sc_to_tip))
def apply_shortcuts(self):
"""Apply shortcuts settings to all widgets/plugins"""
toberemoved = []
for index, (qobject, context, name,
add_sc_to_tip) in enumerate(self.shortcut_data):
keyseq = QKeySequence(get_shortcut(context, name))
try:
if isinstance(qobject, QAction):
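# On macOS, shortcuts are shown only while their menu is open (see
# show_shortcuts/hide_shortcuts); the 'missing' sentinel means the
# key sequence must be stored on the action for later instead of
# being set globally.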
if sys.platform == 'darwin' and \
qobject._shown_shortcut == 'missing':
qobject._shown_shortcut = keyseq
else:
qobject.setShortcut(keyseq)
if add_sc_to_tip:
add_shortcut_to_tooltip(qobject, context, name)
elif isinstance(qobject, QShortcut):
qobject.setKey(keyseq)
except RuntimeError:
# Object has been deleted
toberemoved.append(index)
for index in sorted(toberemoved, reverse=True):
self.shortcut_data.pop(index)
@Slot()
def show_shortcuts_dialog(self):
from spyder.widgets.shortcutssummary import ShortcutsSummaryDialog
dlg = ShortcutsSummaryDialog(None)
dlg.exec_()
# -- Open files server
def start_open_files_server(self):
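"""Start the server used to maintain a single Spyder instance and
receive the file names that new instances ask this one to open."""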
self.open_files_server.setsockopt(socket.SOL_SOCKET,
socket.SO_REUSEADDR, 1)
port = select_port(default_port=OPEN_FILES_PORT)
CONF.set('main', 'open_files_port', port)
self.open_files_server.bind(('127.0.0.1', port))
self.open_files_server.listen(20)
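# Protocol: each client connection sends one UTF-8 encoded file
# name; the server emits sig_open_external_file and replies with a
# single-byte acknowledgement.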
while 1: # 1 is faster than True
try:
req, dummy = self.open_files_server.accept()
except socket.error as e:
# See Issue 1275 for details on why errno EINTR is
# silently ignored here.
eintr = errno.WSAEINTR if os.name == 'nt' else errno.EINTR
# To avoid a traceback after closing on Windows
if e.args[0] == eintr:
continue
# handle a connection abort on close error
enotsock = (errno.WSAENOTSOCK if os.name == 'nt'
else errno.ENOTSOCK)
if e.args[0] in [errno.ECONNABORTED, enotsock]:
return
raise
fname = req.recv(1024)
fname = fname.decode('utf-8')
self.sig_open_external_file.emit(fname)
req.sendall(b' ')
# ---- Quit and restart, and reset spyder defaults
@Slot()
def reset_spyder(self):
"""
Quit and reset Spyder, then restart the application.
"""
answer = QMessageBox.warning(self, _("Warning"),
_("Spyder will restart and reset to default settings: <br><br>"
"Do you want to continue?"),
QMessageBox.Yes | QMessageBox.No)
if answer == QMessageBox.Yes:
self.restart(reset=True)
@Slot()
def restart(self, reset=False):
"""
Quit and restart the Spyder application.
If reset is True, Spyder is reset to its default settings on restart.
"""
# Get start path to use in restart script
spyder_start_directory = get_module_path('spyder')
restart_script = osp.join(spyder_start_directory, 'app', 'restart.py')
# Get any initial argument passed when spyder was started
# Note: Variables defined in bootstrap.py and spyder/app/start.py
env = os.environ.copy()
bootstrap_args = env.pop('SPYDER_BOOTSTRAP_ARGS', None)
spyder_args = env.pop('SPYDER_ARGS')
# Get current process and python running spyder
pid = os.getpid()
python = sys.executable
# Check if started with bootstrap.py
if bootstrap_args is not None:
spyder_args = bootstrap_args
is_bootstrap = True
else:
is_bootstrap = False
# Pass variables as environment variables (str) to restarter subprocess
env['SPYDER_ARGS'] = spyder_args
env['SPYDER_PID'] = str(pid)
env['SPYDER_IS_BOOTSTRAP'] = str(is_bootstrap)
env['SPYDER_RESET'] = str(reset)
if DEV:
if os.name == 'nt':
env['PYTHONPATH'] = ';'.join(sys.path)
else:
env['PYTHONPATH'] = ':'.join(sys.path)
# Build the command and popen arguments depending on the OS
if os.name == 'nt':
# Hide flashing command prompt
startupinfo = subprocess.STARTUPINFO()
startupinfo.dwFlags |= subprocess.STARTF_USESHOWWINDOW
shell = False
else:
startupinfo = None
shell = True
command = '"{0}" "{1}"'
command = command.format(python, restart_script)
try:
if self.closing(True):
subprocess.Popen(command, shell=shell, env=env,
startupinfo=startupinfo)
self.console.quit()
except Exception as error:
# If there is an error with subprocess, Spyder should not quit and
# the error can be inspected in the internal console
print(error) # spyder: test-skip
print(command) # spyder: test-skip
# ---- Interactive Tours
    def show_tour(self, index):
        """Show the interactive tour at the given index."""
frames = self.tours_available[index]
self.tour.set_tour(index, frames, self)
self.tour.start_tour()
# ---- Global File Switcher
def open_fileswitcher(self, symbol=False):
"""Open file list management dialog box."""
if self.fileswitcher is not None and \
self.fileswitcher.is_visible:
self.fileswitcher.hide()
self.fileswitcher.is_visible = False
return
if symbol:
self.fileswitcher.plugin = self.editor
self.fileswitcher.set_search_text('@')
else:
self.fileswitcher.set_search_text('')
self.fileswitcher.show()
self.fileswitcher.is_visible = True
def open_symbolfinder(self):
"""Open symbol list management dialog box."""
self.open_fileswitcher(symbol=True)
def add_to_fileswitcher(self, plugin, tabs, data, icon):
"""Add a plugin to the File Switcher."""
if self.fileswitcher is None:
self.fileswitcher = FileSwitcher(self, plugin, tabs, data, icon)
else:
self.fileswitcher.add_plugin(plugin, tabs, data, icon)
self.fileswitcher.sig_goto_file.connect(
plugin.get_current_tab_manager().set_stack_index)
# ---- Check for Spyder Updates
def _check_updates_ready(self):
"""Called by WorkerUpdates when ready"""
from spyder.widgets.helperwidgets import MessageCheckBox
        # `feedback` = False is used on startup, so only positive feedback is
        # given. `feedback` = True is used after startup (when using the menu
        # action), and gives feedback whether updates are found or not.
feedback = self.give_updates_feedback
# Get results from worker
update_available = self.worker_updates.update_available
latest_release = self.worker_updates.latest_release
error_msg = self.worker_updates.error
url_r = __project_url__ + '/releases'
url_i = 'https://docs.spyder-ide.org/installation.html'
# Define the custom QMessageBox
box = MessageCheckBox(icon=QMessageBox.Information,
parent=self)
box.setWindowTitle(_("Spyder updates"))
box.set_checkbox_text(_("Check for updates on startup"))
box.setStandardButtons(QMessageBox.Ok)
box.setDefaultButton(QMessageBox.Ok)
# Adjust the checkbox depending on the stored configuration
section, option = 'main', 'check_updates_on_startup'
check_updates = CONF.get(section, option)
box.set_checked(check_updates)
if error_msg is not None:
msg = error_msg
box.setText(msg)
box.set_check_visible(False)
box.exec_()
check_updates = box.is_checked()
else:
if update_available:
anaconda_msg = ''
if 'Anaconda' in sys.version or 'conda-forge' in sys.version:
anaconda_msg = _("<hr><b>IMPORTANT NOTE:</b> It seems "
"that you are using Spyder with "
"<b>Anaconda/Miniconda</b>. Please "
"<b>don't</b> use <code>pip</code> to "
"update it as that will probably break "
"your installation.<br><br>"
"Instead, please wait until new conda "
"packages are available and use "
"<code>conda</code> to perform the "
"update.<hr>")
                msg = _("<b>Spyder %s is available!</b> <br><br>Please use "
                        "your package manager to update Spyder or go to our "
                        "<a href=\"%s\">Releases</a> page to download this "
                        "new version. <br><br>If you are not sure how to "
                        "proceed to update Spyder, please refer to our "
                        "<a href=\"%s\">Installation</a> instructions."
                        "") % (latest_release, url_r, url_i)
msg += '<br>' + anaconda_msg
box.setText(msg)
box.set_check_visible(True)
box.exec_()
check_updates = box.is_checked()
elif feedback:
msg = _("Spyder is up to date.")
box.setText(msg)
box.set_check_visible(False)
box.exec_()
check_updates = box.is_checked()
# Update checkbox based on user interaction
CONF.set(section, option, check_updates)
# Enable check_updates_action after the thread has finished
self.check_updates_action.setDisabled(False)
        # Provide feedback when clicking menu if check on startup is on
self.give_updates_feedback = True
@Slot()
def check_updates(self, startup=False):
"""
        Check for Spyder updates on GitHub releases using a QThread.
"""
from spyder.workers.updates import WorkerUpdates
# Disable check_updates_action while the thread is working
self.check_updates_action.setDisabled(True)
if self.thread_updates is not None:
self.thread_updates.terminate()
self.thread_updates = QThread(self)
self.worker_updates = WorkerUpdates(self, startup=startup)
self.worker_updates.sig_ready.connect(self._check_updates_ready)
self.worker_updates.sig_ready.connect(self.thread_updates.quit)
self.worker_updates.moveToThread(self.thread_updates)
self.thread_updates.started.connect(self.worker_updates.start)
self.thread_updates.start()
#==============================================================================
# Utilities to create the 'main' function
#==============================================================================
def initialize():
"""Initialize Qt, patching sys.exit and eventually setting up ETS"""
# This doesn't create our QApplication, just holds a reference to
# MAIN_APP, created above to show our splash screen as early as
# possible
app = qapplication()
# --- Set application icon
app.setWindowIcon(APP_ICON)
#----Monkey patching QApplication
class FakeQApplication(QApplication):
"""Spyder's fake QApplication"""
def __init__(self, args):
self = app # analysis:ignore
@staticmethod
def exec_():
"""Do nothing because the Qt mainloop is already running"""
pass
from qtpy import QtWidgets
QtWidgets.QApplication = FakeQApplication
# ----Monkey patching sys.exit
def fake_sys_exit(arg=[]):
pass
sys.exit = fake_sys_exit
# ----Monkey patching sys.excepthook to avoid crashes in PyQt 5.5+
if PYQT5:
def spy_excepthook(type_, value, tback):
sys.__excepthook__(type_, value, tback)
sys.excepthook = spy_excepthook
# Removing arguments from sys.argv as in standard Python interpreter
sys.argv = ['']
# Selecting Qt4 backend for Enthought Tool Suite (if installed)
try:
from enthought.etsconfig.api import ETSConfig
ETSConfig.toolkit = 'qt4'
except ImportError:
pass
return app
class Spy(object):
"""
Inspect Spyder internals
Attributes:
app Reference to main QApplication object
window Reference to spyder.MainWindow widget
"""
def __init__(self, app, window):
self.app = app
self.window = window
def __dir__(self):
return list(self.__dict__.keys()) +\
[x for x in dir(self.__class__) if x[0] != '_']
def versions(self):
return get_versions()
def run_spyder(app, options, args):
"""
Create and show Spyder's main window
Start QApplication event loop
"""
#TODO: insert here
# Main window
main = MainWindow(options)
try:
main.setup()
except BaseException:
if main.console is not None:
try:
main.console.shell.exit_interpreter()
except BaseException:
pass
raise
main.show()
main.post_visible_setup()
if main.console:
main.console.shell.interpreter.namespace['spy'] = \
Spy(app=app, window=main)
# Open external files passed as args
if args:
for a in args:
main.open_external_file(a)
# Don't show icons in menus for Mac
if sys.platform == 'darwin':
QCoreApplication.setAttribute(Qt.AA_DontShowIconsInMenus, True)
# Open external files with our Mac app
if running_in_mac_app():
app.sig_open_external_file.connect(main.open_external_file)
# To give focus again to the last focused widget after restoring
# the window
app.focusChanged.connect(main.change_last_focused_widget)
if not running_under_pytest():
app.exec_()
return main
#==============================================================================
# Main
#==============================================================================
def main():
"""Main function"""
if running_under_pytest():
try:
from unittest.mock import Mock
except ImportError:
from mock import Mock # Python 2
options = Mock()
options.working_directory = None
options.profile = False
options.multithreaded = False
options.new_instance = False
options.project = None
options.window_title = None
options.opengl_implementation = None
app = initialize()
window = run_spyder(app, options, None)
return window
# **** Collect command line options ****
# Note regarding Options:
# It's important to collect options before monkey patching sys.exit,
# otherwise, argparse won't be able to exit if --help option is passed
options, args = get_options()
if options.opengl_implementation:
if options.opengl_implementation == 'software':
QCoreApplication.setAttribute(Qt.AA_UseSoftwareOpenGL)
elif options.opengl_implementation == 'desktop':
QCoreApplication.setAttribute(Qt.AA_UseDesktopOpenGL)
else:
if CONF.get('main', 'opengl') == 'software':
QCoreApplication.setAttribute(Qt.AA_UseSoftwareOpenGL)
elif CONF.get('main', 'opengl') == 'desktop':
QCoreApplication.setAttribute(Qt.AA_UseDesktopOpenGL)
if options.show_console:
print("(Deprecated) --show console does nothing, now the default "
" behavior is to show the console, use --hide-console if you "
"want to hide it")
if set_attached_console_visible is not None:
set_attached_console_visible(not options.hide_console
or options.reset_config_files
or options.reset_to_defaults
or options.optimize or bool(DEBUG))
app = initialize()
if options.reset_config_files:
# <!> Remove all configuration files!
reset_config_files()
return
elif options.reset_to_defaults:
# Reset Spyder settings to defaults
CONF.reset_to_defaults(save=True)
return
elif options.optimize:
# Optimize the whole Spyder's source code directory
import spyder
programs.run_python_script(module="compileall",
args=[spyder.__path__[0]], p_args=['-O'])
return
# Show crash dialog
if CONF.get('main', 'crash', False) and not DEV:
CONF.set('main', 'crash', False)
if SPLASH is not None:
SPLASH.hide()
QMessageBox.information(
None, "Spyder",
"Spyder crashed during last session.<br><br>"
"If Spyder does not start at all and <u>before submitting a "
"bug report</u>, please try to reset settings to defaults by "
"running Spyder with the command line option '--reset':<br>"
"<span style=\'color: #555555\'><b>spyder --reset</b></span>"
"<br><br>"
"<span style=\'color: #ff5555\'><b>Warning:</b></span> "
"this command will remove all your Spyder configuration files "
"located in '%s').<br><br>"
"If Spyder still fails to launch, you should consult our "
"comprehensive <b><a href=\"%s\">Troubleshooting Guide</a></b>, "
"which when followed carefully solves the vast majority of "
"crashes; also, take "
"the time to search for <a href=\"%s\">known bugs</a> or "
"<a href=\"%s\">discussions</a> matching your situation before "
"submitting a report to our <a href=\"%s\">issue tracker</a>. "
"Your feedback will always be greatly appreciated."
"" % (get_conf_path(), __trouble_url__, __project_url__,
__forum_url__, __project_url__))
# Create main window
mainwindow = None
try:
mainwindow = run_spyder(app, options, args)
except FontError as fontError:
QMessageBox.information(None, "Spyder",
"Spyder was unable to load the <i>Spyder 3</i> "
"icon theme. That's why it's going to fallback to the "
"theme used in Spyder 2.<br><br>"
"For that, please close this window and start Spyder again.")
CONF.set('main', 'icon_theme', 'spyder 2')
except BaseException:
CONF.set('main', 'crash', True)
import traceback
traceback.print_exc(file=STDERR)
traceback.print_exc(file=open('spyder_crash.log', 'w'))
if mainwindow is None:
        # An exception occurred
if SPLASH is not None:
SPLASH.hide()
return
ORIGINAL_SYS_EXIT()
if __name__ == "__main__":
main()
|
pipe.py
|
from multiprocessing import Process, Queue
import time
import random

def xiaofei(q):
    # Consumer: keep taking items until the None sentinel arrives
    while 1:
        x = q.get()  # take an item from the queue
        time.sleep(random.randrange(0, 11, 2))
        if x:  # a real item: process it
            print("processing " + x)
        else:  # the sentinel (None): nothing left to consume, exit the loop
            break

def shengchan(q):
    # Producer: make ten items, then enqueue a sentinel
    for i in range(10):
        p = 'data' + str(i)
        time.sleep(random.randrange(0, 11, 2))
        q.put(p)
        print(p)
    q.put(None)  # after everything is produced, put None so the consumer knows to stop

if __name__ == '__main__':
    q = Queue()  # instantiate the queue (unbounded; no maxsize is set)
    s = Process(target=shengchan, args=(q,))  # start the producer process
    s.start()
    x = Process(target=xiaofei, args=(q,))  # start the consumer process
    x.start()
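    # A small addition: wait for both workers so the main process exits only
    # when they are done (non-daemon processes are also joined implicitly at
    # interpreter exit):
    s.join()
    x.join()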
|
core.py
|
#!/usr/bin/env python
import math
import cv2
import numpy as np
import rospy
from std_msgs.msg import String
from self_driving_turtlebot3.msg import Traffic_light
from sensor_msgs.msg import CompressedImage
from sensor_msgs.msg import Image
from std_msgs.msg import Float32MultiArray
from self_driving_turtlebot3.msg import Stop_bar
from sensor_msgs.msg import LaserScan
from cv_bridge import CvBridge, CvBridgeError
import os
import threading
def converting_to_maze_mode():
os.system("rosnode kill calibration_and_birdeyeview")
os.system("rosnode kill car_barrier_detection")
os.system("rosnode kill lane_follower")
os.system("rosnode kill parking")
os.system("rosnode kill traffic_light2")
os.system("rosnode kill usb_cam")
thread = threading.Thread(target=os.system, args=('roslaunch self_driving_turtlebot3 mode_maze.launch',))
thread.setDaemon(True)
thread.start()
def converting_to_tracer_mode():
os.system("rosnode kill maze_pathfinder")
os.system("rosnode kill turtlebot3_slam_gmapping")
thread = threading.Thread(target=os.system, args=('roslaunch self_driving_turtlebot3 mode_tracer.launch',))
thread.setDaemon(True)
thread.start()
class Core():
def __init__(self):
self.selecting_sub_image = "raw" # you can choose image type "compressed", "raw"
        self.image_show = 'no'  # 'no' or 'yes'
# subscribers
        if self.selecting_sub_image == "compressed":
            self._sub_1 = rospy.Subscriber('/image_calibrated_compressed', CompressedImage, self.monitoring, queue_size=1)
        else:
            self._sub_1 = rospy.Subscriber('/image_calibrated', Image, self.monitoring, queue_size=1)
        # Kept on a separate attribute so it does not overwrite the subscriber above
        self._sub_1b = rospy.Subscriber('/usb_cam/image_raw', Image, self.maze_check, queue_size=1)
self._sub_2 = rospy.Subscriber('/stop_bar', Stop_bar, self.receiver_stop_bar, queue_size=1)
self._sub_3 = rospy.Subscriber('/traffic_light', Traffic_light, self.receiver_traffic_light, queue_size=1)
self._sub_4 = rospy.Subscriber('/parking', String, self.receiver_parking, queue_size=1)
self._sub_5 = rospy.Subscriber('/scan', LaserScan, self.callback2, queue_size=1)
self._sub_6 = rospy.Subscriber('/maze', String, self.receiver_maze, queue_size=1)
self._sub_7 = rospy.Subscriber('/signal_sign', String, self.receiver_signal_sign, queue_size=1)
self._sub_8 = rospy.Subscriber('/objects', Float32MultiArray, self.receiver_object_find, queue_size=1)
self._pub_1 = rospy.Publisher('/command_lane_follower', String, queue_size=1)
self._pub_2 = rospy.Publisher('/command_maze', String, queue_size=1)
self._cv_bridge = CvBridge()
self.image = None
self.mode = 'lane_follower' # lane_follower, parking, maze_solver
self.state = 'go' # stop, go, slowdown : this is only used when lane_follower mode
self.state_maze = 'outside' # inside, outside
self.traffic_light_color = 'green'
self.traffic_light_detected = 'no'
self.traffic_light_x = 0
self.traffic_light_y = 0
self.stop_bar_state = 'go'
self.stop_bar_distance = None
self.stop_bar_detected = 'no'
self.stop_bar_point1_x = 0
self.stop_bar_point1_y = 0
self.stop_bar_point2_x = 0
self.stop_bar_point2_y = 0
self.parking = None
self.maze = None
self.signal_sign = None
self.find_object = None
self.wall_detected = None
self.count = 0
def maze_check(self, image_msg):
if self.selecting_sub_image == "compressed":
np_arr = np.fromstring(image_msg.data, np.uint8)
self.image = cv2.imdecode(np_arr, cv2.IMREAD_COLOR)
else:
self.image = self._cv_bridge.imgmsg_to_cv2(image_msg, "bgr8")
if self.wall_detected == "yes":
mean = np.mean(self.image)
            print(mean)
if mean < 200 and self.maze != "maze_start":
self.maze = "maze_start"
self.commander()
self._pub_2.publish(self.maze)
def monitoring(self, image_msg):
if self.selecting_sub_image == "compressed":
np_arr = np.fromstring(image_msg.data, np.uint8)
self.image = cv2.imdecode(np_arr, cv2.IMREAD_COLOR)
else:
self.image = self._cv_bridge.imgmsg_to_cv2(image_msg, "bgr8")
        print(self.wall_detected)
if self.wall_detected == "yes":
mean = np.mean(self.image)
            print(mean)
if mean < 30:
self.maze = "maze_start"
self.commander()
if self.traffic_light_detected == 'yes':
#if self.image_show == 'yes':
#self.draw_traffic_light()
self.traffic_light_detected = 'no'
if self.stop_bar_detected == 'yes':
if self.image_show == 'yes':
self.draw_stop_bar()
self.stop_bar_detected = 'no'
#cv2.putText(self.image, self.state, (100, 100),cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 0), 2)
if self.image_show == 'yes':
            cv2.imshow("monitoring", self.image)
            cv2.waitKey(1)
def commander(self):
if self.stop_bar_state == 'stop' or self.parking == 'parking_lot_detected' or (self.traffic_light_color == 'red' and self.traffic_light_x > 550) or self.maze == "maze_start":
self.state = 'stop'
elif self.stop_bar_state == 'slowdown' or self.signal_sign == 'WARNING' or self.find_object > 0:
self.state = 'slowdown'
elif self.traffic_light_color == 'fast':
self.state = 'fast'
else:
self.state = 'go'
self._pub_1.publish(self.state)
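        # Note the priority encoded by the if/elif chain above: 'stop'
        # conditions always win over 'slowdown', 'slowdown' over 'fast',
        # and 'go' is the fallback state.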
def receiver_stop_bar(self, stop_bar):
self.stop_bar_detected = 'yes'
self.stop_bar_distance = stop_bar.distance
self.stop_bar_state = stop_bar.state
self.stop_bar_point1_x = stop_bar.position1_x
self.stop_bar_point1_y = stop_bar.position1_y
self.stop_bar_point2_x = stop_bar.position2_x
self.stop_bar_point2_y = stop_bar.position2_y
self.commander()
def receiver_traffic_light(self, traffic_light):
self.traffic_light_color = traffic_light.color
self.traffic_light_x = traffic_light.position_x
self.traffic_light_y = traffic_light.position_y
self.traffic_light_detected = 'yes'
self.commander()
def receiver_parking(self, parking):
self.parking = parking.data
self.commander()
def receiver_maze(self, maze):
self.maze = maze.data
if self.maze == "maze_end":
self.commander()
def receiver_signal_sign(self, signal_sign):
self.signal_sign = signal_sign.data
self.commander()
def receiver_object_find(self, find_object):
if len(find_object.data) > 0:
self.find_object = 10
            print('yes')
elif self.find_object > 0:
self.find_object -= 1
            print(self.find_object)
self.commander()
def draw_traffic_light(self):
self.image = cv2.circle(self.image, (self.traffic_light_x, self.traffic_light_y), 10, (0, 0, 255), thickness=3, lineType=8, shift=0)
if self.traffic_light_color == 'red':
cv2.putText(self.image, self.traffic_light_color, (self.traffic_light_x + self.traffic_light_w/3, self.traffic_light_y - 30), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 255), 2)
if self.traffic_light_color == 'green':
cv2.putText(self.image, self.traffic_light_color, (self.traffic_light_x + self.traffic_light_w/3, self.traffic_light_y - 30), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 0), 2)
def draw_stop_bar(self):
cv2.line(self.image, (self.stop_bar_point1_x, self.stop_bar_point1_y), (self.stop_bar_point2_x, self.stop_bar_point2_y), (255, 255, 0), 5)
cv2.putText(self.image, str(self.stop_bar_distance), ((self.stop_bar_point1_x + self.stop_bar_point2_x) / 2, (self.stop_bar_point1_y + self.stop_bar_point2_y)/2 - 50),cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 0), 2)
def main(self):
rospy.spin()
    def callback2(self, scan):
        # Wall detection below is currently disabled by this early return
        return
        scan_arr = np.zeros((180), np.float16)
        for i in range(0, 90):
            # i is an angle in degrees, so convert before taking the sine
            scan_arr[i] = scan.ranges[i] * math.sin(math.radians(i))
        count_between_distances = 0
        distance1 = 0.1
        distance2 = 0.4
        for i in range(180):
            if scan_arr[i] > distance1 and scan_arr[i] < distance2:
                count_between_distances += 1
        print(count_between_distances)
        if count_between_distances > 13:
            self.wall_detected = "yes"
        else:
            self.wall_detected = "no"
if __name__ == '__main__':
rospy.init_node('core')
node = Core()
node.main()
|
game_controller.py
|
import json
import queue
try:
from GUI import GUI
from camelot_error import CamelotError
from camelot_error_manager import CamelotErrorManager
from platform_IO_communication import PlatformIOCommunication
from camelot_action import CamelotAction
from camelot_world_state import CamelotWorldState
from utilities import parse_json, replace_all, get_action_list, str2bool
from camelot_input_multiplexer import CamelotInputMultiplexer
import shared_variables
except (ModuleNotFoundError, ImportError):
from .GUI import GUI
from .camelot_error import CamelotError
from .camelot_error_manager import CamelotErrorManager
from .platform_IO_communication import PlatformIOCommunication
from .camelot_action import CamelotAction
from .camelot_world_state import CamelotWorldState
from .utilities import parse_json, replace_all, get_action_list, str2bool
from .camelot_input_multiplexer import CamelotInputMultiplexer
from . import shared_variables
from ev_pddl.action import Action
from ev_pddl.PDDL import PDDL_Parser
from ev_pddl.domain import Domain
from ev_pddl.world_state import WorldState
import logging
import multiprocessing
import debugpy
import time
import jsonpickle
import copy
class GameController:
def __init__(self, GUI = True):
self._domain_path, self._problem_path = shared_variables.get_domain_and_problem_path()
shared_variables.action_list = get_action_list()
self._parser = PDDL_Parser()
self._domain = self._parser.parse_domain(domain_filename = self._domain_path)
self._problem = self._parser.parse_problem(problem_filename = self._problem_path)
self._camelot_action = CamelotAction()
self._player = ''
self.input_dict = {}
self.current_state = None
self.queueIn_GUI = multiprocessing.Queue()
self.queueOut_GUI = multiprocessing.Queue()
self._platform_communication = PlatformIOCommunication()
self.active_GUI = GUI
self.error_list = []
self._received_action_from_platform = None
def start_platform_communication(self):
"""
A method that is used to start the platform communication. It follows the communication controller steps.
"""
self._platform_communication.start()
logging.info("Platform communication started")
logging.info("Platform communication starting handshake phase 1")
message = self._platform_communication.communication_protocol_phase_messages['PHASE_2']['message_3'] + "Camelot"
logging.info("GameController: Sending message: " + message)
result = self._platform_communication.send_message(message, inizialization=True)
logging.info("GameController: received message: " + str(result))
if result['text'] == self._platform_communication.communication_protocol_phase_messages['PHASE_2']['message_4']:
logging.info("Platform communication handshake phase 1 finished")
return True
else:
raise Exception("Platform communication failed")
    def _platform_communication_phase_3_4(self, domain: Domain, world_state: WorldState):
"""
A method that is used to handle phase 3 and 4 of the communication protocol.
"""
logging.info("Platform communication starting handshake phase 3")
logging.info("Platform communication waiting for phase 3 to start")
while self._platform_communication.get_handshake_phase() != "PHASE_3":
time.sleep(0.1)
logging.info("Platform communication phase 3 started")
message_text = {
"text" : self._platform_communication.communication_protocol_phase_messages['PHASE_3']['message_6'],
"domain" : domain.to_PDDL(),
"problem" : wolrd_state.to_PDDL()
}
logging.info("GameController: Sending message: " + str(message_text))
result = self._platform_communication.send_message(message_text, inizialization=True)
logging.info("GameController: received message: " + str(result))
if result['text'] == self._platform_communication.communication_protocol_phase_messages['PHASE_4']['message_9']:
self._platform_communication.send_message_link = result['add_message_url'].replace('/', '')
logging.info("Send message link: /" + self._platform_communication.send_message_link)
self._platform_communication.receive_message_link = result['get_message_url'].replace('/', '')
logging.info("Receive message link: /" + self._platform_communication.receive_message_link)
logging.info("Platform communication setting urls finished")
logging.info("Platform communication handshake phase 4 finished")
return True
else:
raise Exception("Platform communication failed")
def _initialize(self):
"""
        This method initializes the components of the game controller that we don't want to set up in __init__,
        but only after the first stage of the communication handshake has finished.
"""
self.camelot_input_multiplex = CamelotInputMultiplexer()
self.error_manager = CamelotErrorManager()
def start_game(self, game_loop = True):
"""A method that is used to start the game loop
Parameters
----------
game_loop : default: True
Variable used for debugging purposes.
"""
self._initialize()
initial_state = CamelotWorldState(self._domain, self._problem, wait_for_actions= game_loop)
initial_state.create_camelot_env_from_problem()
initial_state.check_domain_actions_available_to_use()
self._platform_communication_phase_3_4(initial_state.domain, initial_state.world_state)
self._player = initial_state.find_player(self._problem)
self._create_ingame_actions(game_loop)
self._camelot_action.action("ShowMenu", wait=game_loop)
self.current_state = initial_state
self.GUI_process = multiprocessing.Process(target=GUI, args=(self.queueIn_GUI, self.queueOut_GUI))
if self.active_GUI:
self.GUI_process.start()
while game_loop:
received = self.camelot_input_multiplex.get_input_message()
if received == 'input Selected Start':
self._camelot_action.action("HideMenu")
self._camelot_action.action('EnableInput')
self._main_game_controller(game_loop)
def _create_ingame_actions(self, game_loop = True):
"""A method that is used to create the actions that are used in the game.
        It parses pddl_predicates_to_camelot.json and integrates the content into the game.
"""
json_p = parse_json("pddl_predicates_to_camelot")
for item in self._problem.initial_state:
if item.predicate.name in json_p:
if item.predicate.name == "adjacent":
self._adjacent_predicate_handling(item, json_p, game_loop)
elif item.predicate.name == "stored":
self._stored_predicate_handling(item, json_p)
else:
sub_dict = {
'$param1$' : item.entities[0].name,
'$param2$' : self._player.name
}
# execute declaration part
for istr in json_p[item.predicate.name]['declaration']:
action_name, action_parameters, wait = self._get_camelot_action_parameters_from_json(istr, sub_dict)
self._camelot_action.action(action_name, action_parameters, wait=wait)
# prepare input dict
input_key = replace_all(json_p[item.predicate.name]['input']["message"], sub_dict)
self.input_dict[input_key] = []
                    # populate the input dict with instructions to use when input is called
for istr in json_p[item.predicate.name]['response']:
action_name, action_parameters, wait = self._get_camelot_action_parameters_from_json(istr, sub_dict)
action_dict = {
'action_name' : action_name,
'action_parameters' : action_parameters,
'wait' : wait
}
self.input_dict[input_key].append(action_dict)
def _adjacent_predicate_handling(self, item, json_p, game_loop = True):
"""A method that is used to manage the places declared on the domain
It declares the input function that is used from Camelot to enable an action to happen. In this case the action is the exit action.
It also creates the responce to the action, so when camelot triggers the input command the systems knows the responce.
Parameters
----------
game_loop : boolen, default - True
boolean used for debugging porpuses.
"""
sub_dict = {
'$param1$' : item.entities[0].name,
'$param2$' : self._player.name,
'$param3$' : item.entities[1].name,
}
# execute declaration part
for istr in json_p['adjacent']['declaration']:
action_name, action_parameters, wait = self._get_camelot_action_parameters_from_json(istr, sub_dict)
self._camelot_action.action(action_name, action_parameters, wait=wait)
# prepare input dict
loc, entry = item.entities[0].name.split('.')
if 'end' in entry.lower():
input_key = replace_all(json_p['adjacent']['input']['end'], sub_dict)
self.input_dict[input_key] = []
else:
input_key = replace_all(json_p['adjacent']['input']['door'], sub_dict)
self.input_dict[input_key] = []
        # populate the input dict with instructions to use when input is called
for istr in json_p['adjacent']['response']:
action_name, action_parameters, wait = self._get_camelot_action_parameters_from_json(istr, sub_dict)
action_dict = {
'action_name' : action_name,
'action_parameters' : action_parameters,
'wait' : wait
}
self.input_dict[input_key].append(action_dict)
def _stored_predicate_handling(self, item, json_p):
"""A method that is used to popolate the input_dict with the actions that are used to manage the stored predicates.
"""
sub_dict = {
'$param1$' : item.entities[0].name,
'$param2$' : self._player.name,
'$param3$' : item.entities[1].name,
}
input_key = replace_all(json_p[item.predicate.name]['input']["message"], sub_dict)
self.input_dict[input_key] = []
for istr in json_p[item.predicate.name]['response']:
action_name, action_parameters, wait = self._get_camelot_action_parameters_from_json(istr, sub_dict)
action_dict = {
'action_name' : action_name,
'action_parameters' : action_parameters,
'wait' : wait
}
self.input_dict[input_key].append(action_dict)
def _get_camelot_action_parameters_from_json(self, istr : dict, sub_dict : dict):
"""
Utility method used to create the parameters of the camelot action using the json file.
"""
action_name = istr.get('action_name')
action_parameters = []
for item in istr.get('action_args'):
if item in sub_dict.keys():
action_parameters.append(sub_dict[item])
elif any(k in item for k in sub_dict.keys()):
action_parameters.append(replace_all(item, sub_dict))
elif item == "TRUE":
action_parameters.append(True)
elif item == "FALSE":
action_parameters.append(False)
else:
action_parameters.append(item)
wait = str2bool(istr.get('wait'))
return action_name, action_parameters, wait
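    # Illustrative example (hypothetical JSON entry) of what this method
    # expects and returns:
    #     istr = {"action_name": "SetPosition", "action_args": ["$param1$", "TRUE"], "wait": "True"}
    #     sub_dict = {"$param1$": "luca"}
    #     -> ("SetPosition", ["luca", True], True)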
def _main_game_controller(self, game_loop = True):
"""A method that is used as main game controller
Parameters
----------
game_loop : boolen, default - True
boolean used for debugging porpuses.
"""
exit = game_loop
if self._player != '':
self._camelot_action.action("SetCameraFocus",[self._player.name])
self._camelot_action.success_messages = queue.Queue()
self._camelot_action.debug = True
while exit:
self._input_handler()
self._success_message_handler()
self._location_handler()
self._incoming_messages_handler()
self._check_error_messages()
# self.queue_GUI.close()
# self.queue_GUI.join_thread()
# self.GUI_process.join()
def _success_message_handler(self):
"""A method that is used to handle the success message and update the world state
"""
try:
received = self._camelot_action.success_messages.get_nowait()
logging.info("GameController: Success message received: " + received)
self._apply_camelot_message(received)
except queue.Empty:
return False
return True
def _input_handler(self) -> bool:
"""
A method that is used to handle the input from Camelot.
Parameters
----------
None
Returns
-------
bool -> True if received message from input queue and responded to it; False if not.
"""
try:
received = self.camelot_input_multiplex.get_input_message(no_wait=True)
logging.info("GameController: got input message \"%s\"" %( received ))
if received in self.input_dict.keys():
for item in self.input_dict[received]:
action_name = item['action_name']
action_parameters = item['action_parameters']
wait = item['wait']
self._camelot_action.action(action_name, action_parameters, wait=wait)
elif received == "input Key Pause":
pass
except queue.Empty:
return False
return True
def _location_handler(self):
"""
A method that is used to handle the location inputs from Camelot.
Parameters
----------
None
"""
try:
#TODO: evolve to handle multiple location inputs
received = self.camelot_input_multiplex.get_location_message(no_wait=True)
logging.info("GameController: got location message \"%s\"" %( received ))
if received.startswith(shared_variables.location_message_prefix[2:]):
#self.queue_GUI.put(received)
self._apply_camelot_message(received)
except queue.Empty:
return False
except Exception as inst:
logging.exception("GameController: Exception in location handler: %s" %( inst ))
return False
return True
def _incoming_messages_handler(self):
"""
A method that is used to handle the incoming messages that can come from the GUI or the evaluation platform.
Parameters
----------
None
"""
try:
received = self.queueOut_GUI.get_nowait()
logging.debug("GameController: got external message \"%s\"" %( received ))
if "CI" in received:
# handle Camelot instruction
message = received["CI"]
self._camelot_action.send_camelot_instruction(message)
elif "PA" in received:
# handle PDDL action
message = received["PA"]
self._incoming_action_handler(message)
except queue.Empty:
pass
action = self._platform_communication.receive_message()
if action is not None:
action_text = action[0]['text']
logging.debug("GameController: got external message from platform: \"%s\"" %( action_text ))
self._incoming_action_handler(action_text)
def _check_error_messages(self):
"""
This method is used to check if there are any error messages.
"""
error_message = self.camelot_input_multiplex.get_error_message()
if error_message is not None:
error = CamelotError(error_message)
self.error_list.append(error)
def _incoming_action_handler(self, message):
"""
This method is used to handle the message that represents an action.
It first creates a PDDL action, and then generates the camelot instructions that need to be sent to camelot for execution.
Parameters
----------
message: str
The message that represents the action.
"""
# move-between-location(luca, Blacksmith, AlchemyShop, Blacksmith.Door, AlchemyShop.Door)
action = self.current_state.create_action_from_incoming_message(message)
self._received_action_from_platform = copy.deepcopy(action)
camelot_action_parameters = self._camelot_action.generate_camelot_action_parameters_from_action(action)
success = self._camelot_action.actions(camelot_action_parameters)
if success:
changed_relations = self.current_state.apply_action(action)
if action.name.startswith("instantiate_object"):
json_p = parse_json("pddl_predicates_to_camelot")
stored = [item[1] for item in changed_relations if item[0] == "new" and item[1].predicate.name == "stored"]
self._stored_predicate_handling(stored[0], json_p)
self.queueIn_GUI.put(self.current_state.world_state)
self._platform_communication.send_message(self._format_changed_relations_for_external_message(changed_relations))
def _apply_camelot_message(self, message):
"""
This method is used to apply a message that is received from Camelot to the current state.
Parameters
----------
message: str
The message that will be applied.
"""
changed_relations = self.current_state.apply_camelot_message(message, self._received_action_from_platform)
if len(changed_relations) > 0:
self.queueIn_GUI.put(self.current_state.world_state)
self._platform_communication.send_message(self._format_changed_relations_for_external_message(changed_relations))
def _format_changed_relations_for_external_message(self, changed_relations):
"""
This method is used to format a message for the external communication.
Parameters
----------
message: list
The list of relations that changed.
"""
relation_list = []
for item in changed_relations:
if type(item) == list:
for subitem in item:
i = (subitem[0], subitem[1].to_PDDL())
relation_list.append(i)
elif type(item) == tuple and len(item) == 2:
i = (item[0], item[1].to_PDDL())
relation_list.append(i)
else:
logging.debug("GameController(_format_changed_relations_for_external_message): Invalid relation: %s" %( item ))
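        # The encoded payload is a list of (change_type, pddl_string) pairs,
        # e.g. (illustrative values): [("new", "(at luca Blacksmith)")]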
json_message = jsonpickle.encode(relation_list)
return json_message
|
settings_20210906110948.py
|
"""
Django settings for First_Wish project.
Generated by 'django-admin startproject' using Django 3.2.
For more information on this file, see
https://docs.djangoproject.com/en/3.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.2/ref/settings/
"""
from pathlib import Path
import os
import environ
import threading
import schedule
import time
from First_Wish_Main_App.views import decrease_day_count_and_send_bday_mails
env_path = os.path.join(os.path.dirname(__file__), '../.env')
environ.Env.read_env(env_path)
# schedule.every().day.at("11:00").do(decrease_day_count_and_send_bday_mails)
# ///////////////////////////////SCHEDULE THE ENABLE BUTTON STARTS////////////////////
# Schedule the task at 11:10 every day
schedule.every().day.at("11:10").do(decrease_day_count_and_send_bday_mails)
# schedule.every().day.at("01:00").do(delete_task_and_add_store_datewise)
def func():
while True:
# print("======Runnning==========")
schedule.run_pending()
time.sleep(1)
t1 = threading.Thread(target=func)
t1.start()
# ///////////////////////////////SCHEDULE THE ENABLE BUTTON ENDS////////////////////
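# Note: Django imports settings.py in every process that loads it (the
# runserver autoreloader, each WSGI worker, management commands), so this
# scheduler thread may be started more than once; a management command or a
# dedicated scheduler such as celery beat is the usual home for recurring jobs.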
# Build paths inside the project like this: BASE_DIR / 'subdir'.
BASE_DIR = Path(__file__).resolve().parent.parent
templates_path=os.path.join(BASE_DIR,'templates')
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.2/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = os.environ.get('DJANGO_SECRET_KEY')
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'First_Wish_Main_App',
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'First_Wish.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [templates_path],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'First_Wish.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.2/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': BASE_DIR / 'db.sqlite3',
}
}
# Password validation
# https://docs.djangoproject.com/en/3.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/3.2/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.2/howto/static-files/
STATIC_URL = '/static/'
# Default primary key field type
# https://docs.djangoproject.com/en/3.2/ref/settings/#default-auto-field
DEFAULT_AUTO_FIELD = 'django.db.models.BigAutoField'
STATIC_ROOT = os.path.join(BASE_DIR, 'staticfiles')
STATICFILES_DIRS = [
os.path.join(BASE_DIR, "static"),
]
|
test.py
|
import threading, time

def nothing():
    time.sleep(10)

t = threading.Thread(target=nothing)
t.start()
name = t.name

# Map thread name -> human-readable description of what that thread runs
thread_descriptions = {name: 'nothing'}
for i in range(100):
    # Names of tracked threads that are still running
    unfinished_thread_names = [th.name for th in threading.enumerate()
                               if th.name in thread_descriptions]
    # Descriptions of those still-running threads
    status = [tdescr for tname, tdescr in thread_descriptions.items()
              if tname in unfinished_thread_names]
    print(threading.enumerate(), unfinished_thread_names, status)
    time.sleep(0.5)
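# For polling a single thread, the standard Thread.is_alive() is simpler:
#     while t.is_alive():
#         time.sleep(0.5)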
|
3_threading.py
|
import threading
import time

a = 3

def func1(x, results):
    time.sleep(1)
    print('a : ' + str(a))
    results[x] = 5  # store a result keyed by this worker's argument
    return x * x

if __name__ == '__main__':
    results = dict()  # avoid shadowing the built-in name 'dict'
    t1 = threading.Thread(target=func1, args=(1, results,))
    t2 = threading.Thread(target=func1, args=(2, results,))
    t1.start()
    t2.start()
    t1.join()
    t2.join()
    print(results)
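    # Sketch of an alternative using the standard library's thread pool, which
    # returns worker results directly instead of via a shared dict
    # (square() is a hypothetical pure version of func1):
    #     from concurrent.futures import ThreadPoolExecutor
    #     def square(x): return x * x
    #     with ThreadPoolExecutor(max_workers=2) as ex:
    #         print(list(ex.map(square, [1, 2])))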
|